/*
 * AMR narrowband decoder
 * Copyright (c) 2006-2007 Robert Swain
 * Copyright (c) 2009 Colin McQuillan
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <assert.h>
#include <string.h>
#include <math.h>

#include "avcodec.h"
#include "get_bits.h"
#include "libavutil/common.h"
#include "celp_math.h"
#include "celp_filters.h"
#include "acelp_filters.h"
#include "acelp_vectors.h"
#include "acelp_pitch_delay.h"
#include "lsp.h"
#include "amr.h"

#include "amrnbdata.h"

#define AMR_BLOCK_SIZE    160               ///< samples per frame
#define AMR_SAMPLE_BOUND  32768.0           ///< threshold for synthesis overflow

#define AMR_SAMPLE_SCALE  (2.0 / 32768.0)

#define PRED_FAC_MODE_12k2  0.65

#define LSF_R_FAC        (8000.0 / 32768.0) ///< LSF residual tables to Hertz
#define MIN_LSF_SPACING  (50.0488 / 8000.0) ///< Ensures stability of LPC filter
#define PITCH_LAG_MIN_MODE_12k2  18         ///< Lower bound on decoded lag search in 12.2kbit/s mode

#define MIN_ENERGY  -14.0

#define SHARP_MAX  0.79449462890625

#define AMR_TILT_RESPONSE  22

#define AMR_TILT_GAMMA_T  0.8

#define AMR_AGC_ALPHA  0.9

typedef struct AMRContext {
    AMRNBFrame frame;
    uint8_t    bad_frame_indicator;
    enum Mode  cur_frame_mode;

    int16_t prev_lsf_r[LP_FILTER_ORDER];
    double  lsp[4][LP_FILTER_ORDER];
    double  prev_lsp_sub4[LP_FILTER_ORDER];

    float lsf_q[4][LP_FILTER_ORDER];
    float lsf_avg[LP_FILTER_ORDER];

    float lpc[4][LP_FILTER_ORDER];

    uint8_t pitch_lag_int;

    float excitation_buf[PITCH_DELAY_MAX + LP_FILTER_ORDER + 1 + AMR_SUBFRAME_SIZE];
    float *excitation;

    float pitch_vector[AMR_SUBFRAME_SIZE];
    float fixed_vector[AMR_SUBFRAME_SIZE];

    float prediction_error[4];
    float pitch_gain[5];
    float fixed_gain[5];

    float   beta;
    uint8_t diff_count;
    uint8_t hang_count;

    float   prev_sparse_fixed_gain;
    uint8_t prev_ir_filter_nr;
    uint8_t ir_filter_onset;

    float postfilter_mem[10];
    float tilt_mem;
    float postfilter_agc;
    float high_pass_mem[2];

    float samples_in[LP_FILTER_ORDER + AMR_SUBFRAME_SIZE];

} AMRContext;

static void weighted_vector_sumd(double *out, const double *in_a,
                                 const double *in_b, double weight_coeff_a,
                                 double weight_coeff_b, int length)
{
    int i;

    for (i = 0; i < length; i++)
        out[i] = weight_coeff_a * in_a[i]
               + weight_coeff_b * in_b[i];
}

static av_cold int amrnb_decode_init(AVCodecContext *avctx)
{
    AMRContext *p = avctx->priv_data;
    int i;

    avctx->sample_fmt = AV_SAMPLE_FMT_FLT;

    // p->excitation always points to the same position in p->excitation_buf
    p->excitation = &p->excitation_buf[PITCH_DELAY_MAX + LP_FILTER_ORDER + 1];

    for (i = 0; i < LP_FILTER_ORDER; i++) {
        p->prev_lsp_sub4[i] = lsp_sub4_init[i] * 1000 / (float)(1 << 15);
        p->lsf_avg[i] = p->lsf_q[3][i] = lsp_avg_init[i] / (float)(1 << 15);
    }

    for (i = 0; i < 4; i++)
        p->prediction_error[i] = MIN_ENERGY;

    return 0;
}

static enum Mode unpack_bitstream(AMRContext *p, const uint8_t *buf,
                                  int buf_size)
{
    GetBitContext gb;
    enum Mode mode;

    init_get_bits(&gb, buf, buf_size * 8);

    // Decode the first octet.
    skip_bits(&gb, 1);                        // padding bit
    mode = get_bits(&gb, 4);                  // frame type
    p->bad_frame_indicator = !get_bits1(&gb); // quality bit
    skip_bits(&gb, 2);                        // two padding bits

    if (mode < MODE_DTX)
        ff_amr_bit_reorder((uint16_t *) &p->frame, sizeof(AMRNBFrame), buf + 1,
                           amr_unpacking_bitmaps_per_mode[mode]);

    return mode;
}
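
/* For reference, the first octet read above has the following layout
 * (MSB first), matching the frame header octet of the AMR file/storage
 * format:
 *
 *   +---+---+---+---+---+---+---+---+
 *   | P |   frame type  | Q | P | P |
 *   +---+---+---+---+---+---+---+---+
 *
 * P = padding, Q = quality bit (1 = good frame). The remaining buf_size - 1
 * octets carry the packed speech bits, which ff_amr_bit_reorder() rearranges
 * into the fields of AMRNBFrame according to the per-mode bitmap tables.
 */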

static void interpolate_lsf(float lsf_q[4][LP_FILTER_ORDER], float *lsf_new)
{
    int i;

    for (i = 0; i < 4; i++)
        ff_weighted_vector_sumf(lsf_q[i], lsf_q[3], lsf_new,
                                0.25 * (3 - i), 0.25 * (i + 1),
                                LP_FILTER_ORDER);
}
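
/* The loop above interpolates between the previous frame's LSF vector
 * (still stored in lsf_q[3] when the call is made) and the new one:
 *
 *   subframe 0: 0.75 * old + 0.25 * new
 *   subframe 1: 0.50 * old + 0.50 * new
 *   subframe 2: 0.25 * old + 0.75 * new
 *   subframe 3: 0.00 * old + 1.00 * new
 *
 * so lsf_q[3] ends up holding the new vector for the next frame.
 */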

static void lsf2lsp_for_mode12k2(AMRContext *p, double lsp[LP_FILTER_ORDER],
                                 const float lsf_no_r[LP_FILTER_ORDER],
                                 const int16_t *lsf_quantizer[5],
                                 const int quantizer_offset,
                                 const int sign, const int update)
{
    int16_t lsf_r[LP_FILTER_ORDER]; // residual LSF vector
    float lsf_q[LP_FILTER_ORDER];   // quantized LSF vector
    int i;

    for (i = 0; i < LP_FILTER_ORDER >> 1; i++)
        memcpy(&lsf_r[i << 1], &lsf_quantizer[i][quantizer_offset],
               2 * sizeof(*lsf_r));

    if (sign) {
        lsf_r[4] *= -1;
        lsf_r[5] *= -1;
    }

    if (update)
        memcpy(p->prev_lsf_r, lsf_r, LP_FILTER_ORDER * sizeof(*lsf_r));

    for (i = 0; i < LP_FILTER_ORDER; i++)
        lsf_q[i] = lsf_r[i] * (LSF_R_FAC / 8000.0) + lsf_no_r[i] * (1.0 / 8000.0);

    ff_set_min_dist_lsf(lsf_q, MIN_LSF_SPACING, LP_FILTER_ORDER);

    if (update)
        interpolate_lsf(p->lsf_q, lsf_q);

    ff_acelp_lsf2lspd(lsp, lsf_q, LP_FILTER_ORDER);
}

static void lsf2lsp_5(AMRContext *p)
{
    const uint16_t *lsf_param = p->frame.lsf;
    float lsf_no_r[LP_FILTER_ORDER]; // LSFs without the residual vector
    const int16_t *lsf_quantizer[5];
    int i;

    lsf_quantizer[0] = lsf_5_1[lsf_param[0]];
    lsf_quantizer[1] = lsf_5_2[lsf_param[1]];
    lsf_quantizer[2] = lsf_5_3[lsf_param[2] >> 1];
    lsf_quantizer[3] = lsf_5_4[lsf_param[3]];
    lsf_quantizer[4] = lsf_5_5[lsf_param[4]];

    for (i = 0; i < LP_FILTER_ORDER; i++)
        lsf_no_r[i] = p->prev_lsf_r[i] * LSF_R_FAC * PRED_FAC_MODE_12k2 + lsf_5_mean[i];

    lsf2lsp_for_mode12k2(p, p->lsp[1], lsf_no_r, lsf_quantizer, 0, lsf_param[2] & 1, 0);
    lsf2lsp_for_mode12k2(p, p->lsp[3], lsf_no_r, lsf_quantizer, 2, lsf_param[2] & 1, 1);

    // interpolate LSP vectors at subframes 1 and 3
    weighted_vector_sumd(p->lsp[0], p->prev_lsp_sub4, p->lsp[1], 0.5, 0.5,
                         LP_FILTER_ORDER);
    weighted_vector_sumd(p->lsp[2], p->lsp[1],        p->lsp[3], 0.5, 0.5,
                         LP_FILTER_ORDER);
}

static void lsf2lsp_3(AMRContext *p)
{
    const uint16_t *lsf_param = p->frame.lsf;
    int16_t lsf_r[LP_FILTER_ORDER]; // residual LSF vector
    float lsf_q[LP_FILTER_ORDER];   // quantized LSF vector
    const int16_t *lsf_quantizer;
    int i, j;

    lsf_quantizer = (p->cur_frame_mode == MODE_7k95 ? lsf_3_1_MODE_7k95 : lsf_3_1)[lsf_param[0]];
    memcpy(lsf_r, lsf_quantizer, 3 * sizeof(*lsf_r));

    lsf_quantizer = lsf_3_2[lsf_param[1] << (p->cur_frame_mode <= MODE_5k15)];
    memcpy(lsf_r + 3, lsf_quantizer, 3 * sizeof(*lsf_r));

    lsf_quantizer = (p->cur_frame_mode <= MODE_5k15 ? lsf_3_3_MODE_5k15 : lsf_3_3)[lsf_param[2]];
    memcpy(lsf_r + 6, lsf_quantizer, 4 * sizeof(*lsf_r));

    // calculate mean-removed LSF vector and add mean
    for (i = 0; i < LP_FILTER_ORDER; i++)
        lsf_q[i] = (lsf_r[i] + p->prev_lsf_r[i] * pred_fac[i]) * (LSF_R_FAC / 8000.0) + lsf_3_mean[i] * (1.0 / 8000.0);

    ff_set_min_dist_lsf(lsf_q, MIN_LSF_SPACING, LP_FILTER_ORDER);

    // store data for computing the next frame's LSFs
    interpolate_lsf(p->lsf_q, lsf_q);
    memcpy(p->prev_lsf_r, lsf_r, LP_FILTER_ORDER * sizeof(*lsf_r));

    ff_acelp_lsf2lspd(p->lsp[3], lsf_q, LP_FILTER_ORDER);

    // interpolate LSP vectors at subframes 1, 2 and 3
    for (i = 1; i <= 3; i++)
        for (j = 0; j < LP_FILTER_ORDER; j++)
            p->lsp[i - 1][j] = p->prev_lsp_sub4[j] +
                (p->lsp[3][j] - p->prev_lsp_sub4[j]) * 0.25 * i;
}

static void decode_pitch_lag_1_6(int *lag_int, int *lag_frac, int pitch_index,
                                 const int prev_lag_int, const int subframe)
{
    if (subframe == 0 || subframe == 2) {
        if (pitch_index < 463) {
            *lag_int  = (pitch_index + 107) * 10923 >> 16;
            *lag_frac = pitch_index - *lag_int * 6 + 105;
        } else {
            *lag_int  = pitch_index - 368;
            *lag_frac = 0;
        }
    } else {
        *lag_int  = ((pitch_index + 5) * 10923 >> 16) - 1;
        *lag_frac = pitch_index - *lag_int * 6 - 3;
        *lag_int += av_clip(prev_lag_int - 5, PITCH_LAG_MIN_MODE_12k2,
                            PITCH_DELAY_MAX - 9);
    }
}
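
/* In the fractional branch above, "* 10923 >> 16" is a fixed-point division
 * by 6 (10923 / 65536 ~= 1/6). A quick sanity check of the even-subframe
 * mapping:
 *
 *   pitch_index =   0  ->  lag_int = 17, lag_frac = 3  (lag = 17 3/6)
 *   pitch_index = 462  ->  lag_int = 94, lag_frac = 3  (lag = 94 3/6)
 *   pitch_index = 463  ->  lag_int = 95, lag_frac = 0
 *
 * i.e. 1/6-sample resolution up to a lag of 94 3/6 and integer lags from 95
 * upwards, which is the 12.2 kbit/s lag grid this table-free formula encodes.
 */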

static void decode_pitch_vector(AMRContext *p,
                                const AMRNBSubframe *amr_subframe,
                                const int subframe)
{
    int pitch_lag_int, pitch_lag_frac;
    enum Mode mode = p->cur_frame_mode;

    if (p->cur_frame_mode == MODE_12k2) {
        decode_pitch_lag_1_6(&pitch_lag_int, &pitch_lag_frac,
                             amr_subframe->p_lag, p->pitch_lag_int,
                             subframe);
    } else
        ff_decode_pitch_lag(&pitch_lag_int, &pitch_lag_frac,
                            amr_subframe->p_lag,
                            p->pitch_lag_int, subframe,
                            mode != MODE_4k75 && mode != MODE_5k15,
                            mode <= MODE_6k7 ? 4 : (mode == MODE_7k95 ? 5 : 6));

    p->pitch_lag_int = pitch_lag_int; // store previous lag in a uint8_t

    pitch_lag_frac <<= (p->cur_frame_mode != MODE_12k2);

    pitch_lag_int += pitch_lag_frac > 0;

    /* Calculate the pitch vector by interpolating the past excitation at the
       pitch lag using a b60 hamming windowed sinc function. */
    ff_acelp_interpolatef(p->excitation, p->excitation + 1 - pitch_lag_int,
                          ff_b60_sinc, 6,
                          pitch_lag_frac + 6 - 6 * (pitch_lag_frac > 0),
                          10, AMR_SUBFRAME_SIZE);

    memcpy(p->pitch_vector, p->excitation, AMR_SUBFRAME_SIZE * sizeof(float));
}

static void decode_10bit_pulse(int code, int pulse_position[8],
                               int i1, int i2, int i3)
{
    // coded using 7+3 bits with the 3 LSBs being, individually, the LSB of 1 of
    // the 3 pulses and the upper 7 bits being coded in base 5
    const uint8_t *positions = base_five_table[code >> 3];
    pulse_position[i1] = (positions[2] << 1) + ( code       & 1);
    pulse_position[i2] = (positions[1] << 1) + ((code >> 1) & 1);
    pulse_position[i3] = (positions[0] << 1) + ((code >> 2) & 1);
}

static void decode_8_pulses_31bits(const int16_t *fixed_index,
                                   AMRFixed *fixed_sparse)
{
    int pulse_position[8];
    int i, temp;

    decode_10bit_pulse(fixed_index[4], pulse_position, 0, 4, 1);
    decode_10bit_pulse(fixed_index[5], pulse_position, 2, 6, 5);

    // coded using 5+2 bits with the 2 LSBs being, individually, the LSB of 1 of
    // the 2 pulses and the upper 5 bits being coded in base 5
    temp = ((fixed_index[6] >> 2) * 25 + 12) >> 5;
    pulse_position[3] = temp % 5;
    pulse_position[7] = temp / 5;
    if (pulse_position[7] & 1)
        pulse_position[3] = 4 - pulse_position[3];
    pulse_position[3] = (pulse_position[3] << 1) + ( fixed_index[6]       & 1);
    pulse_position[7] = (pulse_position[7] << 1) + ((fixed_index[6] >> 1) & 1);

    fixed_sparse->n = 8;
    for (i = 0; i < 4; i++) {
        const int pos1   = (pulse_position[i]     << 2) + i;
        const int pos2   = (pulse_position[i + 4] << 2) + i;
        const float sign = fixed_index[i] ? -1.0 : 1.0;
        fixed_sparse->x[i    ] = pos1;
        fixed_sparse->x[i + 4] = pos2;
        fixed_sparse->y[i    ] = sign;
        fixed_sparse->y[i + 4] = pos2 < pos1 ? -sign : sign;
    }
}
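
/* Unpacking arithmetic used above, spelled out:
 *
 *   - base_five_table[code >> 3] interprets the upper 7 bits as a three-digit
 *     base-5 number; each digit d combined with its matching LSB b gives a
 *     pulse position on the 0..9 grid as 2*d + b.
 *   - ((fixed_index[6] >> 2) * 25 + 12) >> 5 maps the upper 5 bits (0..31)
 *     back onto 0..24, i.e. two base-5 digits recovered as temp % 5 and
 *     temp / 5, again combined with their LSBs as 2*d + b.
 *
 * The final sample positions are 4 * pulse_position + track index (0..3).
 */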

static void decode_fixed_sparse(AMRFixed *fixed_sparse, const uint16_t *pulses,
                                const enum Mode mode, const int subframe)
{
    assert(MODE_4k75 <= mode && mode <= MODE_12k2);

    if (mode == MODE_12k2) {
        ff_decode_10_pulses_35bits(pulses, fixed_sparse, gray_decode, 5, 3);
    } else if (mode == MODE_10k2) {
        decode_8_pulses_31bits(pulses, fixed_sparse);
    } else {
        int *pulse_position = fixed_sparse->x;
        int i, pulse_subset;
        const int fixed_index = pulses[0];

        if (mode <= MODE_5k15) {
            pulse_subset      = ((fixed_index >> 3) & 8) + (subframe << 1);
            pulse_position[0] = ( fixed_index       & 7) * 5 + track_position[pulse_subset];
            pulse_position[1] = ((fixed_index >> 3) & 7) * 5 + track_position[pulse_subset + 1];
            fixed_sparse->n = 2;
        } else if (mode == MODE_5k9) {
            pulse_subset      = ((fixed_index & 1) << 1) + 1;
            pulse_position[0] = ((fixed_index >> 1) & 7) * 5 + pulse_subset;
            pulse_subset      = (fixed_index >> 4) & 3;
            pulse_position[1] = ((fixed_index >> 6) & 7) * 5 + pulse_subset + (pulse_subset == 3 ? 1 : 0);
            fixed_sparse->n = pulse_position[0] == pulse_position[1] ? 1 : 2;
        } else if (mode == MODE_6k7) {
            pulse_position[0] = (fixed_index & 7) * 5;
            pulse_subset      = (fixed_index >> 2) & 2;
            pulse_position[1] = ((fixed_index >> 4) & 7) * 5 + pulse_subset + 1;
            pulse_subset      = (fixed_index >> 6) & 2;
            pulse_position[2] = ((fixed_index >> 8) & 7) * 5 + pulse_subset + 2;
            fixed_sparse->n = 3;
        } else { // mode <= MODE_7k95
            pulse_position[0] = gray_decode[ fixed_index        & 7];
            pulse_position[1] = gray_decode[(fixed_index >> 3)  & 7] + 1;
            pulse_position[2] = gray_decode[(fixed_index >> 6)  & 7] + 2;
            pulse_subset      = (fixed_index >> 9) & 1;
            pulse_position[3] = gray_decode[(fixed_index >> 10) & 7] + pulse_subset + 3;
            fixed_sparse->n = 4;
        }
        for (i = 0; i < fixed_sparse->n; i++)
            fixed_sparse->y[i] = (pulses[1] >> i) & 1 ? 1.0 : -1.0;
    }
}

static void pitch_sharpening(AMRContext *p, int subframe, enum Mode mode,
                             AMRFixed *fixed_sparse)
{
    // The spec suggests the current pitch gain is always used, but in other
    // modes the pitch and codebook gains are jointly quantized (sec 5.8.2)
    // so the codebook gain cannot depend on the quantized pitch gain.
    if (mode == MODE_12k2)
        p->beta = FFMIN(p->pitch_gain[4], 1.0);

    fixed_sparse->pitch_lag = p->pitch_lag_int;
    fixed_sparse->pitch_fac = p->beta;

    // Save pitch sharpening factor for the next subframe.
    // MODE_4k75 only updates on the 2nd and 4th subframes - this follows from
    // the fact that the gains for two subframes are jointly quantized.
    if (mode != MODE_4k75 || subframe & 1)
        p->beta = av_clipf(p->pitch_gain[4], 0.0, SHARP_MAX);
}

static float fixed_gain_smooth(AMRContext *p, const float *lsf,
                               const float *lsf_avg, const enum Mode mode)
{
    float diff = 0.0;
    int i;

    for (i = 0; i < LP_FILTER_ORDER; i++)
        diff += fabs(lsf_avg[i] - lsf[i]) / lsf_avg[i];

    // If diff is large for ten subframes, disable smoothing for a 40-subframe
    // hangover period.
    p->diff_count++;
    if (diff <= 0.65)
        p->diff_count = 0;

    if (p->diff_count > 10) {
        p->hang_count = 0;
        p->diff_count--; // don't let diff_count overflow
    }

    if (p->hang_count < 40) {
        p->hang_count++;
    } else if (mode < MODE_7k4 || mode == MODE_10k2) {
        const float smoothing_factor = av_clipf(4.0 * diff - 1.6, 0.0, 1.0);
        const float fixed_gain_mean = (p->fixed_gain[0] + p->fixed_gain[1] +
                                       p->fixed_gain[2] + p->fixed_gain[3] +
                                       p->fixed_gain[4]) * 0.2;
        return smoothing_factor * p->fixed_gain[4] +
               (1.0 - smoothing_factor) * fixed_gain_mean;
    }
    return p->fixed_gain[4];
}
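
/* When smoothing is active the returned gain is
 *
 *   g = sigma * g[4] + (1 - sigma) * mean(g[0..4]),
 *   sigma = clip(4 * diff - 1.6, 0, 1)
 *
 * so a stationary spectrum (small diff) pulls the fixed gain towards the
 * average of the last five subframes, while larger spectral deviations
 * (diff at or above 0.65) leave the decoded gain untouched.
 */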

static void decode_gains(AMRContext *p, const AMRNBSubframe *amr_subframe,
                         const enum Mode mode, const int subframe,
                         float *fixed_gain_factor)
{
    if (mode == MODE_12k2 || mode == MODE_7k95) {
        p->pitch_gain[4]   = qua_gain_pit [amr_subframe->p_gain    ]
                             * (1.0 / 16384.0);
        *fixed_gain_factor = qua_gain_code[amr_subframe->fixed_gain]
                             * (1.0 /  2048.0);
    } else {
        const uint16_t *gains;

        if (mode >= MODE_6k7) {
            gains = gains_high[amr_subframe->p_gain];
        } else if (mode >= MODE_5k15) {
            gains = gains_low [amr_subframe->p_gain];
        } else {
            // gain index is only coded in subframes 0,2 for MODE_4k75
            gains = gains_MODE_4k75[(p->frame.subframe[subframe & 2].p_gain << 1) + (subframe & 1)];
        }

        p->pitch_gain[4]   = gains[0] * (1.0 / 16384.0);
        *fixed_gain_factor = gains[1] * (1.0 /  4096.0);
    }
}

static void apply_ir_filter(float *out, const AMRFixed *in,
                            const float *filter)
{
    float filter1[AMR_SUBFRAME_SIZE],
          filter2[AMR_SUBFRAME_SIZE];
    int   lag = in->pitch_lag;
    float fac = in->pitch_fac;
    int i;

    if (lag < AMR_SUBFRAME_SIZE) {
        ff_celp_circ_addf(filter1, filter, filter, lag, fac,
                          AMR_SUBFRAME_SIZE);

        if (lag < AMR_SUBFRAME_SIZE >> 1)
            ff_celp_circ_addf(filter2, filter, filter1, lag, fac,
                              AMR_SUBFRAME_SIZE);
    }

    memset(out, 0, sizeof(float) * AMR_SUBFRAME_SIZE);
    for (i = 0; i < in->n; i++) {
        int   x = in->x[i];
        float y = in->y[i];
        const float *filterp;

        if (x >= AMR_SUBFRAME_SIZE - lag) {
            filterp = filter;
        } else if (x >= AMR_SUBFRAME_SIZE - (lag << 1)) {
            filterp = filter1;
        } else
            filterp = filter2;

        ff_celp_circ_addf(out, out, filterp, x, y, AMR_SUBFRAME_SIZE);
    }
}

static const float *anti_sparseness(AMRContext *p, AMRFixed *fixed_sparse,
                                    const float *fixed_vector,
                                    float fixed_gain, float *out)
{
    int ir_filter_nr;

    if (p->pitch_gain[4] < 0.6) {
        ir_filter_nr = 0;      // strong filtering
    } else if (p->pitch_gain[4] < 0.9) {
        ir_filter_nr = 1;      // medium filtering
    } else
        ir_filter_nr = 2;      // no filtering

    // detect 'onset'
    if (fixed_gain > 2.0 * p->prev_sparse_fixed_gain) {
        p->ir_filter_onset = 2;
    } else if (p->ir_filter_onset)
        p->ir_filter_onset--;

    if (!p->ir_filter_onset) {
        int i, count = 0;

        for (i = 0; i < 5; i++)
            if (p->pitch_gain[i] < 0.6)
                count++;
        if (count > 2)
            ir_filter_nr = 0;

        if (ir_filter_nr > p->prev_ir_filter_nr + 1)
            ir_filter_nr--;
    } else if (ir_filter_nr < 2)
        ir_filter_nr++;

    // Disable filtering for very low level of fixed_gain.
    // Note this step is not specified in the technical description but is in
    // the reference source in the function Ph_disp.
    if (fixed_gain < 5.0)
        ir_filter_nr = 2;

    if (p->cur_frame_mode != MODE_7k4 && p->cur_frame_mode < MODE_10k2 &&
        ir_filter_nr < 2) {
        apply_ir_filter(out, fixed_sparse,
                        (p->cur_frame_mode == MODE_7k95 ?
                             ir_filters_lookup_MODE_7k95 :
                             ir_filters_lookup)[ir_filter_nr]);
        fixed_vector = out;
    }

    // update ir filter strength history
    p->prev_ir_filter_nr      = ir_filter_nr;
    p->prev_sparse_fixed_gain = fixed_gain;

    return fixed_vector;
}

static int synthesis(AMRContext *p, float *lpc,
                     float fixed_gain, const float *fixed_vector,
                     float *samples, uint8_t overflow)
{
    int i;
    float excitation[AMR_SUBFRAME_SIZE];

    // if an overflow has been detected, the pitch vector is scaled down by a
    // factor of 4
    if (overflow)
        for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
            p->pitch_vector[i] *= 0.25;

    ff_weighted_vector_sumf(excitation, p->pitch_vector, fixed_vector,
                            p->pitch_gain[4], fixed_gain, AMR_SUBFRAME_SIZE);

    // emphasize pitch vector contribution
    if (p->pitch_gain[4] > 0.5 && !overflow) {
        float energy = ff_dot_productf(excitation, excitation,
                                       AMR_SUBFRAME_SIZE);
        float pitch_factor =
            p->pitch_gain[4] *
            (p->cur_frame_mode == MODE_12k2 ?
                0.25 * FFMIN(p->pitch_gain[4], 1.0) :
                0.5  * FFMIN(p->pitch_gain[4], SHARP_MAX));

        for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
            excitation[i] += pitch_factor * p->pitch_vector[i];

        ff_scale_vector_to_given_sum_of_squares(excitation, excitation, energy,
                                                AMR_SUBFRAME_SIZE);
    }

    ff_celp_lp_synthesis_filterf(samples, lpc, excitation, AMR_SUBFRAME_SIZE,
                                 LP_FILTER_ORDER);

    // detect overflow
    for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
        if (fabsf(samples[i]) > AMR_SAMPLE_BOUND) {
            return 1;
        }

    return 0;
}
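
/* The emphasis branch above adds an extra pitch contribution and then
 * restores the subframe energy it started with:
 *
 *   exc'[n] = exc[n] + g_p * f(g_p) * pitch[n]
 *   exc''   = exc' * sqrt(energy(exc) / energy(exc'))
 *
 * with f(g_p) = 0.25 * min(g_p, 1.0) at 12.2 kbit/s and
 * f(g_p) = 0.5 * min(g_p, SHARP_MAX) otherwise, so only the spectral shape
 * of the excitation changes, not its power.
 */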

static void update_state(AMRContext *p)
{
    memcpy(p->prev_lsp_sub4, p->lsp[3], LP_FILTER_ORDER * sizeof(p->lsp[3][0]));

    memmove(&p->excitation_buf[0], &p->excitation_buf[AMR_SUBFRAME_SIZE],
            (PITCH_DELAY_MAX + LP_FILTER_ORDER + 1) * sizeof(float));

    memmove(&p->pitch_gain[0], &p->pitch_gain[1], 4 * sizeof(float));
    memmove(&p->fixed_gain[0], &p->fixed_gain[1], 4 * sizeof(float));

    memmove(&p->samples_in[0], &p->samples_in[AMR_SUBFRAME_SIZE],
            LP_FILTER_ORDER * sizeof(float));
}

static float tilt_factor(float *lpc_n, float *lpc_d)
{
    float rh0, rh1; // autocorrelation at lag 0 and 1

    // LP_FILTER_ORDER prior zeros are needed for ff_celp_lp_synthesis_filterf
    float impulse_buffer[LP_FILTER_ORDER + AMR_TILT_RESPONSE] = { 0 };
    float *hf = impulse_buffer + LP_FILTER_ORDER; // start of impulse response

    hf[0] = 1.0;
    memcpy(hf + 1, lpc_n, sizeof(float) * LP_FILTER_ORDER);
    ff_celp_lp_synthesis_filterf(hf, lpc_d, hf, AMR_TILT_RESPONSE,
                                 LP_FILTER_ORDER);

    rh0 = ff_dot_productf(hf, hf,     AMR_TILT_RESPONSE);
    rh1 = ff_dot_productf(hf, hf + 1, AMR_TILT_RESPONSE - 1);

    // The spec only specifies this check for 12.2 and 10.2 kbit/s
    // modes. But in the ref source the tilt is always non-negative.
    return rh1 >= 0.0 ? rh1 / rh0 * AMR_TILT_GAMMA_T : 0.0;
}
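
/* The returned value is the lag-1 autocorrelation of the truncated formant
 * filter impulse response, normalized by its energy and scaled by gamma_t:
 *
 *   mu = gamma_t * rh1 / rh0,  clamped so that mu >= 0
 *
 * ff_tilt_compensation() then applies the spec's tilt compensation filter
 * H_tilt(z) = 1 - mu * z^-1 to undo the spectral tilt introduced by the
 * formant post-filter.
 */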

static void postfilter(AMRContext *p, float *lpc, float *buf_out)
{
    int i;
    float *samples    = p->samples_in + LP_FILTER_ORDER; // Start of input

    float speech_gain = ff_dot_productf(samples, samples,
                                        AMR_SUBFRAME_SIZE);

    float pole_out[AMR_SUBFRAME_SIZE + LP_FILTER_ORDER];  // Output of pole filter
    const float *gamma_n, *gamma_d;                       // Formant filter factor table
    float lpc_n[LP_FILTER_ORDER], lpc_d[LP_FILTER_ORDER]; // Transfer function coefficients

    if (p->cur_frame_mode == MODE_12k2 || p->cur_frame_mode == MODE_10k2) {
        gamma_n = ff_pow_0_7;
        gamma_d = ff_pow_0_75;
    } else {
        gamma_n = ff_pow_0_55;
        gamma_d = ff_pow_0_7;
    }

    for (i = 0; i < LP_FILTER_ORDER; i++) {
        lpc_n[i] = lpc[i] * gamma_n[i];
        lpc_d[i] = lpc[i] * gamma_d[i];
    }

    memcpy(pole_out, p->postfilter_mem, sizeof(float) * LP_FILTER_ORDER);
    ff_celp_lp_synthesis_filterf(pole_out + LP_FILTER_ORDER, lpc_d, samples,
                                 AMR_SUBFRAME_SIZE, LP_FILTER_ORDER);
    memcpy(p->postfilter_mem, pole_out + AMR_SUBFRAME_SIZE,
           sizeof(float) * LP_FILTER_ORDER);

    ff_celp_lp_zero_synthesis_filterf(buf_out, lpc_n,
                                      pole_out + LP_FILTER_ORDER,
                                      AMR_SUBFRAME_SIZE, LP_FILTER_ORDER);

    ff_tilt_compensation(&p->tilt_mem, tilt_factor(lpc_n, lpc_d), buf_out,
                         AMR_SUBFRAME_SIZE);

    ff_adaptive_gain_control(buf_out, buf_out, speech_gain, AMR_SUBFRAME_SIZE,
                             AMR_AGC_ALPHA, &p->postfilter_agc);
}

static int amrnb_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                              AVPacket *avpkt)
{
    AMRContext *p      = avctx->priv_data; // pointer to private data
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    float *buf_out     = data;             // pointer to the output data buffer
    int i, subframe;
    float fixed_gain_factor;
    AMRFixed fixed_sparse = {0};            // fixed vector up to anti-sparseness processing
    float spare_vector[AMR_SUBFRAME_SIZE];  // extra stack space to hold result from anti-sparseness processing
    float synth_fixed_gain;                 // the fixed gain that synthesis should use
    const float *synth_fixed_vector;        // pointer to the fixed vector that synthesis should use

    p->cur_frame_mode = unpack_bitstream(p, buf, buf_size);
    if (p->cur_frame_mode == MODE_DTX) {
        av_log_missing_feature(avctx, "dtx mode", 1);
        return -1;
    }

    if (p->cur_frame_mode == MODE_12k2) {
        lsf2lsp_5(p);
    } else
        lsf2lsp_3(p);

    for (i = 0; i < 4; i++)
        ff_acelp_lspd2lpc(p->lsp[i], p->lpc[i], 5);

    for (subframe = 0; subframe < 4; subframe++) {
        const AMRNBSubframe *amr_subframe = &p->frame.subframe[subframe];

        decode_pitch_vector(p, amr_subframe, subframe);

        decode_fixed_sparse(&fixed_sparse, amr_subframe->pulses,
                            p->cur_frame_mode, subframe);

        // The fixed gain (section 6.1.3) depends on the fixed vector
        // (section 6.1.2), but the fixed vector calculation uses
        // pitch sharpening based on the pitch gain (section 6.1.3).
        // So the correct order is: pitch gain, pitch sharpening, fixed gain.
        decode_gains(p, amr_subframe, p->cur_frame_mode, subframe,
                     &fixed_gain_factor);

        pitch_sharpening(p, subframe, p->cur_frame_mode, &fixed_sparse);

        ff_set_fixed_vector(p->fixed_vector, &fixed_sparse, 1.0,
                            AMR_SUBFRAME_SIZE);

        p->fixed_gain[4] =
            ff_amr_set_fixed_gain(fixed_gain_factor,
                                  ff_dot_productf(p->fixed_vector, p->fixed_vector,
                                                  AMR_SUBFRAME_SIZE) / AMR_SUBFRAME_SIZE,
                                  p->prediction_error,
                                  energy_mean[p->cur_frame_mode], energy_pred_fac);

        // The excitation feedback is calculated without any processing such
        // as fixed gain smoothing. This isn't mentioned in the specification.
        for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
            p->excitation[i] *= p->pitch_gain[4];
        ff_set_fixed_vector(p->excitation, &fixed_sparse, p->fixed_gain[4],
                            AMR_SUBFRAME_SIZE);

        // In the ref decoder, excitation is stored with no fractional bits.
        // This step prevents buzz in silent periods. The ref encoder can
        // emit long sequences with pitch factor greater than one. This
        // creates unwanted feedback if the excitation vector is nonzero.
        // (e.g. test sequence T19_795.COD in 3GPP TS 26.074)
        for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
            p->excitation[i] = truncf(p->excitation[i]);

        // Smooth fixed gain.
        // The specification is ambiguous, but in the reference source, the
        // smoothed value is NOT fed back into later fixed gain smoothing.
        synth_fixed_gain = fixed_gain_smooth(p, p->lsf_q[subframe],
                                             p->lsf_avg, p->cur_frame_mode);

        synth_fixed_vector = anti_sparseness(p, &fixed_sparse, p->fixed_vector,
                                             synth_fixed_gain, spare_vector);

        if (synthesis(p, p->lpc[subframe], synth_fixed_gain,
                      synth_fixed_vector, &p->samples_in[LP_FILTER_ORDER], 0))
            // overflow detected -> rerun synthesis scaling pitch vector down
            // by a factor of 4, skipping pitch vector contribution emphasis
            // and adaptive gain control
            synthesis(p, p->lpc[subframe], synth_fixed_gain,
                      synth_fixed_vector, &p->samples_in[LP_FILTER_ORDER], 1);

        postfilter(p, p->lpc[subframe], buf_out + subframe * AMR_SUBFRAME_SIZE);

        // update buffers and history
        ff_clear_fixed_vector(p->fixed_vector, &fixed_sparse, AMR_SUBFRAME_SIZE);
        update_state(p);
    }

    ff_acelp_apply_order_2_transfer_function(buf_out, buf_out, highpass_zeros,
                                             highpass_poles,
                                             highpass_gain * AMR_SAMPLE_SCALE,
                                             p->high_pass_mem, AMR_BLOCK_SIZE);

    /* Update averaged lsf vector (used for fixed gain smoothing).
     *
     * Note that lsf_avg should not incorporate the current frame's LSFs
     * for fixed_gain_smooth.
     * The specification has an incorrect formula: the reference decoder uses
     * qbar(n-1) rather than qbar(n) in section 6.1(4) equation 71. */
    ff_weighted_vector_sumf(p->lsf_avg, p->lsf_avg, p->lsf_q[3],
                            0.84, 0.16, LP_FILTER_ORDER);

    /* report how many samples we got */
    *data_size = AMR_BLOCK_SIZE * sizeof(float);

    /* return the number of bytes consumed if everything was OK */
    return frame_sizes_nb[p->cur_frame_mode] + 1; // +7 for rounding and +8 for TOC
}


AVCodec ff_amrnb_decoder = {
    .name           = "amrnb",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = CODEC_ID_AMR_NB,
    .priv_data_size = sizeof(AMRContext),
    .init           = amrnb_decode_init,
    .decode         = amrnb_decode_frame,
    .long_name      = NULL_IF_CONFIG_SMALL("Adaptive Multi-Rate NarrowBand"),
    .sample_fmts    = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE },
};