From 3d3cf6745e2a5dc9c377244454c3186d75b177fa Mon Sep 17 00:00:00 2001
From: Justin Ruggles
Date: Sun, 14 Oct 2012 00:12:55 -0400
Subject: [PATCH] aacdec: use float planar sample format for output

---
 libavcodec/aac.h    |   7 +--
 libavcodec/aacdec.c | 101 ++++++++++++++++++++++++++------------------
 libavcodec/aacsbr.c |   6 +--
 3 files changed, 66 insertions(+), 48 deletions(-)

diff --git a/libavcodec/aac.h b/libavcodec/aac.h
index 9c6ac277d5..6c5d962dd8 100644
--- a/libavcodec/aac.h
+++ b/libavcodec/aac.h
@@ -236,9 +236,10 @@ typedef struct SingleChannelElement {
     uint8_t zeroes[128];                            ///< band is not coded (used by encoder)
     DECLARE_ALIGNED(32, float, coeffs)[1024];       ///< coefficients for IMDCT
     DECLARE_ALIGNED(32, float, saved)[1024];        ///< overlap
-    DECLARE_ALIGNED(32, float, ret)[2048];          ///< PCM output
+    DECLARE_ALIGNED(32, float, ret_buf)[2048];      ///< PCM output buffer
     DECLARE_ALIGNED(16, float, ltp_state)[3072];    ///< time signal for LTP
     PredictorState predictor_state[MAX_PREDICTORS];
+    float *ret;                                     ///< PCM output
 } SingleChannelElement;
 
 /**
@@ -297,10 +298,10 @@ typedef struct AACContext {
     /** @} */
 
     /**
-     * @name Members used for output interleaving
+     * @name Members used for output
      * @{
      */
-    float *output_data[MAX_CHANNELS];                    ///< Points to each element's 'ret' buffer (PCM output).
+    SingleChannelElement *output_element[MAX_CHANNELS]; ///< Points to each SingleChannelElement
     /** @} */
 
     DECLARE_ALIGNED(32, float, temp)[128];
diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c
index 59f39fd26f..d2a31cae0b 100644
--- a/libavcodec/aacdec.c
+++ b/libavcodec/aacdec.c
@@ -149,10 +149,10 @@ static av_cold int che_configure(AACContext *ac,
             ff_aac_sbr_ctx_init(ac, &ac->che[type][id]->sbr);
         }
         if (type != TYPE_CCE) {
-            ac->output_data[(*channels)++] = ac->che[type][id]->ch[0].ret;
+            ac->output_element[(*channels)++] = &ac->che[type][id]->ch[0];
             if (type == TYPE_CPE ||
                 (type == TYPE_SCE && ac->oc[1].m4ac.ps == 1)) {
-                ac->output_data[(*channels)++] = ac->che[type][id]->ch[1].ret;
+                ac->output_element[(*channels)++] = &ac->che[type][id]->ch[1];
             }
         }
     } else {
@@ -163,6 +163,38 @@
     return 0;
 }
 
+static int frame_configure_elements(AVCodecContext *avctx)
+{
+    AACContext *ac = avctx->priv_data;
+    int type, id, ch, ret;
+
+    /* set channel pointers to internal buffers by default */
+    for (type = 0; type < 4; type++) {
+        for (id = 0; id < MAX_ELEM_ID; id++) {
+            ChannelElement *che = ac->che[type][id];
+            if (che) {
+                che->ch[0].ret = che->ch[0].ret_buf;
+                che->ch[1].ret = che->ch[1].ret_buf;
+            }
+        }
+    }
+
+    /* get output buffer */
+    ac->frame.nb_samples = 2048;
+    if ((ret = avctx->get_buffer(avctx, &ac->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+
+    /* map output channel pointers to AVFrame data */
+    for (ch = 0; ch < avctx->channels; ch++) {
+        if (ac->output_element[ch])
+            ac->output_element[ch]->ret = (float *)ac->frame.extended_data[ch];
+    }
+
+    return 0;
+}
+
 struct elem_to_channel {
     uint64_t av_position;
     uint8_t syn_ele;
@@ -378,8 +410,8 @@ static void pop_output_configuration(AACContext *ac) {
  * @return Returns error status. 0 - OK, !0 - error
  */
 static int output_configure(AACContext *ac,
-                            uint8_t layout_map[MAX_ELEM_ID*4][3], int tags,
-                            enum OCStatus oc_type)
+                            uint8_t layout_map[MAX_ELEM_ID*4][3], int tags,
+                            enum OCStatus oc_type, int get_new_frame)
 {
     AVCodecContext *avctx = ac->avctx;
     int i, channels = 0, ret;
@@ -417,6 +449,11 @@ static int output_configure(AACContext *ac,
     avctx->channels = ac->oc[1].channels = channels;
     ac->oc[1].status = oc_type;
 
+    if (get_new_frame) {
+        if ((ret = frame_configure_elements(ac->avctx)) < 0)
+            return ret;
+    }
+
     return 0;
 }
 
@@ -457,7 +494,7 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
                                          2) < 0)
             return NULL;
         if (output_configure(ac, layout_map, layout_map_tags,
-                             OC_TRIAL_FRAME) < 0)
+                             OC_TRIAL_FRAME, 1) < 0)
             return NULL;
 
         ac->oc[1].m4ac.chan_config = 2;
@@ -473,7 +510,7 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
                                          1) < 0)
             return NULL;
         if (output_configure(ac, layout_map, layout_map_tags,
-                             OC_TRIAL_FRAME) < 0)
+                             OC_TRIAL_FRAME, 1) < 0)
             return NULL;
 
         ac->oc[1].m4ac.chan_config = 1;
@@ -660,7 +697,7 @@ static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
     } else if (m4ac->sbr == 1 && m4ac->ps == -1)
         m4ac->ps = 1;
 
-    if (ac && (ret = output_configure(ac, layout_map, tags, OC_GLOBAL_HDR)))
+    if (ac && (ret = output_configure(ac, layout_map, tags, OC_GLOBAL_HDR, 0)))
         return ret;
 
     if (extension_flag) {
@@ -802,11 +839,12 @@ static void reset_predictor_group(PredictorState *ps, int group_num)
 static av_cold int aac_decode_init(AVCodecContext *avctx)
 {
     AACContext *ac = avctx->priv_data;
-    float output_scale_factor;
 
     ac->avctx = avctx;
     ac->oc[1].m4ac.sample_rate = avctx->sample_rate;
 
+    avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
+
     if (avctx->extradata_size > 0) {
         if (decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
                                          avctx->extradata,
@@ -836,20 +874,12 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
                                        &layout_map_tags, ac->oc[1].m4ac.chan_config);
             if (!ret)
                 output_configure(ac, layout_map, layout_map_tags,
-                                 OC_GLOBAL_HDR);
+                                 OC_GLOBAL_HDR, 0);
             else if (avctx->err_recognition & AV_EF_EXPLODE)
                 return AVERROR_INVALIDDATA;
         }
     }
 
-    if (avctx->request_sample_fmt == AV_SAMPLE_FMT_FLT) {
-        avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
-        output_scale_factor = 1.0 / 32768.0;
-    } else {
-        avctx->sample_fmt = AV_SAMPLE_FMT_S16;
-        output_scale_factor = 1.0;
-    }
-
     AAC_INIT_VLC_STATIC( 0, 304);
     AAC_INIT_VLC_STATIC( 1, 270);
     AAC_INIT_VLC_STATIC( 2, 550);
@@ -877,9 +907,9 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
                     ff_aac_scalefactor_code, sizeof(ff_aac_scalefactor_code[0]), sizeof(ff_aac_scalefactor_code[0]),
                     352);
 
-    ff_mdct_init(&ac->mdct,       11, 1, output_scale_factor/1024.0);
-    ff_mdct_init(&ac->mdct_small,  8, 1, output_scale_factor/128.0);
-    ff_mdct_init(&ac->mdct_ltp,   11, 0, -2.0/output_scale_factor);
+    ff_mdct_init(&ac->mdct,       11, 1, 1.0 / (32768.0 * 1024.0));
+    ff_mdct_init(&ac->mdct_small,  8, 1, 1.0 / (32768.0 * 128.0));
+    ff_mdct_init(&ac->mdct_ltp,   11, 0, -2.0 * 32768.0);
     // window initialization
     ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
     ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
@@ -1940,7 +1970,7 @@ static int decode_extension_payload(AACContext *ac, GetBitContext *gb, int cnt,
             ac->oc[1].m4ac.sbr = 1;
             ac->oc[1].m4ac.ps = 1;
             output_configure(ac, ac->oc[1].layout_map, ac->oc[1].layout_map_tags,
-                             ac->oc[1].status);
+                             ac->oc[1].status, 1);
         } else {
             ac->oc[1].m4ac.sbr = 1;
         }
@@ -2330,7 +2360,7 @@ static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
                                              &layout_map_tags, hdr_info.chan_config))
                 return -7;
             if (output_configure(ac, layout_map, layout_map_tags,
-                                 FFMAX(ac->oc[1].status, OC_TRIAL_FRAME)))
+                                 FFMAX(ac->oc[1].status, OC_TRIAL_FRAME), 0))
                 return -7;
         } else {
             ac->oc[1].m4ac.chan_config = 0;
@@ -2372,6 +2402,11 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
         }
     }
 
+    if (frame_configure_elements(avctx) < 0) {
+        err = -1;
+        goto fail;
+    }
+
     ac->tags_mapped = 0;
     // parse
     while ((elem_type = get_bits(gb, 3)) != TYPE_END) {
@@ -2426,7 +2461,7 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
                        "Not evaluating a further program_config_element as this construct is dubious at best.\n");
                 pop_output_configuration(ac);
             } else {
-                err = output_configure(ac, layout_map, tags, OC_TRIAL_PCE);
+                err = output_configure(ac, layout_map, tags, OC_TRIAL_PCE, 1);
                 pce_found = 1;
             }
             break;
@@ -2469,23 +2504,7 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
         samples <<= multiplier;
 
     if (samples) {
-        /* get output buffer */
         ac->frame.nb_samples = samples;
-        if ((err = avctx->get_buffer(avctx, &ac->frame)) < 0) {
-            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-            err = -1;
-            goto fail;
-        }
-
-        if (avctx->sample_fmt == AV_SAMPLE_FMT_FLT)
-            ac->fmt_conv.float_interleave((float *)ac->frame.data[0],
-                                          (const float **)ac->output_data,
-                                          samples, avctx->channels);
-        else
-            ac->fmt_conv.float_to_int16_interleave((int16_t *)ac->frame.data[0],
-                                                   (const float **)ac->output_data,
-                                                   samples, avctx->channels);
-
         *(AVFrame *)data = ac->frame;
     }
     *got_frame_ptr = !!samples;
@@ -2844,7 +2863,7 @@ AVCodec ff_aac_decoder = {
     .decode          = aac_decode_frame,
     .long_name       = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"),
     .sample_fmts     = (const enum AVSampleFormat[]) {
-        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
+        AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE
     },
     .capabilities    = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
     .channel_layouts = aac_channel_layout,
@@ -2865,7 +2884,7 @@ AVCodec ff_aac_latm_decoder = {
     .decode          = latm_decode_frame,
     .long_name       = NULL_IF_CONFIG_SMALL("AAC LATM (Advanced Audio Coding LATM syntax)"),
     .sample_fmts     = (const enum AVSampleFormat[]) {
-        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
+        AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE
     },
     .capabilities    = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
     .channel_layouts = aac_channel_layout,
diff --git a/libavcodec/aacsbr.c b/libavcodec/aacsbr.c
index b1c4e73ffc..df5d9279bf 100644
--- a/libavcodec/aacsbr.c
+++ b/libavcodec/aacsbr.c
@@ -140,7 +140,6 @@ static void sbr_turnoff(SpectralBandReplication *sbr) {
 
 av_cold void ff_aac_sbr_ctx_init(AACContext *ac, SpectralBandReplication *sbr)
 {
-    float mdct_scale;
     sbr->kx[0] = sbr->kx[1];
     sbr_turnoff(sbr);
     sbr->data[0].synthesis_filterbank_samples_offset = SBR_SYNTHESIS_BUF_SIZE - (1280 - 128);
@@ -148,9 +147,8 @@ av_cold void ff_aac_sbr_ctx_init(AACContext *ac, SpectralBandReplication *sbr)
     /* SBR requires samples to be scaled to +/-32768.0 to work correctly.
      * mdct scale factors are adjusted to scale up from +/-1.0 at analysis
      * and scale back down at synthesis. */
-    mdct_scale = ac->avctx->sample_fmt == AV_SAMPLE_FMT_FLT ? 32768.0f : 1.0f;
-    ff_mdct_init(&sbr->mdct,     7, 1, 1.0 / (64 * mdct_scale));
-    ff_mdct_init(&sbr->mdct_ana, 7, 1, -2.0 * mdct_scale);
+    ff_mdct_init(&sbr->mdct,     7, 1, 1.0 / (64 * 32768.0));
+    ff_mdct_init(&sbr->mdct_ana, 7, 1, -2.0 * 32768.0);
     ff_ps_ctx_init(&sbr->ps);
     ff_sbrdsp_init(&sbr->dsp);
 }