Go to the documentation of this file.
86 memset(
ctx->channel_blocks, 0,
sizeof(
ctx->channel_blocks));
93 ctx->num_channel_blocks = 1;
98 ctx->num_channel_blocks = 1;
103 ctx->num_channel_blocks = 2;
109 ctx->num_channel_blocks = 3;
116 ctx->num_channel_blocks = 4;
124 ctx->num_channel_blocks = 5;
133 ctx->num_channel_blocks = 5;
142 "Unsupported channel count: %d!\n", avctx->
channels);
181 if (!
ctx->ch_units || !
ctx->fdsp) {
185 for (
i = 0;
i <
ctx->num_channel_blocks;
i++) {
186 for (ch = 0; ch < 2; ch++) {
187 ctx->ch_units[
i].channels[ch].ch_num = ch;
188 ctx->ch_units[
i].channels[ch].wnd_shape = &
ctx->ch_units[
i].channels[ch].wnd_shape_hist[0][0];
189 ctx->ch_units[
i].channels[ch].wnd_shape_prev = &
ctx->ch_units[
i].channels[ch].wnd_shape_hist[1][0];
190 ctx->ch_units[
i].channels[ch].gain_data = &
ctx->ch_units[
i].channels[ch].gain_data_hist[0][0];
191 ctx->ch_units[
i].channels[ch].gain_data_prev = &
ctx->ch_units[
i].channels[ch].gain_data_hist[1][0];
192 ctx->ch_units[
i].channels[ch].tones_info = &
ctx->ch_units[
i].channels[ch].tones_info_hist[0][0];
193 ctx->ch_units[
i].channels[ch].tones_info_prev = &
ctx->ch_units[
i].channels[ch].tones_info_hist[1][0];
196 ctx->ch_units[
i].waves_info = &
ctx->ch_units[
i].wave_synth_hist[0];
197 ctx->ch_units[
i].waves_info_prev = &
ctx->ch_units[
i].wave_synth_hist[1];
212 int i, sb, ch,
qu, nspeclines, RNG_index;
219 for (ch = 0; ch < num_channels; ch++)
229 sb_RNG_index[sb] = RNG_index & 0x3FC;
232 for (ch = 0; ch < num_channels; ch++) {
245 for (
i = 0;
i < nspeclines;
i++)
252 sb_RNG_index[sb], sb);
276 for (ch = 0; ch < num_channels; ch++) {
314 &
ctx->time_buf[ch][sb * 128]);
320 &
ctx->time_buf[ch][0], &
ctx->outp_buf[ch][0]);
324 for (ch = 0; ch < num_channels; ch++) {
337 int *got_frame_ptr,
AVPacket *avpkt)
341 int i,
ret, ch_unit_id, ch_block = 0, out_ch_index = 0, channels_to_process;
342 float **samples_p = (
float **)
frame->extended_data;
362 if (ch_block >=
ctx->num_channel_blocks ||
363 ctx->channel_blocks[ch_block] != ch_unit_id) {
365 "Frame data doesn't match channel configuration!\n");
369 ctx->ch_units[ch_block].unit_type = ch_unit_id;
370 channels_to_process = ch_unit_id + 1;
373 &
ctx->ch_units[ch_block],
379 channels_to_process, avctx);
381 channels_to_process, avctx);
383 for (
i = 0;
i < channels_to_process;
i++)
384 memcpy(samples_p[out_ch_index +
i],
ctx->outp_buf[
i],
388 out_ch_index += channels_to_process;
397 .
name =
"atrac3plus",
410 .
name =
"atrac3plusal",
411 .long_name =
NULL_IF_CONFIG_SMALL(
"ATRAC3+ AL (Adaptive TRansform Acoustic Coding 3+ Advanced Lossless)"),
Atrac3pWaveSynthParams * waves_info
void ff_atrac3p_init_dsp_static(void)
Initialize sine waves synthesizer and ff_sine_* tables.
@ AV_SAMPLE_FMT_FLTP
float, planar
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init function without locking any global mutexes.
static int get_bits_left(GetBitContext *gb)
Filter documentation. The word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. They are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
Atrac3pWavesData * tones_info_prev
uint64_t channel_layout
Audio channel layout.
int num_wavs
number of sine waves in the group
#define AV_CH_LAYOUT_MONO
AtracGCContext gainc_ctx
gain compensation context
static av_cold int atrac3p_decode_close(AVCodecContext *avctx)
static void reconstruct_frame(ATRAC3PContext *ctx, Atrac3pChanUnitCtx *ch_unit, int num_channels, AVCodecContext *avctx)
This structure describes decoded (raw) audio or video data.
Atrac3pIPQFChannelCtx ipqf_ctx[2]
void ff_atrac3p_generate_tones(Atrac3pChanUnitCtx *ch_unit, AVFloatDSPContext *fdsp, int ch_num, int sb, float *out)
Synthesize sine waves for a particular subband.
Atrac3pChanUnitCtx * ch_units
global channel units
static av_cold void atrac3p_init_static(void)
void ff_atrac3p_power_compensation(Atrac3pChanUnitCtx *ctx, AVFloatDSPContext *fdsp, int ch_index, float *sp, int rng_index, int sb_num)
Perform power compensation aka noise dithering.
void ff_atrac3p_ipqf(FFTContext *dct_ctx, Atrac3pIPQFChannelCtx *hist, const float *in, float *out)
Subband synthesis filter based on the polyphase quadrature (pseudo-QMF) filter bank.
uint8_t negate_coeffs[ATRAC3P_SUBBANDS]
1 - subband-wise IMDCT coefficients negation
float mdct_buf[2][ATRAC3P_FRAME_SAMPLES]
output of the IMDCT
#define ATRAC3P_FRAME_SAMPLES
@ CH_UNIT_EXTENSION
unit containing extension information
uint8_t * wnd_shape
IMDCT window shape for current frame.
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static void decode_residual_spectrum(ATRAC3PContext *ctx, Atrac3pChanUnitCtx *ch_unit, float out[2][ATRAC3P_FRAME_SAMPLES], int num_channels, AVCodecContext *avctx)
static av_cold int set_channel_params(ATRAC3PContext *ctx, AVCodecContext *avctx)
int flags
AV_CODEC_FLAG_*.
av_cold void ff_atrac_init_gain_compensation(AtracGCContext *gctx, int id2exp_offset, int loc_scale)
Initialize gain compensation context.
#define AV_CH_LAYOUT_STEREO
static av_cold int atrac3p_decode_init(AVCodecContext *avctx)
const float ff_atrac3p_sf_tab[64]
static int ff_thread_once(char *control, void(*routine)(void))
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
float outp_buf[2][ATRAC3P_FRAME_SAMPLES]
int used_quant_units
number of quant units with coded spectrum
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
@ CH_UNIT_MONO
unit containing one coded channel
int num_coded_subbands
number of subbands with coded spectrum
Gain compensation context structure.
const uint16_t ff_atrac3p_qu_to_spec_pos[33]
Map quant unit number to its position in the spectrum.
Atrac3pWaveSynthParams * waves_info_prev
uint64_t my_channel_layout
current channel layout
const float ff_atrac3p_mant_tab[8]
@ CH_UNIT_TERMINATOR
unit sequence terminator
int num_channel_blocks
number of channel blocks
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static unsigned int get_bits1(GetBitContext *s)
AtracGainInfo * gain_data_prev
gain control data for previous frame
int16_t spectrum[2048]
decoded IMDCT spectrum
uint8_t channel_blocks[5]
channel configuration descriptor
const AVCodec ff_atrac3pal_decoder
Gain control parameters for one subband.
Atrac3pWavesData * tones_info
Atrac3pChanParams channels[2]
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
av_cold void ff_atrac3p_init_vlcs(void)
Initialize VLC tables for bitstream parsing.
enum AVSampleFormat sample_fmt
audio sample format
int qu_sf_idx[32]
array of scale factor indexes for each quant unit
@ CH_UNIT_STEREO
unit containing two jointly-coded channels
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
float time_buf[2][ATRAC3P_FRAME_SAMPLES]
output of the gain compensation
#define AV_CH_LAYOUT_5POINT1_BACK
FFTContext ipqf_dct_ctx
IDCT context used by IPQF.
int ff_atrac3p_decode_channel_unit(GetBitContext *gb, Atrac3pChanUnitCtx *ctx, int num_channels, AVCodecContext *avctx)
Decode bitstream data of a channel unit.
int channels
number of audio channels
#define DECLARE_ALIGNED(n, t, v)
#define i(width, name, range_min, range_max)
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
void ff_atrac3p_imdct(AVFloatDSPContext *fdsp, FFTContext *mdct_ctx, float *pIn, float *pOut, int wind_id, int sb)
Regular IMDCT and windowing without overlapping, with spectrum reversal in the odd subbands.
static int atrac3p_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
int unit_type
unit type (mono/stereo)
#define AV_CH_LAYOUT_7POINT1
void ff_atrac3p_init_imdct(AVCodecContext *avctx, FFTContext *mdct_ctx)
Initialize IMDCT transform.
const char * name
Name of the codec implementation.
void * av_calloc(size_t nmemb, size_t size)
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs.
#define FFSWAP(type, a, b)
these buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If the filter should request a frame on one of its inputs, it should do so repeatedly until at least one frame has been pushed. Return or at least make progress towards producing a frame.
#define AV_CH_LAYOUT_SURROUND
float prev_buf[2][ATRAC3P_FRAME_SAMPLES]
overlapping buffer
int tones_present
1 - tones info present
uint8_t swap_channels[ATRAC3P_SUBBANDS]
1 - perform subband-wise channel swapping
main external API structure.
void ff_atrac_gain_compensation(AtracGCContext *gctx, float *in, float *prev, AtracGainInfo *gc_now, AtracGainInfo *gc_next, int num_samples, float *out)
Apply gain compensation and perform the MDCT overlapping part.
Parameters of a group of sine waves.
uint8_t * wnd_shape_prev
IMDCT window shape for previous frame.
#define ATRAC3P_SUBBAND_SAMPLES
number of samples per subband
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
This structure stores compressed data.
#define ATRAC3P_SUBBANDS
Global unit sizes.
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
#define AV_CH_LAYOUT_4POINT0
#define AV_CH_LAYOUT_6POINT1_BACK
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
int qu_wordlen[32]
array of word lengths for each quant unit
AtracGainInfo * gain_data
gain control data for next frame
const AVCodec ff_atrac3p_decoder
float samples[2][ATRAC3P_FRAME_SAMPLES]
quantized MDCT spectrum