Go to the documentation of this file.
#define MIN_CHANNELS       1
#define MAX_CHANNELS       8
#define MAX_JS_PAIRS       8 / 2
#define JOINT_STEREO       0x12
#define SAMPLES_PER_FRAME  1024
#define ATRAC3_VLC_BITS    8
/* imlt(): coefficient swap applied to odd QMF bands before the IMDCT */
for (i = 0; i < 128; i++)
/* decode_bytes(): undo the stream scrambling by XORing each 32-bit word with a rotated key */
off = (intptr_t)input & 3;
buf = (const uint32_t *)(input - off);
c   = av_be2ne32((0x537F6103U >> (off * 8)) | (0x537F6103U << (32 - (off * 8))));
for (i = 0; i < bytes / 4; i++)
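For illustration, a minimal standalone sketch of the same descrambling idea, assuming 4-byte-aligned input so the key rotation can be dropped; descramble_words() is a hypothetical helper, and byte order is handled by hand instead of via av_be2ne32():

#include <stdint.h>

/* Hypothetical helper, not part of atrac3.c: XOR each big-endian 32-bit word
 * of the bitstream with the fixed key 0x537F6103. The real decode_bytes()
 * additionally rotates the key to cope with unaligned input pointers. */
static void descramble_words(const uint8_t *in, uint8_t *out, int bytes)
{
    const uint32_t key = 0x537F6103U;
    for (int i = 0; i + 4 <= bytes; i += 4) {
        uint32_t w = (uint32_t)in[i] << 24 | in[i + 1] << 16 |
                     in[i + 2] << 8  | in[i + 3];
        w ^= key;                      /* remove the scrambling */
        out[i]     = w >> 24;
        out[i + 1] = w >> 16;
        out[i + 2] = w >> 8;
        out[i + 3] = w;
    }
}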
/* init_imdct_window(): build the 512-sample IMDCT window */
for (i = 0, j = 255; i < 128; i++, j--) {
    float wi = sin(((i + 0.5) / 256.0 - 0.5) * M_PI) + 1.0;
    float wj = sin(((j + 0.5) / 256.0 - 0.5) * M_PI) + 1.0;
    float w  = 0.5 * (wi * wi + wj * wj);
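As a hedged, self-contained sketch of the window setup: the assignment lines are not part of this listing, so the normalization by w below (wi / w and wj / w mirrored into a symmetric 512-entry table) is an assumption; build_window() is a hypothetical stand-in for init_imdct_window():

#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Hypothetical standalone version; the normalization step (wi / w, wj / w)
 * and the symmetric mirroring are assumptions about the omitted lines. */
static void build_window(float window[512])
{
    for (int i = 0, j = 255; i < 128; i++, j--) {
        float wi = sin(((i + 0.5) / 256.0 - 0.5) * M_PI) + 1.0;
        float wj = sin(((j + 0.5) / 256.0 - 0.5) * M_PI) + 1.0;
        float w  = 0.5 * (wi * wi + wj * wj);
        window[i] = window[511 - i] = wi / w;
        window[j] = window[511 - j] = wj / w;
    }
}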
/* read_quant_spectral_coeffs(): mantissa decoding, using either VLC or
   constant-length codes depending on coding_flag */
                                       int coding_flag, int *mantissas,
int i, code, huff_symb;

if (coding_flag != 0) {

    for (i = 0; i < num_codes; i++) {

    for (i = 0; i < num_codes; i++) {

    for (i = 0; i < num_codes; i++) {

    for (i = 0; i < num_codes; i++) {
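Assuming the constant-length path reads each mantissa as an n-bit two's-complement field (as get_sbits() would), a hypothetical sketch of the sign extension involved; bits[] stands in for values pulled from the bitstream:

/* Hypothetical illustration of fixed-length mantissa decoding: each code is an
 * n-bit two's-complement value that must be sign-extended after reading. */
static void clc_mantissas(const unsigned *bits, int n, int num_codes, int *mantissas)
{
    for (int i = 0; i < num_codes; i++) {
        int code = bits[i];
        if (code & (1u << (n - 1)))    /* sign bit set: extend */
            code -= 1 << n;
        mantissas[i] = code;
    }
}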
/* decode_spectrum(): restore the quantized band spectrum coefficients */
int num_subbands, coding_mode, i, j, first, last, subband_size;
int subband_vlc_index[32], sf_index[32];

for (i = 0; i <= num_subbands; i++)

for (i = 0; i <= num_subbands; i++) {
    if (subband_vlc_index[i] != 0)

for (i = 0; i <= num_subbands; i++) {
    subband_size = last - first;

    if (subband_vlc_index[i] != 0) {

            mantissas, subband_size);
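A sketch of the dequantization that follows the mantissa read, assuming the usual scale-factor-times-inverse-step scheme suggested by ff_atrac_sf_table[] and inv_max_quant[] in the symbol list below; dequantize_subband() is a hypothetical helper:

/* Hypothetical dequantization helper: reconstruct one subband from its integer
 * mantissas. sf and inv_quant stand in for entries of ff_atrac_sf_table[] and
 * inv_max_quant[] referenced elsewhere in this file. */
static void dequantize_subband(const int *mantissas, int count,
                               float sf, float inv_quant, float *out)
{
    float scale = sf * inv_quant;      /* scale factor times 1/max_quant */
    for (int j = 0; j < count; j++)
        out[j] = mantissas[j] * scale;
}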
/* decode_tonal_components(): restore the quantized tonal components */
int nb_components, coding_mode_selector, coding_mode;
int band_flags[4], mantissa[8];
int component_count = 0;

if (nb_components == 0)

coding_mode_selector = get_bits(gb, 2);
if (coding_mode_selector == 2)

coding_mode = coding_mode_selector & 1;

for (i = 0; i < nb_components; i++) {
    int coded_values_per_component, quant_step_index;

    for (b = 0; b <= num_bands; b++)

    coded_values_per_component = get_bits(gb, 3);

    if (quant_step_index <= 1)

    if (coding_mode_selector == 3)

    for (b = 0; b < (num_bands + 1) * 4; b++) {
        int coded_components;

        if (band_flags[b >> 2] == 0)

        for (c = 0; c < coded_components; c++) {
            int sf_index, coded_values, max_coded_values;

            if (component_count >= 64)

            coded_values = coded_values_per_component + 1;
            coded_values = FFMIN(max_coded_values, coded_values);

                mantissa, coded_values);

            cmp->num_coefs = coded_values;

            for (m = 0; m < coded_values; m++)
                cmp->coef[m] = mantissa[m] * scale_factor;

return component_count;
/* decode_gain_control(): decode gain parameters for the coded bands */
for (b = 0; b <= num_bands; b++) {

    /* control point locations must be strictly increasing */
    if (j && loc[j] <= loc[j - 1])

gain[b].num_points = 0;
/* add_tonal_components(): combine the tonal components with the regular band spectrum */
int i, j, last_pos = -1;

for (i = 0; i < num_components; i++) {
    last_pos = FFMAX(components[i].pos + components[i].num_coefs, last_pos);

    for (j = 0; j < components[i].num_coefs; j++)
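A hypothetical sketch of the merge performed by add_tonal_components(), assuming the component coefficients are added on top of the regular spectrum at their position (the struct mirrors the TonalComponent fields visible in this listing):

/* Hypothetical sketch: each tonal component owns a short run of coefficients
 * placed at a spectral position; the merge with the band spectrum is assumed
 * to be additive. */
struct tonal {
    int   pos;         /* first spectral bin of the component */
    int   num_coefs;   /* number of coefficients */
    float coef[8];
};

static int add_components(float *spectrum, const struct tonal *c, int n)
{
    int last_pos = -1;
    for (int i = 0; i < n; i++) {
        if (c[i].pos + c[i].num_coefs > last_pos)
            last_pos = c[i].pos + c[i].num_coefs;
        for (int j = 0; j < c[i].num_coefs; j++)
            spectrum[c[i].pos + j] += c[i].coef[j];   /* assumed additive merge */
    }
    return last_pos;   /* index just past the highest coefficient touched */
}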
#define INTERPOLATE(old, new, nsample) \
    ((old) + (nsample) * 0.125 * ((new) - (old)))
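INTERPOLATE() ramps linearly from old toward new in steps of one eighth of the difference; a small usage example built on the macro above:

#include <stdio.h>

int main(void)
{
    float w_old = 1.0f, w_new = 2.0f;
    /* eight-step ramp, as applied to the first 8 samples of a band */
    for (int n = 0; n < 8; n++)
        printf("%g\n", INTERPOLATE(w_old, w_new, n));  /* 1, 1.125, ..., 1.875 */
    return 0;
}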
/* reverse_matrixing(): undo the joint-stereo matrixing, one 256-sample band at a time */
int i, nsample, band;
float mc1_l, mc1_r, mc2_l, mc2_r;

for (i = 0, band = 0; band < 4 * 256; band += 256, i++) {
    int s1 = prev_code[i];
    int s2 = curr_code[i];

    for (; nsample < band + 8; nsample++) {
        float c1 = su1[nsample];
        float c2 = su2[nsample];
        su2[nsample] = c1 * 2.0 - c2;

    for (; nsample < band + 256; nsample++) {
        float c1 = su1[nsample];
        float c2 = su2[nsample];
        su1[nsample] = c2 * 2.0;
        su2[nsample] = (c1 - c2) * 2.0;

    for (; nsample < band + 256; nsample++) {
        float c1 = su1[nsample];
        float c2 = su2[nsample];
        su1[nsample] = (c1 + c2) * 2.0;
        su2[nsample] = c2 * -2.0;

    for (; nsample < band + 256; nsample++) {
        float c1 = su1[nsample];
        float c2 = su2[nsample];
        su1[nsample] = c1 + c2;
        su2[nsample] = c1 - c2;
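The final pair of assignments is a plain sum/difference reconstruction; a standalone sketch of just that case, with dematrix_sum_diff() as a hypothetical helper (the other branches above additionally scale by 2 depending on the matrix coefficient indices s1/s2):

/* Hypothetical helper for the simplest matrixing case shown above: su1/su2
 * hold matrixed channel data and are rewritten in place as sum and difference. */
static void dematrix_sum_diff(float *su1, float *su2, int start, int end)
{
    for (int n = start; n < end; n++) {
        float c1 = su1[n];
        float c2 = su2[n];
        su1[n] = c1 + c2;
        su2[n] = c1 - c2;
    }
}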
/* get_channel_weights(): derive the left/right weights from a 3-bit index */
ch[0] = (index & 7) / 7.0;
ch[1] = sqrt(2 - ch[0] * ch[0]);
    FFSWAP(float, ch[0], ch[1]);
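A self-contained version of the visible weight derivation; channel_weights() is hypothetical, and gating the swap on flag is an assumption since the condition line is not part of the listing:

#include <math.h>

/* Hypothetical standalone sketch: the two weights lie on the curve
 * ch0^2 + ch1^2 == 2, and flag is assumed to select which channel gets the
 * larger weight. */
static void channel_weights(int index, int flag, float ch[2])
{
    ch[0] = (index & 7) / 7.0f;
    ch[1] = sqrtf(2.0f - ch[0] * ch[0]);
    if (flag) {
        float tmp = ch[0];
        ch[0] = ch[1];
        ch[1] = tmp;
    }
}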
/* channel_weighting(): apply the joint-stereo channel weights (an index of 7 means unity weights) */
if (p3[1] != 7 || p3[3] != 7) {

    for (band = 256; band < 4 * 256; band += 256) {
        for (nsample = band; nsample < band + 8; nsample++) {
            su1[nsample] *= INTERPOLATE(w[0][0], w[0][1], nsample - band);
            su2[nsample] *= INTERPOLATE(w[1][0], w[1][1], nsample - band);

        for (; nsample < band + 256; nsample++) {
            su1[nsample] *= w[1][0];
            su2[nsample] *= w[1][1];
/* decode_channel_sound_unit(): decode one Sound Unit (one channel of a frame) */
                                      int channel_num, int coding_mode)
int band, ret, num_subbands, last_tonal, num_bands;

if (coding_mode == JOINT_STEREO && (channel_num % 2) == 1) {

/* make sure the bands reached by tonal components are also synthesized */
num_bands = FFMAX((last_tonal + 256) >> 8, num_bands);

for (band = 0; band < 4; band++) {
    if (band <= num_bands)

            256, &output[band * 256]);
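A small illustration of the FFMAX() line above: the number of bands to synthesize must cover both the coded bands and the span touched by tonal components; bands_to_process() is a hypothetical helper:

/* Hypothetical helper echoing the FFMAX() line: take the larger of the number
 * of coded bands and the 256-sample band span covered by index last_tonal. */
static int bands_to_process(int last_tonal, int num_coded_bands)
{
    int tonal_bands = (last_tonal + 256) >> 8;   /* 256 samples per QMF band */
    return tonal_bands > num_coded_bands ? tonal_bands : num_coded_bands;
}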
/* decode_frame(): joint-stereo handling; the second channel's data is stored in
   reverse byte order at the end of the block */
const uint8_t *js_databuf;
int js_pair, js_block_align;

for (ch = 0; ch < channels; ch = ch + 2) {

    js_databuf = databuf + js_pair * js_block_align;

        js_databuf, js_block_align * 8);

    for (i = 0; i < js_block_align / 2; i++, ptr1++, ptr2--)
        FFSWAP(uint8_t, *ptr1, *ptr2);

    const uint8_t *ptr2 = js_databuf + js_block_align - 1;
    for (i = 0; i < js_block_align; i++)

    /* skip the 0xF8 sync codes preceding the second channel's data */
    for (i = 4; *ptr1 == 0xF8; i++, ptr1++) {
        if (i >= js_block_align)

for (i = 0; i < 4; i++) {

/* p1..p4 point at the four 256-sample subbands combined by the QMF synthesis */
float *p1 = out_samples[i];
float *p2 = p1 + 256;
float *p3 = p2 + 256;
float *p4 = p3 + 256;
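The byte-swap loop above amounts to reversing the second channel's data; a standalone sketch of the in-place variant, with reverse_bytes() as a hypothetical helper:

#include <stdint.h>
#include <stddef.h>

/* Reverse len bytes in place, mirroring the FFSWAP() loop used for the second
 * joint-stereo channel whose data is stored back-to-front. */
static void reverse_bytes(uint8_t *buf, size_t len)
{
    for (size_t i = 0; i < len / 2; i++) {
        uint8_t tmp = buf[i];
        buf[i] = buf[len - 1 - i];
        buf[len - 1 - i] = tmp;
    }
}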
/* al_decode_frame(): frame decoding for the ATRAC3 AL variant */
                          int size, float **out_samples)

float *p1 = out_samples[i];
float *p2 = p1 + 256;
float *p3 = p2 + 256;
float *p4 = p3 + 256;
/* atrac3_decode_frame(): per-packet entry point */
                              int *got_frame_ptr, AVPacket *avpkt)
const uint8_t *buf = avpkt->data;
int buf_size       = avpkt->size;
const uint8_t *databuf;

if (buf_size < avctx->block_align) {
        "Frame too small (%d bytes). Truncated file?\n", buf_size);
/* atrac3al_decode_frame(): per-packet entry point for the AL variant */
                                int *got_frame_ptr, AVPacket *avpkt)

    (float **)frame->extended_data);
/* atrac3_init_static_data(): build the seven spectral VLC tables once */
for (i = 0; i < 7; i++) {
        &hufftabs[0][0], 2, 1,
/* atrac3_decode_init(): parse the codec extradata (WAV-style little-endian or
   RealMedia-style big-endian layout) and set up the decoder */
int version, delay, samples_per_frame, frame_factor;
const uint8_t *edata_ptr = avctx->extradata;
float scale = 1.0 / 32768;

    bytestream_get_le16(&edata_ptr));
    bytestream_get_le16(&edata_ptr));
frame_factor = bytestream_get_le16(&edata_ptr);
    bytestream_get_le16(&edata_ptr));

version           = bytestream_get_be32(&edata_ptr);
samples_per_frame = bytestream_get_be16(&edata_ptr);
delay             = bytestream_get_be16(&edata_ptr);

if (delay != 0x88E) {

for (i = 0; i < 4; i++) {
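A hedged sketch of the big-endian branch using plain byte reads in place of FFmpeg's bytestream helpers; parse_be_extradata() is hypothetical, and the field order is taken from the sequential reads above:

#include <stdint.h>

/* Hypothetical standalone parse of the big-endian extradata fields read above:
 * 4-byte version, 2-byte samples per frame, 2-byte delay. */
static int parse_be_extradata(const uint8_t *p, unsigned *version,
                              int *samples_per_frame, int *delay)
{
    *version           = (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
    *samples_per_frame = p[4] << 8 | p[5];
    *delay             = p[6] << 8 | p[7];
    return *delay == 0x88E ? 0 : -1;   /* the listing rejects any other delay */
}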
#if FF_API_SUBFRAMES
                      AV_CODEC_CAP_SUBFRAMES |

.p.name         = "atrac3al",
CODEC_LONG_NAME("ATRAC3 AL (Adaptive TRansform Acoustic Coding 3 Advanced Lossless)"),

#if FF_API_SUBFRAMES
                      AV_CODEC_CAP_SUBFRAMES |
static const int8_t mantissa_vlc_tab[18]
@ AV_SAMPLE_FMT_FLTP
float, planar
int ff_vlc_init_from_lengths(VLC *vlc, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc2()
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
const FFCodec ff_atrac3_decoder
static int get_bits_left(GetBitContext *gb)
static void imlt(ATRAC3Context *q, float *input, float *output, int odd_band)
Regular 512-point IMDCT without overlapping, with the exception of the swapping of odd bands caused ...
This structure describes decoded (raw) audio or video data.
#define SAMPLES_PER_FRAME
float delay_buf1[46]
qmf delay buffers
static void channel_weighting(float *su1, float *su2, int *p3)
static const uint16_t table[]
int matrix_coeff_index_now[MAX_JS_PAIRS][4]
static int atrac3al_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt)
int nb_channels
Number of channels in this layout.
static const uint8_t clc_length_tab[8]
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
TonalComponent components[64]
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration. (i)MDCTs with an odd length are currently...
float spectrum[SAMPLES_PER_FRAME]
static void skip_bits(GetBitContext *s, int n)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
AVCodec p
The public AVCodec.
static const float inv_max_quant[8]
AVChannelLayout ch_layout
Audio channel layout.
int flags
AV_CODEC_FLAG_*.
static VLCElem atrac3_vlc_table[7 * 1 << ATRAC3_VLC_BITS]
av_cold void ff_atrac_init_gain_compensation(AtracGCContext *gctx, int id2exp_offset, int loc_scale)
Initialize gain compensation context.
static int ff_thread_once(char *control, void(*routine)(void))
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
static const int8_t mantissa_clc_tab[4]
static int decode_tonal_components(GetBitContext *gb, TonalComponent *components, int num_bands)
Restore the quantized tonal components.
@ AV_TX_FLOAT_MDCT
Standard MDCT with a sample data type of float, double or int32_t, respectively.
#define FF_CODEC_DECODE_CB(func)
int num_points
number of gain control points
static int get_sbits(GetBitContext *s, int n)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Gain compensation context structure.
static int decode_bytes(const uint8_t *input, uint8_t *out, int bytes)
static av_always_inline int cmp(MpegEncContext *s, const int x, const int y, const int subx, const int suby, const int size, const int h, int ref_index, int src_index, me_cmp_func cmp_func, me_cmp_func chroma_cmp_func, const int flags)
compares a block (either a full macroblock or a partition thereof) against a proposed motion-compensa...
#define CODEC_LONG_NAME(str)
static int add_tonal_components(float *spectrum, int num_components, TonalComponent *components)
Combine the tonal band spectrum and regular band spectrum.
@ AV_TX_FULL_IMDCT
Performs a full inverse MDCT rather than leaving out samples that can be derived through symmetry.
static unsigned int get_bits1(GetBitContext *s)
float ff_atrac_sf_table[64]
Gain control parameters for one subband.
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
static void read_quant_spectral_coeffs(GetBitContext *gb, int selector, int coding_flag, int *mantissas, int num_codes)
Mantissa decoding.
int matrix_coeff_index_next[MAX_JS_PAIRS][4]
static int atrac3_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt)
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define DECLARE_ALIGNED(n, t, v)
void(* vector_fmul)(float *dst, const float *src0, const float *src1, int len)
Calculate the entry wise product of two vectors of floats and store the result in a vector of floats.
enum AVSampleFormat sample_fmt
audio sample format
int loc_code[7]
location of gain control points
static av_cold void init_imdct_window(void)
float imdct_buf[SAMPLES_PER_FRAME]
static int decode_spectrum(GetBitContext *gb, float *output)
Restore the quantized band spectrum coefficients.
static void get_channel_weights(int index, int flag, float ch[2])
av_cold void ff_atrac_generate_tables(void)
Generate common tables.
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
#define i(width, name, range_min, range_max)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
int coding_mode
stream data
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
AVSampleFormat
Audio sample formats.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
const char * name
Name of the codec implementation.
static av_cold int atrac3_decode_close(AVCodecContext *avctx)
int scrambled_stream
extradata
void * av_calloc(size_t nmemb, size_t size)
static int decode_channel_sound_unit(ATRAC3Context *q, GetBitContext *gb, ChannelUnit *snd, float *output, int channel_num, int coding_mode)
Decode a Sound Unit.
static const uint8_t huff_tab_sizes[7]
static int decode_frame(AVCodecContext *avctx, const uint8_t *databuf, float **out_samples)
static av_cold int atrac3_decode_init(AVCodecContext *avctx)
int block_align
number of bytes per packet if constant and known, or 0. Used by some WAV-based audio codecs.
#define FFSWAP(type, a, b)
int lev_code[7]
level at corresponding control point
#define AV_INPUT_BUFFER_PADDING_SIZE
main external API structure.
void ff_atrac_gain_compensation(AtracGCContext *gctx, float *in, float *prev, AtracGainInfo *gc_now, AtracGainInfo *gc_next, int num_samples, float *out)
Apply gain compensation and perform the MDCT overlapping part.
static VLC spectral_coeff_tab[7]
static const uint16_t subband_tab[33]
static int al_decode_frame(AVCodecContext *avctx, const uint8_t *databuf, int size, float **out_samples)
float prev_frame[SAMPLES_PER_FRAME]
int matrix_coeff_index_prev[MAX_JS_PAIRS][4]
joint-stereo related variables
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
#define avpriv_request_sample(...)
static void scale(int *out, const int *in, const int w, const int h, const int shift)
This structure stores compressed data.
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
int weighting_delay[MAX_JS_PAIRS][6]
#define INTERPOLATE(old, new, nsample)
static void reverse_matrixing(float *su1, float *su2, int *prev_code, int *curr_code)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define VLC_INIT_USE_STATIC
static const float matrix_coeffs[8]
static av_cold void atrac3_init_static_data(void)
uint8_t * decoded_bytes_buffer
data buffers
const FFCodec ff_atrac3al_decoder
void ff_atrac_iqmf(float *inlo, float *inhi, unsigned int nIn, float *pOut, float *delayBuf, float *temp)
Quadrature mirror synthesis filter.
static int decode_gain_control(GetBitContext *gb, GainBlock *block, int num_bands)
Decode gain parameters for the coded bands.
static float mdct_window[MDCT_SIZE]
static const uint8_t atrac3_hufftabs[][2]