60 #define MONO 0x1000001
61 #define STEREO 0x1000002
62 #define JOINT_STEREO 0x1000003
63 #define MC_COOK 0x2000000
65 #define SUBBAND_SIZE 20
66 #define MAX_SUBPACKETS 5
108 int *subband_coef_index, int *subband_coef_sign,
115 float *decode_buffer,
116 float *mlt_buffer1, float *mlt_buffer2);
119 cook_gains *gains_ptr, float *previous_buffer);
122 int gain_index, int gain_index_next);
171 static const float exp2_tab[2] = {1, M_SQRT2};
172 float exp2_val = powf(2, -63);
173 float root_val = powf(2, -32);
174 for (i = -63; i < 64; i++) {
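/* Illustrative sketch, not part of the file (helper name is made up): the
 * loop at line 174 fills the two tables declared elsewhere in this listing
 * (pow2tab[127], rootpow2tab[127]) so that pow2tab[i + 63] == 2^i and
 * rootpow2tab[i + 63] == sqrt(2^i) for -63 <= i < 64, while avoiding a
 * powf() call per iteration. The direct, slower equivalent would be: */
#include <math.h>

static float pow2tab[127];
static float rootpow2tab[127];

static void init_pow2table_direct(void)
{
    for (int i = -63; i < 64; i++) {
        pow2tab[i + 63]     = powf(2.0f, (float) i);
        rootpow2tab[i + 63] = sqrtf(powf(2.0f, (float) i));
    }
}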
187 q->gain_size_factor = q->samples_per_channel / 8;
188 for (i = 0; i < 31; i++)
190 (1.0 / (double) q->gain_size_factor));
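/* Hedged sketch: the assignment on line 189 is elided above, but given the
 * surviving exponent (1.0 / gain_size_factor) and the 31-entry loop, the
 * gain table plausibly stores fractional powers of two,
 * gain_table[i] = (2^(i - 15))^(1 / gain_size_factor). Standalone helper for
 * illustration only; the name and float* parameter are assumptions. */
#include <math.h>

static void init_gain_table_sketch(float *gain_table, int gain_size_factor)
{
    for (int i = 0; i < 31; i++)
        gain_table[i] = pow(pow(2.0, i - 15), 1.0 / (double) gain_size_factor);
}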
199 for (i = 0; i < 13; i++) {
205 for (i = 0; i < 7; i++) {
211 for (i = 0; i < q->num_subpackets; i++) {
212 if (q->subpacket[i].joint_stereo == 1) {
214 (1 << q->subpacket[i].js_vlc_bits) - 1,
228 int mlt_size = q->samples_per_channel;
230 if ((q->mlt_window = av_malloc_array(mlt_size, sizeof(*q->mlt_window))) == 0)
235 for (j = 0; j < mlt_size; j++)
236 q->mlt_window[j] *= sqrt(2.0 / q->samples_per_channel);
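/* Context sketch (assumption): lines 234-236 build the IMLT window. If line
 * 234 uses the usual ff_sine_window_init() half-sine window -- an assumption,
 * since that line is elided -- the combined window is
 * mlt_window[j] = sin((j + 0.5) * pi / (2 * N)) * sqrt(2 / N),
 * with N = samples_per_channel. Illustrative helper only: */
#include <math.h>

static void build_mlt_window_sketch(float *mlt_window, int n)
{
    for (int j = 0; j < n; j++)
        mlt_window[j] = sinf((j + 0.5f) * (float) M_PI / (2.0f * n))
                        * sqrtf(2.0f / n);
}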
252 for (i = 0; i < 5; i++)
258 #define DECODE_BYTES_PAD1(bytes) (3 - ((bytes) + 3) % 4)
259 #define DECODE_BYTES_PAD2(bytes) ((bytes) % 4 + DECODE_BYTES_PAD1(2 * (bytes)))
283 static const uint32_t tab[4] = {
290 uint32_t *obuf = (uint32_t *) out;
297 off = (intptr_t) inbuffer & 3;
298 buf = (const uint32_t *) (inbuffer - off);
301 for (i = 0; i < bytes / 4; i++)
302 obuf[i] = c ^ buf[i];
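/* Hedged sketch of the descrambling documented for decode_bytes(): "every
 * 32 bits are XORed with 0x37c511f2". The routine above works on aligned
 * 32-bit words and rotates the constant according to the input alignment;
 * this byte-wise version ignores alignment and padding and only shows the
 * XOR itself (helper name is made up). */
#include <stddef.h>
#include <stdint.h>

static void descramble_bytes_sketch(const uint8_t *in, uint8_t *out, size_t bytes)
{
    static const uint8_t key[4] = { 0x37, 0xc5, 0x11, 0xf2 }; /* 0x37c511f2, big-endian */

    for (size_t i = 0; i < bytes; i++)
        out[i] = in[i] ^ key[i & 3];
}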
321 for (i = 0; i < 13; i++)
323 for (i = 0; i < 7; i++)
325 for (i = 0; i < q->num_subpackets; i++)
351 gaininfo[i++] = gain;
364 int *quant_index_table)
368 quant_index_table[0] = get_bits(&q->gb, 6) - 6;
382 j = get_vlc2(&q->gb, q->envelope_quant_index[vlc_index - 1].table,
383 q->envelope_quant_index[vlc_index - 1].bits, 2);
384 quant_index_table[i] = quant_index_table[i - 1] + j - 12;
385 if (quant_index_table[i] > 63 || quant_index_table[i] < -63) {
387 "Invalid quantizer %d at position %d, outside [-63, 63] range\n",
388 quant_index_table[i], i);
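/* Hedged sketch of the envelope decoding above (decode_envelope): the first
 * quantizer index is a 6-bit field biased by -6, every following index is a
 * VLC-coded delta biased by -12 added to the previous value, and the running
 * value must stay inside [-63, 63]. read_bits6() and vlc_decode_delta() are
 * hypothetical stand-ins for the get_bits()/get_vlc2() calls. */
static int decode_envelope_sketch(int *quant_index_table, int total_subbands,
                                  int (*read_bits6)(void),
                                  int (*vlc_decode_delta)(int subband))
{
    quant_index_table[0] = read_bits6() - 6;
    for (int i = 1; i < total_subbands; i++) {
        int delta = vlc_decode_delta(i);
        quant_index_table[i] = quant_index_table[i - 1] + delta - 12;
        if (quant_index_table[i] > 63 || quant_index_table[i] < -63)
            return -1;                      /* corrupt bitstream */
    }
    return 0;
}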
407 int exp_idx, bias, tmpbias1, tmpbias2, bits_left, num_bits, index, v, i, j;
408 int exp_index2[102] = { 0 };
409 int exp_index1[102] = { 0 };
411 int tmp_categorize_array[128 * 2] = { 0 };
417 if (bits_left > q->samples_per_channel)
418 bits_left = q->samples_per_channel +
419 ((bits_left - q->samples_per_channel) * 5) / 8;
424 for (i = 32; i > 0; i = i / 2) {
428 exp_idx = av_clip_uintp2((i - quant_index_table[index] + bias) / 2, 3);
432 if (num_bits >= bits_left - 32)
439 exp_idx = av_clip_uintp2((bias - quant_index_table[i]) / 2, 3);
441 exp_index1[i] = exp_idx;
442 exp_index2[i] = exp_idx;
444 tmpbias1 = tmpbias2 = num_bits;
447 if (tmpbias1 + tmpbias2 > 2 * bits_left) {
451 if (exp_index1[i] < 7) {
452 v = (-2 * exp_index1[i]) - quant_index_table[i] + bias;
461 tmp_categorize_array[tmp_categorize_array1_idx++] = index;
469 if (exp_index2[i] > 0) {
470 v = (-2 * exp_index2[i]) - quant_index_table[i] + bias;
479 tmp_categorize_array[--tmp_categorize_array2_idx] = index;
490 category_index[i] = tmp_categorize_array[tmp_categorize_array2_idx++];
505 for (i = 0; i < q->num_vectors; i++)
507 int idx = category_index[i];
524 int *subband_coef_index, int *subband_coef_sign,
531 if (subband_coef_index[i]) {
533 if (subband_coef_sign[i])
538 if (av_lfg_get(&q->random_state) < 0x80000000)
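/* Hedged sketch of the requantization above (scalar_dequant_float): coded
 * coefficients take a centroid value, negated when the sign bit was set;
 * uncoded coefficients get a dither value with a random sign from the LFG;
 * both are then scaled by sqrt(2^quant_index) (an assumption here, since the
 * scaling line is elided above). centroid(), dither() and rand_u32() are
 * hypothetical stand-ins for the quant_centroid_tab / dither_tab /
 * av_lfg_get() accesses. */
#include <math.h>

static void scalar_dequant_sketch(int index, int quant_index, int n,
                                  const int *coef_index, const int *coef_sign,
                                  float *mlt_p,
                                  float (*centroid)(int table, int idx),
                                  float (*dither)(int table),
                                  unsigned (*rand_u32)(void))
{
    for (int i = 0; i < n; i++) {
        float f1;

        if (coef_index[i]) {
            f1 = centroid(index, coef_index[i]);
            if (coef_sign[i])
                f1 = -f1;                   /* explicit sign bit       */
        } else {
            f1 = dither(index);             /* noise-coded coefficient */
            if (rand_u32() < 0x80000000u)
                f1 = -f1;                   /* random sign             */
        }
        mlt_p[i] = f1 * sqrtf(powf(2.0f, (float) quant_index));
    }
}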
553 int *subband_coef_index, int *subband_coef_sign)
566 for (j = vd - 1; j >= 0; j--) {
571 for (j = 0; j < vd; j++) {
572 if (subband_coef_index[i * vd + j]) {
574 subband_coef_sign[i * vd + j] = get_bits1(&q->gb);
577 subband_coef_sign[i * vd + j] = 0;
580 subband_coef_sign[i * vd + j] = 0;
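/* Hedged sketch of the index unpacking in unpack_SQVH(): one VLC symbol packs
 * vd quantizer indices, each in 0..kmax, as the digits of a base-(kmax + 1)
 * number, and the loop at line 566 peels them off least-significant digit
 * first. The real code uses a precomputed fixed-point reciprocal
 * (invradix_tab) instead of the plain division/modulo shown here. */
static void split_vlc_symbol_sketch(int vlc, int vd, int kmax, int *coef_index)
{
    for (int j = vd - 1; j >= 0; j--) {
        coef_index[j] = vlc % (kmax + 1);   /* current least-significant digit */
        vlc          /= (kmax + 1);         /* drop that digit                 */
    }
}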
597 int *quant_index_table, float *mlt_buffer)
618 memset(subband_coef_index, 0, sizeof(subband_coef_index));
619 memset(subband_coef_sign, 0, sizeof(subband_coef_sign));
621 q->scalar_dequant(q, index, quant_index_table[band],
622 subband_coef_index, subband_coef_sign,
634 int category_index[128] = { 0 };
636 int quant_index_table[102];
663 int gain_index, int gain_index_next)
667 fc1 = pow2tab[gain_index + 63];
669 if (gain_index == gain_index_next) {
670 for (i = 0; i < q->gain_size_factor; i++)
673 fc2 = q->gain_table[15 + (gain_index_next - gain_index)];
674 for (i = 0; i < q->gain_size_factor; i++) {
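/* Hedged sketch of interpolate_float() above: each of the gain_size_factor
 * samples of a block is scaled by a factor that starts at 2^gain_index; when
 * the gain changes, the factor is multiplied per sample by a constant step
 * (gain_table[15 + delta], roughly 2^(delta / gain_size_factor) if the gain
 * table holds fractional powers of two as sketched earlier), giving an
 * exponential ramp towards 2^gain_index_next. */
#include <math.h>

static void interpolate_gain_sketch(float *buffer, int n,
                                    int gain_index, int gain_index_next)
{
    float fc1 = powf(2.0f, (float) gain_index);

    if (gain_index == gain_index_next) {            /* constant gain    */
        for (int i = 0; i < n; i++)
            buffer[i] *= fc1;
    } else {                                        /* exponential ramp */
        float fc2 = powf(2.0f, (gain_index_next - gain_index) / (float) n);
        for (int i = 0; i < n; i++) {
            buffer[i] *= fc1;
            fc1       *= fc2;
        }
    }
}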
690 cook_gains *gains_ptr, float *previous_buffer)
701 for (i = 0; i < q->samples_per_channel; i++)
702 inbuffer[i] = inbuffer[i] * fc * q->mlt_window[i] -
703 previous_buffer[i] * q->mlt_window[q->samples_per_channel - 1 - i];
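/* Hedged sketch of the overlap step on lines 702-703 (imlt_window_float):
 * the fresh IMDCT output is weighted with the rising half of mlt_window and
 * the saved previous block is subtracted with the time-reversed (falling)
 * half. fc is a gain factor; in the function above it is derived from the
 * previous block's gain index, whose exact definition is elided here. */
static void imlt_window_sketch(float *inbuffer, const float *previous_buffer,
                               const float *mlt_window, int n, float fc)
{
    for (int i = 0; i < n; i++)
        inbuffer[i] = inbuffer[i] * fc * mlt_window[i]
                      - previous_buffer[i] * mlt_window[n - 1 - i];
}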
718 cook_gains *gains_ptr, float *previous_buffer)
720 float *buffer0 = q->mono_mdct_output;
721 float *buffer1 = q->mono_mdct_output + q->samples_per_channel;
725 q->mdct_ctx.imdct_calc(&q->mdct_ctx, q->mono_mdct_output, inbuffer);
727 q->imlt_window(q, buffer1, gains_ptr, previous_buffer);
730 for (i = 0; i < 8; i++)
731 if (gains_ptr->now[i] || gains_ptr->now[i + 1])
732 q->interpolate(q, &buffer1[q->gain_size_factor * i],
733 gains_ptr->now[i], gains_ptr->now[i + 1]);
736 memcpy(previous_buffer, buffer0,
737 q->samples_per_channel * sizeof(*previous_buffer));
753 int length = end - start + 1;
759 for (i = 0; i < length; i++)
760 decouple_tab[start + i] = get_vlc2(&q->gb,
764 for (i = 0; i < length; i++) {
770 decouple_tab[start + i] = v;
790 float *decode_buffer,
791 float *mlt_buffer1, float *mlt_buffer2)
796 mlt_buffer1[SUBBAND_SIZE * subband + j] = f1 * decode_buffer[tmp_idx];
797 mlt_buffer2[SUBBAND_SIZE * subband + j] = f2 * decode_buffer[tmp_idx];
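/* Hedged sketch of the joint-stereo decoupling above (decouple_float): for
 * each coupled subband a single decoded signal is split into left and right
 * by two scale factors f1 and f2, which joint_decode() below takes from the
 * coupling-scale table (cplscale) via the per-subband decouple index (the
 * line defining f2 is elided in this listing). subband_size corresponds to
 * SUBBAND_SIZE (20). */
static void decouple_subband_sketch(int subband_size, int subband,
                                    float f1, float f2,
                                    const float *decode_buffer, int src_offset,
                                    float *left, float *right)
{
    for (int j = 0; j < subband_size; j++) {
        float mid = decode_buffer[src_offset + j];  /* shared mono signal */

        left[subband_size * subband + j]  = f1 * mid;
        right[subband_size * subband + j] = f2 * mid;
    }
}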
809 float *mlt_buffer_left, float *mlt_buffer_right)
813 float *decode_buffer = q->decode_buffer_0;
816 const float *cplscale;
818 memset(decode_buffer, 0, sizeof(q->decode_buffer_0));
821 memset(mlt_buffer_left, 0, 1024 * sizeof(*mlt_buffer_left));
822 memset(mlt_buffer_right, 0, 1024 * sizeof(*mlt_buffer_right));
830 mlt_buffer_left[i * 20 + j] = decode_buffer[i * 40 + j];
831 mlt_buffer_right[i * 20 + j] = decode_buffer[i * 40 + 20 + j];
840 idx -= decouple_tab[cpl_tmp];
842 f1 = cplscale[decouple_tab[cpl_tmp] + 1];
844 q->decouple(q, p, i, f1, f2, decode_buffer,
845 mlt_buffer_left, mlt_buffer_right);
884 q->adsp.vector_clipf(out, q->mono_mdct_output + q->samples_per_channel,
885 FFALIGN(q->samples_per_channel, 8), -1.0f, 1.0f);
901 cook_gains *gains_ptr, float *previous_buffer,
904 imlt_gain(q, decode_buffer, gains_ptr, previous_buffer);
906 q->saturate_output(q, out);
919 const uint8_t *inbuffer, float **outbuffer)
921 int sub_packet_size = p->size;
924 memset(q->decode_buffer_1, 0, sizeof(q->decode_buffer_1));
928 if ((res = joint_decode(q, p, q->decode_buffer_1, q->decode_buffer_2)) < 0)
931 if ((res = mono_decode(q, p, q->decode_buffer_1)) < 0)
936 if ((res = mono_decode(q, p, q->decode_buffer_2)) < 0)
961 int *got_frame_ptr, AVPacket *avpkt)
965 int buf_size = avpkt->size;
972 if (buf_size < avctx->block_align)
976 if (q->discarded_packets >= 2) {
977 frame->nb_samples = q->samples_per_channel;
986 for (i = 1; i < q->num_subpackets; i++) {
988 q->subpacket[0].size -= q->subpacket[i].size + 1;
989 if (q->subpacket[0].size < 0) {
991 "frame subpacket size total > avctx->block_align!\n");
997 for (i = 0; i < q->num_subpackets; i++) {
998 q->subpacket[i].bits_per_subpacket = (q->subpacket[i].size * 8) >>
999 q->subpacket[i].bits_per_subpdiv;
1000 q->subpacket[i].ch_idx = chidx;
1002 "subpacket[%i] size %i js %i %i block_align %i\n",
1003 i, q->subpacket[i].size, q->subpacket[i].joint_stereo, offset,
1008 offset += q->subpacket[i].size;
1009 chidx += q->subpacket[i].num_channels;
1015 if (q->discarded_packets < 2) {
1016 q->discarded_packets++;
1029 #define PRINT(a, b) ff_dlog(q->avctx, " %s = %d\n", a, b);
1030 ff_dlog(q->avctx, "COOKextradata\n");
1031 ff_dlog(q->avctx, "cookversion=%x\n", q->subpacket[0].cookversion);
1032 if (q->subpacket[0].cookversion > STEREO) {
1033 PRINT("js_subband_start", q->subpacket[0].js_subband_start);
1034 PRINT("js_vlc_bits", q->subpacket[0].js_vlc_bits);
1036 ff_dlog(q->avctx, "COOKContext\n");
1037 PRINT("nb_channels", q->avctx->channels);
1038 PRINT("bit_rate", (int)q->avctx->bit_rate);
1039 PRINT("sample_rate", q->avctx->sample_rate);
1040 PRINT("samples_per_channel", q->subpacket[0].samples_per_channel);
1041 PRINT("subbands", q->subpacket[0].subbands);
1042 PRINT("js_subband_start", q->subpacket[0].js_subband_start);
1043 PRINT("log2_numvector_size", q->subpacket[0].log2_numvector_size);
1044 PRINT("numvector_size", q->subpacket[0].numvector_size);
1045 PRINT("total_subbands", q->subpacket[0].total_subbands);
1058 unsigned int channel_mask = 0;
1059 int samples_per_frame = 0;
1093 q->subpacket[s].cookversion = bytestream2_get_be32(&gb);
1094 samples_per_frame = bytestream2_get_be16(&gb);
1095 q->subpacket[s].subbands = bytestream2_get_be16(&gb);
1096 bytestream2_get_be32(&gb);
1097 q->subpacket[s].js_subband_start = bytestream2_get_be16(&gb);
1098 if (q->subpacket[s].js_subband_start >= 51) {
1102 q->subpacket[s].js_vlc_bits = bytestream2_get_be16(&gb);
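/* Hedged sketch of the per-stream extradata layout read on lines 1093-1102:
 * all fields are big-endian, one 32-bit word after "subbands" is skipped, and
 * the joint-stereo fields only matter for joint-stereo cookversion values.
 * The struct below is illustrative only; field names mirror the COOKSubpacket
 * members assigned above. */
#include <stdint.h>

struct cook_extradata_sketch {
    uint32_t cookversion;        /* be32: MONO / STEREO / JOINT_STEREO / MC_COOK */
    uint16_t samples_per_frame;  /* be16 */
    uint16_t subbands;           /* be16 */
    uint32_t unused;             /* be32: read and discarded */
    uint16_t js_subband_start;   /* be16: must be < 51 */
    uint16_t js_vlc_bits;        /* be16 */
};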
1105 q->subpacket[s].samples_per_channel = samples_per_frame / avctx->channels;
1109 q->subpacket[s].log2_numvector_size = 5;
1110 q->subpacket[s].total_subbands = q->subpacket[s].subbands;
1111 q->subpacket[s].num_channels = 1;
1116 q->subpacket[s].cookversion);
1117 q->subpacket[s].joint_stereo = 0;
1118 switch (q->subpacket[s].cookversion) {
1128 q->subpacket[s].bits_per_subpdiv = 1;
1129 q->subpacket[s].num_channels = 2;
1140 q->subpacket[s].total_subbands = q->subpacket[s].subbands +
1141 q->subpacket[s].js_subband_start;
1142 q->subpacket[s].joint_stereo = 1;
1143 q->subpacket[s].num_channels = 2;
1145 if (q->subpacket[s].samples_per_channel > 256) {
1146 q->subpacket[s].log2_numvector_size = 6;
1148 if (q->subpacket[s].samples_per_channel > 512) {
1149 q->subpacket[s].log2_numvector_size = 7;
1154 channel_mask |= q->subpacket[s].channel_mask = bytestream2_get_be32(&gb);
1157 q->subpacket[s].total_subbands = q->subpacket[s].subbands +
1158 q->subpacket[s].js_subband_start;
1159 q->subpacket[s].joint_stereo = 1;
1160 q->subpacket[s].num_channels = 2;
1161 q->subpacket[s].samples_per_channel = samples_per_frame >> 1;
1163 if (q->subpacket[s].samples_per_channel > 256) {
1164 q->subpacket[s].log2_numvector_size = 6;
1166 if (q->subpacket[s].samples_per_channel > 512) {
1167 q->subpacket[s].log2_numvector_size = 7;
1170 q->subpacket[s].samples_per_channel = samples_per_frame;
1175 q->subpacket[s].cookversion);
1179 if (s > 1 && q->subpacket[s].samples_per_channel != q->samples_per_channel) {
1183 q->samples_per_channel = q->subpacket[0].samples_per_channel;
1187 q->subpacket[s].numvector_size = (1 << q->subpacket[s].log2_numvector_size);
1190 if (q->subpacket[s].total_subbands > 53) {
1195 if ((q->subpacket[s].js_vlc_bits > 6) ||
1196 (q->subpacket[s].js_vlc_bits < 2 * q->subpacket[s].joint_stereo)) {
1198 q->subpacket[s].js_vlc_bits, 2 * q->subpacket[s].joint_stereo);
1202 if (q->subpacket[s].subbands > 50) {
1206 if (q->subpacket[s].subbands == 0) {
1210 q->subpacket[s].gains1.now = q->subpacket[s].gain_1;
1211 q->subpacket[s].gains1.previous = q->subpacket[s].gain_2;
1212 q->subpacket[s].gains2.now = q->subpacket[s].gain_3;
1213 q->subpacket[s].gains2.previous = q->subpacket[s].gain_4;
1215 if (q->num_subpackets + q->subpacket[s].num_channels > q->avctx->channels) {
1216 av_log(avctx, AV_LOG_ERROR, "Too many subpackets %d for channels %d\n", q->num_subpackets, q->avctx->channels);
1220 q->num_subpackets++;
1225 if (q->samples_per_channel != 256 && q->samples_per_channel != 512 &&
1226 q->samples_per_channel != 1024) {
1228 q->samples_per_channel);
1243 q->decoded_bytes_buffer =
1247 if (!q->decoded_bytes_buffer)
1280 .priv_data_size = sizeof(COOKContext),
static void decode_vectors(COOKContext *q, COOKSubpacket *p, int *category, int *quant_index_table, float *mlt_buffer)
Fill the mlt_buffer with mlt coefficients.
static int mono_decode(COOKContext *q, COOKSubpacket *p, float *mlt_buffer)
AV_SAMPLE_FMT_FLTP
float, planar
static const uint16_t *const cvh_huffcodes[7]
static av_cold int init(AVCodecContext *avctx)
static int get_bits_left(GetBitContext *gb)
uint64_t channel_layout
Audio channel layout.
static av_cold int cook_decode_init(AVCodecContext *avctx)
Cook initialization.
#define FFSWAP(type, a, b)
av_cold void av_lfg_init(AVLFG *c, unsigned int seed)
static enum AVSampleFormat sample_fmts[]
static av_cold void init_pow2table(void)
static void decode_gain_info(GetBitContext *gb, int *gaininfo)
Fill the gain array for the timedomain quantization.
#define AV_CH_LAYOUT_MONO
static int cook_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
void(* interpolate)(struct cook *q, float *buffer, int gain_index, int gain_index_next)
static int get_bits_count(const GetBitContext *s)
static int unpack_SQVH(COOKContext *q, COOKSubpacket *p, int category, int *subband_coef_index, int *subband_coef_sign)
Unpack the subband_coef_index and subband_coef_sign vectors.
static av_cold int end(AVCodecContext *avctx)
AVFrame: This structure describes decoded (raw) audio or video data.
#define init_vlc(vlc, nb_bits, nb_codes, bits, bits_wrap, bits_size, codes, codes_wrap, codes_size, flags)
static void scalar_dequant_float(COOKContext *q, int index, int quant_index, int *subband_coef_index, int *subband_coef_sign, float *mlt_p)
The real requantization of the MLT coefficients.
static const uint16_t envelope_quant_index_huffcodes[13][24]
av_cold void ff_audiodsp_init(AudioDSPContext *c)
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
void(* scalar_dequant)(struct cook *q, int index, int quant_index, int *subband_coef_index, int *subband_coef_sign, float *mlt_p)
static av_cold int cook_decode_close(AVCodecContext *avctx)
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
#define DECODE_BYTES_PAD1(bytes)
static av_cold int init_cook_mlt(COOKContext *q)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static const int expbits_tab[8]
static const uint8_t *const cvh_huffbits[7]
static int decode_bytes(const uint8_t *inbuffer, uint8_t *out, int bytes)
Cook indata decoding, every 32 bits are XORed with 0x37c511f2.
static int decode_subpacket(COOKContext *q, COOKSubpacket *p, const uint8_t *inbuffer, float **outbuffer)
Cook subpacket decoding.
static const float dither_tab[9]
#define AV_CH_LAYOUT_STEREO
static av_cold void init_cplscales_table(COOKContext *q)
static void saturate_output_float(COOKContext *q, float *out)
Saturate the output signal and interleave.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static const float quant_centroid_tab[7][14]
const float * cplscales[5]
static av_cold int init_cook_vlc_tables(COOKContext *q)
static unsigned int av_lfg_get(AVLFG *c)
Get the next random unsigned 32-bit number using an ALFG.
static const int vhvlcsize_tab[7]
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
void(* decouple)(struct cook *q, COOKSubpacket *p, int subband, float f1, float f2, float *decode_buffer, float *mlt_buffer1, float *mlt_buffer2)
void ff_free_vlc(VLC *vlc)
float decode_buffer_1[1024]
static const int vd_tab[7]
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static void imlt_window_float(COOKContext *q, float *inbuffer, cook_gains *gains_ptr, float *previous_buffer)
Apply transform window, overlap buffers.
static void decouple_float(COOKContext *q, COOKSubpacket *p, int subband, float f1, float f2, float *decode_buffer, float *mlt_buffer1, float *mlt_buffer2)
function decouples a pair of signals from a single signal via multiplication.
static unsigned int get_bits1(GetBitContext *s)
static void dump_cook_context(COOKContext *q)
static void mlt_compensate_output(COOKContext *q, float *decode_buffer, cook_gains *gains_ptr, float *previous_buffer, float *out)
Final part of subpacket decoding: Apply modulated lapped transform, gain compensation,...
static const int vpr_tab[7]
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
static int get_unary(GetBitContext *gb, int stop, int len)
Get unary code of limited length.
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
COOKSubpacket subpacket[MAX_SUBPACKETS]
static const float *const cplscales[5]
AVLFG: Context structure for the Lagged Fibonacci PRNG.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
enum AVSampleFormat sample_fmt
audio sample format
static void imlt_gain(COOKContext *q, float *inbuffer, cook_gains *gains_ptr, float *previous_buffer)
The modulated lapped transform, this takes transform coefficients and transforms them into timedomain...
unsigned int channel_mask
int channels
number of audio channels
static const int kmax_tab[7]
#define DECLARE_ALIGNED(n, t, v)
float mono_previous_buffer1[1024]
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
void AAC_RENAME() ff_sine_window_init(INTFLOAT *window, int n)
Generate a sine window.
#define av_malloc_array(a, b)
AVSampleFormat
Audio sample formats.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
static const int vhsize_tab[7]
const char * name
Name of the codec implementation.
static int decode_envelope(COOKContext *q, COOKSubpacket *p, int *quant_index_table)
Create the quant index table needed for the envelope.
static int decouple_info(COOKContext *q, COOKSubpacket *p, int *decouple_tab)
function for getting the jointstereo coupling information
static float pow2tab[127]
int block_align
Number of bytes per packet if constant and known, or 0. Used by some WAV-based audio codecs.
static const uint8_t *const ccpl_huffbits[5]
static void interpolate_float(COOKContext *q, float *buffer, int gain_index, int gain_index_next)
the actual requantization of the timedomain samples
#define AV_INPUT_BUFFER_PADDING_SIZE
#define FF_ARRAY_ELEMS(a)
AVCodecContext: main external API structure.
static float rootpow2tab[127]
float mono_previous_buffer2[1024]
uint8_t * decoded_bytes_buffer
static const uint8_t envelope_quant_index_huffbits[13][24]
static av_cold void init_gain_table(COOKContext *q)
static int joint_decode(COOKContext *q, COOKSubpacket *p, float *mlt_buffer_left, float *mlt_buffer_right)
function for decoding joint stereo data
VLC envelope_quant_index[13]
void(* imlt_window)(struct cook *q, float *buffer1, cook_gains *gains_ptr, float *previous_buffer)
#define avpriv_request_sample(...)
AVPacket: This structure stores compressed data.
void(* saturate_output)(struct cook *q, float *out)
float decode_buffer_0[1060]
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
static void categorize(COOKContext *q, COOKSubpacket *p, const int *quant_index_table, int *category, int *category_index)
Calculate the category and category_index vector.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static const uint16_t *const ccpl_huffcodes[5]
static const int cplband[51]
float mono_mdct_output[2048]
float decode_buffer_2[1024]
VLC_TYPE(* table)[2]
code, bits
static void expand_category(COOKContext *q, int *category, int *category_index)
Expand the category vector.
static const int invradix_tab[7]
static void decode_bytes_and_gain(COOKContext *q, COOKSubpacket *p, const uint8_t *inbuffer, cook_gains *gains_ptr)
First part of subpacket decoding: decode raw stream bytes and read gain info.