float two_cos_w = 2.0f * cos_val;

for (j = 0; j + 1 < order; j += 2 * 2) {
    q *= lsp[j]     - two_cos_w;
    p *= lsp[j + 1] - two_cos_w;

    q *= lsp[j + 2] - two_cos_w;
    p *= lsp[j + 3] - two_cos_w;
}

p *= p * (2.0f - two_cos_w);
q *= q * (2.0f + two_cos_w);
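Up to constant factors coming from the initial values of p and q (not shown in the fragment), and assuming lsp[] already holds 2*cos of the line-spectral frequencies (see the lsp[i] = 2 * cos(lsp[i]) conversion further down), the loop evaluates one envelope coefficient per frequency roughly as:

\[
P(\omega) \propto (2 - 2\cos\omega)\prod_{j\ \mathrm{odd}}\bigl(\mathrm{lsp}[j] - 2\cos\omega\bigr)^{2},\qquad
Q(\omega) \propto (2 + 2\cos\omega)\prod_{j\ \mathrm{even}}\bigl(\mathrm{lsp}[j] - 2\cos\omega\bigr)^{2},
\]
\[
\mathrm{env}(\omega) \propto \frac{1}{P(\omega) + Q(\omega)}
\]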
for (i = 0; i < size_s / 2; i++) {
float *out, const float *in,

const float *buf, float *lpc,

const int16_t *cb0, const int16_t *cb1, int cb_len)
for (i = 0; i < tctx->n_div[ftype]; i++) {
    const int16_t *tab0, *tab1;

    tab0 = cb0 + tmp0 * cb_len;
    tab1 = cb1 + tmp1 * cb_len;

    for (j = 0; j < length; j++)
        out[tctx->permut[ftype][pos + j]] = sign0 * tab0[j] +
                                            sign1 * tab1[j];
out[i] = (1.0 / (1 << 13)) *

float val = (1.0 / (1 << 23)) *

for (j = 0; j < sub; j++)

    sub_step * bits->sub_gain_bits[i * sub + j],
float min_dist2 = min_dist * 0.5;
for (i = 1; i < order; i++)
    if (lsp[i] - lsp[i - 1] < min_dist) {
        float avg = (lsp[i] + lsp[i - 1]) * 0.5;

        lsp[i - 1] = avg - min_dist2;
        lsp[i]     = avg + min_dist2;
    }
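A small self-contained check of the fragment above; the wrapper main(), the input values and min_dist = 0.05f are made up for illustration:

#include <stdio.h>

/* same logic as the rearrange_lsp fragment above */
static void rearrange_lsp(int order, float *lsp, float min_dist)
{
    int i;
    float min_dist2 = min_dist * 0.5;
    for (i = 1; i < order; i++)
        if (lsp[i] - lsp[i - 1] < min_dist) {
            float avg = (lsp[i] + lsp[i - 1]) * 0.5;

            lsp[i - 1] = avg - min_dist2;
            lsp[i]     = avg + min_dist2;
        }
}

int main(void)
{
    float lsp[3] = { 0.10f, 0.11f, 0.30f };

    rearrange_lsp(3, lsp, 0.05f);
    /* the first pair was too close and is pushed apart around its average:
     * prints 0.080 0.130 0.300 */
    printf("%.3f %.3f %.3f\n", lsp[0], lsp[1], lsp[2]);
    return 0;
}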
int lpc_hist_idx, float *lsp, float *hist)
const float *cb3 = cb2 + (1 << mtab->lsp_bit2) * mtab->n_lsp;

const int8_t funny_rounding[4] = {

lsp[j] = cb[lpc_idx1 * mtab->n_lsp + j] +
         cb2[lpc_idx2[i] * mtab->n_lsp + j];

float tmp1 = 1.0 - cb3[lpc_hist_idx * mtab->n_lsp + i];
float tmp2 = hist[i] * cb3[lpc_hist_idx * mtab->n_lsp + i];

lsp[i] = lsp[i] * tmp1 + tmp2;

lsp[i] = 2 * cos(lsp[i]);
int wtype, float *in, float *prev, int ch)
int j, first_wsize, wsize;

int types_sizes[] = {

prev_buf = prev + (size - bsize) / 2;

for (j = 0; j < mtab->fmode[ftype].sub; j++) {

    if (!j && wtype == 4)

    else if (j == mtab->fmode[ftype].sub - 1 && wtype == 7)

    mdct->imdct_half(mdct, buf1 + bsize * j, in + bsize * j);

    ff_sine_windows[av_log2(wsize)],

    memcpy(out2, buf1 + bsize * j + wsize / 2,
           (bsize - wsize / 2) * sizeof(float));

    prev_buf = buf1 + bsize * j + bsize / 2;

prev_buf + 2 * i * mtab->size,
size1 = mtab->size - size2;

memcpy(out1, prev_buf, size1 * sizeof(*out1));
memcpy(out1 + size1, tctx->curr_frame, size2 * sizeof(*out1));

memcpy(out2, &prev_buf[2 * mtab->size],
       size1 * sizeof(*out2));

       size2 * sizeof(*out2));
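Pieced together from these memcpy calls, the first output channel is assembled as size1 samples of the delayed previous-frame buffer followed by size2 samples of the current frame, with size1 + size2 = mtab->size; schematically:

out1: | prev_buf[0 .. size1-1] | tctx->curr_frame[0 .. size2-1] |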
int block_size = mtab->size / sub;

float *chunk = out + mtab->size * i;

for (j = 0; j < sub; j++) {

    bits->bark_use_hist[i][j], i,
    tctx->tmp_buf, gain[sub * i + j], ftype);

    chunk + block_size * j,

for (j = 0; j < mtab->fmode[ftype].sub; j++) {
int *got_frame_ptr, AVPacket *avpkt)

int buf_size = avpkt->size;

out = (float **)frame->extended_data;

if (buf_size < avctx->block_align) {
    av_log(avctx, AV_LOG_ERROR,
           "Frame too small (%d bytes). Truncated file?\n", buf_size);
float norm = channels == 1 ? 2.0 : 1.0;

for (i = 0; i < 3; i++) {

    -sqrt(norm / bsize) / (1 << 15))))

for (i = 0; i < 3; i++) {

    double freq = 2 * M_PI / m;

    (m / 4), sizeof(*tctx->cos_tabs[i]), alloc_fail);

    for (j = 0; j <= m / 8; j++)
        tctx->cos_tabs[i][j] = cos((2 * j + 1) * freq);
    for (j = 1; j < m / 8; j++)
const uint8_t line_len[2], int length_div,

for (i = 0; i < line_len[0]; i++) {

    if (num_blocks == 1 ||

    for (j = 0; j < num_vect && (j + num_vect * i < block_size * num_blocks); j++)
        tab[i * num_vect + j] = i * num_vect + (j + shift) % num_vect;
const uint8_t line_len[2], int length_div)

for (i = 0; i < num_vect; i++)
    for (j = 0; j < line_len[i >= length_div]; j++)
        out[cont++] = in[j * num_vect + i];
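A tiny standalone demo of the column-wise read above; the sizes (num_vect = 3, line_len = {2, 2}, length_div = 3) are invented for illustration and are not values taken from any TwinVQ mode table:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const int16_t in[] = { 0, 1, 2,    /* row 0 */
                           3, 4, 5 };  /* row 1 */
    int16_t out[6];
    const uint8_t line_len[2] = { 2, 2 };
    int num_vect = 3, length_div = 3;
    int i, j, cont = 0;

    /* same double loop as the fragment above: read column by column */
    for (i = 0; i < num_vect; i++)
        for (j = 0; j < line_len[i >= length_div]; j++)
            out[cont++] = in[j * num_vect + i];

    for (i = 0; i < cont; i++)
        printf("%d ", out[i]);      /* prints: 0 3 1 4 2 5 */
    printf("\n");
    return 0;
}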
int block_size = size / n_blocks;

out[i] = block_size * (in[i] % n_blocks) + in[i] / n_blocks;
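Worked example with made-up sizes (n_blocks = 2, size = 6, so block_size = 3), feeding the identity permutation through the line above:

in  = 0 1 2 3 4 5
out = 0 3 1 4 2 5        (out[i] = 3 * (in[i] % 2) + in[i] / 2)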
int block_size, size;

int16_t *tmp_perm = (int16_t *)tctx->tmp_buf;

block_size, tctx->length[ftype],
int bsize_no_main_cb[3], bse_bits[3], i;

for (i = 0; i < 3; i++)

bsize_no_main_cb[2] = bse_bits[2] + lsp_bits_per_block + ppc_bits +

for (i = 0; i < 2; i++)
    bsize_no_main_cb[i] =

bsize_no_main_cb[1] += 2;
bsize_no_main_cb[2] += 2;

for (i = 0; i < 4; i++) {
    int bit_size, vect_size;
    int rounded_up, rounded_down, num_rounded_down, num_rounded_up;

    bit_size  = total_fr_bits - bsize_no_main_cb[i];
    vect_size = n_ch * mtab->size;

    tctx->n_div[i] = (bit_size + 13) / 14;

    rounded_up       = (bit_size + tctx->n_div[i] - 1) / tctx->n_div[i];
    rounded_down     = (bit_size) / tctx->n_div[i];
    num_rounded_down = rounded_up * tctx->n_div[i] - bit_size;
    num_rounded_up   = tctx->n_div[i] - num_rounded_down;

    rounded_up       = (vect_size + tctx->n_div[i] - 1) / tctx->n_div[i];
    rounded_down     = (vect_size) / tctx->n_div[i];
    num_rounded_down = rounded_up * tctx->n_div[i] - vect_size;
    num_rounded_up   = tctx->n_div[i] - num_rounded_down;
    tctx->length[i][0] = rounded_up;
    tctx->length[i][1] = rounded_down;

for (i = 0; i < 3; i++) {
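The ceiling/floor split above guarantees that the per-division chunk sizes add back up to the total. A throwaway check with a made-up bit_size (not a value taken from any mode table):

#include <assert.h>

int main(void)
{
    int bit_size = 100;                                     /* made-up total  */
    int n_div            = (bit_size + 13) / 14;            /* 8 divisions    */
    int rounded_up       = (bit_size + n_div - 1) / n_div;  /* 13 bits        */
    int rounded_down     = bit_size / n_div;                /* 12 bits        */
    int num_rounded_down = rounded_up * n_div - bit_size;   /* 4 short chunks */
    int num_rounded_up   = n_div - num_rounded_down;        /* 4 long chunks  */

    /* 4 * 13 + 4 * 12 == 100: nothing is lost by the split */
    assert(num_rounded_up * rounded_up +
           num_rounded_down * rounded_down == bit_size);
    return 0;
}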
int64_t frames_per_packet;

if (frames_per_packet <= 0) {
const float * lspcodebook
@ AV_SAMPLE_FMT_FLTP
float, planar
static void linear_perm(int16_t *out, int16_t *in, int n_blocks, int size)
#define TWINVQ_SUB_AMP_MAX
const TwinVQModeTab * mtab
void AAC_RENAME() ff_init_ff_sine_windows(int index)
initialize the specified entry of ff_sine_windows
uint8_t sub
Number of sub-blocks in each frame.
int sample_rate
samples per second
#define FFSWAP(type, a, b)
uint8_t bits_main_spec[2][4][2]
bits for the main codebook
static double cb(void *priv, double x, double y)
static void dequant(TwinVQContext *tctx, const uint8_t *cb_bits, float *out, enum TwinVQFrameType ftype, const int16_t *cb0, const int16_t *cb1, int cb_len)
Inverse quantization.
static void decode_lsp(TwinVQContext *tctx, int lpc_idx1, uint8_t *lpc_idx2, int lpc_hist_idx, float *lsp, float *hist)
float * curr_frame
non-interleaved output
This structure describes decoded (raw) audio or video data.
TwinVQFrameData bits[TWINVQ_MAX_FRAMES_PER_PACKET]
uint8_t ppc_shape_len
size of PPC shape CB
av_cold int ff_twinvq_decode_init(AVCodecContext *avctx)
static void imdct_output(TwinVQContext *tctx, enum TwinVQFrameType ftype, int wtype, float **out, int offset)
void ff_sort_nearly_sorted_floats(float *vals, int len)
Sort values in ascending order.
uint8_t pgain_bit
bits for PPC gain
#define TWINVQ_PPC_SHAPE_CB_SIZE
void(* butterflies_float)(float *av_restrict v1, float *av_restrict v2, int len)
Calculate the sum and difference of two vectors of floats.
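A plain scalar sketch of what this description says; the real routine is provided by AVFloatDSPContext and is typically SIMD-optimised, and the reference name and the sum/difference output convention below are assumptions:

/* hypothetical scalar reference: v1 <- v1 + v2, v2 <- v1 - v2 (element-wise) */
static void butterflies_float_ref(float *v1, float *v2, int len)
{
    for (int i = 0; i < len; i++) {
        float a = v1[i], b = v2[i];
        v1[i] = a + b;
        v2[i] = a - b;
    }
}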
const int16_t * cb0
main codebooks for spectrum data
#define TWINVQ_WINDOW_TYPE_BITS
uint8_t bark_n_coef
number of BSE CB coefficients to read
static const struct twinvq_data tab
int flags
AV_CODEC_FLAG_*.
uint16_t size
frame size in samples
static float twinvq_mulawinv(float y, float clip, float mu)
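No brief is attached to this one; assuming it implements the standard inverse mu-law (mu-law expansion) with the input normalised by clip, the mapping would be roughly:

\[
\mathrm{mulawinv}(y) \approx \mathrm{clip}\cdot\operatorname{sign}(y)\,
\frac{(1+\mu)^{\lvert y/\mathrm{clip}\rvert} - 1}{\mu}
\]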
void(* dec_bark_env)(struct TwinVQContext *tctx, const uint8_t *in, int use_hist, int ch, float *out, float gain, enum TwinVQFrameType ftype)
int ff_twinvq_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
void(* decode_ppc)(struct TwinVQContext *tctx, int period_coef, int g_coef, const float *shape, float *speech)
static int chunk_end(AVFormatContext *s, int flush)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static const uint8_t wtype_to_wsize[]
float lsp_hist[2][20]
LSP coefficients of the last frame.
uint8_t ppc_shape_bit
number of bits of the PPC shape CB coeffs
static void rearrange_lsp(int order, float *lsp, float min_dist)
Rearrange the LSP coefficients so that they have a minimum distance of min_dist.
#define TWINVQ_LSP_COEFS_MAX
@ TWINVQ_FT_MEDIUM
Medium frame (divided in m<n sub-blocks)
Parameters and tables that are different for every combination of bitrate/sample rate.
static void twinvq_memset_float(float *buf, float val, int size)
float * prev_frame
non-interleaved previous frame
@ TWINVQ_FT_PPC
Periodic Peak Component (part of the long frame)
static void eval_lpcenv(TwinVQContext *tctx, const float *cos_vals, float *lpc)
Evaluate the LPC amplitude spectrum envelope from the line spectrum pairs.
av_cold int ff_twinvq_decode_close(AVCodecContext *avctx)
uint8_t length[4][2]
main codebook stride
int64_t bit_rate
the average bitrate
static float get_cos(int idx, int part, const float *cos_tab, int size)
static float eval_lpc_spectrum(const float *lsp, float cos_val, int order)
Evaluate a single LPC amplitude spectrum envelope coefficient from the line spectrum pairs.
static void transpose_perm(int16_t *out, int16_t *in, int num_vect, const uint8_t line_len[2], int length_div)
Interpret the input data as in the following table:
void(* imdct_half)(struct FFTContext *s, FFTSample *output, const FFTSample *input)
static void dec_lpc_spectrum_inv(TwinVQContext *tctx, float *lsp, enum TwinVQFrameType ftype, float *lpc)
static void read_and_decode_spectrum(TwinVQContext *tctx, float *out, enum TwinVQFrameType ftype)
@ TWINVQ_FT_LONG
Long frame (single sub-block + PPC)
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
void(* vector_fmul)(float *dst, const float *src0, const float *src1, int len)
Calculate the entry wise product of two vectors of floats and store the result in a vector of floats.
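Scalar reference matching the description above; the real callback comes from AVFloatDSPContext, and the helper name here is made up:

/* dst[i] = src0[i] * src1[i] for i in [0, len) */
static void vector_fmul_ref(float *dst, const float *src0,
                            const float *src1, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] = src0[i] * src1[i];
}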
enum AVSampleFormat sample_fmt
audio sample format
uint8_t bark_n_bit
number of bits of the BSE coefs
uint8_t ppc_period_bit
number of the bits for the PPC period value
int bits_main_spec_change[4]
#define TWINVQ_PPC_SHAPE_LEN_MAX
#define TWINVQ_CHANNELS_MAX
const int16_t * ppc_shape_cb
PPC shape CB.
static av_cold void init_bitstream_params(TwinVQContext *tctx)
static av_cold int init_mdct_win(TwinVQContext *tctx)
Init IMDCT and windowing tables.
static void interpolate(float *out, float v1, float v2, int size)
int(* read_bitstream)(AVCodecContext *avctx, struct TwinVQContext *tctx, const uint8_t *buf, int buf_size)
int channels
number of audio channels
#define TWINVQ_SUB_GAIN_BITS
#define i(width, name, range_min, range_max)
float bark_hist[3][2][40]
BSE coefficients of last frame.
static void permutate_in_line(int16_t *tab, int num_vect, int num_blocks, int block_size, const uint8_t line_len[2], int length_div, enum TwinVQFrameType ftype)
Interpret the data as if it were a num_blocks x line_len[0] matrix and for each line do a cyclic perm...
int block_align
Number of bytes per packet, if constant and known, or 0. Used by some WAV-based audio codecs.
@ TWINVQ_FT_SHORT
Short frame (divided in n sub-blocks)
#define FF_ARRAY_ELEMS(a)
static void imdct_and_window(TwinVQContext *tctx, enum TwinVQFrameType ftype, int wtype, float *in, float *prev, int ch)
main external API structure.
static av_cold void construct_perm_table(TwinVQContext *tctx, enum TwinVQFrameType ftype)
uint8_t cb_len_read
number of spectrum coefficients to read
enum TwinVQFrameType ftype
#define FF_ALLOC_ARRAY_OR_GOTO(ctx, p, nelem, elsize, label)
static void eval_lpcenv_or_interp(TwinVQContext *tctx, enum TwinVQFrameType ftype, float *out, const float *in, int size, int step, int part)
Evaluate the LPC amplitude spectrum envelope from the line spectrum pairs.
static int shift(int a, int b)
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
static void dec_gain(TwinVQContext *tctx, enum TwinVQFrameType ftype, float *out)
static void eval_lpcenv_2parts(TwinVQContext *tctx, enum TwinVQFrameType ftype, const float *buf, float *lpc, int size, int step)
void(* vector_fmul_window)(float *dst, const float *src0, const float *src1, const float *win, int len)
Overlap/add with window function.
This structure stores compressed data.
#define TWINVQ_SUBBLOCKS_MAX
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
#define TWINVQ_MAX_FRAMES_PER_PACKET
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static float cos_tab[256]
uint8_t lsp_split
number of CB entries for the LSP decoding
enum TwinVQFrameType ff_twinvq_wtype_to_ftype_table[]
struct TwinVQFrameMode fmode[3]
frame type-dependent parameters
uint8_t n_lsp
number of lsp coefficients