43 #define MUL(a,b) (((int64_t)(a) * (int64_t)(b)) >> FRAC_BITS)
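/* MUL is a standard fixed-point multiply: widen both operands to 64 bits,
 * multiply, then shift the product back down by FRAC_BITS so the result stays
 * in Q(FRAC_BITS) format.  A sketch of how it behaves; the Q15 values below
 * are chosen only for illustration, not the encoder's actual FRAC_BITS: */
static int64_t mul_example(void)
{
    const int half  = 1 << 14;   /* 0.5 in Q15 */
    const int three = 3 << 15;   /* 3.0 in Q15 */
    return MUL(half, three);     /* 1.5 in Q15 (49152) if FRAC_BITS == 15 */
}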
45 #define SAMPLES_BUF_SIZE 4096
68 float scale_factor_inv_table[64];
85 if (channels <= 0 || channels > 2){
86 av_log(avctx, AV_LOG_ERROR, "encoding %d channel(s) is not allowed in mp2\n", channels);
89 bitrate = bitrate / 1000;
142 ff_dlog(avctx, "%d kb/s, %d Hz, frame_size=%d bits, table=%d, padincr=%x\n",
162 v = (int)(exp2((3 - i) / 3.0) * (1 << 20));
167 s->scale_factor_inv_table[i] = exp2(-(3 - i) / 3.0) / (float)(1 << 20);
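/* The two tables built above are reciprocal views of one scale factor grid:
 * scale_factor_table[i] holds 2^((3-i)/3) in Q20 fixed point and
 * scale_factor_inv_table[i] holds 2^(-(3-i)/3) / 2^20, so adjacent entries
 * differ by a factor of 2^(1/3) (about 1.26, roughly 2 dB).  Illustrative
 * check only (not part of the encoder): */
static double scale_factor_roundtrip(int i)
{
    double t   = exp2((3 - i) / 3.0) * (1 << 20);
    double inv = exp2(-(3 - i) / 3.0) / (double)(1 << 20);
    return t * inv;              /* ~1.0 for any i in 0..63 */
}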
208 for(j=31;j>=3;j-=2) tab[j] += tab[j - 2];
252 x1 = MUL((t[8] - x2), xp[0]);
253 x2 = MUL((t[8] + x2), xp[1]);
266 xr = MUL(t[28],xp[0]);
270 xr = MUL(t[4],xp[1]);
271 t[ 4] = (t[24] - xr);
272 t[24] = (t[24] + xr);
274 xr = MUL(t[20],xp[2]);
278 xr = MUL(t[12],xp[3]);
279 t[12] = (t[16] - xr);
280 t[16] = (t[16] + xr);
285 for (i = 0; i < 4; i++) {
286 xr = MUL(tab[30-i*4],xp[0]);
287 tab[30-i*4] = (tab[i*4] - xr);
288 tab[ i*4] = (tab[i*4] + xr);
290 xr = MUL(tab[ 2+i*4],xp[1]);
291 tab[ 2+i*4] = (tab[28-i*4] - xr);
292 tab[28-i*4] = (tab[28-i*4] + xr);
294 xr = MUL(tab[31-i*4],xp[0]);
295 tab[31-i*4] = (tab[1+i*4] - xr);
296 tab[ 1+i*4] = (tab[1+i*4] + xr);
298 xr = MUL(tab[ 3+i*4],xp[1]);
299 tab[ 3+i*4] = (tab[29-i*4] - xr);
300 tab[29-i*4] = (tab[29-i*4] + xr);
308 xr = MUL(t1[0], *xp);
321 #define WSHIFT (WFRAC_BITS + 15 - FRAC_BITS)
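/* After windowing, each product of a 16-bit sample and a window coefficient
 * with WFRAC_BITS fractional bits carries WFRAC_BITS + 15 fractional bits;
 * shifting the accumulated sum right by WSHIFT leaves FRAC_BITS of them,
 * matching the fixed-point format used by the rest of the filterbank. */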
336 s->samples_buf[ch][offset + (31 - i)] = samples[0];
345 sum = p[0*64] * q[0*64];
346 sum += p[1*64] * q[1*64];
347 sum += p[2*64] * q[2*64];
348 sum += p[3*64] * q[3*64];
349 sum += p[4*64] * q[4*64];
350 sum += p[5*64] * q[5*64];
351 sum += p[6*64] * q[6*64];
352 sum += p[7*64] * q[7*64];
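/* The eight unrolled lines above compute the dot product of one polyphase
 * branch of the 512-tap analysis window: both the sample pointer p and the
 * window pointer q are stepped in strides of 64.  The same sum written as a
 * loop (a sketch with simplified types, not the encoder's actual helper): */
static int window_branch_sum(const short *p, const short *q)
{
    int sum = 0;
    for (int k = 0; k < 8; k++)
        sum += p[k * 64] * q[k * 64];
    return sum;
}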
357 tmp1[0] = tmp[16] >> WSHIFT;
358 for( i=1; i<=16; i++ ) tmp1[i] = (tmp[i+16]+tmp[16-i]) >> WSHIFT;
359 for( i=17; i<=31; i++ ) tmp1[i] = (tmp[i+16]-tmp[80-i]) >> WSHIFT;
377 unsigned char scale_code[SBLIMIT],
378 unsigned char scale_factors[SBLIMIT][3],
379 int sb_samples[3][12][SBLIMIT],
382 int *p, vmax, v, n, i, j, k, code;
384 unsigned char *sf = &scale_factors[0][0];
386 for(j=0;j<sblimit;j++) {
389 p = &sb_samples[i][0][j];
402 index = (21 - n) * 3 - 3;
404 while (vmax <= s->scale_factor_table[index+1])
426 switch(d1 * 5 + d2) {
458 sf[1] = sf[2] = sf[0];
463 sf[0] = sf[1] = sf[2];
469 sf[0] = sf[2] = sf[1];
475 sf[1] = sf[2] = sf[0];
483 sf[0], sf[1], sf[2], d1, d2, code);
484 scale_code[j] = code;
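/* d1 and d2 above each classify one scale factor difference (sf[0]-sf[1] and
 * sf[1]-sf[2], via scale_diff_table) into one of five classes, so d1*5+d2
 * packs the pair into a single case index 0..24.  Each case then decides
 * which of the three scale factors are actually kept (the assignments above
 * copy one value over the others) and which 2-bit scale_code is written.
 * Packing sketch only: */
static int scale_case(int d1, int d2)   /* d1, d2 in 0..4 */
{
    return d1 * 5 + d2;                 /* 25 distinct cases */
}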
502 #define SB_NOTALLOCATED 0
503 #define SB_ALLOCATED 1
514 int i, ch, b, max_smr, max_ch, max_sb, current_frame_size, max_frame_size;
518 const unsigned char *alloc;
520 memcpy(smr, smr1, s->nb_channels * sizeof(short) * SBLIMIT);
536 current_frame_size = 32;
550 if (smr[ch][i] > max_smr && subband_status[ch][i] != SB_NOMORE) {
551 max_smr = smr[ch][i];
559 ff_dlog(NULL, "current=%d max=%d max_sb=%d max_ch=%d alloc=%d\n",
560 current_frame_size, max_frame_size, max_sb, max_ch,
566 for(i=0;i<max_sb;i++) {
567 alloc += 1 << alloc[0];
581 if (current_frame_size + incr <= max_frame_size) {
584 current_frame_size += incr;
586 smr[max_ch][max_sb] = smr1[max_ch][max_sb] - quant_snr[alloc[b]];
588 if (b == ((1 << alloc[0]) - 1))
589 subband_status[max_ch][max_sb] = SB_NOMORE;
594 subband_status[max_ch][max_sb] = SB_NOMORE;
597 *padding = max_frame_size - current_frame_size;
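/* A minimal, self-contained sketch of the greedy loop implemented by the
 * fragments above (all names and table shapes below are simplified
 * placeholders, not the encoder's real data): keep giving one more
 * quantization step to the subband whose remaining signal-to-mask ratio is
 * worst, until no further increment fits into the frame. */
enum { N_SB = 8, MAX_STEP = 15 };

static void greedy_bit_alloc(const int smr_in[N_SB],
                             const int bits_per_step[MAX_STEP],
                             const int snr_per_step[MAX_STEP + 1],
                             int alloc[N_SB], int frame_bits, int *padding)
{
    int smr[N_SB], frozen[N_SB] = { 0 }, used = 32;   /* 32 header bits, as above */

    for (int i = 0; i < N_SB; i++) {
        smr[i]   = smr_in[i];
        alloc[i] = 0;
    }

    for (;;) {
        int best = -1;
        for (int i = 0; i < N_SB; i++)                /* pick the worst remaining band */
            if (!frozen[i] && (best < 0 || smr[i] > smr[best]))
                best = i;
        if (best < 0)
            break;                                    /* every band is frozen */

        int incr = bits_per_step[alloc[best]];
        if (used + incr > frame_bits) {
            frozen[best] = 1;                         /* like SB_NOMORE above */
            continue;
        }
        used += incr;
        alloc[best]++;
        smr[best] = smr_in[best] - snr_per_step[alloc[best]];
        if (alloc[best] == MAX_STEP)
            frozen[best] = 1;                         /* largest quantizer reached */
    }
    *padding = frame_bits - used;                     /* leftover bits become padding */
}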
609 int i, j, k, l, bit_alloc_bits, b, ch;
637 j += 1 << bit_alloc_bits;
692 a = (float)sample * s->scale_factor_inv_table[s->scale_factors[ch][i][k]];
693 q[m] = (int)((a + 1.0) * steps * 0.5);
704 q1 = sample << (-shift);
706 q1 = sample >> shift;
707 q1 = (q1 * mult) >> P;
711 q[m] = (q1 * (unsigned)steps) >> (P + 1);
722 q[0] + steps * (q[1] + steps * q[2]));
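/* For the small quantizers (3, 5 and 9 steps) three consecutive samples are
 * packed into a single base-`steps` code word, which is what the expression
 * above builds: code = q[0] + steps*(q[1] + steps*q[2]).  With steps == 3 the
 * code lies in 0..26 and needs 5 bits instead of the 6 bits three separate
 * 2-bit fields would take.  Decoding side of the same packing (sketch): */
static void unpack_grouped(int code, int steps, int q[3])
{
    q[0] = code % steps; code /= steps;
    q[1] = code % steps; code /= steps;
    q[2] = code;
}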
731 j += 1 << bit_alloc_bits;
737 for(i=0;i<padding;i++)
748 const int16_t *samples = (const int16_t *)frame->data[0];
#define MPA_MAX_CODED_FRAME_SIZE
static int shift(int a, int b)
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
Write the bit value val, n times.
unsigned char scale_diff_table[128]
static const unsigned char nb_scale_factors[4]
unsigned short scale_factor_mult[64]
unsigned short total_quant_bits[17]
const int ff_mpa_quant_bits[17]
static const uint8_t q1[256]
mpeg audio layer common tables.
const int32_t ff_mpa_enwindow[257]
static int MPA_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static const AVCodecDefault mp2_defaults[]
#define av_assert2(cond)
assert() equivalent that may be used in speed-critical code.
static const int costab32[30]
const int ff_mpa_quant_steps[17]
int scale_factor_table[64]
const uint16_t avpriv_mpa_freq_tab[3]
const unsigned char *const ff_mpa_alloc_tables[5]
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3]
mpeg audio layer 2 tables.
static void compute_bit_allocation(MpegAudioContext *s, short smr1[MPA_MAX_CHANNELS][SBLIMIT], unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT], int *padding)
unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT]
static int bit_alloc(AC3EncodeContext *s, int snr_offset)
Run the bit allocation with a given SNR offset.
static const unsigned short quant_snr[17]
static av_cold int MPA_encode_init(AVCodecContext *avctx)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static void compute_scale_factors(MpegAudioContext *s, unsigned char scale_code[SBLIMIT], unsigned char scale_factors[SBLIMIT][3], int sb_samples[3][12][SBLIMIT], int sblimit)
static const struct endianess table[]
int initial_padding
Audio only.
static const int bitinv32[32]
static const uint8_t offset[127][2]
Libavcodec external API header.
static int put_bits_count(PutBitContext *s)
const unsigned char * alloc_table
int8_t scale_factor_shift[64]
int bit_rate
the average bitrate
audio channel layout utility functions
static void encode_frame(MpegAudioContext *s, unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT], int padding)
int frame_size
Number of samples per channel in an audio frame.
int samples_offset[MPA_MAX_CHANNELS]
int sample_rate
samples per second
static const float fixed_smr[SBLIMIT]
main external API structure.
static int16_t mult(Float11 *f1, Float11 *f2)
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
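A typical encoder-side call pattern for this helper, sketched with the worst-case size macro referenced elsewhere in this file (error handling simplified):

    int ret = ff_alloc_packet2(avctx, avpkt, MPA_MAX_CODED_FRAME_SIZE, 0);
    if (ret < 0)
        return ret;                /* packet buffer could not be provided */
    /* ... the encoded frame is then written into avpkt->data ... */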
short samples_buf[MPA_MAX_CHANNELS][SAMPLES_BUF_SIZE]
static void idct32(int *out, int *tab)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static void filter(MpegAudioContext *s, int ch, const short *samples, int incr)
common internal api header.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
mpeg audio declarations for both encoder and decoder.
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
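Together with put_bits(), put_bits_count() and flush_put_bits() listed here, the usual write pattern is the following (a minimal sketch using the generic PutBitContext API from put_bits.h, not the Jpeg2000-specific put_bits shown above):

    uint8_t buf[MPA_MAX_CODED_FRAME_SIZE];
    PutBitContext pb;

    init_put_bits(&pb, buf, sizeof(buf));    /* direct the writer at buf */
    put_bits(&pb, 12, 0xfff);                /* e.g. a sync pattern */
    put_bits(&pb, 2, 0);                     /* ... further header fields ... */
    flush_put_bits(&pb);                     /* zero-pad the final byte */
    int bytes_used = put_bits_count(&pb) >> 3;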
const int ff_mpa_sblimit_table[5]
static void psycho_acoustic_model(MpegAudioContext *s, short smr[SBLIMIT])
int ff_mpa_l2_select_table(int bitrate, int nb_channels, int freq, int lsf)
int channels
number of audio channels
static const struct twinvq_data tab
const uint16_t avpriv_mpa_bitrate_tab[2][3][15]
static av_always_inline int64_t ff_samples_to_time_base(AVCodecContext *avctx, int64_t samples)
Rescale from sample rate to AVCodecContext.time_base.
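The rescaling is the usual rational conversion from a sample count to time_base ticks, roughly equivalent to the following (a sketch, not the verbatim implementation):

    pts = av_rescale_q(nb_samples, (AVRational){ 1, avctx->sample_rate }, avctx->time_base);

For example, with sample_rate = 44100 and time_base = 1/90000, a 1152-sample MP2 frame advances the pts by round(1152 * 90000 / 44100) = 2351 ticks.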
int sb_samples[MPA_MAX_CHANNELS][3][12][SBLIMIT]
This structure stores compressed data.
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
#define AV_NOPTS_VALUE
Undefined timestamp value.