25 #include "config_components.h"
56 f->mant = i? (i<<6) >> f->exp : 1<<5;
65 res = (((f1->mant * f2->mant) + 0x30) >> 4);
66 res = exp > 19 ? res << (exp - 19) : res >> (19 - exp);
67 return (f1->sign ^ f2->sign) ? -res : res;
72 return (value < 0) ? -1 : 1;
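Lines 56-72 above are surviving fragments of the Float11 helpers i2f(), mult() and sgn() (per the cross-references at the end of this page: a sign flag, a 4-bit exponent and a 6-bit mantissa). Below is a minimal standalone sketch of how the pieces fit together; the struct layout, the sign/exponent lines and the av_log2_16bit() stand-in are reconstructed for illustration rather than copied from the file.

/* Standalone sketch of the Float11 helpers; only lines 56 and 65-67
 * are taken from the listing, the rest is reconstructed. */
#include <stdint.h>
#include <stdio.h>

typedef struct Float11 {
    uint8_t sign;   /* 1 bit sign      */
    uint8_t exp;    /* 4 bits exponent */
    uint8_t mant;   /* 6 bits mantissa */
} Float11;

static int log2_16bit(unsigned v)   /* stand-in for av_log2_16bit() */
{
    int n = 0;
    while (v >>= 1)
        n++;
    return n;
}

static Float11 *i2f(int i, Float11 *f)
{
    f->sign = (i < 0);
    if (f->sign)
        i = -i;
    f->exp  = log2_16bit(i) + !!i;
    f->mant = i ? (i << 6) >> f->exp : 1 << 5;               /* line 56 */
    return f;
}

static int16_t mult(Float11 *f1, Float11 *f2)
{
    int res, exp;
    exp = f1->exp + f2->exp;
    res = (((f1->mant * f2->mant) + 0x30) >> 4);             /* line 65 */
    res = exp > 19 ? res << (exp - 19) : res >> (19 - exp);  /* line 66 */
    return (f1->sign ^ f2->sign) ? -res : res;               /* line 67 */
}

int main(void)
{
    Float11 f, a, b;
    i2f(300, &f);
    /* 300 -> sign 0, exp 9, mant 37; mant * 2^(exp - 6) = 296, i.e. a
     * 6-bit approximation of the magnitude. */
    printf("sign=%u exp=%u mant=%u approx=%d\n",
           f.sign, f.exp, f.mant, (f.mant << f.exp) >> 6);
    /* mult() returns the product of the two approximated values scaled
     * down (roughly by 2^11) to fit the predictor's coefficient format. */
    printf("mult(100, -300) = %d\n", mult(i2f(100, &a), i2f(-300, &b)));
    return 0;
}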
109 { 116, 365, 365, 116 };
111 { -22, 439, 439, -22 };
116 { 7, 217, 330, INT_MAX };
118 { INT16_MIN, 135, 273, 373, 373, 273, 135, INT16_MIN };
120 { -4, 30, 137, 582, 582, 137, 30, -4 };
122 { 0, 1, 2, 7, 7, 2, 1, 0 };
125 { -125, 79, 177, 245, 299, 348, 399, INT_MAX };
127 { INT16_MIN, 4, 135, 213, 273, 323, 373, 425,
128 425, 373, 323, 273, 213, 135, 4, INT16_MIN };
130 { -12, 18, 41, 64, 112, 198, 355, 1122,
131 1122, 355, 198, 112, 64, 41, 18, -12};
133 { 0, 0, 0, 1, 1, 1, 3, 7, 7, 3, 1, 1, 1, 0, 0, 0 };
136 { -122, -16, 67, 138, 197, 249, 297, 338,
137 377, 412, 444, 474, 501, 527, 552, INT_MAX };
139 { INT16_MIN, -66, 28, 104, 169, 224, 274, 318,
140 358, 395, 429, 459, 488, 514, 539, 566,
141 566, 539, 514, 488, 459, 429, 395, 358,
142 318, 274, 224, 169, 104, 28, -66, INT16_MIN };
144 { 14, 14, 24, 39, 40, 41, 58, 100,
145 141, 179, 219, 280, 358, 440, 529, 696,
146 696, 529, 440, 358, 280, 219, 179, 141,
147 100, 58, 41, 40, 39, 24, 14, 14 };
149 { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 6,
150 6, 6, 5, 4, 3, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 };
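The bare initializer rows above (listing lines 109-150) are the per-bitrate table set whose declaration lines were lost in extraction: each mode has a decision-level table (quant_tbl16/24/32/40), an inverse quantizer table (iquant_tbl*), a scale-factor adaptation table (W_tbl*) and a speed-control table (F_tbl*), all named in the cross-references at the end of this page. The following self-contained sketch regroups the 24 kbit/s rows the way the G726Tables / G726Tables_pool cross-references suggest; the field names come from those cross-references, while the grouping itself is illustrative.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

typedef struct G726Tables {
    const int     *quant;   /* quantization table                           */
    const int16_t *iquant;  /* inverse quantization table                   */
    const int16_t *W;       /* scale factor adaptation ("special table #1") */
    const uint8_t *F;       /* speed control ("special table #2")           */
} G726Tables;

/* 24 kbit/s (3 bits per sample) tables, values copied from the rows above. */
static const int     quant_tbl24[]  = { 7, 217, 330, INT_MAX };
static const int16_t iquant_tbl24[] = { INT16_MIN, 135, 273, 373,
                                        373, 273, 135, INT16_MIN };
static const int16_t W_tbl24[]      = { -4, 30, 137, 582, 582, 137, 30, -4 };
static const uint8_t F_tbl24[]      = { 0, 1, 2, 7, 7, 2, 1, 0 };

static const G726Tables pool_3bit = { quant_tbl24, iquant_tbl24, W_tbl24, F_tbl24 };

int main(void)
{
    const G726Tables *t = &pool_3bit;
    printf("top decision level %d, largest W step %d\n", t->quant[2], t->W[3]);
    return 0;
}

In the file itself, G726Tables_pool presumably holds one such entry per code size (2 to 5 bits per sample) and c->tbls is picked with an index of code_size - 2, the same indexing used for frame_size at line 347.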
164 int sign, exp, i, dln;
172 dln = ((exp<<7) + (((d<<7)>>exp)&0x7f)) - (c->y>>2);
174 while (c->tbls.quant[i] < INT_MAX && c->tbls.quant[i] < dln)
179 if (c->code_size != 2 && i == 0)
192 dql = c->tbls.iquant[i] + (c->y >> 2);
193 dex = (dql>>7) & 0xf;
194 dqt = (1<<7) + (dql & 0x7f);
195 return (dql < 0) ? 0 : ((dqt<<dex) >> 7);
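Listing lines 164-195 above belong to quant() and inverse_quant() (the adaptive quantizer and inverse adaptive quantizer of G.726 paragraphs 4.2.2 and 4.2.3, per the cross-references): the difference signal is converted to a Q7 base-2 logarithm, reduced by a quarter of the scale factor y, compared against the decision levels, and the inverse step applies the matching antilog. Here is a hedged, standalone round trip using the 32 kbit/s tables from the listing; sign handling, the G726Context plumbing and special cases such as line 179 are omitted, and the search-loop increment is reconstructed.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static const int     quant_tbl32[]  = { -125, 79, 177, 245, 299, 348, 399, INT_MAX };
static const int16_t iquant_tbl32[] = { INT16_MIN,   4, 135, 213, 273, 323, 373, 425,
                                              425, 373, 323, 273, 213, 135,   4, INT16_MIN };

static int log2_16bit(unsigned v) { int n = 0; while (v >>= 1) n++; return n; }

/* Magnitude-only model of quant(): dln is log2(d) in Q7 minus y/4. */
static int quantize_magnitude(int d, int y)
{
    int exp = log2_16bit(d);
    int dln = ((exp << 7) + (((d << 7) >> exp) & 0x7f)) - (y >> 2);  /* line 172 */
    int i   = 0;
    while (quant_tbl32[i] < INT_MAX && quant_tbl32[i] < dln)         /* line 174 */
        i++;
    return i;
}

/* Magnitude-only model of inverse_quant(): table value plus y/4, then antilog. */
static int inverse_quantize(int i, int y)
{
    int dql = iquant_tbl32[i] + (y >> 2);        /* line 192 */
    int dex = (dql >> 7) & 0xf;                  /* line 193 */
    int dqt = (1 << 7) + (dql & 0x7f);           /* line 194 */
    return (dql < 0) ? 0 : ((dqt << dex) >> 7);  /* line 195 */
}

int main(void)
{
    int y = 2048;   /* a mid-range scale factor; yu is clipped to 544..5120 at line 261 */
    int d = 100;    /* example difference-signal magnitude */
    int I = quantize_magnitude(d, y);
    printf("d=%d (y=%d) -> I=%d -> dq=%d\n", d, y, I, inverse_quantize(I, y));
    return 0;
}

With these inputs the program prints I = 5 and a reconstructed magnitude of 97; the match is deliberately coarse, since the 32 kbit/s mode spends only 3 bits on the magnitude.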
200 int dq, re_signal, pk0, fa1, i, tr, ylint, ylfrac, thr2, al, dq0;
202 int I_sig= I >> (c->code_size - 1);
207 ylint = (c->yl >> 15);
208 ylfrac = (c->yl >> 10) & 0x1f;
209 thr2 = (ylint > 9) ? 0x1f << 10 : (0x20 + ylfrac) << ylint;
210 tr= (c->td == 1 && dq > ((3*thr2)>>2));
214 re_signal = (int16_t)(c->se + dq);
217 pk0 = (c->sez + dq) ? sgn(c->sez + dq) : 0;
218 dq0 = dq ? sgn(dq) : 0;
228 c->a[1] += 128*pk0*c->pk[1] + fa1 - (c->a[1]>>7);
229 c->a[1] = av_clip(c->a[1], -12288, 12288);
230 c->a[0] += 64*3*pk0*c->pk[0] - (c->a[0] >> 8);
231 c->a[0] = av_clip(c->a[0], -(15360 - c->a[1]), 15360 - c->a[1]);
234 c->b[i] += 128*dq0*sgn(-c->dq[i].sign) - (c->b[i]>>8);
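Listing lines 228-234 above adapt the predictor with leaky sign-sign updates: a[1] is clamped to +/-12288 (0.75 in Q14) and a[0] to +/-(15360 - a[1]), the usual stability bounds for the second-order section, while each zero coefficient b[i] takes a +/-128 step with a 1/256 leak. A small worked illustration of that leaky update, shown as a standalone loop:

#include <stdio.h>

int main(void)
{
    /* Same shape as line 234 with dq0 * sgn(-c->dq[i].sign) held at +1:
     * add 128 each step and leak 1/256 of the current value. */
    int b = 0;
    for (int n = 0; n < 2000; n++)
        b += 128 * 1 - (b >> 8);
    /* Converges to 32768, the fixed point where the 128 step exactly
     * cancels the b >> 8 leakage. */
    printf("b settles at %d\n", b);
    return 0;
}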
239 c->pk[0] = pk0 ? pk0 : 1;
241 i2f(re_signal, &c->sr[0]);
243 c->dq[i] = c->dq[i-1];
245 c->dq[0].sign = I_sig;
247 c->td = c->a[1] < -11776;
250 c->dms += (c->tbls.F[I]<<4) + ((-c->dms) >> 5);
251 c->dml += (c->tbls.F[I]<<4) + ((-c->dml) >> 7);
255 c->ap += (-c->ap) >> 4;
256 if (c->y <= 1535 || c->td || abs((c->dms << 2) - c->dml) >= (c->dml >> 3))
261 c->yu = av_clip(c->y + c->tbls.W[I] + ((-c->y)>>5), 544, 5120);
262 c->yl += c->yu + ((-c->yl)>>6);
265 al = (c->ap >= 256) ? 1<<6 : c->ap >> 2;
266 c->y = (c->yl + (c->yu - (c->yl>>6))*al) >> 6;
277 return av_clip(re_signal * 4, -0xffff, 0xffff);
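Listing lines 250-266 above are the quantizer speed control: dms and dml keep short- and long-term averages of the table value F[I] (leak factors 1/32 and 1/128, matching their cross-reference descriptions), the test at line 256 together with the transition flag td steers ap, and al = min(ap, 256) >> 2 becomes a 0..64 weight. Line 266 then blends the fast scale factor yu with the slow one yl >> 6; since line 262 keeps yl near 64 * yu in steady state, y effectively slides between the locked value yl >> 6 and the fast value yu. A small numeric illustration of that blend:

#include <stdio.h>

/* Same expression as listing line 266. */
static int mix(int yl, int yu, int al)
{
    return (yl + (yu - (yl >> 6)) * al) >> 6;
}

int main(void)
{
    int yu = 2048, yl = 64 * 2048;    /* steady-state relation from line 262 */
    printf("locked   (al = 0):  y = %d\n", mix(yl, yu, 0));    /* yl >> 6 = 2048 */
    printf("unlocked (al = 64): y = %d\n", mix(yl, yu, 64));   /* yu      = 2048 */
    printf("halfway  (al = 32): y = %d\n", mix(yl, 3000, 32)); /* midway between 2048 and 3000 */
    return 0;
}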
285 for (i=0; i<2; i++) {
286 c->sr[i].mant = 1<<5;
289 for (i=0; i<6; i++) {
290 c->dq[i].mant = 1<<5;
300 #if CONFIG_ADPCM_G726_ENCODER || CONFIG_ADPCM_G726LE_ENCODER
316 c->little_endian = !strcmp(avctx->codec->name, "g726le");
321 "allowed when the compliance level is higher than unofficial. "
322 "Resample or reduce the compliance level.\n");
339 c->code_size = av_clip(c->code_size, 2, 5);
347 avctx->frame_size = ((int[]){ 4096, 2736, 2048, 1640 })[c->code_size - 2];
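Line 347 above sizes the encoder frame with a C99 compound literal indexed by code_size - 2, which is safe because line 339 has just clipped code_size to 2..5. A standalone equivalent of the lookup:

#include <stdio.h>

int main(void)
{
    for (int code_size = 2; code_size <= 5; code_size++) {
        /* Same compound-literal table as listing line 347. */
        int frame_size = ((int[]){ 4096, 2736, 2048, 1640 })[code_size - 2];
        printf("%d bits/sample -> %d samples per frame\n", code_size, frame_size);
    }
    return 0;
}

The four sizes all pack to roughly one kilobyte of payload: 4096*2, 2736*3, 2048*4 and 1640*5 bits are each within a couple of bytes of 8192 bits.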
356 const int16_t *samples = (const int16_t *)frame->data[0];
365 for (i = 0; i < frame->nb_samples; i++)
366 if (c->little_endian) {
372 if (c->little_endian) {
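Lines 366 and 372 above branch on c->little_endian, which line 316 sets from the codec name; per the cross-references, "g726le" is the LSB-first bitstream flavour used in AIFF and Sun AU and is written with put_bits_le()/flush_put_bits_le() instead of the MSB-first PutBitContext calls. The packing helpers themselves are not part of the surviving listing, so here is a hedged, self-contained illustration of the byte-order difference for 4-bit codes; it is not the file's actual packing code.

#include <stdio.h>

int main(void)
{
    const unsigned codes[4] = { 0x1, 0x2, 0x3, 0x4 };   /* four 4-bit samples */
    unsigned be = 0, le = 0;

    for (int i = 0; i < 4; i++) {
        be = (be << 4) | codes[i];     /* MSB-first: earliest code lands in the top bits */
        le |= codes[i] << (4 * i);     /* LSB-first: earliest code lands in the bottom bits */
    }
    printf("MSB-first bytes (g726):   %02x %02x\n", (be >> 8) & 0xff, be & 0xff); /* 12 34 */
    printf("LSB-first bytes (g726le): %02x %02x\n", le & 0xff, (le >> 8) & 0xff); /* 21 43 */
    return 0;
}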
382 #define OFFSET(x) offsetof(G726Context, x)
383 #define AE AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
389 static const AVClass g726_class = {
402 #if CONFIG_ADPCM_G726_ENCODER
411 .init = g726_encode_init,
415 .p.priv_class = &g726_class,
420 #if CONFIG_ADPCM_G726LE_ENCODER
429 .init = g726_encode_init,
433 .p.priv_class = &g726_class,
438 #if CONFIG_ADPCM_G726_DECODER || CONFIG_ADPCM_G726LE_DECODER
450 c->little_endian = !strcmp(avctx->codec->name, "g726le");
453 if (c->code_size < 2 || c->code_size > 5) {
465 int *got_frame_ptr, AVPacket *avpkt)
467 const uint8_t *buf = avpkt->data;
468 int buf_size = avpkt->size;
472 int out_samples, ret;
474 out_samples = buf_size * 8 / c->code_size;
477 frame->nb_samples = out_samples;
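As a worked example of the two lines above: a 1024-byte packet of 32 kbit/s G.726 (code_size 4) yields 1024 * 8 / 4 = 2048 output samples, i.e. 256 ms of audio at the codec's 8 kHz rate, which also matches the 2048-sample encoder frame chosen at line 347 for that mode.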
484 while (out_samples--)
504 #if CONFIG_ADPCM_G726_DECODER
511 .init = g726_decode_init,
513 .flush = g726_decode_flush,
518 #if CONFIG_ADPCM_G726LE_DECODER
524 .init = g726_decode_init,
526 .flush = g726_decode_flush,
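The FFCodec blocks above register the big-endian and little-endian encoder and decoder entries (ff_adpcm_g726_encoder, ff_adpcm_g726le_encoder, ff_adpcm_g726_decoder, ff_adpcm_g726le_decoder). Below is a hedged sketch of opening the "g726" encoder through the public libavcodec API: the 8 kHz / mono / S16 constraints follow from the init code above (see lines 321-322) and the bitrate-to-code_size mapping from line 339; the surrounding boilerplate is ordinary API usage, not something specific to this file.

#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>

/* Minimal sketch: open the "g726" encoder at 32 kbit/s (code_size 4). */
int open_g726_encoder(AVCodecContext **out)
{
    const AVCodec *codec = avcodec_find_encoder_by_name("g726");
    AVCodecContext *ctx;
    int ret;

    if (!codec)
        return AVERROR_ENCODER_NOT_FOUND;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);

    ctx->sample_rate = 8000;   /* only 8 kHz unless strict_std_compliance is relaxed (lines 321-322) */
    ctx->sample_fmt  = AV_SAMPLE_FMT_S16;
    ctx->bit_rate    = 32000;  /* 32 kbit/s at 8 kHz -> 4 bits per sample */
    ret = av_channel_layout_copy(&ctx->ch_layout,
                                 &(AVChannelLayout)AV_CHANNEL_LAYOUT_MONO);
    if (ret < 0 || (ret = avcodec_open2(ctx, codec, NULL)) < 0) {
        avcodec_free_context(&ctx);
        return ret;
    }
    *out = ctx;
    return 0;
}

On the decoding side, bits_per_coded_sample appears in the cross-references because the decoder appears to derive its code size from it; the check around line 453 rejects anything outside 2..5 bits per sample.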
static int sgn(int value)
int frame_size
Number of samples per channel in an audio frame.
G726Tables tbls
static tables needed for computation
static const int16_t W_tbl16[]
static const int16_t iquant_tbl32[]
static int get_bits_left(GetBitContext *gb)
int sample_rate
samples per second
static const FFCodecDefault defaults[]
uint8_t mant
6 bits mantissa
const uint8_t * F
special table #2
static const int16_t W_tbl32[]
int dml
long average magnitude of F[i]
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
int av_log2_16bit(unsigned v)
This structure describes decoded (raw) audio or video data.
static void put_bits(PutBitContext *s, int n, BitBuf value)
Write up to 31 bits into a bitstream.
static const int16_t iquant_tbl40[]
static const int16_t iquant_tbl16[]
const int * quant
quantization table
static const int quant_tbl24[]
24kbit/s 3 bits per sample
static const int quant_tbl16[]
16kbit/s 2 bits per sample
int nb_channels
Number of channels in this layout.
static const uint8_t F_tbl40[]
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
AVCodec p
The public AVCodec.
const struct AVCodec * codec
AVChannelLayout ch_layout
Audio channel layout.
static av_cold int g726_reset(G726Context *c)
static int16_t g726_decode(G726Context *c, int I)
#define FF_CODEC_ENCODE_CB(func)
int a[2]
second order predictor coeffs
static int16_t mult(Float11 *f1, Float11 *f2)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int little_endian
little-endian bitstream as used in aiff and Sun AU
#define FF_CODEC_DECODE_CB(func)
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding ou...
const FFCodec ff_adpcm_g726le_decoder
static const int quant_tbl40[]
40kbit/s 5 bits per sample
static uint8_t quant(G726Context *c, int d)
Paragraph 4.2.2 page 18: Adaptive quantizer.
static unsigned int get_bits_le(GetBitContext *s, int n)
static const int quant_tbl32[]
32kbit/s 4 bits per sample
static const int16_t W_tbl24[]
static const uint8_t F_tbl16[]
#define CODEC_LONG_NAME(str)
int dms
short average magnitude of F[i]
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
int64_t bit_rate
the average bitrate
const char * av_default_item_name(void *ptr)
Return the context name.
static const G726Tables G726Tables_pool[]
int sez
estimated second order prediction
static void flush_put_bits_le(PutBitContext *s)
static const int16_t W_tbl40[]
static const int16_t iquant_tbl24[]
static const uint8_t F_tbl32[]
#define AV_CODEC_CAP_CHANNEL_CONF
Codec should fill in channel configuration and samplerate instead of container.
static const AVOption options[]
int y
quantizer scaling factor for the next iteration
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
int(* init)(struct AVCodecContext *avctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
static Float11 * i2f(int i, Float11 *f)
An AVChannelLayout holds information about the channel layout of audio data.
enum AVSampleFormat sample_fmt
audio sample format
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
int ap
scale factor control
AVSampleFormat
Audio sample formats.
@ AV_SAMPLE_FMT_S16
signed 16 bits
const FFCodec ff_adpcm_g726le_encoder
const char * name
Name of the codec implementation.
static const uint8_t F_tbl24[]
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
main external API structure.
@ AV_CODEC_ID_ADPCM_G726LE
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
@ AV_OPT_TYPE_INT
Underlying C type is int.
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
const int16_t * iquant
inverse quantization table
const FFCodec ff_adpcm_g726_encoder
uint8_t exp
4 bits exponent
#define avpriv_request_sample(...)
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
#define AV_CHANNEL_LAYOUT_MONO
This structure stores compressed data.
const int16_t * W
special table #1 ;-)
static void put_bits_le(PutBitContext *s, int n, BitBuf value)
const FFCodec ff_adpcm_g726_decoder
#define AV_CODEC_CAP_SMALL_LAST_FRAME
Codec can be fed a final frame with a smaller size.
int b[6]
sixth order predictor coeffs
static int16_t inverse_quant(G726Context *c, int i)
Paragraph 4.2.3 page 22: Inverse adaptive quantizer.
int se
estimated signal for the next iteration