/* excerpts from sbc_unpack_frame(const uint8_t *data,
 *                                struct sbc_frame *frame, size_t len) */
    unsigned int consumed;      /* running bit cursor into data[] */
    uint32_t audio_sample;
    uint32_t levels[2][8];      /* quantization levels per channel/subband */
    /* header fields packed into data[1] */
    frame->blocks = 4 * ((data[1] >> 4) & 0x03) + 4;
    frame->allocation = (data[1] >> 1) & 0x01;

    /* the two header bytes that enter the CRC-8 */
    crc_header[0] = data[1];
    crc_header[1] = data[2];
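As a quick worked illustration of the bitfield extraction above, here is a standalone sketch. The byte value 0x9D is made up, and the mode/subbands fields and the meaning of each code are assumptions based on the A2DP SBC header layout rather than something shown in this listing:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t b = 0x9D;   /* hypothetical second header byte: 1001 1101 */

    int frequency  = (b >> 6) & 0x03;            /* 2: assumed to mean 44100 Hz  */
    int blocks     = 4 * ((b >> 4) & 0x03) + 4;  /* code 1 -> 8 blocks           */
    int mode       = (b >> 2) & 0x03;            /* 3: assumed joint stereo      */
    int allocation = (b >> 1) & 0x01;            /* 0: assumed loudness method   */
    int subbands   = (b & 0x01) ? 8 : 4;         /* 1 -> 8 subbands              */

    printf("freq=%d blocks=%d mode=%d alloc=%d subbands=%d\n",
           frequency, blocks, mode, allocation, subbands);
    return 0;
}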
    /* joint-stereo information (present only for JOINT_STEREO frames) */
    if (len * 8 < consumed + frame->subbands)
        return -4;

    frame->joint = 0x00;
    for (sb = 0; sb < frame->subbands - 1; sb++)
        frame->joint |= ((data[4] >> (7 - sb)) & 0x01) << sb;
    if (frame->subbands == 4)
        crc_header[crc_pos / 8] = data[4] & 0xf0;
    else
        crc_header[crc_pos / 8] = data[4];

    consumed += frame->subbands;
    crc_pos += frame->subbands;
    if (len * 8 < consumed + (4 * frame->subbands * frame->channels))
        return -1;

    /* 4-bit scale factors, packed MSB first; 'consumed' is a bit cursor */
    for (ch = 0; ch < frame->channels; ch++) {
        for (sb = 0; sb < frame->subbands; sb++) {
            frame->scale_factor[ch][sb] =
                (data[consumed >> 3] >> (4 - (consumed & 0x7))) & 0x0F;
            crc_header[crc_pos >> 3] |=
                frame->scale_factor[ch][sb] << (4 - (crc_pos & 0x7));

            consumed += 4;
            crc_pos += 4;
        }
    }
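The scale factors are read with a running bit cursor; since they are 4-bit values on nibble boundaries, the indexing expression above reduces to picking the high or low nibble of the current byte. A small standalone illustration of that cursor (the data bytes are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint8_t data[] = { 0xAB, 0xCD };   /* hypothetical packed nibbles */

    for (unsigned consumed = 0; consumed < 16; consumed += 4) {
        unsigned nibble = (data[consumed >> 3] >> (4 - (consumed & 0x7))) & 0x0F;
        printf("bit offset %2u -> 0x%X\n", consumed, nibble);
    }
    /* prints 0xA, 0xB, 0xC, 0xD */
    return 0;
}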
    /* quantization levels for each channel/subband */
    for (ch = 0; ch < frame->channels; ch++) {
        for (sb = 0; sb < frame->subbands; sb++)
            levels[ch][sb] = (1 << bits[ch][sb]) - 1;
    }

    /* read the quantized code words and reconstruct the subband samples */
    for (blk = 0; blk < frame->blocks; blk++) {
        for (ch = 0; ch < frame->channels; ch++) {
            for (sb = 0; sb < frame->subbands; sb++) {
                uint32_t shift;

                if (levels[ch][sb] == 0) {
                    frame->sb_sample_f[blk][ch][sb] = 0;
                    continue;
                }

                shift = frame->scale_factor[ch][sb] +
                        1 + SBCDEC_FIXED_EXTRA_BITS;

                audio_sample = 0;
                for (bit = 0; bit < bits[ch][sb]; bit++) {
                    if (consumed > len * 8)
                        return -1;

                    if ((data[consumed >> 3] >> (7 - (consumed & 0x7))) & 0x01)
                        audio_sample |= 1 << (bits[ch][sb] - bit - 1);

                    consumed++;
                }

                frame->sb_sample_f[blk][ch][sb] = (int32_t)
                    (((((uint64_t) audio_sample << 1) | 1) << shift) /
                     levels[ch][sb]) - (1 << shift);
            }
        }
    }
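To make the reconstruction formula above concrete, here is a standalone sketch of the same arithmetic for one hypothetical sample. The value given to SBCDEC_FIXED_EXTRA_BITS is an assumption; the listing only shows the macro's name:

#include <stdio.h>
#include <stdint.h>

/* assumed value, used here only to make the example self-contained */
#define SBCDEC_FIXED_EXTRA_BITS 2

int main(void)
{
    int bits = 4, scale_factor = 5;                  /* hypothetical allocation   */
    uint32_t levels = (1 << bits) - 1;               /* 15                        */
    uint32_t audio_sample = 9;                       /* raw quantized code word   */
    uint32_t shift = scale_factor + 1 + SBCDEC_FIXED_EXTRA_BITS;   /* 8           */

    int32_t sample = (int32_t)
        ((((((uint64_t)audio_sample << 1) | 1) << shift) / levels) - (1 << shift));

    printf("reconstructed subband sample = %d\n", sample);   /* prints 68 */
    return 0;
}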
    /* undo joint-stereo coding: flagged subbands carry the sum/difference
     * of the two channels (run per block) */
    for (sb = 0; sb < frame->subbands; sb++) {
        if (frame->joint & (0x01 << sb)) {
            /* restore left/right from the sum and difference samples */
        }
    }

    if ((consumed & 0x7) != 0)
        consumed += 8 - (consumed & 0x7);   /* pad to a byte boundary */

    return consumed >> 3;                   /* frame length in bytes */
    /* sbc_synthesize_four(): shift the synthesis ring buffer, then run the
     * 40-tap polyphase filter */
    for (i = 0; i < 8; i++) {
        offset[i]--;
        if (offset[i] < 0) {
            offset[i] = 79;
            memcpy(v + 80, v, 9 * sizeof(*v));
        }
        /* matrixing with ff_synmatrix4 writes the new value at v[offset[i]] */
    }

    for (idx = 0, i = 0; i < 4; i++, idx += 5) {
        /* FIR over ff_sbc_proto_4_40m0/_40m1, clipped and stored as S16P */
    }
    /* sbc_synthesize_eight(): same scheme with 16 ring-buffer positions
     * and the 80-tap prototype filter */
    for (i = 0; i < 16; i++) {
        offset[i]--;
        if (offset[i] < 0) {
            offset[i] = 159;
            memcpy(v + 160, v, 9 * sizeof(*v));
        }
        /* matrixing with ff_synmatrix8 writes the new value at v[offset[i]] */
    }

    for (idx = 0, i = 0; i < 8; i++, idx += 5) {
        /* FIR over ff_sbc_proto_8_80m0/_80m1 */
    }
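The memcpy(v + 80, ...) and memcpy(v + 160, ...) calls above implement a mirrored ring buffer: when the write position wraps, the first few history entries are duplicated past the end of the array so the FIR taps can always read a contiguous window without modulo arithmetic. A minimal standalone sketch of that idea with made-up sizes (not the decoder's actual state layout):

#include <stdio.h>
#include <string.h>

#define RING 10   /* logical ring size (hypothetical) */
#define TAPS 4    /* window read per step (hypothetical) */

int main(void)
{
    /* TAPS-1 extra slots mirror the start of the ring */
    int v[RING + TAPS - 1] = { 0 };
    int pos = 0;

    for (int n = 0; n < 25; n++) {
        pos--;
        if (pos < 0) {
            pos = RING - 1;
            /* mirror the head so v[pos..pos+TAPS-1] is always contiguous */
            memcpy(v + RING, v, (TAPS - 1) * sizeof(*v));
        }
        v[pos] = n;

        int sum = 0;
        for (int k = 0; k < TAPS; k++)
            sum += v[pos + k];            /* no modulo needed */
        printf("n=%2d window-sum=%d\n", n, sum);
    }
    return 0;
}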
    switch (frame->subbands) {              /* sbc_synthesize_audio() */
    case 4:
        for (ch = 0; ch < frame->channels; ch++)
            for (blk = 0; blk < frame->blocks; blk++)
                sbc_synthesize_four(state, frame, ch, blk, output_frame);
        break;
    case 8:                                 /* same loop, 8-subband filter */
        for (ch = 0; ch < frame->channels; ch++)
            for (blk = 0; blk < frame->blocks; blk++)
                sbc_synthesize_eight(state, frame, ch, blk, output_frame);
        break;
    }
    /* sbc_decode_init(): reset the synthesis filter state */
    memset(sbc->dsp.V, 0, sizeof(sbc->dsp.V));
    for (ch = 0; ch < 2; ch++)
        for (i = 0; i < FF_ARRAY_ELEMS(sbc->dsp.offset[0]); i++)
            sbc->dsp.offset[ch][i] = (10 * i + 10);
static int sbc_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame_ptr, AVPacket *avpkt)
{
    int ret, frame_length;

    frame_length = sbc_unpack_frame(avpkt->data, &sbc->frame, avpkt->size);
    if (frame_length <= 0) {
        /* no valid SBC frame in the packet: fail (error path omitted here) */
    }
    .supported_samplerates = (const int[]) { 16000, 32000, 44100, 48000, 0 },
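Putting the codec registration above in context, a decoder like this one is normally driven through the public send/receive API. The following is a hedged sketch only: the helper name decode_one_sbc_packet is made up, error handling is trimmed, and it assumes AV_CODEC_ID_SBC is available in the linked libavcodec and that pkt already holds one SBC frame.

#include <libavcodec/avcodec.h>

/* decode one pre-filled AVPacket of SBC data; returns the PCM frame or NULL */
static AVFrame *decode_one_sbc_packet(AVPacket *pkt)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_SBC);
    AVCodecContext *ctx  = codec ? avcodec_alloc_context3(codec) : NULL;
    AVFrame *frame       = av_frame_alloc();

    if (!ctx || !frame || avcodec_open2(ctx, codec, NULL) < 0)
        goto fail;

    if (avcodec_send_packet(ctx, pkt) < 0 ||
        avcodec_receive_frame(ctx, frame) < 0)
        goto fail;

    /* frame->data[ch] now holds planar 16-bit samples (AV_SAMPLE_FMT_S16P) */
    avcodec_free_context(&ctx);
    return frame;

fail:
    av_frame_free(&frame);
    avcodec_free_context(&ctx);
    return NULL;
}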
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing the init function to be called without locking any global mutexes.
static av_cold int init(AVCodecContext *avctx)
#define AV_CH_LAYOUT_MONO
AVFrame
This structure describes decoded (raw) audio or video data.
static int sbc_decode_init(AVCodecContext *avctx)
#define SBCDEC_FIXED_EXTRA_BITS
static void sbc_synthesize_eight(struct sbc_decoder_state *state, struct sbc_frame *frame, int ch, int blk, AVFrame *output_frame)
#define bit(string, value)
#define AV_CH_LAYOUT_STEREO
const int32_t ff_sbc_proto_4_40m1[]
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
static int sbc_unpack_frame(const uint8_t *data, struct sbc_frame *frame, size_t len)
const int32_t ff_synmatrix8[16][8]
const int32_t ff_synmatrix4[8][4]
struct sbc_decoder_state dsp
AVClass
Describe the class of an AVClass context structure.
static int sbc_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
const int32_t ff_sbc_proto_8_80m1[]
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
enum AVSampleFormat sample_fmt
audio sample format
static void sbc_synthesize_audio(struct sbc_decoder_state *state, struct sbc_frame *frame, AVFrame *output_frame)
const int32_t ff_sbc_proto_4_40m0[]
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
AV_SAMPLE_FMT_S16P
signed 16 bits, planar
int channels
number of audio channels
#define DECLARE_ALIGNED(n, t, v)
#define i(width, name, range_min, range_max)
AVSampleFormat
Audio sample formats.
#define xf(width, name, var, range_min, range_max, subs,...)
const char * name
Name of the codec implementation.
uint8_t ff_sbc_crc8(const AVCRC *ctx, const uint8_t *data, size_t len)
#define FF_ARRAY_ELEMS(a)
AVCodecContext
main external API structure.
static void sbc_synthesize_four(struct sbc_decoder_state *state, struct sbc_frame *frame, int ch, int blk, AVFrame *output_frame)
void ff_sbc_calculate_bits(const struct sbc_frame *frame, int(*bits)[8])
static int shift(int a, int b)
AVPacket
This structure stores compressed data.
static const uint16_t channel_layouts[7]
const int32_t ff_sbc_proto_8_80m0[]
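Since the definitions above reference av_crc_get_table(), here is a minimal sketch of libavutil's public CRC API. The table id and initial value are illustrative only and not necessarily what the internal ff_sbc_crc8() helper uses; build with something like gcc crc_demo.c -lavutil.

#include <stdio.h>
#include <stdint.h>
#include <libavutil/crc.h>

int main(void)
{
    const AVCRC *table = av_crc_get_table(AV_CRC_8_ATM);   /* illustrative table id */
    const uint8_t buf[] = { 0x9C, 0xBD, 0x35 };            /* made-up header bytes  */

    uint32_t crc = av_crc(table, 0, buf, sizeof(buf));
    printf("crc = 0x%02X\n", crc & 0xFF);
    return 0;
}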