#include <AudioToolbox/AudioToolbox.h>

#define FF_BUFQUEUE_SIZE 256
static UInt32 ffat_get_format_id(enum AVCodecID codec, int profile)
{
    switch (codec) {
    case AV_CODEC_ID_AAC:
        switch (profile) {
        case AV_PROFILE_AAC_LOW:
        default:                   return kAudioFormatMPEG4AAC;
        case AV_PROFILE_AAC_HE:    return kAudioFormatMPEG4AAC_HE;
        case AV_PROFILE_AAC_HE_V2: return kAudioFormatMPEG4AAC_HE_V2;
        case AV_PROFILE_AAC_LD:    return kAudioFormatMPEG4AAC_LD;
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1060
        case AV_PROFILE_AAC_ELD:   return kAudioFormatMPEG4AAC_ELD;
#endif
        }
    case AV_CODEC_ID_ADPCM_IMA_QT: return kAudioFormatAppleIMA4;
    case AV_CODEC_ID_ALAC:         return kAudioFormatAppleLossless;
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1060
    case AV_CODEC_ID_ILBC:         return kAudioFormatiLBC;
#endif
    case AV_CODEC_ID_PCM_ALAW:     return kAudioFormatALaw;
    case AV_CODEC_ID_PCM_MULAW:    return kAudioFormatULaw;
    default:                       return 0;
    }
}
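The mapping above only picks a CoreAudio format constant; whether the current system can actually encode that format is a separate question. A minimal standalone sketch (not part of this file; the helper name list_encodable_formats is hypothetical) that lists the encoder format IDs available at runtime through the AudioFormat API:

#include <AudioToolbox/AudioToolbox.h>
#include <stdio.h>
#include <stdlib.h>

/* Print every format ID the system can encode to, so a caller could check
 * that e.g. kAudioFormatMPEG4AAC_HE is actually available. */
static void list_encodable_formats(void)
{
    UInt32 size = 0;
    if (AudioFormatGetPropertyInfo(kAudioFormatProperty_EncodeFormatIDs,
                                   0, NULL, &size) || !size)
        return;

    UInt32 *ids = malloc(size);
    if (!ids)
        return;
    if (!AudioFormatGetProperty(kAudioFormatProperty_EncodeFormatIDs,
                                0, NULL, &size, ids)) {
        UInt32 count = size / sizeof(UInt32);
        for (UInt32 i = 0; i < count; i++) {
            UInt32 id = ids[i];
            /* Format IDs are four-character codes; print them as text. */
            printf("encoder: %c%c%c%c\n",
                   (char)(id >> 24), (char)(id >> 16),
                   (char)(id >> 8),  (char)id);
        }
    }
    free(ids);
}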
static void ffat_update_ctx(AVCodecContext *avctx)
{
    ATDecodeContext *at = avctx->priv_data;
    UInt32 size = sizeof(unsigned);
    AudioConverterPrimeInfo prime_info;
    AudioStreamBasicDescription out_format;

    /* Largest encoded packet the converter may produce. */
    AudioConverterGetProperty(at->converter,
                              kAudioConverterPropertyMaximumOutputPacketSize,
                              &size, &at->pkt_size);

    /* Encoder priming (leading frames) becomes the stream's initial padding. */
    size = sizeof(prime_info);
    if (!AudioConverterGetProperty(at->converter,
                                   kAudioConverterPrimeInfo,
                                   &size, &prime_info))
        avctx->initial_padding = prime_info.leadingFrames;

    /* The negotiated output description tells us the frame size. */
    size = sizeof(out_format);
    if (!AudioConverterGetProperty(at->converter,
                                   kAudioConverterCurrentOutputStreamDescription,
                                   &size, &out_format)) {
        if (out_format.mFramesPerPacket)
            avctx->frame_size = out_format.mFramesPerPacket;
    }
}
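The size variable is reset before every query because AudioConverterGetProperty treats it as in/out: on input it is the capacity of the destination, on output the number of bytes written. A small self-contained sketch of that convention (hypothetical helper, assuming an already created converter):

#include <AudioToolbox/AudioToolbox.h>

/* Query the maximum encoded packet size for an existing converter.
 * Returns 0 on failure. */
static UInt32 max_output_packet_size(AudioConverterRef converter)
{
    UInt32 max_size = 0;
    UInt32 size     = sizeof(max_size);   /* capacity on input, bytes written on output */
    OSStatus err = AudioConverterGetProperty(converter,
                                             kAudioConverterPropertyMaximumOutputPacketSize,
                                             &size, &max_size);
    return err ? 0 : max_size;
}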
    /* MPEG-4 descriptor parsing: read the tag byte, then the 7-bit-per-byte length. */
    *tag = bytestream2_get_byte(gb);
    int c = bytestream2_get_byte(gb);

    /* iLBC frame duration: 30 ms frames at low bitrates, 20 ms otherwise. */
    return avctx->bit_rate <= 14000 ? 30 : 20;
    /* AV_CH_LOW_FREQUENCY_2 maps to the CoreAudio LFE2 label. */
    return kAudioChannelLabel_LFE2;

    /* Describe the layout channel by channel rather than with a fixed tag. */
    layout->mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelDescriptions;
    /* ... */
    if (c < 0 || c >= 64)       /* only channel indices 0..63 can be mapped */
        /* ... reject the layout ... */;
    layout->mChannelDescriptions[i].mChannelLabel = label;
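As a self-contained illustration of the same idea (a sketch, not taken from this file; make_stereo_layout is a hypothetical helper): an AudioChannelLayout is a variable-length structure, so it is allocated with room for one AudioChannelDescription per channel and tagged with kAudioChannelLayoutTag_UseChannelDescriptions when the channels are listed explicitly.

#include <AudioToolbox/AudioToolbox.h>
#include <stdlib.h>

/* Build an explicit stereo layout (left, right). Caller frees the result. */
static AudioChannelLayout *make_stereo_layout(void)
{
    UInt32 size = sizeof(AudioChannelLayout) + sizeof(AudioChannelDescription) * 2;
    AudioChannelLayout *layout = calloc(1, size);
    if (!layout)
        return NULL;

    layout->mChannelLayoutTag          = kAudioChannelLayoutTag_UseChannelDescriptions;
    layout->mNumberChannelDescriptions = 2;
    layout->mChannelDescriptions[0].mChannelLabel = kAudioChannelLabel_Left;
    layout->mChannelDescriptions[1].mChannelLabel = kAudioChannelLabel_Right;
    return layout;
}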
static const struct {
    /* ... lookup-table members and entries not reproduced in this excerpt ... */
    AudioStreamBasicDescription in_format = {
        .mSampleRate       = avctx->sample_rate,
        .mFormatID         = kAudioFormatLinearPCM,
        .mFormatFlags      = ((avctx->sample_fmt == AV_SAMPLE_FMT_FLT ||
                               avctx->sample_fmt == AV_SAMPLE_FMT_DBL) ? kAudioFormatFlagIsFloat
                              : avctx->sample_fmt == AV_SAMPLE_FMT_U8 ? 0
                              : kAudioFormatFlagIsSignedInteger)
                             | kAudioFormatFlagIsPacked,
        .mFramesPerPacket  = 1,
        .mChannelsPerFrame = avctx->ch_layout.nb_channels,
    };
    AudioStreamBasicDescription out_format = {
        .mSampleRate       = avctx->sample_rate,
        .mFormatID         = ffat_get_format_id(avctx->codec_id, avctx->profile),
        .mChannelsPerFrame = in_format.mChannelsPerFrame,
    };
    UInt32 layout_size = sizeof(AudioChannelLayout) +
                         sizeof(AudioChannelDescription) * avctx->ch_layout.nb_channels;
    AudioChannelLayout *channel_layout = av_malloc(layout_size);

    /* iLBC: the frame length and packet size must be fixed up front. */
    if (avctx->codec_id == AV_CODEC_ID_ILBC) {
        int mode = get_ilbc_mode(avctx);   /* 20 or 30 ms frames */
        out_format.mFramesPerPacket = 8000 * mode / 1000;
        out_format.mBytesPerPacket  = (mode == 20 ? 38 : 50);
    }
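For a standalone picture of how these two stream descriptions come together, here is a sketch under simple assumptions (44100 Hz, stereo, interleaved signed 16-bit input, AAC-LC output; make_aac_converter is a hypothetical name, not code from this file):

#include <AudioToolbox/AudioToolbox.h>

/* Create an AudioConverter that encodes interleaved S16 stereo PCM to AAC-LC. */
static AudioConverterRef make_aac_converter(void)
{
    AudioStreamBasicDescription in = {
        .mSampleRate       = 44100,
        .mFormatID         = kAudioFormatLinearPCM,
        .mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked,
        .mBytesPerPacket   = 2 * 2,   /* 2 channels * 2 bytes per sample */
        .mFramesPerPacket  = 1,       /* PCM: one frame per packet */
        .mBytesPerFrame    = 2 * 2,
        .mChannelsPerFrame = 2,
        .mBitsPerChannel   = 16,
    };
    AudioStreamBasicDescription out = {
        .mSampleRate       = 44100,
        .mFormatID         = kAudioFormatMPEG4AAC, /* the encoder fills in the rest */
        .mChannelsPerFrame = 2,
    };
    AudioConverterRef converter = NULL;
    if (AudioConverterNew(&in, &out, &converter) != noErr)
        return NULL;
    return converter;
}

Only the fields that constrain the encoder need to be set on the output side; the converter completes the remaining fields of the compressed description itself.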
    if (AudioConverterSetProperty(at->converter, kAudioConverterInputChannelLayout,
                                  layout_size, channel_layout)) {
        /* ... report the unsupported input layout and fail ... */
    }

    /* When the target codec has a fixed layout tag, use it instead of
     * per-channel descriptions on the output side. */
    channel_layout->mChannelLayoutTag          = tag;
    channel_layout->mNumberChannelDescriptions = 0;

    if (AudioConverterSetProperty(at->converter, kAudioConverterOutputChannelLayout,
                                  layout_size, channel_layout)) {
        /* ... report the unsupported output layout and fail ... */
    }

    if (avctx->bits_per_raw_sample)
        AudioConverterSetProperty(at->converter,
                                  kAudioConverterPropertyBitDepthHint,
                                  sizeof(avctx->bits_per_raw_sample),
                                  &avctx->bits_per_raw_sample);
#if !TARGET_OS_IPHONE
    /* Choose the rate-control mode: VBR when a global quality (qscale) was
     * requested, CBR otherwise. */
    if (at->mode == -1)
        at->mode = (avctx->flags & AV_CODEC_FLAG_QSCALE) ?
                   kAudioCodecBitRateControlMode_Variable :
                   kAudioCodecBitRateControlMode_Constant;

    AudioConverterSetProperty(at->converter, kAudioCodecPropertyBitRateControlMode,
                              sizeof(at->mode), &at->mode);

    if (at->mode == kAudioCodecBitRateControlMode_Variable) {
        int q = avctx->global_quality / FF_QP2LAMBDA;
        if (q < 0 || q > 14) {
            av_log(avctx, AV_LOG_WARNING,
                   "VBR quality %d out of range, should be 0-14\n", q);
            q = av_clip(q, 0, 14);
        }
        AudioConverterSetProperty(at->converter, kAudioCodecPropertySoundQualityForVBR,
                                  sizeof(q), &q);
    }
#endif
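A minimal standalone sketch of the same two calls (assumptions: an existing converter; the kAudioCodec* constants come from AudioToolbox's AudioCodec header, which is why this path is compiled only off iOS in the file above; request_vbr is a hypothetical helper):

#include <AudioToolbox/AudioToolbox.h>

/* Ask the underlying codec for true VBR and a mid-range VBR quality.
 * The quality value is codec-specific; Apple's AAC encoder commonly uses 0..127. */
static void request_vbr(AudioConverterRef converter)
{
    UInt32 mode = kAudioCodecBitRateControlMode_Variable;
    AudioConverterSetProperty(converter, kAudioCodecPropertyBitRateControlMode,
                              sizeof(mode), &mode);

    UInt32 quality = 64;
    AudioConverterSetProperty(converter, kAudioCodecPropertySoundQualityForVBR,
                              sizeof(quality), &quality);
}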
    /* CBR/ABR: honour the requested bitrate, clamped to what the codec allows. */
    if (avctx->bit_rate > 0) {
        UInt32 rate = avctx->bit_rate;
        UInt32 size;
        if (!AudioConverterGetPropertyInfo(at->converter,
                                           kAudioConverterApplicableEncodeBitRates,
                                           &size, NULL) && size) {
            UInt32 new_rate = rate;
            int count, i;
            AudioValueRange *ranges = av_malloc(size);
            if (!ranges)
                return AVERROR(ENOMEM);
            AudioConverterGetProperty(at->converter,
                                      kAudioConverterApplicableEncodeBitRates,
                                      &size, ranges);
            count = size / sizeof(AudioValueRange);
            for (i = 0; i < count; i++) {
                AudioValueRange *range = &ranges[i];
                if (rate >= range->mMinimum && rate <= range->mMaximum) {
                    new_rate = rate;
                    break;
                } else if (rate > range->mMaximum) {
                    new_rate = range->mMaximum;
                } else {
                    new_rate = range->mMinimum;
                    break;
                }
            }
            if (new_rate != rate) {
                av_log(avctx, AV_LOG_WARNING,
                       "Bitrate %u not allowed; changing to %u\n", rate, new_rate);
                rate = new_rate;
            }
            av_free(ranges);
        }
        AudioConverterSetProperty(at->converter, kAudioConverterEncodeBitRate,
                                  sizeof(rate), &rate);
    }
    /* Overall codec quality (speed/quality trade-off). */
    AudioConverterSetProperty(at->converter, kAudioConverterCodecQuality,
                              sizeof(quality), &quality);

    /* Export the codec "magic cookie" (e.g. the AAC AudioSpecificConfig),
     * which becomes avctx->extradata: query its size, allocate, then fetch. */
    if (!AudioConverterGetPropertyInfo(at->converter, kAudioConverterCompressionMagicCookie,
                                       &extradata_size, NULL) && extradata_size) {
        /* ... allocate extradata ... */
        AudioConverterGetProperty(at->converter,
                                  kAudioConverterCompressionMagicCookie,
                                  &extradata_size, extradata);
    }
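The size-query/allocate/fetch sequence used for the magic cookie is a general AudioConverter pattern; a self-contained sketch (copy_magic_cookie is a hypothetical helper, not from this file):

#include <AudioToolbox/AudioToolbox.h>
#include <stdint.h>
#include <stdlib.h>

/* Fetch the converter's compression magic cookie (codec-specific config).
 * On success *out_size holds the cookie length; the caller frees the buffer. */
static uint8_t *copy_magic_cookie(AudioConverterRef converter, UInt32 *out_size)
{
    UInt32 size = 0;
    if (AudioConverterGetPropertyInfo(converter,
                                      kAudioConverterCompressionMagicCookie,
                                      &size, NULL) || !size)
        return NULL;

    uint8_t *cookie = malloc(size);
    if (!cookie)
        return NULL;
    if (AudioConverterGetProperty(converter,
                                  kAudioConverterCompressionMagicCookie,
                                  &size, cookie)) {
        free(cookie);
        return NULL;
    }
    *out_size = size;
    return cookie;
}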
    /* Reading a descriptor flags byte from the cookie via the bytestream2 reader. */
    flags = bytestream2_get_byte(&gb);
#if !TARGET_OS_IPHONE && defined(__MAC_10_9)
    /* In VBR mode, honour -maxrate by capping the size of a single packet. */
    if (at->mode == kAudioCodecBitRateControlMode_Variable && avctx->rc_max_rate) {
        UInt32 max_size = avctx->rc_max_rate * avctx->frame_size / avctx->sample_rate / 8;
        if (max_size)
            AudioConverterSetProperty(at->converter, kAudioCodecPropertyPacketSizeLimitForVBR,
                                      sizeof(max_size), &max_size);
    }
#endif
static OSStatus ffat_encode_callback(AudioConverterRef converter, UInt32 *nb_packets,
                                     AudioBufferList *data,
                                     AudioStreamPacketDescription **packets,
                                     void *inctx)
{
    AVCodecContext *avctx = inctx;
    ATDecodeContext *at   = avctx->priv_data;
    /* ... pull the next queued input frame, or signal EOF / "no data yet" ... */
    AVFrame *frame = ff_bufqueue_get(&at->frame_queue);

    /* Hand the converter one interleaved PCM buffer for this frame. */
    data->mNumberBuffers            = 1;
    data->mBuffers[0].mDataByteSize = frame->nb_samples *
                                      av_get_bytes_per_sample(avctx->sample_fmt) *
                                      avctx->ch_layout.nb_channels;
    data->mBuffers[0].mData         = frame->data[0];
    if (*nb_packets > frame->nb_samples)
        *nb_packets = frame->nb_samples;

    /* ... keep the frame referenced until the converter has consumed it ... */
    return 0;
}
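For reference, the general shape of an AudioConverterComplexInputDataProc outside FFmpeg (a sketch with a hypothetical PcmSource user-data struct; the converter calls this repeatedly to pull input until it has produced the requested output):

#include <AudioToolbox/AudioToolbox.h>
#include <stdint.h>

/* Hypothetical source: one contiguous block of interleaved PCM. */
typedef struct PcmSource {
    const uint8_t *data;
    UInt32 bytes_left;
    UInt32 bytes_per_frame;
} PcmSource;

static OSStatus pull_pcm(AudioConverterRef converter, UInt32 *io_num_packets,
                         AudioBufferList *io_data,
                         AudioStreamPacketDescription **out_desc, void *user)
{
    PcmSource *src = user;
    (void)converter; (void)out_desc;    /* PCM input needs no packet descriptions */

    if (!src->bytes_left) {             /* no more input: end the conversion */
        *io_num_packets = 0;
        return noErr;
    }

    UInt32 frames = src->bytes_left / src->bytes_per_frame;
    if (*io_num_packets > frames)
        *io_num_packets = frames;

    io_data->mNumberBuffers            = 1;
    io_data->mBuffers[0].mData         = (void *)src->data;
    io_data->mBuffers[0].mDataByteSize = *io_num_packets * src->bytes_per_frame;

    src->data       += io_data->mBuffers[0].mDataByteSize;
    src->bytes_left -= io_data->mBuffers[0].mDataByteSize;
    return noErr;
}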
    AudioBufferList out_buffers = {
        .mNumberBuffers = 1,
        .mBuffers[0]    = { .mDataByteSize = at->pkt_size },
    };
    AudioStreamPacketDescription out_pkt_desc = {0};

    /* ... queue the incoming frame (or mark EOF) and allocate avpkt ... */

    out_buffers.mBuffers[0].mData = avpkt->data;

    /* The converter pulls input through ffat_encode_callback() and writes one
     * encoded packet into out_buffers; got_packet_ptr doubles as the in/out
     * packet count. */
    ret = AudioConverterFillComplexBuffer(at->converter, ffat_encode_callback, avctx,
                                          got_packet_ptr, &out_buffers,
                                          &out_pkt_desc);

    if ((!ret || ret == 1) && *got_packet_ptr) {
        avpkt->size = out_buffers.mBuffers[0].mDataByteSize;
        ff_af_queue_remove(&at->afq, out_pkt_desc.mVariableFramesInPacket ?
                                     out_pkt_desc.mVariableFramesInPacket :
                                     avctx->frame_size,
                           &avpkt->pts, &avpkt->duration);
    } else if (ret && ret != 1) {
        /* ... log the converter error and return AVERROR_EXTERNAL ... */
    }
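Outside the FFmpeg wrapper, driving the converter looks like this (a sketch reusing the hypothetical PcmSource/pull_pcm() from the callback example above; one call produces at most one encoded packet):

#include <AudioToolbox/AudioToolbox.h>
#include <stdint.h>

/* Encode one packet from `src` into `out`/`out_capacity`.
 * Returns the packet size in bytes, 0 at end of stream, -1 on error. */
static int encode_one_packet(AudioConverterRef converter, PcmSource *src,
                             uint8_t *out, UInt32 out_capacity,
                             AudioStreamPacketDescription *desc)
{
    AudioBufferList out_list = {
        .mNumberBuffers = 1,
        .mBuffers[0]    = { .mNumberChannels = 2,
                            .mDataByteSize   = out_capacity,
                            .mData           = out },
    };
    UInt32 num_packets = 1;   /* ask for a single encoded packet */

    OSStatus err = AudioConverterFillComplexBuffer(converter, pull_pcm, src,
                                                   &num_packets, &out_list, desc);
    if (err != noErr)
        return -1;
    if (!num_packets)
        return 0;             /* the callback reported end of input */
    return (int)out_list.mBuffers[0].mDataByteSize;
}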
#define AE AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
#if !TARGET_OS_IPHONE
    {"aac_at_mode", "ratecontrol mode", offsetof(ATDecodeContext, mode),
     AV_OPT_TYPE_INT, {.i64 = -1}, -1, kAudioCodecBitRateControlMode_Variable, AE, .unit = "mode"},
    {"auto", "VBR if global quality is given; CBR otherwise", 0,
     AV_OPT_TYPE_CONST, {.i64 = -1}, INT_MIN, INT_MAX, AE, .unit = "mode"},
    {"cbr", "constant bitrate", 0,
     AV_OPT_TYPE_CONST, {.i64 = kAudioCodecBitRateControlMode_Constant}, INT_MIN, INT_MAX, AE, .unit = "mode"},
    {"abr", "long-term average bitrate", 0,
     AV_OPT_TYPE_CONST, {.i64 = kAudioCodecBitRateControlMode_LongTermAverage}, INT_MIN, INT_MAX, AE, .unit = "mode"},
    {"cvbr", "constrained variable bitrate", 0,
     AV_OPT_TYPE_CONST, {.i64 = kAudioCodecBitRateControlMode_VariableConstrained}, INT_MIN, INT_MAX, AE, .unit = "mode"},
    {"vbr", "variable bitrate", 0,
     AV_OPT_TYPE_CONST, {.i64 = kAudioCodecBitRateControlMode_Variable}, INT_MIN, INT_MAX, AE, .unit = "mode"},
#endif
    {NULL}
};
#define FFAT_ENC_CLASS(NAME) \
    static const AVClass ffat_##NAME##_enc_class = { \
        .class_name = "at_" #NAME "_enc", \
        .item_name  = av_default_item_name, \
        .option     = options, \
        .version    = LIBAVUTIL_VERSION_INT, \
    };

#define FFAT_ENC(NAME, ID, PROFILES, CAPS, CHANNEL_LAYOUTS, CH_LAYOUTS) \
    FFAT_ENC_CLASS(NAME) \
    const FFCodec ff_##NAME##_at_encoder = { \
        .p.name           = #NAME "_at", \
        CODEC_LONG_NAME(#NAME " (AudioToolbox)"), \
        .p.type           = AVMEDIA_TYPE_AUDIO, \
        .p.id             = ID, \
        .priv_data_size   = sizeof(ATDecodeContext), \
        .init             = ffat_init_encoder, \
        .close            = ffat_close_encoder, \
        FF_CODEC_ENCODE_CB(ffat_encode), \
        .flush            = ffat_encode_flush, \
        .p.priv_class     = &ffat_##NAME##_enc_class, \
        .p.capabilities   = AV_CODEC_CAP_DELAY | \
                            AV_CODEC_CAP_ENCODER_FLUSH CAPS, \
        .p.ch_layouts     = CH_LAYOUTS, \
        .p.sample_fmts    = (const enum AVSampleFormat[]) { \
            AV_SAMPLE_FMT_S16, \
            AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_NONE \
        }, \
        .p.profiles       = PROFILES, \
        .p.wrapper_name   = "at", \
    };
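The private options declared above are reachable through the normal AVOptions machinery. A sketch of selecting constrained VBR on the aac_at encoder from the libavcodec API (assumptions: FFmpeg built with the AudioToolbox encoders enabled; the sample format, bitrate, and helper name open_aac_at_cvbr are illustrative choices):

#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>

/* Configure and open an aac_at encoder context in constrained-VBR mode. */
static AVCodecContext *open_aac_at_cvbr(int sample_rate)
{
    const AVCodec *codec = avcodec_find_encoder_by_name("aac_at");
    if (!codec)
        return NULL;                        /* not built with AudioToolbox */

    AVCodecContext *ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return NULL;

    ctx->sample_rate = sample_rate;
    ctx->sample_fmt  = AV_SAMPLE_FMT_S16;   /* 16-bit interleaved PCM input */
    av_channel_layout_default(&ctx->ch_layout, 2);
    ctx->bit_rate    = 128000;

    /* "aac_at_mode" lives in the encoder's private options, so search children. */
    av_opt_set(ctx, "aac_at_mode", "cvbr", AV_OPT_SEARCH_CHILDREN);

    if (avcodec_open2(ctx, codec, NULL) < 0) {
        avcodec_free_context(&ctx);
        return NULL;
    }
    return ctx;
}

The same option is exposed on the ffmpeg command line as a private encoder option (for example via -c:a aac_at together with -aac_at_mode cvbr).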