libavcodec/libopusenc.c: partial source listing of FFmpeg's libopus encoder wrapper.
23 #include <opus_multistream.h>
43 #ifdef OPUS_SET_PHASE_INVERSION_DISABLED_REQUEST
59 0, 1, 1, 2, 2, 2, 2, 3
70 { 0, 4, 1, 2, 3, 5, 6 },
71 { 0, 6, 1, 2, 3, 4, 5, 7 },
82 { 0, 1, 5, 6, 2, 4, 3 },
83 { 0, 1, 6, 7, 4, 5, 2, 3 },
87 int coupled_stream_count,
95 bytestream_put_byte(&p, 1);
99 bytestream_put_le16(&p, 0);
102 bytestream_put_byte(&p, mapping_family);
103 if (mapping_family != 0) {
104 bytestream_put_byte(&p, stream_count);
105 bytestream_put_byte(&p, coupled_stream_count);
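The fragment above is the part of libopus_write_header() that fills in the OpusHead identification header: 19 bytes for mapping family 0, plus two count bytes and one mapping byte per channel otherwise. As a minimal standalone sketch of that byte layout, assuming the field order from RFC 7845 and the values visible above (version 1, 0 dB gain, pre-skip expressed in 48 kHz samples):

#include <stdint.h>
#include <string.h>

/* Hedged sketch, not this file's code: serialize an OpusHead header for
 * channel mapping family 0 (mono or stereo). Returns the header size. */
static int write_opus_head(uint8_t *p, uint8_t channels,
                           uint16_t pre_skip_48k, uint32_t input_rate)
{
    memcpy(p, "OpusHead", 8);             /* magic signature          */
    p[8]  = 1;                            /* version                  */
    p[9]  = channels;                     /* channel count            */
    p[10] = pre_skip_48k & 0xff;          /* pre-skip, LE16           */
    p[11] = pre_skip_48k >> 8;
    p[12] = input_rate & 0xff;            /* original input rate, LE32 */
    p[13] = (input_rate >> 8)  & 0xff;
    p[14] = (input_rate >> 16) & 0xff;
    p[15] = (input_rate >> 24) & 0xff;
    p[16] = 0;                            /* output gain, LE16, 0 dB  */
    p[17] = 0;
    p[18] = 0;                            /* mapping family 0         */
    return 19;
}

With a non-zero mapping family, the stream count, the coupled stream count and the per-channel mapping table follow, which is what lines 103-105 above emit and what the header_size computation further down (19 + 2 + channels) accounts for.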
117 "Quality-based encoding not supported, "
118 "please specify a bitrate and VBR setting.\n");
122 ret = opus_multistream_encoder_ctl(enc, OPUS_SET_BITRATE(avctx->bit_rate));
123 if (ret != OPUS_OK) {
125 "Failed to set bitrate: %s\n", opus_strerror(ret));
129 ret = opus_multistream_encoder_ctl(enc,
130 OPUS_SET_COMPLEXITY(opts->complexity));
133 "Unable to set complexity: %s\n", opus_strerror(ret));
135 ret = opus_multistream_encoder_ctl(enc, OPUS_SET_VBR(!!opts->vbr));
138 "Unable to set VBR: %s\n", opus_strerror(ret));
140 ret = opus_multistream_encoder_ctl(enc,
141 OPUS_SET_VBR_CONSTRAINT(opts->vbr == 2));
144 "Unable to set constrained VBR: %s\n", opus_strerror(ret));
146 ret = opus_multistream_encoder_ctl(enc,
147 OPUS_SET_PACKET_LOSS_PERC(opts->packet_loss));
150 "Unable to set expected packet loss percentage: %s\n",
153 ret = opus_multistream_encoder_ctl(enc,
154 OPUS_SET_INBAND_FEC(opts->fec));
157 "Unable to set inband FEC: %s\n",
161 ret = opus_multistream_encoder_ctl(enc,
162 OPUS_SET_MAX_BANDWIDTH(opts->max_bandwidth));
165 "Unable to set maximum bandwidth: %s\n", opus_strerror(ret));
168 #ifdef OPUS_SET_PHASE_INVERSION_DISABLED_REQUEST
169 ret = opus_multistream_encoder_ctl(enc,
170 OPUS_SET_PHASE_INVERSION_DISABLED(!opts->apply_phase_inv));
173 "Unable to set phase inversion: %s\n",
181 if (avctx->channels > max_channels) {
195 "No channel layout specified. Opus encoder will use Vorbis "
196 "channel layout for %d channels.\n", avctx->channels);
202 "Invalid channel layout %s for specified mapping family %d.\n",
203 name, mapping_family);
214 const uint8_t ** channel_map_result)
219 switch (mapping_family) {
247 "Unknown channel mapping family %d. Output channel layout may be invalid.\n",
252 *channel_map_result = channel_map;
260 uint8_t libopus_channel_mapping[255];
263 int coupled_stream_count, header_size, frame_size;
272 "LPC mode cannot be used with a frame duration of less "
273 "than 10ms. Enabling restricted low-delay mode.\n"
274 "Use a longer frame duration if this is not what you want.\n");
282 #ifdef OPUS_FRAMESIZE_120_MS
292 "Frame duration must be exactly one of: 2.5, 5, 10, 20, 40"
293 #ifdef OPUS_FRAMESIZE_120_MS
294 ", 60, 80, 100 or 120.\n",
304 "Compression level must be in the range 0 to 10. "
305 "Defaulting to 10.\n");
330 "Invalid frequency cutoff: %d. Using default maximum bandwidth.\n"
331 "Cutoff frequency must be exactly one of: 4000, 6000, 8000, 12000 or 20000.\n",
349 mapping_family = avctx->channels > 2 ? 1 : 0;
352 memcpy(libopus_channel_mapping,
354 avctx->channels * sizeof(*libopus_channel_mapping));
356 enc = opus_multistream_encoder_create(
358 coupled_stream_count,
366 enc = opus_multistream_surround_encoder_create(
368 &opus->stream_count, &coupled_stream_count, libopus_channel_mapping,
372 if (ret != OPUS_OK) {
374 "Failed to create encoder: %s\n", opus_strerror(ret));
381 32000 * coupled_stream_count;
383 "No bit rate set. Defaulting to %"PRId64" bps.\n", avctx->bit_rate);
388 "Please choose a value between 500 and %d.\n", avctx->
bit_rate,
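Line 381 is the coupled-stream term of the default bit rate chosen when none is set: each coupled (stereo) stream adds 32000 b/s on top of a per-stream base term that is elided from this listing. The stream counts themselves follow from the opus_coupled_streams[] table near the top of the file. A sketch of that bookkeeping, assuming the usual rule that each coupled stream carries two channels and each remaining stream one:

/* Sketch: derive stream counts for mapping families 0/1 from the
 * opus_coupled_streams table shown above (index is channels - 1). */
static const unsigned char coupled_streams_tab[8] = { 0, 1, 1, 2, 2, 2, 2, 3 };

static void opus_stream_counts(int channels, int *streams, int *coupled)
{
    *coupled = coupled_streams_tab[channels - 1];
    *streams = channels - *coupled;  /* stereo: 1 stream, 1 coupled; 5.1: 4 streams, 2 coupled */
}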
395 if (ret != OPUS_OK) {
401 header_size = 19 + (mapping_family == 0 ? 0 : 2 + avctx->channels);
421 "Unable to get number of lookahead samples: %s\n",
425 mapping_family, libopus_channel_mapping);
434 opus_multistream_encoder_destroy(enc);
441 int nb_channels, int nb_samples, int bytes_per_sample) {
448 memcpy(&dst[dst_pos], &src[src_pos], bytes_per_sample);
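Only the innermost copy of libopus_copy_samples_with_channel_map() survives in this listing. A hedged reconstruction of the surrounding loops over interleaved samples, assuming the direction implied by the dst_pos/src_pos names (source channel ch is written to destination slot channel_map[ch]):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Sketch only: per-sample channel reorder for interleaved audio. */
static void copy_samples_with_map(uint8_t *dst, const uint8_t *src,
                                  const uint8_t *channel_map,
                                  int nb_channels, int nb_samples,
                                  int bytes_per_sample)
{
    for (int sample = 0; sample < nb_samples; sample++) {
        for (int ch = 0; ch < nb_channels; ch++) {
            const size_t src_pos = (size_t)bytes_per_sample * (nb_channels * sample + ch);
            const size_t dst_pos = (size_t)bytes_per_sample * (nb_channels * sample + channel_map[ch]);
            memcpy(&dst[dst_pos], &src[src_pos], bytes_per_sample);
        }
    }
}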
458 const int sample_size = avctx->channels * bytes_per_sample;
474 memcpy(audio, frame->data[0], frame->nb_samples * sample_size);
476 audio = frame->data[0];
491 ret = opus_multistream_encode_float(opus->enc, (float *)audio,
495 ret = opus_multistream_encode(opus->enc, (opus_int16 *)audio,
501 "Error encoding frame: %s\n", opus_strerror(ret));
512 if ((discard_padding < opus->opts.packet_size) != (avpkt->duration > 0)) {
516 if (discard_padding > 0) {
524 AV_WL32(side_data + 4, discard_padding);
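The AV_WL32(side_data + 4, discard_padding) write targets the second field of the AV_PKT_DATA_SKIP_SAMPLES side data attached to the packet. A sketch of that layout, with field meanings taken from the libavcodec side-data documentation rather than from this file (avpkt and discard_padding as in the surrounding code):

/* Assumed AV_PKT_DATA_SKIP_SAMPLES layout (10 bytes):
 *   bytes 0-3: u32le samples to discard from the start of the packet
 *   bytes 4-7: u32le samples to discard from the end of the packet
 *   byte  8  : reason for the start skip
 *   byte  9  : reason for the end skip
 * so writing discard_padding at offset 4 trims trailing samples only. */
uint8_t *sd = av_packet_new_side_data(avpkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
if (sd) {
    AV_WL32(sd,     0);               /* keep all leading samples  */
    AV_WL32(sd + 4, discard_padding); /* drop the trailing padding */
    sd[8] = 0;
    sd[9] = 0;
}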
536 opus_multistream_encoder_destroy(opus->enc);
546 #define OFFSET(x) offsetof(LibopusEncContext, opts.x)
547 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
549 { "application", "Intended application type", OFFSET(application), AV_OPT_TYPE_INT, { .i64 = OPUS_APPLICATION_AUDIO }, OPUS_APPLICATION_VOIP, OPUS_APPLICATION_RESTRICTED_LOWDELAY, FLAGS, "application" },
550 { "voip", "Favor improved speech intelligibility", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_VOIP }, 0, 0, FLAGS, "application" },
551 { "audio", "Favor faithfulness to the input", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_AUDIO }, 0, 0, FLAGS, "application" },
552 { "lowdelay", "Restrict to only the lowest delay modes", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_RESTRICTED_LOWDELAY }, 0, 0, FLAGS, "application" },
553 { "frame_duration", "Duration of a frame in milliseconds", OFFSET(frame_duration), AV_OPT_TYPE_FLOAT, { .dbl = 20.0 }, 2.5, 120.0, FLAGS },
560 { "mapping_family", "Channel Mapping Family", OFFSET(mapping_family), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 255, FLAGS, "mapping_family" },
561 #ifdef OPUS_SET_PHASE_INVERSION_DISABLED_REQUEST
562 { "apply_phase_inv", "Apply intensity stereo phase inversion", OFFSET(apply_phase_inv), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
576 { "compression_level", "10" },
581 48000, 24000, 16000, 12000, 8000, 0,
600 .wrapper_name = "libopus",
static const AVClass libopus_class
int frame_size
Number of samples per channel in an audio frame.
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
#define AV_LOG_WARNING
Something somehow does not look correct.
static int libopus_encode(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
static av_cold int init(AVCodecContext *avctx)
uint64_t channel_layout
Audio channel layout.
void ff_af_queue_remove(AudioFrameQueue *afq, int nb_samples, int64_t *pts, int64_t *duration)
Remove frame(s) from the queue.
@ OPUS_BANDWIDTH_NARROWBAND
int sample_rate
samples per second
void ff_af_queue_close(AudioFrameQueue *afq)
Close AudioFrameQueue.
static enum AVSampleFormat sample_fmts[]
av_cold void ff_af_queue_init(AVCodecContext *avctx, AudioFrameQueue *afq)
Initialize AudioFrameQueue.
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
This structure describes decoded (raw) audio or video data.
void * av_mallocz_array(size_t nmemb, size_t size)
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
@ OPUS_BANDWIDTH_FULLBAND
static const uint8_t opus_vorbis_channel_map[8][8]
static av_cold int libopus_encode_init(AVCodecContext *avctx)
void av_shrink_packet(AVPacket *pkt, int size)
Reduce packet size, correctly zeroing padding.
int initial_padding
Audio only.
int ff_af_queue_add(AudioFrameQueue *afq, const AVFrame *f)
Add a frame to the queue.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
static av_cold int libopus_encode_close(AVCodecContext *avctx)
int global_quality
Global quality for codecs which cannot change it per frame.
static void libopus_write_header(AVCodecContext *avctx, int stream_count, int coupled_stream_count, int mapping_family, const uint8_t *channel_mapping)
int ff_opus_error_to_averror(int err)
@ OPUS_BANDWIDTH_WIDEBAND
AVCodec ff_libopus_encoder
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
static int libopus_configure_encoder(AVCodecContext *avctx, OpusMSEncoder *enc, LibopusEncOpts *opts)
int64_t bit_rate
the average bitrate
const char * av_default_item_name(void *ptr)
Return the context name.
static const AVOption libopus_options[]
const uint8_t ff_vorbis_channel_layout_offsets[8][8]
@ OPUS_BANDWIDTH_SUPERWIDEBAND
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
enum AVSampleFormat sample_fmt
audio sample format
static const AVCodecDefault libopus_defaults[]
const uint8_t * encoder_channel_map
int channels
number of audio channels
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
static av_always_inline void bytestream_put_buffer(uint8_t **b, const uint8_t *src, unsigned int size)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
int cutoff
Audio cutoff bandwidth (0 means "automatic")
AVSampleFormat
Audio sample formats.
@ AV_SAMPLE_FMT_S16
signed 16 bits
const char * name
Name of the codec implementation.
@ AV_PKT_DATA_SKIP_SAMPLES
Recommends skipping the specified number of samples.
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, buffer_size_t size)
static int libopus_check_max_channels(AVCodecContext *avctx, int max_channels)
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
#define AV_INPUT_BUFFER_PADDING_SIZE
static int libopus_check_vorbis_layout(AVCodecContext *avctx, int mapping_family)
main external API structure.
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
@ OPUS_BANDWIDTH_MEDIUMBAND
This structure stores compressed data.
const uint64_t ff_vorbis_channel_layouts[9]
#define AV_CODEC_CAP_SMALL_LAST_FRAME
Codec can be fed a final frame with a smaller size.
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
static int libopus_validate_layout_and_get_channel_map(AVCodecContext *avctx, int mapping_family, const uint8_t **channel_map_result)
static const uint8_t libavcodec_libopus_channel_map[8][8]
static void libopus_copy_samples_with_channel_map(uint8_t *dst, const uint8_t *src, const uint8_t *channel_map, int nb_channels, int nb_samples, int bytes_per_sample)
static const uint8_t opus_coupled_streams[8]
static const int libopus_sample_rates[]