/* libavcodec/opusenc_psy.c — psychoacoustic system for the FFmpeg Opus encoder */
/* Trial-quantizes one band with the PVQ quantizer and returns its weighted
 * rate-distortion cost, lambda * distortion * bits. */
static float pvq_band_cost(CeltPVQ *pvq, CeltFrame *f, OpusRangeCoder *rc, int band,
                           float *bits, float lambda)
{
    int i, b = 0;
    uint32_t cm[2] = { (1 << f->blocks) - 1, (1 << f->blocks) - 1 };
    float buf[176 * 2], lowband_scratch[176], norm1[176], norm2[176];
    float dist, cost, err_x = 0.0f, err_y = 0.0f;
    float *X = buf;
    float *Y = (f->channels == 2) ? &buf[176] : NULL;
    /* ... band_size, X_orig/Y_orig (the band's original coefficients) and a
     * range coder checkpoint are set up here ... */

    memcpy(X, X_orig, band_size*sizeof(float));
    if (Y)
        memcpy(Y, Y_orig, band_size*sizeof(float));

    if (band <= f->coded_bands - 1) {
        int curr_balance = f->remaining / FFMIN(3, f->coded_bands - band);
        /* ... b is derived from the remaining bits and the band's pulse allocation ... */
    }

    if (f->dual_stereo) {
        /* Dual stereo: each channel is coded separately with half the bits */
        pvq->quant_band(pvq, f, rc, band, X, NULL, band_size, b / 2, f->blocks, NULL,
                        f->size, norm1, 0, 1.0f, lowband_scratch, cm[0]);

        pvq->quant_band(pvq, f, rc, band, Y, NULL, band_size, b / 2, f->blocks, NULL,
                        f->size, norm2, 0, 1.0f, lowband_scratch, cm[1]);
    } else {
        /* Joint coding of both channels */
        pvq->quant_band(pvq, f, rc, band, X, Y, band_size, b, f->blocks, NULL, f->size,
                        norm1, 0, 1.0f, lowband_scratch, cm[0] | cm[1]);
    }

    for (i = 0; i < band_size; i++) {
        err_x += (X[i] - X_orig[i])*(X[i] - X_orig[i]);
        if (Y)
            err_y += (Y[i] - Y_orig[i])*(Y[i] - Y_orig[i]);
    }

    /* ... dist and cost are derived from the errors above and the bits spent,
     * then the range coder state is rolled back ... */

    return lambda*dist*cost;
}
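/*
 * Illustration (not part of the original file): the value returned above is a
 * rate-distortion product, so comparing two coding configurations reduces to
 * summing the per-band cost under each configuration and keeping the cheaper
 * one, which is essentially how the dual stereo and intensity searches below
 * use bands_dist(). The sketch uses a hypothetical band_cost() callback rather
 * than the real PVQ trial quantization.
 */
static float total_cost(float (*band_cost)(int band, int config), int nb_bands,
                        int config)
{
    float t = 0.0f;
    for (int band = 0; band < nb_bands; band++)
        t += band_cost(band, config);
    return t;
}

/* Returns 1 if configuration 1 has the lower summed cost, 0 otherwise. */
static int cheaper_config(float (*band_cost)(int band, int config), int nb_bands)
{
    return total_cost(band_cost, nb_bands, 1) < total_cost(band_cost, nb_bands, 0);
}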
/* Collects the psychoacoustic metrics of one analysis step: windowed MDCT,
 * per-band energy, tonality and stereo incompatibility. */
static void step_collect_psy_metrics(OpusPsyContext *s, int index)
{
    int silence = 0, ch, i, j;
    OpusPsyStep *st = s->steps[index];

    /* Window the buffered samples and run the analysis MDCT per channel */
    for (ch = 0; ch < s->avctx->ch_layout.nb_channels; ch++) {
        const int lap_size = (1 << s->bsize_analysis);
        for (i = 0; i < lap_size; i++) {
            const int offset = i*120 + lap_size;
            /* ... gather 120 samples per step from the queued input frames
             * (ff_bufqueue_peek()) into s->scratch ... */
        }
        s->dsp->vector_fmul(s->scratch, s->scratch, s->window[s->bsize_analysis],
                            /* ... windowed length ... */);
        s->mdct_fn[s->bsize_analysis](s->mdct[s->bsize_analysis], st->coeffs[ch],
                                      s->scratch, sizeof(float));
    }

    /* Per-band energy and tonality (deviation from the band's average power) */
    for (ch = 0; ch < s->avctx->ch_layout.nb_channels; ch++) {
        for (i = 0; i < CELT_MAX_BANDS; i++) {
            float avg_c_s, energy = 0.0f, dist_dev = 0.0f;
            const int range = ff_celt_freq_range[i] << s->bsize_analysis;
            const float *coeffs = st->bands[ch][i];

            for (j = 0; j < range; j++)
                energy += coeffs[j]*coeffs[j];

            silence |= !!st->energy[ch][i];
            avg_c_s = energy / range;

            for (j = 0; j < range; j++) {
                const float c_s = coeffs[j]*coeffs[j];
                dist_dev += (avg_c_s - c_s)*(avg_c_s - c_s);
            }
            /* ... st->energy[ch][i] and st->tone[ch][i] are accumulated from
             * energy and dist_dev ... */
        }
    }

    /* ... st->silence is derived from the accumulated silence flag ... */

    /* Stereo incompatibility per band */
    if (s->avctx->ch_layout.nb_channels > 1) {
        for (i = 0; i < CELT_MAX_BANDS; i++) {
            float incompat = 0.0f;
            const int range = ff_celt_freq_range[i] << s->bsize_analysis;
            const float *coeffs1 = st->bands[0][i];
            const float *coeffs2 = st->bands[1][i];
            for (j = 0; j < range; j++)
                incompat += (coeffs1[j] - coeffs2[j])*(coeffs1[j] - coeffs2[j]);
            /* ... st->stereo[i] is derived from incompat ... */
        }
    }

    /* Band excitation change tracking (bessel_filter()) */
    for (ch = 0; ch < s->avctx->ch_layout.nb_channels; ch++) {
        /* ... st->change_amp[ch][i] and st->total_change are accumulated from
         * the filtered band energies ... */
    }
}
/* Recursively splits the range of buffered steps at the points where the
 * accumulated energy change crosses the target, recording inflection points. */
static void search_for_change_points(OpusPsyContext *s, float tgt_change,
                                     int offset_s, int offset_e, int resolution,
                                     int level)
{
    int i;
    float c_change = 0.0f;

    /* ... stop once the window is smaller than the resolution ... */

    for (i = offset_s; i < offset_e; i++) {
        c_change += s->steps[i]->total_change;
        if (c_change > tgt_change)
            break;
    }

    /* ... recurse into both halves around the crossing point ... */
    s->inflection_points[s->inflection_points_count++] = i;
}
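/*
 * Illustration (not from the original file): the search above walks the
 * buffered steps and records where the accumulated change crosses the target,
 * with the resolution/level parameters driving a recursive refinement. A
 * minimal self-contained variant over a plain array, with hypothetical names:
 */
static void find_change_points(const float *change, int start, int end,
                               float target, int resolution,
                               int *points, int *nb_points)
{
    int i;
    float acc = 0.0f;

    if (end - start <= resolution)
        return;
    for (i = start; i < end; i++) {
        acc += change[i];
        if (acc > target)
            break;
    }
    if (i == end)
        return;
    find_change_points(change, start, i, target / 2.0f, resolution, points, nb_points);
    points[(*nb_points)++] = i;
    find_change_points(change, i, end, target / 2.0f, resolution, points, nb_points);
}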
/* If the buffer starts with a run of silent steps, group them into the largest
 * CELT frames possible and flush them. */
static int flush_silent_frames(OpusPsyContext *s)
{
    int fsize, silent_frames;

    /* Count the leading run of silent steps */
    for (silent_frames = 0; silent_frames < s->buffered_steps; silent_frames++)
        if (!s->steps[silent_frames]->silence)
            break;
    if (--silent_frames < 0)
        return 0;

    /* Pick the largest frame size that still fits inside the silent run */
    for (fsize = CELT_BLOCK_960; fsize > CELT_BLOCK_120; fsize--) {
        if ((1 << fsize) > silent_frames)
            continue;
        /* ... set s->p.framesize / s->p.frames and report a flush ... */
    }

    return 0;
}
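/*
 * Illustration (not from the original file): a CELT frame of block size fsize
 * spans (1 << fsize) analysis steps (120, 240, 480 or 960 samples), so
 * flushing silence amounts to picking the largest size whose step count still
 * fits in the silent run. A hypothetical standalone version:
 */
static int largest_fitting_framesize(int silent_steps, int max_fsize)
{
    int fsize;
    for (fsize = max_fsize; fsize > 0; fsize--)
        if ((1 << fsize) <= silent_steps)
            return fsize;
    return 0;
}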
/* Decides how the buffered steps are grouped into output frames. */
static void psy_output_groups(OpusPsyContext *s)
{
    int max_delay_samples = (s->options->max_delay_ms*s->avctx->sample_rate)/1000;
    /* ... clamps the frame size to the delay budget, flushes silent frames and
     * sets s->p.frames / s->p.framesize ... */
}
/* Main analysis entry point: buffers steps until enough lookahead exists,
 * finds the energy-change inflection points and decides the output grouping. */
int ff_opus_psy_process(OpusPsyContext *s, OpusPacketInfo *p)
{
    int i;
    float total_energy_change = 0.0f;

    if (s->buffered_steps < s->max_steps && !s->eof) {
        const int awin = (1 << s->bsize_analysis);
        if (++s->steps_to_process >= awin) {
            /* ... run step_collect_psy_metrics() on the completed analysis window ... */
            s->steps_to_process = 0;
        }
        if ((++s->buffered_steps) < s->max_steps)
            return 1; /* keep buffering */
    }

    for (i = 0; i < s->buffered_steps; i++)
        total_energy_change += s->steps[i]->total_change;

    search_for_change_points(s, total_energy_change / 2.0f, 0,
                             s->buffered_steps, 1, 0);

    psy_output_groups(s);
    /* ... report the chosen frame count/size via *p ... */

    return 0;
}
/* Initializes one CeltFrame from the psy steps it covers: silence detection,
 * transient flagging from the inflection points, and default parameters. */
void ff_opus_psy_celt_frame_init(OpusPsyContext *s, CeltFrame *f, int index)
{
    int i, neighbouring_points = 0, start_offset = 0;
    int radius = (1 << s->p.framesize), step_offset = radius*index;
    int silence = 1;

    /* ... start/end band and mode setup ... */
    f->channels = s->avctx->ch_layout.nb_channels;
    f->size = s->p.framesize;

    for (i = 0; i < (1 << f->size); i++)
        silence &= s->steps[index*(1 << f->size) + i]->silence;

    f->silence = silence;

    /* Transient flagging: count inflection points that fall inside this frame */
    for (i = 0; i < s->inflection_points_count; i++) {
        if (s->inflection_points[i] >= step_offset) {
            start_offset = i;
            break;
        }
    }

    for (i = start_offset; i < FFMIN(radius, s->inflection_points_count - start_offset); i++) {
        if (s->inflection_points[i] < (step_offset + radius)) {
            neighbouring_points++;
        }
    }

    f->transient = neighbouring_points > 0;

    /* ... remaining defaults (blocks, postfilter, spread, trims) ... */
    f->skip_band_floor = f->end_band;
    f->intensity_stereo = f->end_band;
    /* ... */
}
/* Converts per-band tonality metrics into allocation boosts and a target bit
 * budget for the frame. */
static void celt_gauge_psy_weight(OpusPsyContext *s, OpusPsyStep **start,
                                  CeltFrame *f_out)
{
    int i, f, ch;
    float rate, frame_bits = 0;
    float band_score[CELT_MAX_BANDS] = { 0 };
    float max_score = 1.0f;

    /* Per-band tonality score accumulated over the frame's steps and channels */
    for (i = 0; i < CELT_MAX_BANDS; i++) {
        float tonal = 0.0f;
        float tonal_contrib = 0.0f;
        for (f = 0; f < (1 << s->p.framesize); f++) {
            for (ch = 0; ch < s->avctx->ch_layout.nb_channels; ch++) {
                tonal_contrib += start[f]->tone[ch][i];
            }
            tonal += tonal_contrib;
        }
        /* ... band_score[i] is derived from tonal ... */
    }

    for (i = 0; i < CELT_MAX_BANDS; i++) {
        if (band_score[i] > max_score)
            max_score = band_score[i];
    }

    /* Normalize the scores into 0..3 allocation boosts and a bit estimate */
    for (i = 0; i < CELT_MAX_BANDS; i++) {
        f_out->alloc_boost[i] = (int)((band_score[i]/max_score)*3.0f);
        frame_bits += band_score[i]*8.0f;
    }

    /* ... rate / f_out->framebits are derived from frame_bits and clamped to
     * the available bitrate ... */
}
/* Picks joint vs. dual stereo by comparing the two total band distortions. */
static void celt_search_for_dual_stereo(OpusPsyContext *s, CeltFrame *f)
{
    if (s->avctx->ch_layout.nb_channels < 2)
        return;

    /* ... td1/td2 = total distortion (bands_dist()) without/with dual stereo ... */

    f->dual_stereo = td2 < td1;
    s->dual_stereo_used += td2 < td1;
}
/* Finds the lowest band from which intensity stereo can start while keeping
 * the total band distortion minimal. */
static void celt_search_for_intensity(OpusPsyContext *s, CeltFrame *f)
{
    int i, best_band = CELT_MAX_BANDS - 1;
    float dist, best_dist = FLT_MAX;

    if (s->avctx->ch_layout.nb_channels < 2)
        return;

    /* end_band: lowest band considered by the search */
    for (i = f->end_band; i >= end_band; i--) {
        f->intensity_stereo = i;
        /* ... dist = total distortion with intensity stereo starting at band i ... */
        if (best_dist > dist) {
            best_dist = dist;
            best_band = i;
        }
    }

    f->intensity_stereo = best_band;
    s->avg_is_band = (s->avg_is_band + f->intensity_stereo)/2.0f;
}
/* Chooses the time-frequency resolution (f->tf_select and per-band tf_change)
 * that best matches the measured per-band change over the frame's steps. */
static int celt_search_for_tf(OpusPsyContext *s, OpusPsyStep **start, CeltFrame *f)
{
    int i, j, k, cway, config[2][CELT_MAX_BANDS];
    float score[2] = { 0 };

    for (cway = 0; cway < 2; cway++) {
        int base = f->transient ? 120 : 960;

        for (i = 0; i < 2; i++) {
            /* ... the two candidate resolutions are derived from base via
             * ff_celt_tf_select[] ... */
        }

        for (i = 0; i < CELT_MAX_BANDS; i++) {
            float iscore0 = 0.0f;
            float iscore1 = 0.0f;
            for (j = 0; j < (1 << f->size); j++) {
                for (k = 0; k < s->avctx->ch_layout.nb_channels; k++) {
                    /* ... iscore0/iscore1 accumulate the band's metrics at the
                     * two candidate resolutions ... */
                }
            }
            /* ... config[cway][i] picks the better resolution for this band ... */
            score[cway] += config[cway][i] ? iscore1 : iscore0;
        }
    }

    f->tf_select = score[0] < score[1];
    /* ... f->tf_change[] is filled from config[f->tf_select] ... */

    return 0;
}
/* Runs the per-frame searches; returns nonzero if the transient decision
 * changed and the frame must be re-analyzed. */
int ff_opus_psy_celt_frame_process(OpusPsyContext *s, CeltFrame *f, int index)
{
    int start_transient_flag = f->transient;
    /* ... psy weighting, dual/intensity stereo and TF searches run here ... */

    if (f->transient != start_transient_flag) {
        /* ... request reanalysis with the new transient/block setup ... */
    }

    return 0;
}
/* Called after a packet is written: drops the consumed steps, rotates the step
 * ring buffer and updates the rate-control state (lambda, avg_is_band). */
void ff_opus_psy_postencode_update(OpusPsyContext *s, CeltFrame *f)
{
    int i, frame_size = OPUS_BLOCK_SIZE(s->p.framesize);
    int steps_out = s->p.frames*(frame_size/120);
    void *tmp[FF_BUFQUEUE_SIZE]; /* scratch for rotating the step pointers */
    float ideal_fbits;

    /* Drop the input frames covered by the packet that was just encoded */
    for (i = 0; i < steps_out; i++) {
        /* ... discard one queued input frame ... */
    }

    /* Rotate the step pointers left by steps_out so the remaining steps
     * start at index 0 again */
    for (i = 0; i < s->max_steps; i++)
        tmp[i] = s->steps[i];

    for (i = 0; i < s->max_steps; i++) {
        const int i_new = i - steps_out;
        s->steps[i_new < 0 ? s->max_steps + i_new : i_new] = tmp[i];
    }

    for (i = steps_out; i < s->buffered_steps; i++)
        s->steps[i]->index -= steps_out;

    /* Rate control: nudge lambda towards the ideal number of bits per frame */
    ideal_fbits = s->avctx->bit_rate/(s->avctx->sample_rate/frame_size);

    for (i = 0; i < s->p.frames; i++) {
        s->avg_is_band += f[i].intensity_stereo;
        s->lambda *= ideal_fbits / f[i].framebits;
    }

    s->avg_is_band /= (s->p.frames + 1);

    s->steps_to_process = 0;
    s->buffered_steps -= steps_out;
    s->total_packets_out += s->p.frames;
    s->inflection_points_count = 0;
}
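/*
 * Illustration (not from the original file): the remapping above,
 * new_index = i - steps_out (wrapped by adding max_steps when negative), is a
 * left rotation of the step pointer array by steps_out slots, so the oldest
 * remaining step ends up at index 0 again; the lambda update multiplies by
 * ideal/actual frame bits, nudging the rate-distortion tradeoff toward the
 * target bitrate. A tiny standalone check of the rotation, hypothetical names:
 */
static void rotate_left(int *slots, int count, int shift)
{
    int tmp[16]; /* assumes count <= 16 for the sketch */
    for (int i = 0; i < count; i++)
        tmp[i] = slots[i];
    for (int i = 0; i < count; i++) {
        const int i_new = i - shift;
        slots[i_new < 0 ? count + i_new : i_new] = tmp[i];
    }
    /* rotate_left({0,1,2,3,4}, 5, 2) yields {2,3,4,0,1} */
}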
av_cold int ff_opus_psy_init(OpusPsyContext *s, AVCodecContext *avctx,
                             struct FFBufQueue *bufqueue, OpusEncOptions *options)
{
    int i, ch, ret = 0;

    /* ... basic state (lambda, options, avctx, analysis block size) ... */
    s->bufqueue = bufqueue;
    s->max_steps = ceilf(s->options->max_delay_ms/2.5f); /* one step = 2.5 ms */
    s->inflection_points_count = 0;

    s->inflection_points = av_mallocz(sizeof(*s->inflection_points)*s->max_steps);
    if (!s->inflection_points) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    s->dsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
    /* ... */

    /* Per-band excitation tracking filters */
    for (ch = 0; ch < s->avctx->ch_layout.nb_channels; ch++) {
        /* ... bessel_init() for every band's filter pair ... */
    }

    /* Step buffers */
    for (i = 0; i < s->max_steps; i++) {
        /* ... allocate s->steps[i] ... */
    }

    /* One analysis window and forward MDCT per CELT block size */
    for (i = 0; i < CELT_BLOCK_NB; i++) {
        float scale; /* ... window generation (generate_window_func()) and the
                      * MDCT scale factor are set up here ... */
        if ((ret = av_tx_init(&s->mdct[i], &s->mdct_fn[i], AV_TX_FLOAT_MDCT,
                              0, 15 << (i + 3), &scale, 0)) < 0)
            goto fail;
    }

    return 0;

fail:
    /* ... free the DSP context, inflection points, windows and MDCTs ... */
    for (i = 0; i < s->max_steps; i++)
        av_freep(&s->steps[i]);

    return ret;
}
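/*
 * Worked note (not from the original file): one psy step covers 120 samples,
 * i.e. 2.5 ms at 48 kHz, which is why max_steps is ceilf(max_delay_ms / 2.5f);
 * a 25 ms maximum delay therefore buffers ceilf(25 / 2.5f) = 10 steps.
 * Likewise 15 << (i + 3) equals 120 << i, so the analysis MDCT lengths are
 * 120, 240, 480 and 960 samples for the four CELT block sizes.
 */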
av_cold int ff_opus_psy_end(OpusPsyContext *s)
{
    int i;

    /* ... free the inflection points, DSP context, windows and MDCTs ... */

    for (i = 0; i < s->max_steps; i++)
        av_freep(&s->steps[i]);

    av_log(s->avctx, AV_LOG_INFO, "Dual Stereo used: %0.2f%%\n",
           ((float)s->dual_stereo_used/s->total_packets_out)*100.0f);

    return 0;
}