85 while (i[0] < len && !(in[i[0]] && (in[i[0]]->reference & sel)))
87 while (i[1] < len && !(in[i[1]] && (in[i[1]]->reference & (sel ^ 3))))
91 in[i[0]]->pic_id = is_long ? i[0] : in[i[0]]->frame_num;
96 in[i[1]]->pic_id = is_long ? i[1] : in[i[1]]->frame_num;
105 int len, int limit, int dir)
111 best_poc = dir ? INT_MIN : INT_MAX;
113 for (i = 0; i < len; i++) {
114 const int poc = src[i]->poc;
115 if (((poc > limit) ^ dir) && ((poc < best_poc) ^ dir)) {
117 sorted[out_i] = src[i];
120 if (best_poc == (dir ? INT_MIN : INT_MAX))
122 limit = sorted[out_i++]->poc - dir;
130 return (h->cur_pic_ptr->f->width != f->width ||
131 h->cur_pic_ptr->f->height != f->height ||
132 h->cur_pic_ptr->f->format != f->format);
148 cur_poc = h->cur_pic_ptr->poc;
156 sorted, len, 0, h->picture_structure);
159 h->long_ref, 16, 1, h->picture_structure);
162 if (len < sl->ref_count[list])
167 if (lens[0] == lens[1] && lens[1] > 1) {
168 for (i = 0; i < lens[0] &&
177 h->short_ref, h->short_ref_count, 0, h->picture_structure);
180 h->long_ref, 16, 1, h->picture_structure);
183 if (len < sl->ref_count[0])
188 ff_tlog(h->avctx, "List0: %s fn:%d 0x%p\n",
195 ff_tlog(h->avctx, "List1: %s fn:%d 0x%p\n",
225 for (i = 0; i < h->short_ref_count; i++) {
241 for (i = 0; i < 16; i++) {
263 *structure = h->picture_structure;
284 for (j = 0; j < 3; j++)
285 field[0].linesize[j] <<= 1;
291 for (j = 0; j < 3; j++)
318 switch (modification_of_pic_nums_idc) {
321 const unsigned int abs_diff_pic_num = val + 1;
326 "abs_diff_pic_num overflow\n");
330 if (modification_of_pic_nums_idc == 0)
331 pred -= abs_diff_pic_num;
333 pred += abs_diff_pic_num;
338 for (i = h->short_ref_count - 1; i >= 0; i--) {
339 ref = h->short_ref[i];
340 assert(ref->reference);
341 assert(!ref->long_ref);
342 if (ref->frame_num == frame_num &&
343 (ref->reference & pic_structure))
356 if (long_idx > 31U) {
358 "long_term_pic_idx overflow\n");
361 ref = h->long_ref[long_idx];
362 assert(!(ref && !ref->reference));
363 if (ref && (ref->reference & pic_structure)) {
364 ref->pic_id = pic_id;
365 assert(ref->long_ref);
378 i < 0 ? "reference picture missing during reorder\n" :
379 "mismatching reference\n"
406 h->last_pocs[i] = INT_MIN;
407 if (h->default_ref[list].parent
445 "illegal modification_of_pic_nums_idc %u\n",
475 for (i = 0; h->delayed_pic[i]; i++)
476 if (pic == h->delayed_pic[i]) {
496 for (i = 0; i < h->short_ref_count; i++) {
516 assert(i >= 0 && i < h->short_ref_count);
518 if (--h->short_ref_count)
519 memmove(&h->short_ref[i], &h->short_ref[i + 1],
552 pic = h->long_ref[i];
555 assert(h->long_ref[i]->long_ref == 1);
556 h->long_ref[i]->long_ref = 0;
569 for (i = 0; i < 16; i++) {
572 assert(h->long_ref_count == 0);
574 if (h->short_ref_count && !h->last_pic_for_ec.f->data[0]) {
579 for (i = 0; i < h->short_ref_count; i++) {
583 h->short_ref_count = 0;
585 memset(h->default_ref, 0, sizeof(h->default_ref));
590 MMCO *mmco = h->mmco;
593 if (h->short_ref_count &&
594 h->long_ref_count + h->short_ref_count >= h->ps.sps->ref_frame_count &&
597 mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num;
607 h->nb_mmco = nb_mmco;
612 MMCO *mmco = h->mmco;
615 int pps_ref_count[2] = {0};
616 int current_ref_assigned = 0, err = 0;
625 if (!h->explicit_ref_marking)
627 mmco_count = h->nb_mmco;
632 for (i = 0; i < mmco_count; i++) {
636 h->mmco[i].short_pic_num, h->mmco[i].long_arg);
645 h->long_ref[mmco[i].long_arg]->frame_num != frame_num) {
653 switch (mmco[i].opcode) {
657 h->mmco[i].short_pic_num, h->short_ref_count);
673 pic = h->long_ref[j];
687 if (h->short_ref[0] == h->cur_pic_ptr) {
688 av_log(h->avctx, AV_LOG_ERROR, "mmco: cannot assign current picture to short and long at the same time\n");
693 if (h->cur_pic_ptr->long_ref) {
695 if (h->long_ref[j] == h->cur_pic_ptr) {
696 if (j != mmco[i].long_arg)
697 av_log(h->avctx, AV_LOG_ERROR, "mmco: cannot assign current picture to 2 long term references\n");
703 if (h->long_ref[mmco[i].long_arg] != h->cur_pic_ptr) {
712 h->cur_pic_ptr->reference |= h->picture_structure;
713 current_ref_assigned = 1;
716 assert(mmco[i].long_arg <= 16);
718 for (j = mmco[i].long_arg; j < 16; j++) {
723 while (h->short_ref_count) {
726 for (j = 0; j < 16; j++) {
729 h->poc.frame_num = h->cur_pic_ptr->frame_num = 0;
731 h->cur_pic_ptr->mmco_reset = 1;
733 h->last_pocs[j] = INT_MIN;
739 if (!current_ref_assigned) {
746 if (h->short_ref_count && h->short_ref[0] == h->cur_pic_ptr) {
748 h->cur_pic_ptr->reference |= h->picture_structure;
749 } else if (h->cur_pic_ptr->long_ref) {
751 "assignment for second field "
752 "in complementary field pair "
753 "(first field is long term)\n");
762 if (h->short_ref_count)
763 memmove(&h->short_ref[1], &h->short_ref[0],
766 h->short_ref[0] = h->cur_pic_ptr;
767 h->short_ref_count++;
768 h->cur_pic_ptr->reference |= h->picture_structure;
772 if (h->long_ref_count + h->short_ref_count > FFMAX(h->ps.sps->ref_frame_count, 1)) {
779 "number of reference frames (%d+%d) exceeds max (%d; probably "
780 "corrupt input), discarding one\n",
781 h->long_ref_count, h->short_ref_count, h->ps.sps->ref_frame_count);
784 if (h->long_ref_count && !h->short_ref_count) {
785 for (i = 0; i < 16; ++i)
792 pic = h->short_ref[h->short_ref_count - 1];
797 for (i = 0; i < h->short_ref_count; i++) {
798 pic = h->short_ref[i];
799 if (pic->invalid_gap) {
800 int d = av_mod_uintp2(h->cur_pic_ptr->frame_num - pic->frame_num, h->ps.sps->log2_max_frame_num);
801 if (d > h->ps.sps->ref_frame_count)
810 if (h->ps.pps_list[i]) {
811 const PPS *pps = (const PPS *)h->ps.pps_list[i]->data;
812 pps_ref_count[0] = FFMAX(pps_ref_count[0], pps->ref_count[0]);
813 pps_ref_count[1] = FFMAX(pps_ref_count[1], pps->ref_count[1]);
819 && h->long_ref_count == 0
820 && (h->short_ref_count <= 2
821 || pps_ref_count[0] <= 2 && pps_ref_count[1] <= 1 && h->avctx->has_b_frames
822 || pps_ref_count[0] <= 1 + (h->picture_structure != PICT_FRAME) && pps_ref_count[1] <= 1)
823 && pps_ref_count[0] <= 2 + (h->picture_structure != PICT_FRAME) + (2 * !h->has_recovery_point)
825 h->cur_pic_ptr->recovered |= 1;
826 if (!h->avctx->has_b_frames)
864 if (long_arg >= 32 ||
869 "illegal long ref in memory management control "
870 "operation %d\n", opcode);
879 "illegal memory management control operation %d\n",
static void remove_short_at_index(H264Context *h, int i)
Remove a picture from the short term reference list by its index in that list.
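A minimal sketch of the remove-by-index technique (shift the array tail down over the vacated slot, as the memmove() at listing line 519 does), assuming a plain pointer array plus count; the Picture type and function name here are illustrative, not FFmpeg's.
/* Sketch only: remove element i from a pointer array by shifting the tail. */
#include <string.h>
#include <stddef.h>
typedef struct Picture { int frame_num; } Picture;
static void remove_at_index_sketch(Picture **list, int *count, int i)
{
    if (i < 0 || i >= *count)
        return;                         /* out-of-range index: nothing to do */
    (*count)--;
    if (i < *count)                     /* shift the remaining tail down one slot */
        memmove(&list[i], &list[i + 1], (*count - i) * sizeof(*list));
    list[*count] = NULL;                /* clear the now-unused last slot */
}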
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
int nb_ref_modifications[2]
int long_arg
index, pic_num, or num long refs depending on opcode
static int mismatches_ref(const H264Context *h, const H264Picture *pic)
static void print_long_term(const H264Context *h)
print long term list
#define FFSWAP(type, a, b)
static int split_field_copy(H264Ref *dest, H264Picture *src, int parity, int id_add)
struct H264SliceContext::@69 ref_modifications[2][32]
static void ref_from_h264pic(H264Ref *dst, H264Picture *src)
This structure describes decoded (raw) audio or video data.
MMCO mmco[MAX_MMCO_COUNT]
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
#define PICT_BOTTOM_FIELD
static int build_def_list(H264Ref *def, int def_len, H264Picture *const *in, int len, int is_long, int sel)
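The two while loops at source lines 85 and 87 above advance one cursor per field parity: sel selects the wanted parity and sel ^ 3 the opposite one (PICT_TOP_FIELD is 1, PICT_BOTTOM_FIELD is 2, so XOR with 3 flips the field). A reduced sketch of that scan, with an illustrative Pic type standing in for H264Picture:
/* Sketch of the parity scan: skip entries until one is present and its
 * reference mask contains the requested parity bits. Illustrative names. */
typedef struct Pic { int reference; } Pic;
static int next_with_parity(Pic *const *in, int len, int start, int parity)
{
    int i = start;
    while (i < len && !(in[i] && (in[i]->reference & parity)))
        i++;
    return i;                           /* == len when nothing further matches */
}
Calling this once with sel and once with sel ^ 3 reproduces the two cursors i[0] and i[1] in the listing.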
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
int ff_h264_decode_ref_pic_list_reordering(H264SliceContext *sl, void *logctx)
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
static H264Picture * remove_long(H264Context *h, int i, int ref_mask)
Remove a picture from the long term reference list by its index in that list.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
int frame_num
frame_num (raw frame_num from slice header)
static double val(void *priv, double ch)
static H264Picture * find_short(H264Context *h, int frame_num, int *idx)
Find a H264Picture in the short term reference list by frame number.
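A sketch of the lookup, assuming a simple linear walk of the short-term array; the types, the NULL-tolerant check and the names are illustrative rather than the exact implementation.
/* Sketch: linear search of the short-term list by frame_num, also reporting
 * the index of the match. Illustrative names only. */
#include <stddef.h>
typedef struct Pic { int frame_num; } Pic;
static Pic *find_short_sketch(Pic **short_ref, int count, int frame_num, int *idx)
{
    for (int i = 0; i < count; i++) {
        if (short_ref[i] && short_ref[i]->frame_num == frame_num) {
            if (idx)
                *idx = i;
            return short_ref[i];
        }
    }
    return NULL;                        /* no short-term picture with that frame_num */
}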
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int op(uint8_t **dst, const uint8_t *dst_end, GetByteContext *gb, int pixel, int count, int *x, int width, int linesize)
Perform decode operation.
int ff_h264_execute_ref_pic_marking(H264Context *h)
Execute the reference picture marking (memory management control operations).
int ff_h264_decode_ref_pic_marking(H264SliceContext *sl, GetBitContext *gb, const H2645NAL *nal, void *logctx)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
void ff_h264_remove_all_refs(H264Context *h)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this field.
static int pic_num_extract(const H264Context *h, int pic_num, int *structure)
Extract structure information about the picture described by pic_num in the current decoding context ...
int short_pic_num
pic_num without wrapping (pic_num & max_pic_num)
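For field pictures, pic_num carries the field parity in its low bit and the frame-level number in the remaining bits; a sketch of that split with the picture-structure constants inlined (PICT_TOP_FIELD = 1, PICT_BOTTOM_FIELD = 2, PICT_FRAME = 3), not the verbatim implementation:
/* Sketch: split pic_num into (frame-level number, field structure).
 * An odd pic_num means "same parity as the current field", an even one
 * means the opposite parity; frame pictures pass through unchanged. */
static int pic_num_split_sketch(int pic_num, int cur_structure, int *structure)
{
    *structure = cur_structure;
    if (cur_structure != 3) {           /* 3 == PICT_FRAME, i.e. a field picture otherwise */
        if (!(pic_num & 1))
            *structure ^= 3;            /* flip top (1) <-> bottom (2) */
        pic_num >>= 1;                  /* drop the parity bit */
    }
    return pic_num;
}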
@ AV_PICTURE_TYPE_I
Intra.
static unsigned int get_bits1(GetBitContext *s)
#define AV_EF_EXPLODE
abort decoding on minor error detection
static void print_short_term(const H264Context *h)
print short term list
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H264RawPPS *current)
#define MAX_DELAYED_PIC_COUNT
MMCOOpcode
Memory management control operation opcode.
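The opcodes mirror the memory_management_control_operation values of the H.264 specification (clause 7.4.3.3); a sketch of that numbering with illustrative enumerator names (the real enum is declared elsewhere in the decoder headers):
/* Sketch of the seven MMCO opcode values defined by H.264.
 * Enumerator names are illustrative, not FFmpeg's. */
typedef enum {
    MMCO_SKETCH_END = 0,        /* 0: end of the MMCO list                  */
    MMCO_SKETCH_SHORT2UNUSED,   /* 1: mark a short-term picture as unused   */
    MMCO_SKETCH_LONG2UNUSED,    /* 2: mark a long-term picture as unused    */
    MMCO_SKETCH_SHORT2LONG,     /* 3: convert a short-term ref to long-term */
    MMCO_SKETCH_SET_MAX_LONG,   /* 4: set the max long-term frame index     */
    MMCO_SKETCH_RESET,          /* 5: mark all reference pictures as unused */
    MMCO_SKETCH_LONG            /* 6: mark the current picture as long-term */
} MMCOOpcodeSketch;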
static int unreference_pic(H264Context *h, H264Picture *pic, int refmask)
Mark a picture as no longer needed for reference.
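A sketch of the refmask idea: the reference field is a bitmask of which fields (top = 1, bottom = 2, possibly plus a delayed-output flag) still need the picture, and clearing bits only releases it once nothing remains. Types and names are illustrative.
/* Sketch: clear the requested reference bits and report whether the picture
 * became completely unreferenced. Illustrative types/names. */
typedef struct Pic { int reference; } Pic;
static int unreference_sketch(Pic *pic, int refmask)
{
    pic->reference &= refmask;          /* keep only the bits allowed to survive */
    if (pic->reference)
        return 0;                       /* some field or flag still uses it */
    return 1;                           /* fully unreferenced; caller may drop it */
}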
int av_buffer_get_ref_count(const AVBufferRef *buf)
static void skip_bits1(GetBitContext *s)
int slice_type_nos
S-free slice type (SI/SP are remapped to I/P)
static void h264_initialise_ref_list(H264Context *h, H264SliceContext *sl)
#define i(width, name, range_min, range_max)
int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src)
static const float pred[4]
#define FF_ARRAY_ELEMS(a)
static H264Picture * remove_short(H264Context *h, int frame_num, int ref_mask)
static int get_ue_golomb_31(GetBitContext *gb)
Read an unsigned Exp-Golomb code, constrained to a maximum of 31.
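For reference, a standalone sketch of unsigned Exp-Golomb ("ue(v)") decoding over a toy bit reader, not FFmpeg's GetBitContext: count leading zero bits, read that many suffix bits, and the value is (1 << n) - 1 + suffix. The bounded 31 variant above is a fast path for this same coding.
#include <stdint.h>
#include <stddef.h>
/* Toy MSB-first bit reader; illustrative, not GetBitContext. */
typedef struct { const uint8_t *buf; size_t pos; } BitReaderSketch;
static int read_bit_sketch(BitReaderSketch *br)
{
    int bit = (br->buf[br->pos >> 3] >> (7 - (br->pos & 7))) & 1;
    br->pos++;
    return bit;
}
/* ue(v): n leading zeros, a 1, then n suffix bits; value = 2^n - 1 + suffix. */
static unsigned read_ue_sketch(BitReaderSketch *br)
{
    int n = 0;
    while (!read_bit_sketch(br))
        n++;
    unsigned suffix = 0;
    for (int i = 0; i < n; i++)
        suffix = (suffix << 1) | (unsigned)read_bit_sketch(br);
    return ((1u << n) - 1) + suffix;
}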
int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl)
static void generate_sliding_window_mmcos(H264Context *h)
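The check at source lines 593-597 above implements the sliding window: when no explicit marking was signalled and short plus long references reach the SPS ref_frame_count limit, one "short-term to unused" operation is synthesized against the oldest short-term picture. A sketch of that rule, ignoring the field-pair special case present in the real condition; types and names are illustrative.
/* Sketch: generate at most one sliding-window MMCO. short_frame_nums is
 * ordered newest-first, so the oldest short-term frame_num sits at the end. */
typedef struct { int opcode; int short_pic_num; } MMCOSketch;
static int sliding_window_sketch(MMCOSketch *mmco,
                                 int short_count, int long_count,
                                 int ref_frame_count,
                                 const int *short_frame_nums)
{
    if (short_count && long_count + short_count >= ref_frame_count) {
        mmco[0].opcode        = 1;      /* MMCO 1: short-term -> unused */
        mmco[0].short_pic_num = short_frame_nums[short_count - 1];
        return 1;                       /* one operation generated */
    }
    return 0;                           /* window not full yet: nothing to do */
}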
int field_poc[2]
top/bottom POC
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
static int add_sorted(H264Picture **sorted, H264Picture *const *src, int len, int limit, int dir)
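Source lines 111-122 above show the core of this routine: repeatedly pick, among POCs on the requested side of limit, the one closest to it, so dir = 1 emits POCs at or below the limit in descending order and dir = 0 emits POCs above it in ascending order. A sketch of the same selection pass over plain ints instead of H264Picture pointers:
#include <limits.h>
/* Sketch of the dual-direction selection pass; the XOR with dir flips both
 * comparisons so one loop serves both orderings. Illustrative only. */
static int add_sorted_sketch(int *sorted, const int *poc, int len,
                             int limit, int dir)
{
    int out_i = 0;
    for (;;) {
        int best = dir ? INT_MIN : INT_MAX;
        for (int i = 0; i < len; i++) {
            if (((poc[i] > limit) ^ dir) && ((poc[i] < best) ^ dir))
                best = sorted[out_i] = poc[i];
        }
        if (best == (dir ? INT_MIN : INT_MAX))
            break;                      /* no POC left on that side */
        limit = sorted[out_i++] - dir;  /* step the limit past the pick */
    }
    return out_i;                       /* number of entries written */
}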
static int ref[MAX_W * MAX_W]
static void h264_fill_mbaff_ref_list(H264SliceContext *sl)
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
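The 0..15 / 16..47 split means that, for MBAFF, each frame reference i is additionally exposed as a pair of field references stored from index 16 onward (top field first, which is what the linesize and data-pointer adjustments at source lines 284-291 set up). A sketch of just the index mapping; the function name is illustrative.
/* Sketch: where the two field views of frame reference i live in ref_list.
 * Frame refs occupy indices 0..15; field refs occupy 16..47, two per frame. */
static int mbaff_field_ref_index_sketch(int frame_ref_index, int bottom)
{
    return 16 + 2 * frame_ref_index + (bottom ? 1 : 0);
}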
Memory management control operation.
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
static void pic_as_field(H264Ref *pic, const int parity)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
int long_ref
1 -> long term reference, 0 -> short term reference