#include "config_components.h"
/* In ff_mpeg_update_thread_context(): copy the source thread's context,
 * then selectively reset the fields this thread must rebuild. */
if (!s->context_initialized) {
    memcpy(s, s1, sizeof(*s));

    s->context_initialized = 0;
    s->context_reinit      = 0;
    /* ... */

if (s->height != s1->height ||
    s->width  != s1->width  ||
    s->context_reinit) {
    /* ... */

if (!s->context_initialized)
    /* ... */

if ((s->width || s->height) &&
    /* ... */

memset(s->thread_context, 0, sizeof(s->thread_context));
s->thread_context[0] = s;

if (s->width && s->height) {
    /* ... */
    s->context_reinit = 0;
    /* ... */
    s->context_reinit = 1;
av_assert1(s->mb_width  == s->buffer_pools.alloc_mb_width);
av_assert1(s->mb_height == s->buffer_pools.alloc_mb_height ||
           FFALIGN(s->mb_height, 2) == s->buffer_pools.alloc_mb_height);
av_assert1(s->mb_stride == s->buffer_pools.alloc_mb_stride);
/* ... */
                                 &s->buffer_pools, s->mb_height);
/* In color_frame(): */
int h_chroma_shift, v_chroma_shift;
/* ... */
for (int i = 0; i < frame->height; i++)
/* In ff_mpv_frame_start(): */
av_assert1(!s->last_pic.ptr || s->last_pic.ptr->f->buf[0]);
av_assert1(!s->next_pic.ptr || s->next_pic.ptr->f->buf[0]);
/* ... */
       "allocating dummy last picture for B frame\n");
/* ... */
       "warning: first frame is no keyframe\n");
/* ... */
          s->last_pic.ptr->f->buf[0]));
/* ... */
    (!s->progressive_frame && !s->progressive_sequence);
s->cur_pic.ptr->field_picture = s->picture_structure != PICT_FRAME;

s->cur_pic.ptr->f->pict_type = s->pict_type;
/* ... */
ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
        (void*)s->last_pic.ptr, (void*)s->next_pic.ptr, (void*)s->cur_pic.ptr,
        s->last_pic.ptr ? s->last_pic.ptr->f->data[0] : NULL,
        s->next_pic.ptr ? s->next_pic.ptr->f->data[0] : NULL,
        s->cur_pic.ptr  ? s->cur_pic.ptr->f->data[0]  : NULL,
        s->pict_type, s->droppable);
/* ... */
if (s->cur_pic.reference)
for (unsigned y = 0; y < p->mb_height; y++)
    for (unsigned x = 0; x < p->mb_width; x++) {
        const unsigned int block_idx = y * p->mb_width  + x;
        const unsigned int mb_xy     = y * p->mb_stride + x;
/* In ff_mpeg_draw_horiz_band(): tail of the ff_draw_horiz_band() call */
                   s->last_pic.ptr ? s->last_pic.ptr->f : NULL,
                   y, h, s->picture_structure,
                   s->first_field, s->low_delay);
/* ... */
s->mb_x = s->mb_y = 0;
/* hpel_motion_lowres(): half-pel motion compensation on a downscaled
 * (lowres) reference; the MV fraction gains one bit per lowres level. */
                              uint8_t *dest, const uint8_t *src,
                              /* ... */
                              int src_x, int src_y,
                              /* ... */
                              int motion_x, int motion_y)
{
    const int lowres   = s->avctx->lowres;
    const int op_index = lowres;
    const int s_mask   = (2 << lowres) - 1;
    /* ... */
    if (s->quarter_sample) {
        /* ... */

    sx     = motion_x & s_mask;
    sy     = motion_y & s_mask;
    src_x += motion_x >> (lowres + 1);
    src_y += motion_y >> (lowres + 1);
    /* ... */
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 w + 1, (h + 1) << field_based,
                                 src_x, src_y * (1 << field_based),
                                 /* ... */);
        src = s->sc.edge_emu_buffer;
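/* ------------------------------------------------------------------
 * Illustration, not from this file: how the s_mask / shift arithmetic
 * above splits a half-pel motion vector into an integer sample offset
 * plus a sub-pel phase at each lowres level. The MV value is an
 * arbitrary example; the snippet builds and runs standalone.
 * ------------------------------------------------------------------ */
#include <stdio.h>

int main(void)
{
    const int motion_x = 21;                         /* MV in half-pel units */
    for (int lowres = 0; lowres <= 3; lowres++) {
        const int s_mask = (2 << lowres) - 1;        /* sub-pel phase mask   */
        const int sx     = motion_x & s_mask;        /* sub-pel phase        */
        const int step   = motion_x >> (lowres + 1); /* whole lowres samples */
        printf("lowres=%d: %d half-pel -> +%d samples, phase %d/%d\n",
               lowres, motion_x, step, sx, s_mask + 1);
    }
    return 0;
}
/* ------------------------------------------------------------------ */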
/* mpeg_motion_lowres(): like full-resolution MPEG motion compensation,
 * but on a lowres reference; the chroma coordinates additionally depend
 * on chroma_x_shift/chroma_y_shift. */
                              uint8_t *const *ref_picture,
                              /* ... */
                              int motion_x, int motion_y,
                              /* ... */
{
    const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
    /* ... */
    const int lowres   = s->avctx->lowres;
    const int op_index = lowres - 1 + s->chroma_x_shift;
    const int block_s  = 8 >> lowres;
    const int s_mask   = (2 << lowres) - 1;
    /* ... */
    int hc = s->chroma_y_shift ? (h + 1 - bottom_field) >> 1 : h;
    /* ... */
    linesize   = s->cur_pic.linesize[0] << field_based;
    uvlinesize = s->cur_pic.linesize[1] << field_based;
    /* ... */
    if (s->quarter_sample) {
        /* ... */

    sx    = motion_x & s_mask;
    sy    = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s + (motion_x >> (lowres + 1));
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> (lowres + 1));
    /* ... */
        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    /* ... */
            uvsx    = (2 * mx) & s_mask;
            uvsy    = (2 * my) & s_mask;
            uvsrc_x = s->mb_x * block_s + (mx >> lowres);
    /* ... */
        if (s->chroma_y_shift) {
            /* ... */
            uvsrc_x = s->mb_x * block_s + (mx >> (lowres + 1));
            uvsrc_y = (mb_y * block_s >> field_based) + (my >> (lowres + 1));
            /* ... */
            if (s->chroma_x_shift) {
                /* ... */
                uvsy    = motion_y & s_mask;
                /* ... */
                uvsrc_x = s->mb_x * block_s + (mx >> (lowres + 1));
                /* ... */
                uvsx = motion_x & s_mask;
                uvsy = motion_y & s_mask;
    /* ... */

    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y < 0 ||
        /* ... */
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 /* ... */
                                 17, 17 + field_based,
                                 src_x, src_y * (1 << field_based),
                                 h_edge_pos, /* ... */);
        ptr_y = s->sc.edge_emu_buffer;
        /* ... */
        uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
        uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
        /* ... */
        vbuf -= s->uvlinesize;
        s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                 /* ... */
                                 uvsrc_x, uvsrc_y * (1 << field_based),
                                 /* ... */);
        s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                 /* ... */
                                 uvsrc_x, uvsrc_y * (1 << field_based),
                                 /* ... */);
    /* ... */
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    /* ... */
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    /* ... */
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        /* ... */
        pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
        pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
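/* ------------------------------------------------------------------
 * Illustration, not from this file: the bounds test above casts a
 * possibly negative coordinate to unsigned, folding the "x < 0" and
 * "x > limit" checks into one comparison, because negative values wrap
 * to huge unsigned numbers. Standalone demo with arbitrary limits.
 * ------------------------------------------------------------------ */
#include <stdio.h>

static int out_of_range(int x, int limit)
{
    return (unsigned) x > (unsigned) limit;
}

int main(void)
{
    printf("%d\n", out_of_range(-1, 100));  /* 1: negative wraps around */
    printf("%d\n", out_of_range(50, 100));  /* 0: in range              */
    printf("%d\n", out_of_range(101, 100)); /* 1: beyond the edge       */
    return 0;
}
/* ------------------------------------------------------------------ */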
/* chroma_4mv_motion_lowres(): chroma MC for 4MV macroblocks, driven by a
 * single chroma vector (mx, my); Cb and Cr reuse the same plane offset. */
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t *const *ref_picture,
                                     /* ... */
{
    const int lowres   = s->avctx->lowres;
    const int op_index = lowres;
    const int block_s  = 8 >> lowres;
    const int s_mask   = (2 << lowres) - 1;
    /* ... */
    int emu = 0, src_x, src_y, sx, sy;
    /* ... */
    if (s->quarter_sample) {
        /* ... */

    src_x = s->mb_x * block_s + (mx >> (lowres + 1));
    src_y = s->mb_y * block_s + (my >> (lowres + 1));
    /* ... */
    offset = src_y * s->uvlinesize + src_x;
    ptr    = ref_picture[1] + offset;
    /* ... */
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 /* ... */);
        ptr = s->sc.edge_emu_buffer;
    /* ... */
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    /* ... */
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 /* ... */);
        ptr = s->sc.edge_emu_buffer;
    /* ... */
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
/* MPV_motion_lowres(): motion compensation of a single macroblock,
 * dispatching on s->mv_type (16x16, 8x8, field, 16x8, dual prime). */
                              uint8_t *dest_y, uint8_t *dest_cb,
                              /* ... */
                              int dir, uint8_t *const *ref_picture,
                              /* ... */
{
    /* ... */
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >> lowres;
    /* ... */
    switch (s->mv_type) {
    /* MV_TYPE_16X16: one vector for the whole macroblock */
    /* ... */
                           s->mv[dir][0][0], s->mv[dir][0][1],
    /* ... */
        /* MV_TYPE_8X8: four vectors, one per 8x8 luma block */
        for (int i = 0; i < 4; i++) {
            /* ... */ s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               /* ... */
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
    /* ... */
        /* MV_TYPE_FIELD, frame picture: one vector per field */
                           1, 0, s->field_select[dir][0],
                           /* ... */
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           /* ... */
                           1, 1, s->field_select[dir][1],
                           /* ... */
                           s->mv[dir][1][0], s->mv[dir][1][1],
    /* ... */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                /* ... */
                ref_picture = s->cur_pic.ptr->f->data;
            /* ... */
                           0, 0, s->field_select[dir][0],
                           /* ... */
                           s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
    /* ... */
        /* MV_TYPE_16X8: two vectors, one per 16x8 block */
        for (int i = 0; i < 2; i++) {
            uint8_t *const *ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                /* ... */
                ref2picture = ref_picture;
            /* ... */
                ref2picture = s->cur_pic.ptr->f->data;
            /* ... */
                               0, 0, s->field_select[dir][i],
                               /* ... */
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                                   2 * block_s * i, block_s, mb_y >> 1);

            dest_y  += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
    /* ... */
        /* MV_TYPE_DMV: dual prime vectors */
        for (int i = 0; i < 2; i++) {
            for (int j = 0; j < 2; j++) {
                /* ... */
                                   s->mv[dir][2 * i + j][0],
                                   s->mv[dir][2 * i + j][1],
                /* ... */
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
    /* ... */
            for (int i = 0; i < 2; i++) {
                /* ... */
                                   0, 0, s->picture_structure != i + 1,
                                   /* ... */
                                   s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);
                /* ... */
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
    /* ... */
            if (!s->first_field) {
                ref_picture = s->cur_pic.ptr->f->data;
/* lowest_referenced_row(): find the lowest MB row referenced in the MVs,
 * so frame threading only waits on the reference rows it actually needs. */
int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
/* ... */
switch (s->mv_type) {
/* ... */
    for (int i = 0; i < mvs; i++) {
        int my = s->mv[dir][i][1];
        /* ... */

off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;
/* ... */
return av_clip(s->mb_y + off, 0, s->mb_height - 1);
/* ... */
return s->mb_height - 1;
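/* ------------------------------------------------------------------
 * Illustration, not from this file: the "off" computation above turns
 * the largest vertical displacement into a number of MB rows. With
 * half-pel MVs, qpel_shift = 1 rescales to quarter-pel units; one
 * 16-pixel MB row is 64 quarter-pel units, so "+ 63 >> 6" is a
 * rounding-up division by 64. MV values here are arbitrary examples.
 * ------------------------------------------------------------------ */
#include <stdio.h>

int main(void)
{
    const int qpel_shift  = 1;                    /* half-pel input */
    const int my_values[] = { 0, 31, 32, 33, 100 };
    for (int i = 0; i < 5; i++) {
        int off = ((my_values[i] << qpel_shift) + 63) >> 6;
        printf("my=%3d half-pel -> reaches %d MB row(s) below\n",
               my_values[i], off);
    }
    return 0;
}
/* ------------------------------------------------------------------ */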
/* put_dct()/add_dct(): inverse-transform one block only if it has any
 * nonzero coefficients (block_last_index >= 0). */
                    int16_t *block, int i, uint8_t *dest, int line_size)
{
    if (s->block_last_index[i] >= 0) {
/* ... */
    if (s->block_last_index[i] >= 0) {
#define NOT_MPEG12_H261        0
#define MAY_BE_MPEG12_H261     1
#define DEFINITELY_MPEG12_H261 2
/* ... */
                        int lowres_flag, int is_mpeg12)
{
#define IS_MPEG12_H261(s) (is_mpeg12 == MAY_BE_MPEG12_H261 ? ((s)->out_format <= FMT_H261) : is_mpeg12)
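/* ------------------------------------------------------------------
 * Illustration, not from this file: mpv_reconstruct_mb_internal() is
 * av_always_inline and its callers pass compile-time constants for
 * is_mpeg12, so IS_MPEG12_H261() folds to a constant and the unused
 * paths are dead-code-eliminated. A minimal sketch of that
 * constant-dispatch pattern; names are made up, and always_inline
 * assumes a GCC/Clang-style compiler.
 * ------------------------------------------------------------------ */
#include <stdio.h>

#define always_inline inline __attribute__((always_inline))

static always_inline int reconstruct(int x, int is_mpeg12)
{
    if (is_mpeg12)          /* constant-folded in each wrapper */
        return x * 2;       /* MPEG-1/2 path                   */
    else
        return x * 3;       /* everything else                 */
}

static int reconstruct_mpeg12(int x) { return reconstruct(x, 1); }
static int reconstruct_other(int x)  { return reconstruct(x, 0); }

int main(void)
{
    printf("%d %d\n", reconstruct_mpeg12(7), reconstruct_other(7));
    return 0;
}
/* ------------------------------------------------------------------ */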
uint8_t *dest_y = s->dest[0], *dest_cb = s->dest[1], *dest_cr = s->dest[2];
/* ... */
const int linesize = s->cur_pic.linesize[0];
/* ... */
const int block_size = lowres_flag ? 8 >> s->avctx->lowres : 8;
/* ... */
dct_linesize = linesize << s->interlaced_dct;
/* ... */
        op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
/* ... */
        op_pix  = s->hdsp.put_pixels_tab;
        op_qpix = s->qdsp.put_qpel_pixels_tab;
/* ... */
        op_pix  = s->hdsp.put_no_rnd_pixels_tab;
        op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
/* ... */
    /* forward prediction writes; ops swap to avg so the backward
     * prediction blends into the same pixels */
    ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_pic.data, op_pix, op_qpix);
    op_pix  = s->hdsp.avg_pixels_tab;
    op_qpix = s->qdsp.avg_qpel_pixels_tab;
/* ... */
    ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_pic.data, op_pix, op_qpix);
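/* ------------------------------------------------------------------
 * Illustration, not from this file: B-frame bi-prediction needs no
 * separate blend step. The first pass "puts" prediction A; the second
 * pass rounds-and-averages prediction B into the same buffer, giving
 * (A + B + 1) / 2 per pixel. Standalone sketch with arbitrary values.
 * ------------------------------------------------------------------ */
#include <stdint.h>
#include <stdio.h>

static void put_pixels(uint8_t *dst, const uint8_t *src, int n)
{
    for (int i = 0; i < n; i++)
        dst[i] = src[i];
}

static void avg_pixels(uint8_t *dst, const uint8_t *src, int n)
{
    for (int i = 0; i < n; i++)
        dst[i] = (dst[i] + src[i] + 1) >> 1;
}

int main(void)
{
    const uint8_t fwd[4] = { 10, 20, 30, 40 };  /* forward prediction  */
    const uint8_t bwd[4] = { 50, 60, 70, 80 };  /* backward prediction */
    uint8_t mb[4];

    put_pixels(mb, fwd, 4);   /* pass 1: overwrite */
    avg_pixels(mb, bwd, 4);   /* pass 2: blend     */
    printf("%d %d %d %d\n", mb[0], mb[1], mb[2], mb[3]); /* 30 40 50 60 */
    return 0;
}
/* ------------------------------------------------------------------ */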
if (s->avctx->skip_idct) {
/* ... */
    if (s->chroma_y_shift) {
/* ... */
    if (!s->chroma_x_shift) {
        add_dct(s, block[8], 8, dest_cb + block_size, dct_linesize);
        add_dct(s, block[9], 9, dest_cr + block_size, dct_linesize);
/* ... */
} else if (CONFIG_WMV2_DECODER) {
/* ... */
           s->avctx->bits_per_raw_sample > 8) {
/* ... */
    put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
/* ... */
    if (s->chroma_y_shift) {
/* ... */
        put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
        put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
/* ... */
    s->idsp.idct_put(dest_y,                           dct_linesize, block[0]);
    s->idsp.idct_put(dest_y + block_size,              dct_linesize, block[1]);
/* ... */
    s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
/* ... */
    if (s->chroma_y_shift) {
/* ... */
        s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
        s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
/* ... */
        if (!s->chroma_x_shift) {
            s->idsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
            s->idsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
            s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
            s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
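/* ------------------------------------------------------------------
 * Illustration, not from this file: the block indices above follow the
 * classic MPEG macroblock layout. Blocks 0-3 are the four 8x8 luma
 * quadrants, 4-5 are Cb/Cr in 4:2:0, and 8-11 are the extra chroma
 * blocks used when chroma is not horizontally subsampled (the
 * !chroma_x_shift path). Standalone sketch of the 4:2:0 destination
 * offsets, assuming progressive DCT and an arbitrary stride.
 * ------------------------------------------------------------------ */
#include <stdio.h>

int main(void)
{
    const int block_size = 8;
    const int linesize   = 720;            /* example luma stride */
    const int dct_offset = 8 * linesize;   /* one block row down  */

    /* luma: blocks 0..3 in a 2x2 arrangement */
    for (int i = 0; i < 4; i++) {
        int off = (i & 1) * block_size + (i >> 1) * dct_offset;
        printf("block %d -> dest_y + %d\n", i, off);
    }
    /* chroma in 4:2:0: one Cb and one Cr block at offset 0 */
    printf("block 4 -> dest_cb + 0\nblock 5 -> dest_cr + 0\n");
    return 0;
}
/* ------------------------------------------------------------------ */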
/* In ff_mpv_reconstruct_mb(): record per-MB state, then reconstruct. */
const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
/* ... */
s->cur_pic.qscale_table[mb_xy] = s->qscale;
/* ... */
if (s->mb_skipped) {
/* ... */
} else if (!s->cur_pic.reference) {
/* ... */
for (int i = 0; i < 6; i++) {
    for (int j = 0; j < 64; j++) {
        /* ... */ block[i][s->idsp.idct_permutation[j]]);
/* ... */
if (!s->avctx->lowres) {
static int ff_h263_round_chroma(int x)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
void ff_draw_horiz_band(AVCodecContext *avctx, const AVFrame *cur, const AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
void (*h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
#define MV_TYPE_16X16
1 vector for the whole mb
void ff_thread_progress_report(ThreadProgress *pro, int n)
This function is a no-op in no-op mode; otherwise it notifies other threads that a certain level of progress has been reached.
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocate MpegEncContext fields dependent on the resolution.
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
int ff_thread_can_start_frame(AVCodecContext *avctx)
uint16_t (*dct_offset)[64]
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
struct AVFrame
This structure describes decoded (raw) audio or video data.
int workaround_bugs
Work around bugs in encoders which cannot be detected automatically.
void ff_mpv_report_decode_progress(MpegEncContext *s)
unsigned int ff_toupper4(unsigned int x)
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
struct AVCodecContext * avctx
#define MAY_BE_MPEG12_H261
int height
Picture size; must be a multiple of 16.
#define MV_TYPE_DMV
2 vectors, special MPEG-2 Dual Prime vectors
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
@ AV_VIDEO_ENC_PARAMS_MPEG2
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
uint32_t * mb_type
types and macros are defined in mpegutils.h
const struct AVCodec * codec
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
ptrdiff_t linesize
line size, in bytes, may be different from width
int16_t (*motion_val[2])[2]
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, const uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
struct AVVideoEncParams
Video encoding parameters for a given frame.
int dummy
Picture is a dummy and should not be output.
static int16_t mult(Float11 *f1, Float11 *f2)
void ff_mpv_common_end(MpegEncContext *s)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
int ff_mpv_common_frame_size_change(MpegEncContext *s)
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
#define AV_CEIL_RSHIFT(a, b)
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
MPVWorkPicture cur_pic
copy of the current picture structure.
void ff_mpv_unref_picture(MPVWorkPicture *pic)
int low_delay
no reordering needed / has no B-frames
@ AVDISCARD_ALL
discard all
struct AVRefStructPool * picture_pool
Pool for MPVPictures.
void ff_thread_progress_await(const ThreadProgress *pro_c, int n)
This function is a no-op in no-op mode; otherwise it waits until other threads have reached a certain level of progress.
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const MPVPicture *p, int qp_type)
av_cold void ff_mpv_idct_init(MpegEncContext *s)
MPVWorkPicture next_pic
copy of the next picture structure.
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
#define DEFINITELY_MPEG12_H261
@ AV_PICTURE_TYPE_I
Intra.
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
#define IS_MPEG12_H261(s)
static int av_cold alloc_dummy_frame(MpegEncContext *s, MPVWorkPicture *dst)
void (*op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
#define FF_MPV_QSCALE_TYPE_MPEG1
void (*qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
int ff_mpv_alloc_dummy_frames(MpegEncContext *s)
Ensure that the dummy frames are allocated according to pict_type if necessary.
uint16_t pb_field_time
like above, just for interlaced
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of nb_blocks AVVideoBlockParams.
#define FF_DEBUG_DCT_COEFF
@ AVDISCARD_NONKEY
discard all frames except keyframes
int mb_stride
mb_stride of the tables
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, const uint32_t *mbtype_table, const int8_t *qscale_table, int16_t(*const motion_val[2])[2], int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
int format
Format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames, enum AVSampleFormat for audio.
int quarter_sample
1->qpel, 0->half pel ME/MC
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
void ff_mpeg_flush(AVCodecContext *avctx)
#define MV_TYPE_FIELD
2 vectors, one per field
@ FF_THREAD_NO_FRAME_THREADING
static void color_frame(AVFrame *frame, int luma)
int mb_width
mb_width of the tables
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
#define FF_THREAD_FRAME
Decode more than one frame at once.
void * hwaccel_picture_private
RefStruct reference for hardware accelerator private data.
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict)
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
#define i(width, name, range_min, range_max)
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
struct AVVideoBlockParams
Data structure for storing block-level encoding information.
MPVWorkPicture last_pic
copy of the previous picture structure.
int mb_height
mb_height of the tables
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
void ff_mpv_workpic_from_pic(MPVWorkPicture *wpic, MPVPicture *pic)
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int mx, int my)
int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
enum ThreadingStatus ff_thread_sync_ref(AVCodecContext *avctx, size_t offset)
Allows synchronizing objects whose lifetime is the whole decoding process among all frame threads.
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution, as well as the slice thread contexts.
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
void ff_mpeg4_decode_studio(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int block_size, int uvlinesize, int dct_linesize, int dct_offset)
struct AVCodecContext
main external API structure.
static int alloc_picture(MpegEncContext *s, MPVWorkPicture *dst, int reference)
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
void ff_mpv_frame_end(MpegEncContext *s)
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
int coded_width
Bitstream width / height; may be different from width/height, e.g. when the decoded frame is cropped before being output or lowres is enabled.
int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
int width
picture width / height.
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Get the block at the specified index.
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int.
@ AVDISCARD_NONREF
discard all non reference
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture's fields.
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
int ff_mpv_decode_close(AVCodecContext *avctx)