#include "config_components.h"

/* from ff_mpeg_update_thread_context() */
    if (!s->context_initialized) {
        memcpy(s, s1, sizeof(*s));

        s->context_initialized = 0;
        s->context_reinit      = 0;

        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
        ...
    }

    if (s->height != s1->height ||
        s->width  != s1->width  ||
        s->context_reinit) {
        ...
    }
    ...
        av_fast_padded_malloc(&s->bitstream_buffer,
                              &s->allocated_bitstream_buffer_size,
                              s1->allocated_bitstream_buffer_size);
        if (!s->bitstream_buffer) {
            s->bitstream_buffer_size = 0;
            return AVERROR(ENOMEM);
        }
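The av_fast_padded_malloc() call above reuses one allocation across frames, growing it only when the new payload does not fit. A minimal standalone sketch of that pattern (the copy_padded helper and its parameters are hypothetical, not part of this file; the header locations are assumptions):

#include <stdint.h>
#include <string.h>
#include "libavcodec/avcodec.h" /* av_fast_padded_malloc(), AVERROR */

/* Hypothetical helper: ensure *buf holds src_size bytes plus the zeroed
 * AV_INPUT_BUFFER_PADDING_SIZE tail that bitstream readers may overread. */
static int copy_padded(uint8_t **buf, unsigned *alloc_size,
                       const uint8_t *src, size_t src_size)
{
    av_fast_padded_malloc(buf, alloc_size, src_size);
    if (!*buf)
        return AVERROR(ENOMEM); /* on failure the old buffer is freed */
    memcpy(*buf, src, src_size);
    return 0;
}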
/* from ff_mpv_common_frame_size_change() */
    if (!s->context_initialized)
        return AVERROR(EINVAL);
    ...
    if ((s->width || s->height) &&
        (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
        goto fail;
    ...
    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0] = s;

    if (s->width && s->height) {
        ...
    }
    s->context_reinit = 0;
    ...
fail:
    s->context_reinit = 1;
/* from alloc_picture() */
    av_assert1(s->mb_width  == s->buffer_pools.alloc_mb_width);
    av_assert1(s->mb_height == s->buffer_pools.alloc_mb_height ||
               FFALIGN(s->mb_height, 2) == s->buffer_pools.alloc_mb_height);
    av_assert1(s->mb_stride == s->buffer_pools.alloc_mb_stride);

    return ff_mpv_alloc_pic_accessories(s->avctx, dst, &s->sc,
                                        &s->buffer_pools, s->mb_height);
/* from color_frame(): fill a dummy frame with a flat color */
    int h_chroma_shift, v_chroma_shift;
    ...
    for (int i = 0; i < frame->height; i++)
        memset(frame->data[0] + frame->linesize[0] * i, luma, frame->width);
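color_frame() also has to cover the chroma planes, whose dimensions depend on the pixel format's subsampling shifts. A minimal sketch of that derivation using av_pix_fmt_get_chroma_sub_sample() and AV_CEIL_RSHIFT() (the chroma_plane_size wrapper is hypothetical):

#include "libavutil/common.h"  /* AV_CEIL_RSHIFT */
#include "libavutil/pixdesc.h" /* av_pix_fmt_get_chroma_sub_sample */

/* Hypothetical helper: derive chroma plane dimensions from the luma size. */
static void chroma_plane_size(enum AVPixelFormat fmt, int w, int h,
                              int *cw, int *ch)
{
    int h_shift, v_shift;
    av_pix_fmt_get_chroma_sub_sample(fmt, &h_shift, &v_shift);
    *cw = AV_CEIL_RSHIFT(w, h_shift); /* 4:2:0: (w + 1) >> 1 */
    *ch = AV_CEIL_RSHIFT(h, v_shift); /* 4:4:4: unchanged    */
}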
/* from ff_mpv_alloc_dummy_frames() */
    av_assert1(!s->last_pic.ptr || s->last_pic.ptr->f->buf[0]);
    av_assert1(!s->next_pic.ptr || s->next_pic.ptr->f->buf[0]);

    if (!s->last_pic.ptr && s->pict_type != AV_PICTURE_TYPE_I) {
        ...
            av_log(avctx, AV_LOG_DEBUG,
                   "allocating dummy last picture for B frame\n");
        ...
            av_log(avctx, AV_LOG_ERROR,
                   "warning: first frame is no keyframe\n");
        ...
    }
    /* ... assertion ending in (s->last_pic.ptr->f->buf[0])) elided ... */
    ...
/* from ff_mpv_frame_start() */
    s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_INTERLACED *
                                (!s->progressive_frame &&
                                 !s->progressive_sequence);
    s->cur_pic.ptr->field_picture = s->picture_structure != PICT_FRAME;

    s->cur_pic.ptr->f->pict_type = s->pict_type;
    ff_dlog(s->avctx,
            "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            (void*)s->last_pic.ptr, (void*)s->next_pic.ptr, (void*)s->cur_pic.ptr,
            s->last_pic.ptr ? s->last_pic.ptr->f->data[0] : NULL,
            s->next_pic.ptr ? s->next_pic.ptr->f->data[0] : NULL,
            s->cur_pic.ptr  ? s->cur_pic.ptr->f->data[0]  : NULL,
            s->pict_type, s->droppable);
/* from ff_mpv_frame_end() */
    if (s->cur_pic.reference)
        ff_thread_progress_report(&s->cur_pic.ptr->progress, INT_MAX);

/* from ff_print_debug_info() */
    ff_print_debug_info2(s->avctx, pict, p->mb_type,
                         p->qscale_table, p->motion_val,
                         s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/* from ff_mpv_export_qp_table() */
    for (unsigned y = 0; y < p->mb_height; y++)
        for (unsigned x = 0; x < p->mb_width; x++) {
            const unsigned int block_idx = y * p->mb_width  + x;
            const unsigned int mb_xy     = y * p->mb_stride + x;
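The two indices differ because the decoder's per-macroblock tables are laid out with a padded stride (mb_stride) while the exported side data is packed densely. A self-contained sketch of that export pattern (the export_qp helper and its parameters are hypothetical; it assumes par was created with at least mb_width * mb_height blocks):

#include <stdint.h>
#include "libavutil/video_enc_params.h"

/* Hypothetical sketch: pack a stride-padded per-MB QP table into densely
 * indexed AVVideoEncParams block parameters. */
static void export_qp(AVVideoEncParams *par, const int8_t *qscale_table,
                      int mb_width, int mb_height, int mb_stride)
{
    for (int y = 0; y < mb_height; y++)
        for (int x = 0; x < mb_width; x++) {
            AVVideoBlockParams *b =
                av_video_enc_params_block(par, y * mb_width + x); /* dense   */
            b->src_x    = 16 * x; /* macroblocks cover 16x16 luma samples */
            b->src_y    = 16 * y;
            b->w        = 16;
            b->h        = 16;
            b->delta_qp = qscale_table[y * mb_stride + x];        /* strided */
        }
}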
/* from ff_mpeg_draw_horiz_band() */
    ff_draw_horiz_band(s->avctx, s->cur_pic.ptr->f,
                       s->last_pic.ptr ? s->last_pic.ptr->f : NULL,
                       y, h, s->picture_structure,
                       s->first_field, s->low_delay);

/* from ff_mpeg_flush() */
    s->mb_x = s->mb_y = 0;
    ...
    s->bitstream_buffer_size = 0;
static int hpel_motion_lowres(MpegEncContext *s,
                              uint8_t *dest, const uint8_t *src,
                              int field_based, int field_select,
                              int src_x, int src_y,
                              int width, int height, ptrdiff_t stride,
                              int h_edge_pos, int v_edge_pos,
                              int w, int h, const h264_chroma_mc_func *pix_op,
                              int motion_x, int motion_y)
{
    const int lowres   = s->avctx->lowres;
    const int op_index = lowres;
    const int s_mask   = (2 << lowres) - 1;
    int emu = 0;
    int sx, sy;

    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x += motion_x >> lowres + 1; /* i.e. >> (lowres + 1) */
    src_y += motion_y >> lowres + 1;

    ...
    /* motion vector points outside the picture: build the block from the
     * edge emulation buffer instead */
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 w + 1, (h + 1) << field_based,
                                 src_x, src_y * (1 << field_based),
                                 h_edge_pos, v_edge_pos);
        src = s->sc.edge_emu_buffer;
        emu = 1;
    ...
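For intuition, a worked example of the sub-pel split above, written as a standalone check (the values and the helper are illustrative, not from this file):

#include <assert.h>

/* lowres = 1 halves the decode resolution, so block_s = 8 >> 1 = 4 and
 * s_mask = (2 << 1) - 1 = 3 keeps two fractional bits per axis. */
static void lowres_subpel_example(void)
{
    const int lowres   = 1;
    const int s_mask   = (2 << lowres) - 1; /* 3 */
    const int motion_x = 13;                /* half-pel units at full size */

    int sx   = motion_x & s_mask;           /* 1: fractional remainder     */
    int step = motion_x >> (lowres + 1);    /* 3: whole lowres samples     */

    /* rescale the remainder to the 1/8-pel grid that the H.264 chroma MC
     * functions expect as their (x, y) arguments */
    sx = (sx << 2) >> lowres;               /* 2 */

    assert(step == 3 && sx == 2);
}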
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                                                uint8_t *dest_y,
                                                uint8_t *dest_cb,
                                                uint8_t *dest_cr,
                                                int field_based, int bottom_field,
                                                int field_select,
                                                uint8_t *const *ref_picture,
                                                const h264_chroma_mc_func *pix_op,
                                                int motion_x, int motion_y,
                                                int h, int mb_y)
{
    const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
    ptrdiff_t linesize, uvlinesize;
    const int lowres     = s->avctx->lowres;
    const int op_index   = lowres - 1 + s->chroma_x_shift;
    const int block_s    = 8 >> lowres;
    const int s_mask     = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    int hc = s->chroma_y_shift ? (h + 1 - bottom_field) >> 1 : h;

    linesize   = s->cur_pic.linesize[0] << field_based;
    uvlinesize = s->cur_pic.linesize[1] << field_based;

    /* FIXME obviously not perfect but qpel will not work in lowres anyway */
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }
    ...
    sx    = motion_x & s_mask;
    sy    = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        /* even chroma mv's are full pel in H.261 */
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvsx    = (2 * mx) & s_mask;
        uvsy    = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y =    mb_y * block_s + (my >> lowres);
    } else {
        if (s->chroma_y_shift) { /* 4:2:0 */
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvsx    = mx & s_mask;
            uvsy    = my & s_mask;
            uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
            uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else if (s->chroma_x_shift) { /* 4:2:2 */
            mx      = motion_x / 2;
            uvsx    = mx & s_mask;
            uvsy    = motion_y & s_mask;
            uvsrc_y = src_y;
            uvsrc_x = s->mb_x * block_s + (mx >> (lowres + 1));
        } else { /* 4:4:4 */
            uvsx    = motion_x & s_mask;
            uvsy    = motion_y & s_mask;
            uvsrc_x = src_x;
            uvsrc_y = src_y;
        }
    }

    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - 2 * block_s, 0) ||
        uvsrc_y < 0 || ...) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 linesize >> field_based, linesize >> field_based,
                                 17, 17 + field_based,
                                 src_x, src_y * (1 << field_based),
                                 h_edge_pos, v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    /* FIXME use this for field pix too instead of the obnoxious hack which
     * changes picture.f->data */
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (hc) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
        }
    }
}
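A worked example of the 4:2:0 branch above as a standalone check (values chosen for illustration; the helper is hypothetical): chroma is subsampled in both axes, so the luma vector is halved before the usual fractional/integer split.

#include <assert.h>

static void chroma_420_example(void)
{
    const int lowres   = 0;
    const int s_mask   = (2 << lowres) - 1; /* 1 */
    const int motion_x = 7;                 /* half-pel luma vector        */

    int mx   = motion_x / 2;                /* 3: halved for 4:2:0 chroma  */
    int uvsx = mx & s_mask;                 /* 1: chroma fractional part   */
    int step = mx >> (lowres + 1);          /* 1: integer chroma step      */

    assert(uvsx == 1 && step == 1);
}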
static void chroma_4mv_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t *const *ref_picture,
                                     const h264_chroma_mc_func *pix_op,
                                     int mx, int my)
{
    const int lowres   = s->avctx->lowres;
    const int op_index = lowres;
    const int block_s  = 8 >> lowres;
    const int s_mask   = (2 << lowres) - 1;
    ...
    int emu = 0, src_x, src_y, sx, sy;
    ptrdiff_t offset;
    const uint8_t *ptr;

    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector with a
     * special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx    = mx & s_mask;
    sy    = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr    = ref_picture[1] + offset;
    ...
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    ...
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    if (emu) { /* Cr shares the same coordinates as Cb */
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
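The "special rounding" referenced above maps the sum of the four luma vector components to a single rounded chroma component via a small lookup table. A sketch consistent with FFmpeg's H.263 helper declared in the cross references below (the exact table is reproduced from memory; treat it as an assumption and the sketch name as hypothetical):

#include <stdint.h>

static int h263_round_chroma_sketch(int x)
{
    /* assumed table: biases the rounding of x/8 toward the nearest
     * half-pel chroma position */
    static const uint8_t roundtab[16] = {
        0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
    };
    return roundtab[x & 0xf] + (x >> 3) * 2;
}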
/* motion compensation of a single macroblock */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t *const *ref_picture,
                                     const h264_chroma_mc_func *pix_op)
{
    int mx, my;
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >> lowres;
    const int mb_x = s->mb_x, mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        mx = 0;
        my = 0;
        for (int i = 0; i < 4; i++) {
            hpel_motion_lowres(s,
                               dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }
        ...
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->cur_pic.ptr->f->data;
            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        for (int i = 0; i < 2; i++) {
            uint8_t *const *ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->cur_pic.ptr->f->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y  += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if (s->picture_structure == PICT_FRAME) {
            for (int i = 0; i < 2; i++) {
                for (int j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (int i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                /* after put we make avg of the same block */
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;

                /* opposite parity is always in the same frame if this is
                 * the second field */
                if (!s->first_field) {
                    ref_picture = s->cur_pic.ptr->f->data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
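For orientation, a summary of the dispatch above (the MV_TYPE_* briefs are taken from the cross references listed at the end of this page):

/*
 * s->mv_type selects the partitioning:
 *   MV_TYPE_16X16: 1 vector for the whole MB   -> one mpeg_motion_lowres()
 *   MV_TYPE_8X8:   4 vectors (H.263/MPEG-4 4MV) -> four hpel_motion_lowres()
 *                  calls plus one chroma_4mv_motion_lowres() on the summed MV
 *   MV_TYPE_FIELD: 2 vectors, one per field
 *   MV_TYPE_16X8:  2 vectors, one per 16x8 block
 *   MV_TYPE_DMV:   MPEG-2 dual prime -> "put" the first prediction, then
 *                  switch pix_op to avg_h264_chroma_pixels_tab to average in
 *                  the opposite-parity prediction
 */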
/* find the lowest MB row referenced in the MVs */
static int lowest_referenced_row(MpegEncContext *s, int dir)
{
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int off, mvs;
    ...
    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mvs = 1;
        break;
    case MV_TYPE_16X8:
        mvs = 2;
        break;
    case MV_TYPE_8X8:
        mvs = 4;
        break;
    default:
        goto unhandled;
    }

    for (int i = 0; i < mvs; i++) {
        int my = s->mv[dir][i][1];
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    }

    off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;

    return av_clip(s->mb_y + off, 0, s->mb_height - 1);
unhandled:
    return s->mb_height - 1;
}
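A worked example of the clamp above as a standalone check (the values are hypothetical): half-pel MVs give qpel_shift = 1; with my_min = -34 and my_max = 12 the worst-case displacement is 34 half-pel units, i.e. 68 quarter-pel units, and one 16-pixel MB row spans 64 quarter-pel units, so rounding up yields 2 extra reference rows that must already be decoded.

#include <assert.h>

static void lowest_row_example(void)
{
    const int qpel_shift = 1;  /* !quarter_sample          */
    const int worst      = 34; /* FFMAX(-my_min, my_max)   */
    const int off        = ((worst << qpel_shift) + 63) >> 6;
    assert(off == 2);
}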
static inline void add_dct(MpegEncContext *s, int16_t *block,
                           int i, uint8_t *dest, int line_size)
{
    if (s->block_last_index[i] >= 0) {
        s->idsp.idct_add(dest, line_size, block);
    }
}
/* from mpv_reconstruct_mb_internal(): dump the DCT coefficients when
 * FF_DEBUG_DCT_COEFF is enabled */
    for (int i = 0; i < 6; i++) {
        for (int j = 0; j < 64; j++) {
            av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                   block[i][s->idsp.idct_permutation[j]]);
        }
        av_log(s->avctx, AV_LOG_DEBUG, "\n");
    }
    ...
    if (!s->avctx->lowres) {
        ...
Referenced symbols (Doxygen cross-references):

static int ff_h263_round_chroma(int x)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
void ff_draw_horiz_band(AVCodecContext *avctx, const AVFrame *cur, const AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
#define MV_TYPE_16X16
1 vector for the whole mb
void ff_thread_progress_report(ThreadProgress *pro, int n)
This function is a no-op in no-op mode; otherwise it notifies other threads that a certain level of progress has been reached.
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocate MpegEncContext fields dependent on the resolution.
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
int max_b_frames
max number of B-frames for encoding
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
int ff_thread_can_start_frame(AVCodecContext *avctx)
AVFrame
This structure describes decoded (raw) audio or video data.
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
void ff_mpv_report_decode_progress(MpegEncContext *s)
unsigned int ff_toupper4(unsigned int x)
struct AVCodecContext * avctx
int height
Picture size; must be a multiple of 16.
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
@ AV_VIDEO_ENC_PARAMS_MPEG2
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
uint32_t * mb_type
types and macros are defined in mpegutils.h
const struct AVCodec * codec
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
ptrdiff_t linesize
line size, in bytes, may be different from width
int16_t(*[2] motion_val)[2]
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, const uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Video encoding parameters for a given frame.
int dummy
Picture is a dummy and should not be output.
void ff_mpv_common_end(MpegEncContext *s)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
int ff_mpv_common_frame_size_change(MpegEncContext *s)
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
#define AV_CEIL_RSHIFT(a, b)
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
MPVWorkPicture cur_pic
copy of the current picture structure.
int bitstream_buffer_size
void ff_mpv_unref_picture(MPVWorkPicture *pic)
int low_delay
no reordering needed / has no B-frames
struct AVRefStructPool * picture_pool
Pool for MPVPictures.
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const MPVPicture *p, int qp_type)
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
av_cold void ff_mpv_idct_init(MpegEncContext *s)
MPVWorkPicture next_pic
copy of the next picture structure.
@ AV_PICTURE_TYPE_I
Intra.
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
#define MAY_BE_MPEG12_H261
static int av_cold alloc_dummy_frame(MpegEncContext *s, MPVWorkPicture *dst)
#define FF_MPV_QSCALE_TYPE_MPEG1
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
int ff_mpv_alloc_dummy_frames(MpegEncContext *s)
Ensure that the dummy frames are allocated according to pict_type if necessary.
uint16_t pb_field_time
like above, just for interlaced
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVVideoEncParams plus an array of nb_blocks AVVideoBlockParams and initializes the variables.
#define FF_DEBUG_DCT_COEFF
int mb_stride
mb_stride of the tables
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, const uint32_t *mbtype_table, const int8_t *qscale_table, int16_t(*const motion_val[2])[2], int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
int format
Format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames and enum AVSampleFormat for audio.
int quarter_sample
1->qpel, 0->half pel ME/MC
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
void ff_mpeg_flush(AVCodecContext *avctx)
#define MV_TYPE_FIELD
2 vectors, one per field
@ FF_THREAD_NO_FRAME_THREADING
static void color_frame(AVFrame *frame, int luma)
int mb_width
mb_width of the tables
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
void * hwaccel_picture_private
RefStruct reference for hardware accelerator private data.
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict)
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
#define DEFINITELY_MPEG12_H261
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc(), but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end, which will always be 0.
Data structure for storing block-level encoding information.
MPVWorkPicture last_pic
copy of the previous picture structure.
int mb_height
mb_height of the tables
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
void ff_mpv_workpic_from_pic(MPVWorkPicture *wpic, MPVPicture *pic)
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int mx, int my)
int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
enum ThreadingStatus ff_thread_sync_ref(AVCodecContext *avctx, size_t offset)
Allows synchronization of objects whose lifetime is the whole decoding process among all frame threads.
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution, as well as the slice thread contexts.
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
main external API structure.
static int alloc_picture(MpegEncContext *s, MPVWorkPicture *dst, int reference)
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
void ff_mpv_frame_end(MpegEncContext *s)
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
int coded_width
Bitstream width / height, may be different from width/height, e.g. when the decoded frame is cropped before being output or lowres is enabled.
int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
int width
picture width / height.
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Get the block at the specified index.
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int.
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture's fields.
uint8_t * bitstream_buffer
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
int ff_mpv_decode_close(AVCodecContext *avctx)