Go to the documentation of this file.
46 #define EA_PREAMBLE_SIZE 8
47 #define MADk_TAG MKTAG('M', 'A', 'D', 'k')
48 #define MADm_TAG MKTAG('M', 'A', 'D', 'm')
49 #define MADe_TAG MKTAG('M', 'A', 'D', 'e')
81 static inline void comp(
unsigned char *
dst, ptrdiff_t dst_stride,
82 unsigned char *
src, ptrdiff_t src_stride,
int add)
92 int j,
int mv_x,
int mv_y,
int add)
98 comp(
frame->data[0] + (mb_y*16 + ((j&2)<<2))*
frame->linesize[0] + mb_x*16 + ((j&1)<<3),
115 int mb_x,
int mb_y,
int j)
119 frame->data[0] + (mb_y*16 + ((j&2)<<2))*
frame->linesize[0] + mb_x*16 + ((j&1)<<3),
133 int16_t *quant_matrix =
s->quant_matrix;
149 }
else if (
level != 0) {
153 "ac-tex damaged at %d %d\n",
s->mb_x,
s->mb_y);
172 "ac-tex damaged at %d %d\n",
s->mb_x,
s->mb_y);
220 for (j=0; j<6; j++) {
221 if (mv_map & (1<<j)) {
223 if (
s->last_frame->data[0])
226 s->bdsp.clear_block(
s->block);
247 const uint8_t *buf = avpkt->
data;
248 int buf_size = avpkt->
size;
257 chunk_type = bytestream2_get_le32(&gb);
262 bytestream2_get_le16(&gb), 1000, 1<<30);
264 width = bytestream2_get_le16(&gb);
265 height = bytestream2_get_le16(&gb);
291 if (inter && !
s->last_frame->data[0]) {
296 memset(
s->last_frame->data[0], 0,
s->last_frame->height *
297 s->last_frame->linesize[0]);
298 memset(
s->last_frame->data[1], 0x80,
s->last_frame->height / 2 *
299 s->last_frame->linesize[1]);
300 memset(
s->last_frame->data[2], 0x80,
s->last_frame->height / 2 *
301 s->last_frame->linesize[2]);
306 if (!
s->bitstream_buf)
308 s->bbdsp.bswap16_buf(
s->bitstream_buf, (
const uint16_t *)(buf +
bytestream2_tell(&gb)),
313 for (
s->mb_y=0;
s->mb_y < (avctx->
height+15)/16;
s->mb_y++)
314 for (
s->mb_x=0;
s->mb_x < (avctx->
width +15)/16;
s->mb_x++)
#define AV_LOG_WARNING
Something somehow does not look correct.
In filter documentation, the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, query the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection; and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
static void comp(unsigned char *dst, ptrdiff_t dst_stride, unsigned char *src, ptrdiff_t src_stride, int add)
unsigned int bitstream_buf_size
static int BS_FUNC() decode210(BSCTX *bc)
Return decoded truncated unary code for the values 2, 1, 0.
static int decode_mb(MadContext *s, AVFrame *frame, int inter)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
This structure describes decoded (raw) audio or video data.
#define UPDATE_CACHE(name, gb)
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
AVCodec p
The public AVCodec.
int flags
AV_CODEC_FLAG_*.
void ff_ea_idct_put_c(uint8_t *dest, ptrdiff_t linesize, int16_t *block)
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define CLOSE_READER(name, gb)
#define FF_CODEC_DECODE_CB(func)
av_cold void ff_blockdsp_init(BlockDSPContext *c)
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
#define SHOW_SBITS(name, gb, num)
static int get_sbits(GetBitContext *s, int n)
#define SKIP_BITS(name, gb, num)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define CODEC_LONG_NAME(str)
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
static unsigned int get_bits1(GetBitContext *s)
const FFCodec ff_eamad_decoder
#define LAST_SKIP_BITS(name, gb, num)
RL_VLC_ELEM ff_mpeg1_rl_vlc[680]
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
static av_always_inline int bytestream2_tell(GetByteContext *g)
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
static void calc_quant_matrix(MadContext *s, int qscale)
#define DECLARE_ALIGNED(n, t, v)
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
static av_cold int decode_end(AVCodecContext *avctx)
const uint16_t ff_mpeg1_default_intra_matrix[256]
#define OPEN_READER(name, gb)
it's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this — just let it be (vf offset)
#define i(width, name, range_min, range_max)
static int decode_block_intra(MadContext *s, int16_t *block)
av_cold void ff_mpeg12_init_vlcs(void)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc, but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
it's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this — just let it be (vf default value)
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
const uint8_t ff_zigzag_direct[64]
these buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more — it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if the filter should request a frame on one of its inputs, it should do so repeatedly until at least one frame has been pushed. Return zero, or at least make progress towards producing a frame.
static av_cold int decode_init(AVCodecContext *avctx)
const uint16_t ff_inv_aanscales[64]
#define AV_INPUT_BUFFER_PADDING_SIZE
static void comp_block(MadContext *t, AVFrame *frame, int mb_x, int mb_y, int j, int mv_x, int mv_y, int add)
int av_frame_replace(AVFrame *dst, const AVFrame *src)
Ensure the destination frame refers to the same data described by the source frame,...
main external API structure.
#define SHOW_UBITS(name, gb, num)
static void idct_put(MadContext *t, AVFrame *frame, int16_t *block, int mb_x, int mb_y, int j)
This structure stores compressed data.
static int decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
static int decode_motion(GetBitContext *gb)
int width
picture width / height.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
The exact code depends on how similar the blocks are and how related they are to the block
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
uint16_t quant_matrix[64]