Go to the documentation of this file.
50 while ((
c->high >> 15) - (
c->low >> 15) < 2) {
51 if ((
c->low ^
c->high) & 0x10000) {
56 c->high = (uint16_t)
c->high << 8 | 0xFF;
57 c->value = (uint16_t)
c->value << 8 | bytestream2_get_byte(
c->gbc.gB);
58 c->low = (uint16_t)
c->low << 8;
69 int split = (n << 1) - range;
78 int low,
int high,
int n)
80 int split = (n << 1) - range;
87 c->high +=
c->low - 1;
97 int range =
c->high -
c->low + 1;
101 if (n <<
scale > range)
117 int range =
c->high -
c->low + 1, n = *probs;
121 if (n <<
scale > range)
127 while (probs[++
i] >
val) ;
139 int diff = (
c->high >> 16) - (
c->low >> 16);
143 while (!(
diff & 0x80)) {
148 return (
bits + bp + 7 >> 3) + ((
c->low >> 16) + 1 ==
c->high >> 16);
155 c->value = bytestream2_get_be24(gB);
158 c->get_model_sym = arith2_get_model_sym;
165 uint32_t *pal =
ctx->pal + 256 -
ctx->free_colours;
167 if (!
ctx->free_colours)
171 if (ncol >
ctx->free_colours || buf_size < 2 + ncol * 3)
173 for (
i = 0;
i < ncol;
i++)
180 int keyframe,
int w,
int h)
182 int last_symbol = 0, repeat = 0, prev_avail = 0;
185 int x, y, endx, endy, t;
187 #define READ_PAIR(a, b) \
188 a = bytestream2_get_byte(gB) << 4; \
189 t = bytestream2_get_byte(gB); \
191 b = (t & 0xF) << 8; \
192 b |= bytestream2_get_byte(gB); \
197 if (endx >=
w || endy >=
h || x > endx || y > endy)
210 int b = bytestream2_get_byte(gB);
212 last_symbol =
b << 8 | bytestream2_get_byte(gB);
216 if (repeat >= (INT_MAX >> 8) - 1) {
220 repeat = (repeat << 8) + bytestream2_get_byte(gB) + 1;
222 if (last_symbol == -2) {
223 int skip =
FFMIN((
unsigned)repeat, dst +
w - p);
228 last_symbol = 127 -
b;
230 if (last_symbol >= 0)
232 else if (last_symbol == -1 && prev_avail)
234 }
while (++p < dst +
w);
243 uint8_t *rgb_dst, ptrdiff_t rgb_stride, uint32_t *pal,
244 int keyframe,
int kf_slipt,
int slice,
int w,
int h)
246 uint8_t
bits[270] = { 0 };
250 int current_length = 0, read_codes = 0, next_code = 0, current_codes = 0;
251 int remaining_codes, surplus_codes,
i;
253 const int alphabet_size = 270 - keyframe;
255 int last_symbol = 0, repeat = 0, prev_avail = 0;
258 int x, y, clipw, cliph;
265 if (x + clipw >
w || y + cliph >
h)
267 pal_dst += pal_stride * y + x;
268 rgb_dst += rgb_stride * y + x * 3;
275 pal_dst += pal_stride * kf_slipt;
276 rgb_dst += rgb_stride * kf_slipt;
285 while (current_codes--) {
287 if (symbol >= 204 - keyframe)
288 symbol += 14 - keyframe;
289 else if (symbol > 189)
290 symbol =
get_bits1(gb) + (symbol << 1) - 190;
293 bits[symbol] = current_length;
294 codes[symbol] = next_code++;
299 remaining_codes = (1 << current_length) - next_code;
301 if (current_length > 22 || current_codes > remaining_codes)
303 }
while (current_codes != remaining_codes);
305 remaining_codes = alphabet_size - read_codes;
308 while ((surplus_codes = (2 << current_length) -
309 (next_code << 1) - remaining_codes) < 0) {
315 for (
i = 0;
i < alphabet_size;
i++)
317 if (surplus_codes-- == 0) {
321 bits[
i] = current_length;
322 codes[
i] = next_code++;
325 if (next_code != 1 << current_length)
328 if ((
i =
init_vlc(&vlc, 9, alphabet_size,
bits, 1, 1, codes, 4, 4, 0)) < 0)
333 uint8_t *pp = pal_dst;
334 uint8_t *rp = rgb_dst;
350 repeat += (1 <<
b) - 1;
352 if (last_symbol == -2) {
353 int skip =
FFMIN(repeat, pal_dst +
w - pp);
359 last_symbol = 267 -
b;
361 if (last_symbol >= 0) {
364 }
else if (last_symbol == -1 && prev_avail) {
365 *pp = *(pp - pal_stride);
366 memcpy(rp, rp - rgb_stride, 3);
369 }
while (++pp < pal_dst +
w);
370 pal_dst += pal_stride;
371 rgb_dst += rgb_stride;
380 int x,
int y,
int w,
int h,
int wmv9_mask)
417 s->end_mb_y = (
h + 15) >> 4;
421 s->end_mb_y =
s->end_mb_y + 1 >> 1;
425 if (v->
end_mb_x ==
s->mb_width &&
s->end_mb_y ==
s->mb_height) {
429 "disabling error correction due to block count mismatch %dx%d != %dx%d\n",
430 v->
end_mb_x,
s->end_mb_y,
s->mb_width,
s->mb_height);
435 f =
s->current_picture.f;
438 ctx->dsp.upsample_plane(
f->data[0],
f->linesize[0],
w,
h);
439 ctx->dsp.upsample_plane(
f->data[1],
f->linesize[1],
w+1 >> 1,
h+1 >> 1);
440 ctx->dsp.upsample_plane(
f->data[2],
f->linesize[2],
w+1 >> 1,
h+1 >> 1);
443 "Asymmetric WMV9 rectangle subsampling");
448 ctx->dsp.mss2_blit_wmv9_masked(
c->rgb_pic + y *
c->rgb_stride + x * 3,
449 c->rgb_stride, wmv9_mask,
450 c->pal_pic + y *
c->pal_stride + x,
452 f->data[0],
f->linesize[0],
453 f->data[1],
f->data[2],
f->linesize[1],
456 ctx->dsp.mss2_blit_wmv9(
c->rgb_pic + y *
c->rgb_stride + x * 3,
458 f->data[0],
f->linesize[0],
459 f->data[1],
f->data[2],
f->linesize[1],
471 #define MAX_WMV9_RECTANGLES 20
472 #define ARITH2_PADDING 2
477 const uint8_t *buf = avpkt->
data;
478 int buf_size = avpkt->
size;
485 int keyframe, has_wmv9, has_mv, is_rle, is_555,
ret;
488 int used_rects = 0,
i, implicit_rect = 0,
av_uninit(wmv9_mask);
499 if (
c->slice_split > 0)
500 ctx->split_position =
c->slice_split;
501 else if (
c->slice_split < 0) {
517 if (
c->slice_split && (
ctx->split_position < 1 - is_555 ||
518 ctx->split_position > avctx->
height - 1))
528 if (is_555 && (has_wmv9 || has_mv ||
c->slice_split &&
ctx->split_position))
539 implicit_rect = !arith2_get_bit(&acoder);
541 while (arith2_get_bit(&acoder)) {
544 r = &wmv9rects[used_rects];
549 wmv9rects[used_rects - 1].
x) +
550 wmv9rects[used_rects - 1].
x;
557 if (implicit_rect && used_rects) {
565 wmv9rects[0].
w = avctx->
width;
566 wmv9rects[0].
h = avctx->
height;
570 for (
i = 0;
i < used_rects;
i++) {
571 if (!implicit_rect && arith2_get_bit(&acoder)) {
576 wmv9_mask = arith2_get_bit(&acoder) - 1;
590 if (keyframe && !is_555) {
604 if (
c->mvX < 0 ||
c->mvY < 0) {
605 FFSWAP(uint8_t *,
c->pal_pic,
c->last_pal_pic);
610 if (
ctx->last_pic->data[0]) {
612 c->last_rgb_pic =
ctx->last_pic->data[0] +
613 ctx->last_pic->linesize[0] * (avctx->
height - 1);
624 c->last_rgb_pic =
NULL;
626 c->rgb_pic =
frame->data[0] +
628 c->rgb_stride = -
frame->linesize[0];
630 frame->key_frame = keyframe;
636 if (
decode_555(avctx, &gB, (uint16_t *)
c->rgb_pic,
c->rgb_stride >> 1,
652 c->rgb_pic,
c->rgb_stride,
c->pal, keyframe,
653 ctx->split_position, 0,
660 c->rgb_pic,
c->rgb_stride,
c->pal, keyframe,
661 ctx->split_position, 1,
668 }
else if (!implicit_rect || wmv9_mask != -1) {
673 c->keyframe = keyframe;
676 ctx->split_position))
681 if (
c->slice_split) {
696 memset(
c->pal_pic, 0,
c->pal_stride * avctx->
height);
700 for (
i = 0;
i < used_rects;
i++) {
701 int x = wmv9rects[
i].
x;
702 int y = wmv9rects[
i].
y;
703 int w = wmv9rects[
i].
w;
704 int h = wmv9rects[
i].
h;
706 int WMV9codedFrameSize;
707 if (buf_size < 4 || !(WMV9codedFrameSize =
AV_RL24(buf)))
710 x,
y,
w,
h, wmv9_mask))
712 buf += WMV9codedFrameSize + 3;
713 buf_size -= WMV9codedFrameSize + 3;
715 uint8_t *dst =
c->rgb_pic +
y *
c->rgb_stride +
x * 3;
716 if (wmv9_mask != -1) {
717 ctx->dsp.mss2_gray_fill_masked(dst,
c->rgb_stride,
719 c->pal_pic +
y *
c->pal_stride +
x,
724 memset(dst, 0x80,
w * 3);
725 dst +=
c->rgb_stride;
735 if (
c->mvX < 0 ||
c->mvY < 0) {
827 c->pal_stride =
c->mask_stride;
830 if (!
c->pal_pic || !
c->last_pal_pic || !
ctx->last_pic) {
static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int x, int y, int w, int h, int wmv9_mask)
#define AV_LOG_WARNING
Something somehow does not look correct.
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
int end_mb_x
Horizontal macroblock limit (used only by mss2)
int overlap
overlapped transforms in use
int max_b_frames
max number of B-frames for encoding
#define ARITH_GET_MODEL_SYM(prefix)
static int get_bits_count(const GetBitContext *s)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
void ff_vc1_init_transposed_scantables(VC1Context *v)
This structure describes decoded (raw) audio or video data.
#define init_vlc(vlc, nb_bits, nb_codes, bits, bits_wrap, bits_size, codes, codes_wrap, codes_size, flags)
int fastuvmc
Rounding of qpel vector to hpel ? (not in Simple)
const uint8_t * zz_8x4
Zigzag scan table for TT_8x4 coding mode.
struct AVCodecContext * avctx
static void arith2_init(ArithCoder *c, GetByteContext *gB)
static int arith2_get_number(ArithCoder *c, int n)
int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext *gb)
static void skip_bits(GetBitContext *s, int n)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
AVCodec p
The public AVCodec.
av_cold void ff_mss2dsp_init(MSS2DSPContext *dsp)
int multires
frame-level RESPIC syntax element present
static int decode_555(AVCodecContext *avctx, GetByteContext *gB, uint16_t *dst, ptrdiff_t stride, int keyframe, int w, int h)
qpel_mc_func(* qpel_avg)[16]
static double val(void *priv, double ch)
static av_always_inline float scale(float x, float s)
static int arith2_get_scaled_value(int value, int n, int range)
static int arith2_get_consumed_bytes(ArithCoder *c)
int dquant
How qscale varies with MBs, 2 bits (not in Simple)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
qpel_mc_func put_qpel_pixels_tab[2][16]
#define FF_CODEC_DECODE_CB(func)
int res_sprite
Simple/Main Profile sequence header.
int res_fasttx
reserved, always 1
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
void ff_mpeg_er_frame_start(MpegEncContext *s)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int rangered
RANGEREDFRM (range reduction) syntax element present at frame level.
int ff_vc1_decode_end(AVCodecContext *avctx)
Close a VC1/WMV3 decoder.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
@ AVDISCARD_ALL
discard all
av_cold void ff_vc1_init_common(VC1Context *v)
Init VC-1 specific tables and VC1Context members.
void ff_vc1_decode_blocks(VC1Context *v)
@ AV_PICTURE_TYPE_I
Intra.
static unsigned int get_bits1(GetBitContext *s)
int resync_marker
could this stream contain resync markers
#define MAX_WMV9_RECTANGLES
static int arith2_get_prob(ArithCoder *c, int16_t *probs)
static av_cold int mss2_decode_end(AVCodecContext *avctx)
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Undefined Behavior: In the C language, some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined Behavior must not occur in a C program; it is not safe even if the output of undefined operations is unused. The unsafety may seem nit-picking, but optimizing compilers have in fact optimized code on the assumption that no Undefined Behavior occurs. Optimizing code based on wrong assumptions can and has in some cases led to effects beyond the output of computations. The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
static av_always_inline int bytestream2_tell(GetByteContext *g)
qpel_mc_func(* qpel_put)[16]
#define ARITH_GET_BIT(prefix)
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
const uint8_t ff_wmv2_scantableB[64]
const FFCodec ff_mss2_decoder
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_RL24
const uint8_t ff_wmv2_scantableA[64]
static int decode_pal_v2(MSS12Context *ctx, const uint8_t *buf, int buf_size)
int res_transtab
reserved, always 0
uint8_t respic
Frame-level flag for resized images.
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
static char * split(char *message, char delim)
void ff_mpeg_flush(AVCodecContext *avctx)
enum AVDiscard skip_loop_filter
Skip loop filtering for selected frames.
int ff_vc1_decode_init_alloc_tables(VC1Context *v)
#define i(width, name, range_min, range_max)
int frmrtq_postproc
3 bits,
#define AV_PIX_FMT_RGB555
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
static av_cold int mss2_decode_init(AVCodecContext *avctx)
int extended_mv
Ext MV in P/B (not in Simple)
av_cold int ff_mss12_decode_end(MSS12Context *c)
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
const uint8_t * zz_4x8
Zigzag scan table for TT_4x8 coding mode.
const char * name
Name of the codec implementation.
static int decode_rle(GetBitContext *gb, uint8_t *pal_dst, ptrdiff_t pal_stride, uint8_t *rgb_dst, ptrdiff_t rgb_stride, uint32_t *pal, int keyframe, int kf_slipt, int slice, int w, int h)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
void ff_free_vlc(VLC *vlc)
int res_y411
reserved, old interlaced mode
static av_cold int wmv9_init(AVCodecContext *avctx)
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Identical in function to ff_get_buffer(), except it reuses the existing buffer if available.
static void arith2_normalise(ArithCoder *c)
#define FFSWAP(type, a, b)
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
static const uint8_t * align_get_bits(GetBitContext *s)
main external API structure.
int res_rtm_flag
reserved, set to 1
int profile
Sequence header data for all Profiles TODO: choose between ints, uint8_ts and monobit flags.
void ff_mpv_frame_end(MpegEncContext *s)
int vstransform
variable-size [48]x[48] transform type + info
static void arith2_rescale_interval(ArithCoder *c, int range, int low, int high, int n)
int ff_mss12_decode_rect(SliceContext *sc, ArithCoder *acoder, int x, int y, int width, int height)
@ AV_PICTURE_TYPE_P
Predicted.
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
#define avpriv_request_sample(...)
static av_always_inline int diff(const uint32_t a, const uint32_t b)
This structure stores compressed data.
void ff_er_frame_end(ERContext *s)
void ff_mss12_slicecontext_reset(SliceContext *sc)
int width
picture width / height.
int finterpflag
INTERPFRM present.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
qpel_mc_func avg_qpel_pixels_tab[2][16]
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
int quantizer_mode
2 bits, quantizer mode used for sequence, see QUANT_*
av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_RB24
av_cold int ff_mss12_decode_init(MSS12Context *c, int version, SliceContext *sc1, SliceContext *sc2)
static int mss2_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_RB16
int bitrtq_postproc
5 bits, quantized framerate-based postprocessing strength