        for (i = e - 1; i >= 0; i--)
            a += a + get_rac(c, state + 22 + FFMIN(i, 9));

        e = -(is_signed && get_rac(c, state + 11 + FFMIN(e, 10)));
        return (a ^ e) - e;
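/*
 * Illustrative sketch (not part of ffv1dec.c): in get_symbol_inline() the
 * sign flag leaves e as either 0 or -1, so `(a ^ e) - e` is a branchless
 * conditional negation: with e == 0 it returns a unchanged, with e == -1 it
 * returns (~a) + 1 == -a in two's complement.  The helper name below is
 * invented purely for illustration.
 */
static int conditional_negate_sketch(int a, int negate)
{
    int e = -!!negate;       /* 0 when negate is false, -1 when true */
    return (a ^ e) - e;      /* a  or  -a */
}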
        while (i < state->error_sum) {
        ff_dlog(NULL, "v:%d bias:%d error:%d drift:%d count:%d k:%d",
                v, state->bias, state->error_sum, state->drift, state->count, k);
        v ^= ((2 * state->drift + state->count) >> 31);
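/*
 * Illustrative sketch (not part of ffv1dec.c): the while loop above picks the
 * Golomb-Rice parameter k as the smallest k with (state->count << k) >=
 * state->error_sum, roughly log2(error_sum / count).  The XOR that follows
 * flips every bit of v exactly when 2*drift + count is negative (the >> 31
 * yields 0 or -1 on the usual arithmetic-shift targets), steering the sign of
 * the decoded residual from the running bias statistics.
 */
static int rice_parameter_sketch(int count, int error_sum)
{
    int k = 0, i = count;
    while (i < error_sum) {   /* same doubling loop as get_vlc_symbol() */
        k++;
        i += i;
    }
    return k;
}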
#define RENAME(name) name

#define RENAME(name) name ## 32
static int decode_plane(FFV1Context *s, uint8_t *src,
                        int w, int h, int stride, int plane_index,
                        int pixel_stride)
    for (y = 0; y < h; y++) {
        int16_t *temp = sample[0];

        sample[0] = sample[1];
        sample[1] = temp;

        sample[1][-1] = sample[0][0];
        sample[0][w]  = sample[0][w - 1];
        int ret = decode_line(s, w, sample, plane_index, 8);

        for (x = 0; x < w; x++)
            src[x*pixel_stride + stride * y] = sample[1][x];
        for (x = 0; x < w; x++) {
            ((uint16_t*)(src + stride*y))[x*pixel_stride] = sample[1][x];
        for (x = 0; x < w; x++) {
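/*
 * Illustrative sketch (not part of ffv1dec.c): decode_plane() keeps just two
 * rows of int16_t samples and swaps them once per scanline, so sample[0]
 * always holds the previously decoded row and sample[1] the row currently
 * being decoded.  The assignments sample[1][-1] = sample[0][0] and
 * sample[0][w] = sample[0][w - 1] pad the row edges so the predictor and
 * context computation can read one sample left of and one past the row
 * without bounds checks.  The helper below only restates the swap; it
 * assumes the <stdint.h> types already used in this file.
 */
static void swap_rows_sketch(int16_t *rows[2])
{
    int16_t *prev = rows[0];   /* previously decoded row */
    rows[0] = rows[1];         /* the just-decoded row becomes "previous" */
    rows[1] = prev;            /* its storage is reused for the next row */
}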
    memset(state, 128, sizeof(state));
    } else if (ps == 2) {
        f->cur->interlaced_frame = 1;
        f->cur->top_field_first  = 0;
    } else if (ps == 3) {
        f->cur->interlaced_frame = 0;
    }
            memcpy(pdst, psrc, sizeof(*pdst));
    if (f->colorspace == 0 && (f->chroma_planes || !fs->transparency)) {
        const int cx = x >> f->chroma_h_shift;
        const int cy = y >> f->chroma_v_shift;

        decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0, 1);

        if (f->chroma_planes) {
            decode_plane(fs, p->data[1] + ps*cx + cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1, 1);
            decode_plane(fs, p->data[2] + ps*cx + cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1, 1);
        }
        if (fs->transparency)
            decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3],
                         (f->version >= 4 && !f->chroma_planes) ? 1 : 2, 1);
    } else if (f->colorspace == 0) {
        decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0],     width, height, p->linesize[0], 0, 2);
        decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0] + 1, width, height, p->linesize[0], 1, 2);
    } else if (f->use32bit) {
        uint8_t *planes[4] = { p->data[0] + ps * x + y * p->linesize[0],
                               p->data[1] + ps * x + y * p->linesize[1],
                               p->data[2] + ps * x + y * p->linesize[2],
                               p->data[3] + ps * x + y * p->linesize[3] };
        decode_rgb_frame32(fs, planes, width, height, p->linesize);
    } else {
        uint8_t *planes[4] = { p->data[0] + ps * x + y * p->linesize[0],
                               p->data[1] + ps * x + y * p->linesize[1],
                               p->data[2] + ps * x + y * p->linesize[2],
                               p->data[3] + ps * x + y * p->linesize[3] };
        decode_rgb_frame(fs, planes, width, height, p->linesize);
    }
        v = fs->c.bytestream_end - fs->c.bytestream - 2 - 5*f->ec;
        if (v)
            fs->slice_damaged = 1;
    memset(state, 128, sizeof(state));
    for (v = 0; i < 128; v++) {
        unsigned len = get_symbol(c, state, 0) + 1U;
        if (len > 128 - i || !len)
            return AVERROR_INVALIDDATA;
        while (len--)
            quant_table[i++] = scale * v;
    }
    for (i = 1; i < 128; i++)
        quant_table[256 - i] = -quant_table[i];
    quant_table[128] = -quant_table[127];
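/*
 * Illustrative sketch (not part of ffv1dec.c): quant_table[] maps a signed
 * sample difference, taken modulo 256, to a small quantized level.  Only the
 * non-negative half (indices 0..127) is transmitted, as run lengths of equal
 * levels; the mirroring loop above fills the negative half, so a difference
 * of -3 (index 256 - 3 = 253) gets the negated level of +3.  Lookup is then
 * a plain wrap-around index, roughly:
 */
static int quantize_diff_sketch(const int16_t *quant_table, int diff)
{
    return quant_table[diff & 0xFF];   /* e.g. diff = -3 -> index 253 */
}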
    for (i = 0; i < 5; i++) {
        int ret = read_quant_table(c, quant_table[i], context_count);
        if (ret < 0)
            return ret;
        context_count *= ret;
        if (context_count > 32768U)
            return AVERROR_INVALIDDATA;
    }
    return (context_count + 1) / 2;
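/*
 * Illustrative sketch (not part of ffv1dec.c): each read_quant_table() call
 * in effect yields the number of distinct quantized levels for one of the
 * five context inputs, and context_count is their product.  Because the
 * tables are antisymmetric, every context has a sign-mirrored twin, so only
 * (context_count + 1) / 2 context states need to be stored.  The helper
 * below restates that arithmetic; the level counts are made up for the
 * example.
 */
static int merged_context_count_sketch(void)
{
    int levels[5] = { 11, 11, 11, 3, 3 };   /* hypothetical per-input level counts */
    int context_count = 1;
    for (int i = 0; i < 5; i++)
        context_count *= levels[i];
    return (context_count + 1) / 2;         /* sign-symmetric contexts are merged */
}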
    memset(state2, 128, sizeof(state2));
    memset(state, 128, sizeof(state));
        for (i = 1; i < 256; i++)
            f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
           "global: ver:%d.%d, coder:%d, colorspace: %d bpr:%d chroma:%d(%d:%d), alpha:%d slices:%dx%d qtabs:%d ec:%d intra:%d CRC:0x%08X\n",
    memset(state, 128, sizeof(state));
            for (i = 1; i < 256; i++)
        chroma_planes  = get_rac(c, state);
        chroma_h_shift = get_symbol(c, state, 0);
        chroma_v_shift = get_symbol(c, state, 0);
        transparency   = get_rac(c, state);
        if (chroma_h_shift > 4U || chroma_v_shift > 4U) {
               chroma_h_shift, chroma_v_shift);
               "chroma subsampling not supported in this colorspace\n");
    if (context_count < 0) {
        int trailer = 3 + 5*!!f->ec;
               "quant_table_index out of range\n");
    int buf_size = avpkt->size;
               "Cannot decode non-keyframe without valid keyframe\n");
    buf_p = buf + buf_size;
        int trailer = 3 + 5*!!f->ec;
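/*
 * Illustrative sketch (not part of ffv1dec.c): slices are located by walking
 * buf_p backwards from the end of the packet.  Each slice is followed by a
 * 3-byte big-endian slice size and, when error checking is enabled (f->ec),
 * by 5 further bytes (a status byte plus a 32-bit CRC), which is where
 * trailer = 3 + 5*!!f->ec comes from.  The helper below restates that
 * arithmetic under those assumptions; its name and parameters are invented
 * for the example.
 */
static unsigned slice_span_from_tail_sketch(const uint8_t *tail_end, int ec)
{
    int trailer = 3 + 5 * !!ec;                /* size field + optional status/CRC */
    const uint8_t *size = tail_end - trailer;  /* 24-bit size sits first in the trailer */
    unsigned payload = (size[0] << 16) | (size[1] << 8) | size[2];
    return payload + trailer;                  /* step back this far for the previous slice */
}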
    fsdst->ac = fsrc->ac;
    fsdst->ec = fsrc->ec;
    memcpy(fdst, fsrc, sizeof(*fdst));