47 #define MAX_REFERENCE_FRAMES 8
49 #define MAX_FRAMES (MAX_REFERENCE_FRAMES + MAX_DELAY + 1)
51 #define MAX_BLOCKSIZE 32
56 #define DIRAC_REF_MASK_REF1 1
57 #define DIRAC_REF_MASK_REF2 2
58 #define DIRAC_REF_MASK_GLOBAL 4
64 #define DELAYED_PIC_REF 4
66 #define CALC_PADDING(size, depth) \
67 (((size + (1 << depth) - 1) >> depth) << depth)
69 #define DIVRNDUP(a, b) (((a) + (b) - 1) / (b))
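/* Illustration of the two helpers above (hypothetical sizes, not taken from a
 * stream): CALC_PADDING rounds up to the next multiple of 1 << depth and
 * DIVRNDUP is a round-up integer division.
 *   CALC_PADDING(100, 3) == ((100 + 7) >> 3) << 3 == 104
 *   DIVRNDUP(10, 4)      == (10 + 3) / 4          == 3
 */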
103 typedef struct Plane {
234 return ((x+1)*21845 + 10922) >> 16;
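/* divide3() is a fixed-point approximation of division by 3 with rounding:
 * 21845 == 0x5555 ~= 65536/3 and 10922 ~= 65536/6.  It matches round(x/3) for
 * the small sums fed to it by intra DC prediction, e.g.
 * divide3(5) == (6*21845 + 10922) >> 16 == 2.
 */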
240 int i, remove_idx = -1;
242 for (i = 0; framelist[i]; i++)
243 if (framelist[i]->avframe->display_picture_number == picnum) {
244 remove_pic = framelist[i];
249 for (i = remove_idx; framelist[i]; i++)
250 framelist[i] = framelist[i+1];
258 for (i = 0; i < maxframes; i++)
260 framelist[i] = frame;
270 int i, w, h, top_padding;
273 for (i = 0; i < 3; i++) {
343 for (j = 0; j < 3; j++)
344 for (k = 1; k < 4; k++)
351 for (i = 0; i < 3; i++) {
409 #define SIGN_CTX(x) (CTX_SIGN_ZERO + ((x) > 0) - ((x) < 0))
420 if (buf & 0x80000000) {
426 if (buf & 0xAA800000) {
452 coeff = (coeff * qfactor + qoffset) >> 2;
455 coeff = (coeff ^ sign) - sign;
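/* Dequantization followed by a branchless sign flip: with sign equal to 0 or
 * -1, (coeff ^ sign) - sign either leaves the magnitude alone or negates it.
 * Worked through with hypothetical quantiser values: qfactor 16 and qoffset 6
 * turn a raw magnitude of 3 into (3*16 + 6) >> 2 == 13, and a set sign bit
 * then gives -13.
 */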
461 #define UNPACK_ARITH(n, type) \
462 static inline void coeff_unpack_arith_##n(DiracArith *c, int qfactor, int qoffset, \
463 SubBand *b, type *buf, int x, int y) \
465 int coeff, sign, sign_pred = 0, pred_ctx = CTX_ZPZN_F1; \
466 const int mstride = -(b->stride >> (1+b->pshift)); \
468 const type *pbuf = (type *)b->parent->ibuf; \
469 const int stride = b->parent->stride >> (1+b->parent->pshift); \
470 pred_ctx += !!pbuf[stride * (y>>1) + (x>>1)] << 1; \
472 if (b->orientation == subband_hl) \
473 sign_pred = buf[mstride]; \
475 pred_ctx += !(buf[-1] | buf[mstride] | buf[-1 + mstride]); \
476 if (b->orientation == subband_lh) \
477 sign_pred = buf[-1]; \
479 pred_ctx += !buf[mstride]; \
481 coeff = dirac_get_arith_uint(c, pred_ctx, CTX_COEFF_DATA); \
483 coeff = (coeff * qfactor + qoffset) >> 2; \
484 sign = dirac_get_arith_bit(c, SIGN_CTX(sign_pred)); \
485 coeff = (coeff ^ -sign) + sign; \
499 int left, int right, int top, int bottom,
500 int blockcnt_one, int is_arith)
502 int x, y, zero_block;
503 int qoffset, qfactor;
517 if (s->codeblock_mode && !(s->old_delta_quant && blockcnt_one)) {
518 int quant = b->quant;
530 if (b->quant > 115) {
543 buf = b->ibuf + top * b->stride;
545 for (y = top; y < bottom; y++) {
546 for (x = left; x < right; x++) {
548 coeff_unpack_arith_10(c, qfactor, qoffset, b, (int32_t*)(buf)+x, x, y);
550 coeff_unpack_arith_8(c, qfactor, qoffset, b, (int16_t*)(buf)+x, x, y);
556 for (y = top; y < bottom; y++) {
557 for (x = left; x < right; x++) {
574 #define INTRA_DC_PRED(n, type) \
575 static inline void intra_dc_prediction_##n(SubBand *b) \
577 type *buf = (type*)b->ibuf; \
580 for (x = 1; x < b->width; x++) \
581 buf[x] += buf[x-1]; \
582 buf += (b->stride >> (1+b->pshift)); \
584 for (y = 1; y < b->height; y++) { \
585 buf[0] += buf[-(b->stride >> (1+b->pshift))]; \
587 for (x = 1; x < b->width; x++) { \
588 int pred = buf[x - 1] + buf[x - (b->stride >> (1+b->pshift))] + buf[x - (b->stride >> (1+b->pshift))-1]; \
589 buf[x] += divide3(pred); \
591 buf += (b->stride >> (1+b->pshift)); \
604 int cb_x, cb_y, left, right, top, bottom;
607 int cb_width = s->codeblock[b->level + (b->orientation != subband_ll)].width;
608 int cb_height = s->codeblock[b->level + (b->orientation != subband_ll)].height;
609 int blockcnt_one = (cb_width + cb_height) == 2;
620 for (cb_y = 0; cb_y < cb_height; cb_y++) {
621 bottom = (b->height * (cb_y+1LL)) / cb_height;
623 for (cb_x = 0; cb_x < cb_width; cb_x++) {
624 right = (b->width * (cb_x+1LL)) / cb_width;
625 codeblock(s, b, &gb, &c, left, right, top, bottom, blockcnt_one, is_arith);
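/* The subband is carved into cb_width x cb_height codeblocks by scaling the
 * running index against the band size, e.g. a 61-coefficient-wide band split
 * four ways gets the boundaries 0, 15, 30, 45, 61 (illustrative numbers).
 * The 1LL promotions above presumably keep the intermediate products from
 * overflowing 32 bits on large bands.
 */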
631 if (b->orientation == subband_ll && s->num_refs == 0) {
633 intra_dc_prediction_10(b);
635 intra_dc_prediction_8(b);
664 int level, num_bands = 0;
668 for (orientation = !!level; orientation < 4; orientation++) {
670 bands[num_bands++] = b;
693 #define PARSE_VALUES(type, x, gb, ebits, buf1, buf2) \
694 type *buf = (type *)buf1; \
695 buf[x] = coeff_unpack_golomb(gb, qfactor, qoffset); \
696 if (get_bits_count(gb) >= ebits) \
699 buf = (type *)buf2; \
700 buf[x] = coeff_unpack_golomb(gb, qfactor, qoffset); \
701 if (get_bits_count(gb) >= ebits) \
706 int slice_x, int slice_y, int bits_end,
710 int right = b1->width *(slice_x+1) / s->num_x;
714 int qfactor, qoffset;
732 for (y = top; y < bottom; y++) {
733 for (x = left; x < right; x++) {
742 for (y = top; y < bottom; y++) {
743 for (x = left; x < right; x++) {
781 for (orientation = !!level; orientation < 4; orientation++) {
790 chroma_bits = 8*slice->bytes - 7 - length_bits - luma_bits;
794 for (orientation = !!level; orientation < 4; orientation++) {
821 for (orientation = !!level; orientation < 4; orientation++) {
828 for (i = 0; i < 3; i++) {
830 int bits_left = 8 * length;
833 for (orientation = !!level; orientation < 4; orientation++) {
851 int slice_x, slice_y, bytes = 0, bufsize;
868 for (slice_y = 0; bufsize > 0 && slice_y < s->num_y; slice_y++) {
869 for (slice_x = 0; bufsize > 0 && slice_x < s->num_x; slice_x++) {
871 for (i = 0; i < 3; i++) {
872 if (bytes <= bufsize/8)
876 slices[slice_num].bytes = bytes;
877 slices[slice_num].slice_x = slice_x;
878 slices[slice_num].slice_y = slice_y;
883 if (bufsize/8 >= bytes)
892 for (slice_y = 0; bufsize > 0 && slice_y < s->num_y; slice_y++) {
893 for (slice_x = 0; bufsize > 0 && slice_x < s->num_x; slice_x++) {
896 slices[slice_num].bytes = bytes;
897 slices[slice_num].slice_x = slice_x;
898 slices[slice_num].slice_y = slice_y;
903 if (bufsize/8 >= bytes)
915 intra_dc_prediction_10(&s->plane[0].band[0][0]);
916 intra_dc_prediction_10(&s->plane[1].band[0][0]);
917 intra_dc_prediction_10(&s->plane[2].band[0][0]);
919 intra_dc_prediction_8(&s->plane[0].band[0][0]);
920 intra_dc_prediction_8(&s->plane[1].band[0][0]);
921 intra_dc_prediction_8(&s->plane[2].band[0][0]);
930 int i, w, h, level, orientation;
932 for (i = 0; i < 3; i++) {
944 for (orientation = !!level; orientation < 4; orientation++) {
984 static const uint8_t default_blen[] = { 4, 12, 16, 24 };
1018 "invalid x/y block length (%d/%d) for x/y chroma shift (%d/%d)\n",
1049 for (ref = 0; ref < s->num_refs; ref++) {
1107 #define CHECKEDREAD(dst, cond, errmsg) \
1108 tmp = svq3_get_ue_golomb(gb); \
1110 av_log(s->avctx, AV_LOG_ERROR, errmsg); \
1111 return AVERROR_INVALIDDATA; \
1173 for (i = 0; i < 4; i++) {
1186 static const uint8_t avgsplit[7] = { 0, 0, 1, 1, 1, 2, 2 };
1195 return avgsplit[sbsplit[-1] + sbsplit[-stride] + sbsplit[-stride-1]];
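/* avgsplit[] maps the sum of the three neighbouring superblock split levels
 * (each 0..2, so the sum ranges 0..6) to its rounded mean, avgsplit[n] ==
 * (n+1)/3.  For example, neighbours split 1, 2 and 2 sum to 5 and predict a
 * split level of 2.
 */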
1205 return block[-1].ref & refmask;
1210 pred = (block[-1].ref & refmask) + (block[-stride].ref & refmask) + (block[-stride-1].ref & refmask);
1211 return (pred >> 1) & refmask;
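/* With refmask selecting a single DIRAC_REF_MASK_* bit, pred sums that bit
 * over the left, top and top-left neighbours, so (pred >> 1) & refmask acts
 * as a majority vote: the flag is predicted set when at least two of the
 * three neighbours have it set.
 */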
1218 memset(block->u.dc, 0, sizeof(block->u.dc));
1220 if (x && !(block[-1].ref & 3)) {
1221 for (i = 0; i < 3; i++)
1222 block->u.dc[i] += block[-1].u.dc[i];
1226 if (y && !(block[-stride].ref & 3)) {
1227 for (i = 0; i < 3; i++)
1228 block->u.dc[i] += block[-stride].u.dc[i];
1232 if (x && y && !(block[-1-stride].ref & 3)) {
1233 for (i = 0; i < 3; i++)
1234 block->u.dc[i] += block[-1-stride].u.dc[i];
1239 for (i = 0; i < 3; i++)
1240 block->u.dc[i] = (block->u.dc[i]+1)>>1;
1241 } else if (n == 3) {
1242 for (i = 0; i < 3; i++)
1250 int refmask = ref+1;
1254 if (x && (block[-1].ref & mask) == refmask)
1255 pred[n++] = block[-1].u.mv[ref];
1257 if (y && (block[-stride].ref & mask) == refmask)
1260 if (x && y && (block[-stride-1].ref & mask) == refmask)
1261 pred[n++] = block[-stride-1].u.mv[ref];
1265 block->u.mv[ref][0] = 0;
1266 block->u.mv[ref][1] = 0;
1269 block->u.mv[ref][0] = pred[0][0];
1270 block->u.mv[ref][1] = pred[0][1];
1273 block->u.mv[ref][0] = (pred[0][0] + pred[1][0] + 1) >> 1;
1274 block->u.mv[ref][1] = (pred[0][1] + pred[1][1] + 1) >> 1;
1277 block->u.mv[ref][0] = mid_pred(pred[0][0], pred[1][0], pred[2][0]);
1278 block->u.mv[ref][1] = mid_pred(pred[0][1], pred[1][1], pred[2][1]);
1291 int m = (1<<ep) - (c[0]*x + c[1]*y);
1292 int mx = m * ((A[0][0] * x + A[0][1]*y) + (1<<ez) * b[0]);
1293 int my = m * ((A[1][0] * x + A[1][1]*y) + (1<<ez) * b[1]);
1295 block->u.mv[ref][0] = (mx + (1<<(ez+ep))) >> (ez+ep);
1296 block->u.mv[ref][1] = (my + (1<<(ez+ep))) >> (ez+ep);
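/* Sketch of the global motion model evaluated above, with ez and ep the two
 * fixed-point exponents signalled in the global motion parameters: m is the
 * perspective weight (1 << ep) - c.p for block position p = (x, y), and the
 * final vector is roughly
 *     mv ~= (1 - c.p / 2^ep) * (A.p / 2^ez + b)
 * i.e. an affine transform of the position scaled by the perspective term.
 */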
1300 int stride, int x, int y)
1314 for (i = 0; i < 3; i++)
1325 if (block->ref & (i+1)) {
1329 pred_mv(block, stride, x, y, i);
1344 for (x = 1; x < size; x++)
1347 for (y = 1; y < size; y++) {
1349 for (x = 0; x < size; x++)
1376 for (y = 0; y < s->sbheight; y++) {
1377 for (x = 0; x < s->sbwidth; x++) {
1388 for (i = 0; i < s->num_refs; i++) {
1392 for (i = 0; i < 3; i++)
1396 for (x = 0; x < s->sbwidth; x++) {
1400 for (q = 0; q < blkcnt; q++)
1401 for (p = 0; p < blkcnt; p++) {
1402 int bx = 4 * x + p*step;
1403 int by = 4 * y + q*step;
1415 #define ROLLOFF(i) offset == 1 ? ((i) ? 5 : 3) : \
1416 (1 + (6*(i) + offset - 1) / (2*offset - 1))
1420 else if (i > blen-1 - 2*offset)
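/* ROLLOFF builds the OBMC weight ramp used at block borders.  Purely as an
 * illustration of the arithmetic: with offset == 1 it yields 3 for i == 0 and
 * 5 otherwise; with offset == 2 it evaluates to 1, 3, 5, 7, ... as i grows,
 * before the flat interior weight takes over.
 */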
1426 int left, int right, int wy)
1429 for (x = 0; left && x < p->xblen >> 1; x++)
1430 obmc_weight[x] = wy*8;
1431 for (; x < p->xblen >> right; x++)
1433 for (; x < p->xblen; x++)
1434 obmc_weight[x] = wy*8;
1440 int left, int right, int top, int bottom)
1443 for (y = 0; top && y < p->yblen >> 1; y++) {
1447 for (; y < p->yblen >> bottom; y++) {
1452 for (; y < p->yblen; y++) {
1464 if (top || bottom || by == 1) {
1499 int x, int y, int ref, int plane)
1503 int motion_x = block->u.mv[ref][0];
1504 int motion_y = block->u.mv[ref][1];
1505 int mx, my, i, epel, nplanes = 0;
1528 src[0] = ref_hpel[(my>>1)+(mx>>2)] + y*p->stride + x;
1532 for (i = 0; i < 4; i++)
1533 src[i] = ref_hpel[i] + y*p->stride + x;
1557 src[!mx] = src[2 + !!mx];
1559 } else if (!(my&3)) {
1560 src[0] = src[(my>>1) ];
1561 src[1] = src[(my>>1)+1];
1582 for (i = 0; i < nplanes; i++) {
1590 return (nplanes>>1) + epel;
1594 uint8_t *obmc_weight, int xblen, int yblen)
1599 for (y = 0; y < yblen; y++) {
1600 for (x = 0; x < xblen; x += 2) {
1601 dst[x ] += dc * obmc_weight[x ];
1602 dst[x+1] += dc * obmc_weight[x+1];
1610 uint16_t *mctmp, uint8_t *obmc_weight,
1611 int plane, int dstx, int dsty)
1617 switch (block->ref&3) {
1623 idx = mc_subpel(s, block, src, dstx, dsty, (block->ref&3)-1, plane);
1630 idx = mc_subpel(s, block, src, dstx, dsty, 0, plane);
1632 idx = mc_subpel(s, block, src, dstx, dsty, 1, plane);
1653 for (x = 1; x < s->blwidth-1; x++) {
1695 for (i = 1; i < 4; i++) {
1707 ref->hpel[plane][3], ref->hpel[plane][0],
1725 int y, i, comp, dsty;
1730 for (comp = 0; comp < 3; comp++) {
1740 for (comp = 0; comp < 3; comp++) {
1745 for (i = 0; i < 4; i++)
1759 for (y = 0; y < p->height; y += 16) {
1772 for (i = 0; i < s->num_refs; i++) {
1781 for (y = 0; y < s->blheight; y++) {
1784 uint16_t *mctmp = s->mctmp + y*rowheight;
1797 mc_row(s, blocks, mctmp, comp, dsty);
1818 int chroma_x_shift, chroma_y_shift;
1827 for (i = 0; f->data[i]; i++) {
1844 unsigned retire, picnum;
1846 int64_t refdist, refnum;
1861 for (i = 0; i < s->num_refs; i++) {
1863 refdist = INT64_MAX;
1896 if (retire != picnum) {
1959 #define DATA_UNIT_HEADER_SIZE 13
1975 parse_code = buf[4];
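/* Layout of the 13-byte parse info header consumed here, as far as this
 * decoder relies on it (Dirac spec 9.6): bytes 0-3 carry the "BBCD" prefix
 * checked below, byte 4 the parse code, bytes 5-8 the big-endian offset to
 * the next parse info header (used as the data unit size), and bytes 9-12
 * the offset back to the previous one.
 */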
2028 if (sscanf(buf+14, "Schroedinger %d.%d.%d", ver, ver+1, ver+2) == 3)
2029 if (ver[0] == 1 && ver[1] == 0 && ver[2] <= 7)
2032 } else if (parse_code & 0x8) {
2050 tmp = parse_code & 0x03;
2056 s->is_arith = (parse_code & 0x48) == 0x08;
2057 s->low_delay = (parse_code & 0x88) == 0x88;
2062 pic->reference = (parse_code & 0x0C) == 0x0C;
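/* How the parse code bits are interpreted above, worked through for one
 * value: the low two bits give num_refs, arithmetic coding is signalled by
 * bit 3 set with bit 6 clear, low delay by bits 3 and 7 both set, and a
 * reference picture by bits 2 and 3 both set.  A parse code of 0x0C therefore
 * decodes as an arithmetic-coded intra (zero-reference) reference picture.
 */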
2103 int buf_size = pkt->size;
2106 unsigned data_unit_size;
2127 if (buf[buf_idx ] == 'B' && buf[buf_idx+1] == 'B' &&
2128 buf[buf_idx+2] == 'C' && buf[buf_idx+3] == 'D')
2135 data_unit_size = AV_RB32(buf+buf_idx+5);
2136 if (data_unit_size > buf_size - buf_idx || !data_unit_size) {
2137 if(data_unit_size > buf_size - buf_idx)
2139 "Data unit with size %d is larger than input buffer, discarding\n",
2151 buf_idx += data_unit_size;
2175 if (delayed_frame) {
const uint8_t ff_interleaved_dirac_golomb_vlc_code[256]
#define CHECKEDREAD(dst, cond, errmsg)
void(* add_obmc)(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen)
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
#define UNPACK_ARITH(n, type)
#define BITS_AVAILABLE(name, gb)
const int32_t ff_dirac_qscale_tab[116]
struct DiracContext::@43 codeblock[MAX_DWT_LEVELS+1]
#define PARSE_VALUES(type, x, gb, ebits, buf1, buf2)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static av_cold int dirac_decode_end(AVCodecContext *avctx)
static void codeblock(DiracContext *s, SubBand *b, GetBitContext *gb, DiracArith *c, int left, int right, int top, int bottom, int blockcnt_one, int is_arith)
Decode the coeffs in the rectangle defined by left, right, top, bottom [DIRAC_STD] 13...
This structure describes decoded (raw) audio or video data.
dirac_weight_func weight_func
static void flush(AVCodecContext *avctx)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
DiracFrame * ref_frames[MAX_REFERENCE_FRAMES+1]
static int divide3(int x)
static int dirac_decode_frame_internal(DiracContext *s)
Dirac Specification -> 13.0 Transform data syntax.
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
static void skip_bits_long(GetBitContext *s, int n)
static av_cold int init(AVCodecContext *avctx)
static void propagate_block_data(DiracBlock *block, int stride, int size)
Copies the current block to the other blocks covered by the current superblock split mode...
enum AVColorRange color_range
MPEG vs JPEG YUV range.
dirac_weight_func weight_dirac_pixels_tab[3]
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
void ff_dirac_init_arith_decoder(DiracArith *c, GetBitContext *gb, int length)
#define DATA_UNIT_HEADER_SIZE
Dirac Specification -> 9.6 Parse Info Header Syntax.
#define DECLARE_ALIGNED(n, t, v)
static unsigned svq3_get_ue_golomb(GetBitContext *gb)
static void global_mv(DiracContext *s, DiracBlock *block, int x, int y, int ref)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static int dirac_get_arith_uint(DiracArith *c, int follow_ctx, int data_ctx)
static int alloc_buffers(DiracContext *s, int stride)
static int decode_hq_slice(AVCodecContext *avctx, void *arg)
VC-2 Specification -> 13.5.3 hq_slice(sx,sy)
static void dirac_decode_flush(AVCodecContext *avctx)
const uint8_t * coeff_data
static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame)
static int dirac_unpack_idwt_params(DiracContext *s)
Dirac Specification -> 11.3 Wavelet transform data.
#define DIRAC_REF_MASK_REF2
int av_dirac_parse_sequence_header(AVDiracSeqHeader **pdsh, const uint8_t *buf, size_t buf_size, void *log_ctx)
Parse a Dirac sequence header.
unsigned weight_log2denom
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
#define av_assert0(cond)
assert() equivalent, that is always enabled.
DiracFrame * delay_frames[MAX_DELAY+1]
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
void(* add_rect_clamped)(uint8_t *dst, const uint16_t *src, int stride, const int16_t *idwt, int idwt_stride, int width, int height)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Interface to Dirac Decoder/Encoder.
static int coeff_unpack_golomb(GetBitContext *gb, int qfactor, int qoffset)
static av_cold int dirac_decode_init(AVCodecContext *avctx)
uint8_t quant[MAX_DWT_LEVELS][4]
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
static int dirac_get_se_golomb(GetBitContext *gb)
static void free_sequence_buffers(DiracContext *s)
static int get_bits_count(const GetBitContext *s)
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
bitstream reader API header.
static const uint8_t epel_weights[4][4][4]
static int decode_lowdelay_slice(AVCodecContext *avctx, void *arg)
Dirac Specification -> 13.5.2 Slices.
void(* avg_dirac_pixels_tab[3][4])(uint8_t *dst, const uint8_t *src[5], int stride, int h)
static void pred_block_dc(DiracBlock *block, int stride, int x, int y)
static int get_bits_left(GetBitContext *gb)
const int32_t ff_dirac_qoffset_intra_tab[120]
av_cold void ff_diracdsp_init(DiracDSPContext *c)
#define UPDATE_CACHE(name, gb)
int width
width and height of the video frame
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt)
void(* dirac_hpel_filter)(uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, const uint8_t *src, int stride, int width, int height)
static const uint16_t mask[17]
#define DIRAC_REF_MASK_GLOBAL
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static int pred_sbsplit(uint8_t *sbsplit, int stride, int x, int y)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
struct DiracContext::@45 highquality
static int add_frame(DiracFrame *framelist[], int maxframes, DiracFrame *frame)
static void pred_mv(DiracBlock *block, int stride, int x, int y, int ref)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
const uint8_t ff_dirac_default_qmat[7][4][4]
static int interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, int width, int height)
static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t *src[5], int x, int y, int ref, int plane)
For block x,y, determine which of the hpel planes to do bilinear interpolation from and set src[] to ...
const char * name
Name of the codec implementation.
DiracFrame * current_picture
unsigned old_delta_quant
schroedinger older than 1.0.8 doesn't store quant delta if only one codebook exists in a band ...
static const uint8_t offset[127][2]
#define CLOSE_READER(name, gb)
static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int size)
const uint8_t ff_interleaved_ue_golomb_vlc_code[256]
#define SKIP_BITS(name, gb, num)
#define INTRA_DC_PRED(n, type)
Dirac Specification -> 13.3 intra_dc_prediction(band)
static void init_planes(DiracContext *s)
static void decode_block_params(DiracContext *s, DiracArith arith[8], DiracBlock *block, int stride, int x, int y)
SubBand band[DWT_LEVELS][4]
enum AVPictureType pict_type
Picture type of the frame.
int display_picture_number
picture number in display order
#define CALC_PADDING(size, depth)
void(* avg_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h)
static void block_mc(DiracContext *s, DiracBlock *block, uint16_t *mctmp, uint8_t *obmc_weight, int plane, int dstx, int dsty)
static DiracFrame * remove_frame(DiracFrame *framelist[], int picnum)
void ff_spatial_idwt_slice2(DWTContext *d, int y)
int width
picture width / height.
static int dirac_unpack_prediction_parameters(DiracContext *s)
Unpack the motion compensation parameters Dirac Specification -> 11.2 Picture prediction data...
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
MpegvideoEncDSPContext mpvencdsp
static void mc_row(DiracContext *s, DiracBlock *block, uint16_t *mctmp, int plane, int dsty)
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
#define LAST_SKIP_BITS(name, gb, num)
static void select_dsp_funcs(DiracContext *s, int width, int height, int xblen, int yblen)
void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
uint8_t * edge_emu_buffer_base
static void decode_component(DiracContext *s, int comp)
Dirac Specification -> [DIRAC_STD] 13.4.1 core_transform_data()
static void init_obmc_weights(DiracContext *s, Plane *p, int by)
static const float pred[4]
void(* add_dirac_obmc[3])(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
static const int8_t mv[256][2]
static int get_buffer_with_edge(AVCodecContext *avctx, AVFrame *f, int flags)
static av_always_inline void decode_subband_internal(DiracContext *s, SubBand *b, int is_arith)
Dirac Specification -> 13.4.2 Non-skipped subbands.
Libavcodec external API header.
uint8_t * edge_emu_buffer[4]
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
const int ff_dirac_qoffset_inter_tab[122]
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
void(* dirac_weight_func)(uint8_t *block, int stride, int log2_denom, int weight, int h)
main external API structure.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
DiracFrame all_frames[MAX_FRAMES]
#define OPEN_READER(name, gb)
Arithmetic decoder for Dirac.
dirac_biweight_func biweight_dirac_pixels_tab[3]
#define MAX_DWT_LEVELS
The spec limits the number of wavelet decompositions to 4 for both level 1 (VC-2) and 128 (long-gop d...
static unsigned int get_bits1(GetBitContext *s)
static int decode_lowdelay(DiracContext *s)
Dirac Specification -> 13.5.1 low_delay_transform_data()
static int dirac_get_arith_bit(DiracArith *c, int ctx)
enum AVColorSpace colorspace
YUV colorspace type.
rational number numerator/denominator
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
#define GET_CACHE(name, gb)
dirac_biweight_func biweight_func
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
static int decode_subband_arith(AVCodecContext *avctx, void *b)
static int weight(int i, int blen, int offset)
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static int pred_block_mode(DiracBlock *block, int stride, int x, int y, int refmask)
static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avpkt)
#define SHOW_SBITS(name, gb, num)
#define MAX_REFERENCE_FRAMES
The spec limits this to 3 for frame coding, but in practice can be as high as 6.
static int dirac_decode_picture_header(DiracContext *s)
Dirac Specification -> 11.1.1 Picture Header.
common internal api header.
struct DiracContext::@44 lowdelay
static void decode_subband(DiracContext *s, GetBitContext *gb, int quant, int slice_x, int slice_y, int bits_end, SubBand *b1, SubBand *b2)
static int decode_subband_golomb(AVCodecContext *avctx, void *arg)
static int dirac_get_arith_int(DiracArith *c, int follow_ctx, int data_ctx)
static void init_obmc_weight_row(Plane *p, uint8_t *obmc_weight, int stride, int left, int right, int wy)
Core video DSP helper functions.
void(* put_dirac_pixels_tab[3][4])(uint8_t *dst, const uint8_t *src[5], int stride, int h)
dirac_pixels_tab[width][subpel] width is 2 for 32, 1 for 16, 0 for 8 subpel is 0 for fpel and hpel (o...
static int alloc_sequence_buffers(DiracContext *s)
void(* put_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h)
static void init_obmc_weight(Plane *p, uint8_t *obmc_weight, int stride, int left, int right, int top, int bottom)
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
struct DiracContext::@46 globalmc[2]
void(* draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
void(* dirac_biweight_func)(uint8_t *dst, const uint8_t *src, int stride, int log2_denom, int weightd, int weights, int h)
int key_frame
1 -> keyframe, 0-> not
const uint8_t ff_interleaved_golomb_vlc_len[256]
static const double coeff[2][5]
static const uint8_t * align_get_bits(GetBitContext *s)
static void * av_mallocz_array(size_t nmemb, size_t size)
static void add_dc(uint16_t *dst, int dc, int stride, uint8_t *obmc_weight, int xblen, int yblen)
#define av_malloc_array(a, b)
uint8_t * hpel_base[3][4]
#define FFSWAP(type, a, b)
This structure stores compressed data.
void(* put_signed_rect_clamped[3])(uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int width, int height)
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
#define DIRAC_REF_MASK_REF1
DiracBlock->ref flags, if set then the block does MC from the given ref.
static int dirac_unpack_block_motion_data(DiracContext *s)
Dirac Specification ->
uint8_t obmc_weight[3][MAX_BLOCKSIZE *MAX_BLOCKSIZE]
int ff_spatial_idwt_init(DWTContext *d, DWTPlane *p, enum dwt_type type, int decomposition_count, int bit_depth)