FFmpeg
pixlet.c
/*
 * Apple Pixlet decoder
 * Copyright (c) 2016 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>

#include "libavutil/intmath.h"

#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "decode.h"
#include "get_bits.h"
#include "thread.h"
#include "unary.h"

#define NB_LEVELS 4

#define PIXLET_MAGIC 0xDEADBEEF

#define H 0
#define V 1

typedef struct SubBand {
    unsigned width, height;
    unsigned size;
    unsigned x, y;
} SubBand;

typedef struct PixletContext {
    AVClass *class;

    GetByteContext gb;
    GetBitContext bc;

    int levels;
    int depth;
    int w, h;

    int16_t *filter[2];
    int16_t *prediction;
    int64_t scaling[4][2][NB_LEVELS];
    uint16_t lut[65536];
    SubBand band[4][NB_LEVELS * 3 + 1];
} PixletContext;

static av_cold int pixlet_init(AVCodecContext *avctx)
{
    avctx->pix_fmt     = AV_PIX_FMT_YUV420P16;
    avctx->color_range = AVCOL_RANGE_JPEG;
    return 0;
}

static void free_buffers(AVCodecContext *avctx)
{
    PixletContext *ctx = avctx->priv_data;

    av_freep(&ctx->filter[0]);
    av_freep(&ctx->filter[1]);
    av_freep(&ctx->prediction);
}

static av_cold int pixlet_close(AVCodecContext *avctx)
{
    PixletContext *ctx = avctx->priv_data;
    free_buffers(avctx);
    ctx->w = 0;
    ctx->h = 0;
    return 0;
}

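/* Allocate the scratch buffers used by the inverse transform and the lowpass
 * prediction, and precompute the geometry (width, height, size and x/y offset)
 * of the lowpass band and the 3 * NB_LEVELS highpass bands for each plane. */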
static int init_decoder(AVCodecContext *avctx)
{
    PixletContext *ctx = avctx->priv_data;
    int i, plane;

    ctx->filter[0] = av_malloc_array(ctx->h, sizeof(int16_t));
    ctx->filter[1] = av_malloc_array(FFMAX(ctx->h, ctx->w) + 16, sizeof(int16_t));
    ctx->prediction = av_malloc_array((ctx->w >> NB_LEVELS), sizeof(int16_t));
    if (!ctx->filter[0] || !ctx->filter[1] || !ctx->prediction)
        return AVERROR(ENOMEM);

    for (plane = 0; plane < 3; plane++) {
        unsigned shift = plane > 0;
        unsigned w = ctx->w >> shift;
        unsigned h = ctx->h >> shift;

        ctx->band[plane][0].width = w >> NB_LEVELS;
        ctx->band[plane][0].height = h >> NB_LEVELS;
        ctx->band[plane][0].size = (w >> NB_LEVELS) * (h >> NB_LEVELS);

        for (i = 0; i < NB_LEVELS * 3; i++) {
            unsigned scale = ctx->levels - (i / 3);

            ctx->band[plane][i + 1].width = w >> scale;
            ctx->band[plane][i + 1].height = h >> scale;
            ctx->band[plane][i + 1].size = (w >> scale) * (h >> scale);

            ctx->band[plane][i + 1].x = (w >> scale) * (((i + 1) % 3) != 2);
            ctx->band[plane][i + 1].y = (h >> scale) * (((i + 1) % 3) != 1);
        }
    }

    return 0;
}

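/* Decode the lowpass coefficients of one band. Each value is coded with a
 * unary prefix plus a variable-width suffix whose size adapts to a running
 * activity estimate ("state"); once that estimate becomes small enough, a
 * run of zero coefficients is decoded as well. Returns the number of bytes
 * consumed from the bitstream. */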
static int read_low_coeffs(AVCodecContext *avctx, int16_t *dst, int size,
                           int width, ptrdiff_t stride)
{
    PixletContext *ctx = avctx->priv_data;
    GetBitContext *bc = &ctx->bc;
    unsigned cnt1, nbits, k, j = 0, i = 0;
    int64_t value, state = 3;
    int rlen, escape, flag = 0;

    while (i < size) {
        nbits = FFMIN(ff_clz((state >> 8) + 3) ^ 0x1F, 14);

        cnt1 = get_unary(bc, 0, 8);
        if (cnt1 < 8) {
            value = show_bits(bc, nbits);
            if (value <= 1) {
                skip_bits(bc, nbits - 1);
                escape = ((1 << nbits) - 1) * cnt1;
            } else {
                skip_bits(bc, nbits);
                escape = value + ((1 << nbits) - 1) * cnt1 - 1;
            }
        } else {
            escape = get_bits(bc, 16);
        }

        value = -((escape + flag) & 1) | 1;
        dst[j++] = value * ((escape + flag + 1) >> 1);
        i++;
        if (j == width) {
            j = 0;
            dst += stride;
        }
        state = 120 * (escape + flag) + state - (120 * state >> 8);
        flag = 0;

        if (state * 4ULL > 0xFF || i >= size)
            continue;

        nbits = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24;
        escape = av_mod_uintp2(16383, nbits);
        cnt1 = get_unary(bc, 0, 8);
        if (cnt1 > 7) {
            rlen = get_bits(bc, 16);
        } else {
            value = show_bits(bc, nbits);
            if (value > 1) {
                skip_bits(bc, nbits);
                rlen = value + escape * cnt1 - 1;
            } else {
                skip_bits(bc, nbits - 1);
                rlen = escape * cnt1;
            }
        }

        if (rlen > size - i)
            return AVERROR_INVALIDDATA;
        i += rlen;

        for (k = 0; k < rlen; k++) {
            dst[j++] = 0;
            if (j == width) {
                j = 0;
                dst += stride;
            }
        }

        state = 0;
        flag = rlen < 0xFFFF ? 1 : 0;
    }

    align_get_bits(bc);
    return get_bits_count(bc) >> 3;
}

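/* Decode one highpass band from src. Uses the same adaptive prefix/suffix
 * plus zero-run scheme as read_low_coeffs, except that decoded magnitudes
 * are scaled by the band quantisation step c, the width of the escape code
 * is derived from a, and d controls how quickly the running estimate adapts.
 * Returns the number of bytes consumed. */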
static int read_high_coeffs(AVCodecContext *avctx, const uint8_t *src, int16_t *dst,
                            int size, int c, int a, int d,
                            int width, ptrdiff_t stride)
{
    PixletContext *ctx = avctx->priv_data;
    GetBitContext *bc = &ctx->bc;
    unsigned cnt1, shbits, rlen, nbits, length, i = 0, j = 0, k;
    int ret, escape, pfx, value, yflag, xflag, flag = 0;
    int64_t state = 3, tmp;

    ret = init_get_bits8(bc, src, bytestream2_get_bytes_left(&ctx->gb));
    if (ret < 0)
        return ret;

    if (a ^ (a >> 31)) {
        nbits = 33 - ff_clz(a ^ (a >> 31));
        if (nbits > 16)
            return AVERROR_INVALIDDATA;
    } else {
        nbits = 1;
    }

    length = 25 - nbits;

    while (i < size) {
        if (((state >> 8) + 3) & 0xFFFFFFF)
            value = ff_clz((state >> 8) + 3) ^ 0x1F;
        else
            value = -1;

        cnt1 = get_unary(bc, 0, length);
        if (cnt1 >= length) {
            cnt1 = get_bits(bc, nbits);
        } else {
            pfx = 14 + ((((uint64_t)(value - 14)) >> 32) & (value - 14));
            if (pfx < 1 || pfx > 25)
                return AVERROR_INVALIDDATA;
            cnt1 *= (1 << pfx) - 1;
            shbits = show_bits(bc, pfx);
            if (shbits <= 1) {
                skip_bits(bc, pfx - 1);
            } else {
                skip_bits(bc, pfx);
                cnt1 += shbits - 1;
            }
        }

        xflag = flag + cnt1;
        yflag = xflag;

        if (flag + cnt1 == 0) {
            value = 0;
        } else {
            xflag &= 1u;
            tmp = (int64_t)c * ((yflag + 1) >> 1) + (c >> 1);
            value = xflag + (tmp ^ -xflag);
        }

        i++;
        dst[j++] = value;
        if (j == width) {
            j = 0;
            dst += stride;
        }
        state += (int64_t)d * (uint64_t)yflag - ((int64_t)(d * (uint64_t)state) >> 8);

        flag = 0;

        if ((uint64_t)state > 0xFF / 4 || i >= size)
            continue;

        pfx = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24;
        escape = av_mod_uintp2(16383, pfx);
        cnt1 = get_unary(bc, 0, 8);
        if (cnt1 < 8) {
            if (pfx < 1 || pfx > 25)
                return AVERROR_INVALIDDATA;

            value = show_bits(bc, pfx);
            if (value > 1) {
                skip_bits(bc, pfx);
                rlen = value + escape * cnt1 - 1;
            } else {
                skip_bits(bc, pfx - 1);
                rlen = escape * cnt1;
            }
        } else {
            if (get_bits1(bc))
                value = get_bits(bc, 16);
            else
                value = get_bits(bc, 8);

            rlen = value + 8 * escape;
        }

        if (rlen > 0xFFFF || i + rlen > size)
            return AVERROR_INVALIDDATA;
        i += rlen;

        for (k = 0; k < rlen; k++) {
            dst[j++] = 0;
            if (j == width) {
                j = 0;
                dst += stride;
            }
        }

        state = 0;
        flag = rlen < 0xFFFF ? 1 : 0;
    }

    align_get_bits(bc);
    return get_bits_count(bc) >> 3;
}

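/* For each of the 3 * levels highpass bands of one plane, read the band
 * header (the a/b/c/d parameters followed by the PIXLET_MAGIC marker) and
 * decode the band's coefficients into their position in the frame. */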
static int read_highpass(AVCodecContext *avctx, const uint8_t *ptr,
                         int plane, AVFrame *frame)
{
    PixletContext *ctx = avctx->priv_data;
    ptrdiff_t stride = frame->linesize[plane] / 2;
    int i, ret;

    for (i = 0; i < ctx->levels * 3; i++) {
        int32_t a = bytestream2_get_be32(&ctx->gb);
        int32_t b = bytestream2_get_be32(&ctx->gb);
        int32_t c = bytestream2_get_be32(&ctx->gb);
        int32_t d = bytestream2_get_be32(&ctx->gb);
        int16_t *dest = (int16_t *)frame->data[plane] +
                        ctx->band[plane][i + 1].x +
                        ctx->band[plane][i + 1].y * stride;
        unsigned size = ctx->band[plane][i + 1].size;
        uint32_t magic = bytestream2_get_be32(&ctx->gb);

        if (magic != PIXLET_MAGIC) {
            av_log(avctx, AV_LOG_ERROR,
                   "wrong magic number: 0x%08"PRIX32" for plane %d, band %d\n",
                   magic, plane, i);
            return AVERROR_INVALIDDATA;
        }

        if (a == INT32_MIN)
            return AVERROR_INVALIDDATA;

        ret = read_high_coeffs(avctx, ptr + bytestream2_tell(&ctx->gb), dest, size,
                               c, (b >= FFABS(a)) ? b : a, d,
                               ctx->band[plane][i + 1].width, stride);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "error in highpass coefficients for plane %d, band %d\n",
                   plane, i);
            return ret;
        }
        bytestream2_skip(&ctx->gb, ret);
    }

    return 0;
}

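/* Undo the prediction applied to the lowpass band: samples are accumulated
 * down each column through pred[] and, except for the first column, also
 * accumulated along the row. */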
static void lowpass_prediction(int16_t *dst, int16_t *pred,
                               int width, int height, ptrdiff_t stride)
{
    int16_t val;
    int i, j;

    memset(pred, 0, width * sizeof(*pred));

    for (i = 0; i < height; i++) {
        val = pred[0] + dst[0];
        dst[0] = pred[0] = val;
        for (j = 1; j < width; j++) {
            val = pred[j] + dst[j];
            dst[j] = pred[j] = val;
            dst[j] += dst[j - 1];
        }
        dst += stride;
    }
}

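/* Invert one 1-D subband split: dest holds the low half followed by the high
 * half. Both halves are copied into the padded tmp buffer, extended at the
 * borders, then interleaved back into dest through two fixed-point synthesis
 * filters and the per-band scale factor. */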
static void filterfn(int16_t *dest, int16_t *tmp, unsigned size, int64_t scale)
{
    int16_t *low, *high, *ll, *lh, *hl, *hh;
    int hsize, i, j;
    int64_t value;

    hsize = size >> 1;
    low = tmp + 4;
    high = &low[hsize + 8];

    memcpy(low, dest, size);
    memcpy(high, dest + hsize, size);

    ll = &low[hsize];
    lh = &low[hsize];
    hl = &high[hsize];
    hh = hl;
    for (i = 4, j = 2; i; i--, j++, ll--, hh++, lh++, hl--) {
        low[i - 5] = low[j - 1];
        lh[0] = ll[-1];
        high[i - 5] = high[j - 2];
        hh[0] = hl[-2];
    }

    for (i = 0; i < hsize; i++) {
        value = (int64_t) low [i + 1] * -INT64_C(325392907)  +
                (int64_t) low [i + 0] *  INT64_C(3687786320) +
                (int64_t) low [i - 1] * -INT64_C(325392907)  +
                (int64_t) high[i + 0] *  INT64_C(1518500249) +
                (int64_t) high[i - 1] *  INT64_C(1518500249);
        dest[i * 2] = av_clip_int16(((value >> 32) * (uint64_t)scale) >> 32);
    }

    for (i = 0; i < hsize; i++) {
        value = (int64_t) low [i + 2] * -INT64_C(65078576)   +
                (int64_t) low [i + 1] *  INT64_C(1583578880) +
                (int64_t) low [i + 0] *  INT64_C(1583578880) +
                (int64_t) low [i - 1] * -INT64_C(65078576)   +
                (int64_t) high[i + 1] *  INT64_C(303700064)  +
                (int64_t) high[i + 0] * -INT64_C(3644400640) +
                (int64_t) high[i - 1] *  INT64_C(303700064);
        dest[i * 2 + 1] = av_clip_int16(((value >> 32) * (uint64_t)scale) >> 32);
    }
}

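/* Run the full multi-level inverse transform of one plane: starting from the
 * lowpass resolution, each level filters every row and then every column
 * (through a temporary column buffer) with the per-level scale factors,
 * doubling the reconstructed width and height at every step. */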
static void reconstruction(AVCodecContext *avctx, int16_t *dest,
                           unsigned width, unsigned height, ptrdiff_t stride,
                           int64_t *scaling_h, int64_t *scaling_v)
{
    PixletContext *ctx = avctx->priv_data;
    unsigned scaled_width, scaled_height;
    int16_t *ptr, *tmp;
    int i, j, k;

    scaled_width = width >> NB_LEVELS;
    scaled_height = height >> NB_LEVELS;
    tmp = ctx->filter[0];

    for (i = 0; i < NB_LEVELS; i++) {
        int64_t scale_v = scaling_v[i];
        int64_t scale_h = scaling_h[i];
        scaled_width <<= 1;
        scaled_height <<= 1;

        ptr = dest;
        for (j = 0; j < scaled_height; j++) {
            filterfn(ptr, ctx->filter[1], scaled_width, scale_v);
            ptr += stride;
        }

        for (j = 0; j < scaled_width; j++) {
            ptr = dest + j;
            for (k = 0; k < scaled_height; k++) {
                tmp[k] = *ptr;
                ptr += stride;
            }

            filterfn(tmp, ctx->filter[1], scaled_height, scale_h);

            ptr = dest + j;
            for (k = 0; k < scaled_height; k++) {
                *ptr = tmp[k];
                ptr += stride;
            }
        }
    }
}

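/* Rebuild the 16-bit luma lookup table when the coded bit depth changes:
 * the mapping squares the sample value and rescales it to the full
 * 0..65535 range. */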
static void build_luma_lut(AVCodecContext *avctx, int depth)
{
    PixletContext *ctx = avctx->priv_data;
    int max = (1 << depth) - 1;

    if (ctx->depth == depth)
        return;
    ctx->depth = depth;

    for (int i = 0; i < FF_ARRAY_ELEMS(ctx->lut); i++)
        ctx->lut[i] = ((int64_t)i * i * 65535LL) / max / max;
}

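/* Convert the luma plane in place from signed intermediate samples to
 * unsigned 16-bit output: negative values clamp to 0, values above the coded
 * range clamp to 65535, everything else goes through the luma LUT. */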
static void postprocess_luma(AVCodecContext *avctx, AVFrame *frame,
                             int w, int h, int depth)
{
    PixletContext *ctx = avctx->priv_data;
    uint16_t *dsty = (uint16_t *)frame->data[0];
    int16_t *srcy = (int16_t *)frame->data[0];
    ptrdiff_t stridey = frame->linesize[0] / 2;
    uint16_t *lut = ctx->lut;
    int i, j;

    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
            if (srcy[i] <= 0)
                dsty[i] = 0;
            else if (srcy[i] > ((1 << depth) - 1))
                dsty[i] = 65535;
            else
                dsty[i] = lut[srcy[i]];
        }
        dsty += stridey;
        srcy += stridey;
    }
}

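/* Convert both chroma planes in place: re-centre the signed samples around
 * half range, clip them to the coded bit depth and scale up to 16 bits. */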
static void postprocess_chroma(AVFrame *frame, int w, int h, int depth)
{
    uint16_t *dstu = (uint16_t *)frame->data[1];
    uint16_t *dstv = (uint16_t *)frame->data[2];
    int16_t *srcu = (int16_t *)frame->data[1];
    int16_t *srcv = (int16_t *)frame->data[2];
    ptrdiff_t strideu = frame->linesize[1] / 2;
    ptrdiff_t stridev = frame->linesize[2] / 2;
    const unsigned add = 1 << (depth - 1);
    const unsigned shift = 16 - depth;
    int i, j;

    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
            dstu[i] = av_clip_uintp2_c(add + srcu[i], depth) << shift;
            dstv[i] = av_clip_uintp2_c(add + srcv[i], depth) << shift;
        }
        dstu += strideu;
        dstv += stridev;
        srcu += strideu;
        srcv += stridev;
    }
}

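/* Decode one plane: read the per-level scaling factors, the DC value and the
 * lowpass band (top row, left column, then the interior), then the highpass
 * bands, and finally undo the lowpass prediction and run the inverse
 * reconstruction. */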
static int decode_plane(AVCodecContext *avctx, int plane,
                        const AVPacket *avpkt, AVFrame *frame)
{
    PixletContext *ctx = avctx->priv_data;
    ptrdiff_t stride = frame->linesize[plane] / 2;
    unsigned shift = plane > 0;
    int16_t *dst;
    int i, ret;

    for (i = ctx->levels - 1; i >= 0; i--) {
        int32_t h = sign_extend(bytestream2_get_be32(&ctx->gb), 32);
        int32_t v = sign_extend(bytestream2_get_be32(&ctx->gb), 32);

        if (!h || !v)
            return AVERROR_INVALIDDATA;

        ctx->scaling[plane][H][i] = (1000000ULL << 32) / h;
        ctx->scaling[plane][V][i] = (1000000ULL << 32) / v;
    }

    bytestream2_skip(&ctx->gb, 4);

    dst = (int16_t *)frame->data[plane];
    dst[0] = sign_extend(bytestream2_get_be16(&ctx->gb), 16);

    ret = init_get_bits8(&ctx->bc, avpkt->data + bytestream2_tell(&ctx->gb),
                         bytestream2_get_bytes_left(&ctx->gb));
    if (ret < 0)
        return ret;

    ret = read_low_coeffs(avctx, dst + 1, ctx->band[plane][0].width - 1,
                          ctx->band[plane][0].width - 1, 0);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "error in lowpass coefficients for plane %d, top row\n", plane);
        return ret;
    }

    ret = read_low_coeffs(avctx, dst + stride,
                          ctx->band[plane][0].height - 1, 1, stride);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "error in lowpass coefficients for plane %d, left column\n",
               plane);
        return ret;
    }

    ret = read_low_coeffs(avctx, dst + stride + 1,
                          (ctx->band[plane][0].width - 1) * (ctx->band[plane][0].height - 1),
                          ctx->band[plane][0].width - 1, stride);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "error in lowpass coefficients for plane %d, rest\n", plane);
        return ret;
    }

    bytestream2_skip(&ctx->gb, ret);
    if (bytestream2_get_bytes_left(&ctx->gb) <= 0) {
        av_log(avctx, AV_LOG_ERROR, "no bytes left\n");
        return AVERROR_INVALIDDATA;
    }

    ret = read_highpass(avctx, avpkt->data, plane, frame);
    if (ret < 0)
        return ret;

    lowpass_prediction(dst, ctx->prediction, ctx->band[plane][0].width,
                       ctx->band[plane][0].height, stride);

    reconstruction(avctx, (int16_t *)frame->data[plane], ctx->w >> shift,
                   ctx->h >> shift, stride, ctx->scaling[plane][H],
                   ctx->scaling[plane][V]);

    return 0;
}

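/* Decode a complete frame: validate the packet header (size, version,
 * dimensions, number of levels, bit depth), reallocate the size-dependent
 * buffers when the coded dimensions change, decode the three planes and
 * convert the result to full-range YUV420P16. */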
static int pixlet_decode_frame(AVCodecContext *avctx, AVFrame *p,
                               int *got_frame, AVPacket *avpkt)
{
    PixletContext *ctx = avctx->priv_data;
    int i, w, h, width, height, ret, version;
    uint32_t pktsize, depth;

    bytestream2_init(&ctx->gb, avpkt->data, avpkt->size);

    pktsize = bytestream2_get_be32(&ctx->gb);
    if (pktsize <= 44 + (NB_LEVELS * 8 + 6) * 3 || pktsize - 4 > bytestream2_get_bytes_left(&ctx->gb)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid packet size %"PRIu32"\n", pktsize);
        return AVERROR_INVALIDDATA;
    }

    version = bytestream2_get_le32(&ctx->gb);
    if (version != 1)
        avpriv_request_sample(avctx, "Version %d", version);

    bytestream2_skip(&ctx->gb, 4);
    if (bytestream2_get_be32(&ctx->gb) != 1)
        return AVERROR_INVALIDDATA;
    bytestream2_skip(&ctx->gb, 4);

    width  = bytestream2_get_be32(&ctx->gb);
    height = bytestream2_get_be32(&ctx->gb);

    if ( width > INT_MAX - (1U << (NB_LEVELS + 1))
        || height > INT_MAX - (1U << (NB_LEVELS + 1)))
        return AVERROR_INVALIDDATA;

    w = FFALIGN(width,  1 << (NB_LEVELS + 1));
    h = FFALIGN(height, 1 << (NB_LEVELS + 1));

    ctx->levels = bytestream2_get_be32(&ctx->gb);
    if (ctx->levels != NB_LEVELS)
        return AVERROR_INVALIDDATA;
    depth = bytestream2_get_be32(&ctx->gb);
    if (depth < 8 || depth > 15) {
        avpriv_request_sample(avctx, "Depth %d", depth);
        return AVERROR_INVALIDDATA;
    }

    build_luma_lut(avctx, depth);

    ret = ff_set_dimensions(avctx, w, h);
    if (ret < 0)
        return ret;
    avctx->width  = width;
    avctx->height = height;

    if (ctx->w != w || ctx->h != h) {
        free_buffers(avctx);
        ctx->w = w;
        ctx->h = h;

        ret = init_decoder(avctx);
        if (ret < 0) {
            free_buffers(avctx);
            ctx->w = 0;
            ctx->h = 0;
            return ret;
        }
    }

    bytestream2_skip(&ctx->gb, 8);

    p->pict_type   = AV_PICTURE_TYPE_I;
    p->key_frame   = 1;
    p->color_range = AVCOL_RANGE_JPEG;

    ret = ff_thread_get_buffer(avctx, p, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < 3; i++) {
        ret = decode_plane(avctx, i, avpkt, p);
        if (ret < 0)
            return ret;
        if (avctx->flags & AV_CODEC_FLAG_GRAY)
            break;
    }

    postprocess_luma(avctx, p, ctx->w, ctx->h, ctx->depth);
    postprocess_chroma(p, ctx->w >> 1, ctx->h >> 1, ctx->depth);

    *got_frame = 1;

    return pktsize;
}

const FFCodec ff_pixlet_decoder = {
    .p.name         = "pixlet",
    CODEC_LONG_NAME("Apple Pixlet"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_PIXLET,
    .init           = pixlet_init,
    .close          = pixlet_close,
    FF_CODEC_DECODE_CB(pixlet_decode_frame),
    .priv_data_size = sizeof(PixletContext),
    .p.capabilities = AV_CODEC_CAP_DR1 |
                      AV_CODEC_CAP_FRAME_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
};