/*
 * Wing Commander/Xan Video Decoder
 * Copyright (C) 2003 The FFmpeg project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Xan video decoder for Wing Commander III computer game
 * by Mario Brito (mbrito@student.dei.uc.pt)
 * and Mike Melanson (melanson@pcisys.net)
 *
 * The xan_wc3 decoder outputs PAL8 data.
 */
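/*
 * A minimal, hypothetical usage sketch (not part of FFmpeg) showing how a
 * caller could drive this decoder through the public libavcodec API and read
 * the PAL8 output: frame->data[0] holds one palette index per pixel (with
 * stride frame->linesize[0]) and frame->data[1] holds the 256-entry ARGB
 * palette.  The packet data and the video dimensions are assumed to come
 * from the demuxer; "pkt", "width" and "height" below are placeholders.
 *
 *     const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_XAN_WC3);
 *     AVCodecContext *dec  = avcodec_alloc_context3(codec);
 *     AVFrame *frm         = av_frame_alloc();
 *
 *     dec->width  = width;   // from the container
 *     dec->height = height;
 *     if (avcodec_open2(dec, codec, NULL) < 0)
 *         abort();
 *
 *     if (avcodec_send_packet(dec, pkt) >= 0 &&
 *         avcodec_receive_frame(dec, frm) >= 0) {
 *         const uint8_t  *pixels  = frm->data[0];                   // palette indices
 *         const uint32_t *palette = (const uint32_t *)frm->data[1]; // 0xAARRGGBB entries
 *         uint32_t top_left = palette[pixels[0]];                   // color of pixel (0,0)
 *     }
 *
 *     av_frame_free(&frm);
 *     avcodec_free_context(&dec);
 */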

#include <string.h>

#include "libavutil/intreadwrite.h"
#include "libavutil/mem.h"

#define BITSTREAM_READER_LE
#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "decode.h"
#include "get_bits.h"

#define RUNTIME_GAMMA 0

#define VGA__TAG MKTAG('V', 'G', 'A', ' ')
#define PALT_TAG MKTAG('P', 'A', 'L', 'T')
#define SHOT_TAG MKTAG('S', 'H', 'O', 'T')
#define PALETTE_COUNT 256
#define PALETTE_SIZE (PALETTE_COUNT * 3)
#define PALETTES_MAX 256

typedef struct XanContext {

    AVCodecContext *avctx;
    AVFrame *last_frame;

    const uint8_t *buf;
    int size;

    /* scratch space */
    uint8_t *buffer1;
    int buffer1_size;
    uint8_t *buffer2;
    int buffer2_size;

    unsigned *palettes;
    int palettes_count;
    int cur_palette;

    int frame_size;

} XanContext;

static av_cold int xan_decode_end(AVCodecContext *avctx)
{
    XanContext *s = avctx->priv_data;

    av_frame_free(&s->last_frame);

    av_freep(&s->buffer1);
    av_freep(&s->buffer2);
    av_freep(&s->palettes);

    return 0;
}

static av_cold int xan_decode_init(AVCodecContext *avctx)
{
    XanContext *s = avctx->priv_data;

    s->avctx      = avctx;
    s->frame_size = 0;

    avctx->pix_fmt = AV_PIX_FMT_PAL8;

    s->buffer1_size = avctx->width * avctx->height;
    s->buffer1      = av_malloc(s->buffer1_size);
    if (!s->buffer1)
        return AVERROR(ENOMEM);
    s->buffer2_size = avctx->width * avctx->height;
    /* +130: xan_unpack() requires a padded destination, see its documentation */
    s->buffer2      = av_malloc(s->buffer2_size + 130);
    if (!s->buffer2)
        return AVERROR(ENOMEM);

    s->last_frame = av_frame_alloc();
    if (!s->last_frame)
        return AVERROR(ENOMEM);

    return 0;
}

static int xan_huffman_decode(uint8_t *dest, int dest_len,
                              const uint8_t *src, int src_len)
{
    uint8_t byte = *src++;
    uint8_t ival = byte + 0x16;
    const uint8_t * ptr = src + byte*2;
    int ptr_len = src_len - 1 - byte*2;
    uint8_t val = ival;
    uint8_t *dest_end = dest + dest_len;
    uint8_t *dest_start = dest;
    int ret;
    GetBitContext gb;

    if ((ret = init_get_bits8(&gb, ptr, ptr_len)) < 0)
        return ret;

    while (val != 0x16) {
        unsigned idx;
        if (get_bits_left(&gb) < 1)
            return AVERROR_INVALIDDATA;
        idx = val - 0x17 + get_bits1(&gb) * byte;
        if (idx >= 2 * byte)
            return AVERROR_INVALIDDATA;
        val = src[idx];

        if (val < 0x16) {
            if (dest >= dest_end)
                return dest_len;
            *dest++ = val;
            val = ival;
        }
    }

    return dest - dest_start;
}
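
/*
 * Layout of the data consumed by xan_huffman_decode(), reconstructed from the
 * code above (no official specification is referenced here): the first byte N
 * is the number of internal tree nodes, followed by a flat table of 2*N child
 * values and then an LSB-first bitstream.  Internal nodes are numbered
 * 0x17 .. 0x16+N and the root is node 0x16+N; for node n, bit 0 selects child
 * src[n - 0x17] and bit 1 selects child src[n - 0x17 + N].  Values below 0x16
 * are emitted literally (they are exactly the opcodes 0..21 later consumed by
 * xan_wc3_decode_frame()), and 0x16 terminates decoding.
 *
 * Hypothetical example with N = 2 (root = 0x18):
 *     table = { 0x05, 0x17, 0x09, 0x16 }
 *     bit 1 at the root reaches table[3] = 0x16, the end-of-stream code;
 *     bit 0 reaches node 0x17, whose children are opcode 0x05 (bit 0) and
 *     opcode 0x09 (bit 1).
 */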

/**
 * unpack simple compression
 *
 * @param dest destination buffer of dest_len, must be padded with at least 130 bytes
 */
static void xan_unpack(uint8_t *dest, int dest_len,
                       const uint8_t *src, int src_len)
{
    uint8_t opcode;
    int size;
    uint8_t *dest_org = dest;
    uint8_t *dest_end = dest + dest_len;
    GetByteContext ctx;

    bytestream2_init(&ctx, src, src_len);
    while (dest < dest_end && bytestream2_get_bytes_left(&ctx)) {
        opcode = bytestream2_get_byte(&ctx);

        if (opcode < 0xe0) {
            int size2, back;
            if ((opcode & 0x80) == 0) {
                size = opcode & 3;

                back  = ((opcode & 0x60) << 3) + bytestream2_get_byte(&ctx) + 1;
                size2 = ((opcode & 0x1c) >> 2) + 3;
            } else if ((opcode & 0x40) == 0) {
                size = bytestream2_peek_byte(&ctx) >> 6;

                back  = (bytestream2_get_be16(&ctx) & 0x3fff) + 1;
                size2 = (opcode & 0x3f) + 4;
            } else {
                size = opcode & 3;

                back  = ((opcode & 0x10) << 12) + bytestream2_get_be16(&ctx) + 1;
                size2 = ((opcode & 0x0c) << 6) + bytestream2_get_byte(&ctx) + 5;
            }

            if (dest_end - dest < size + size2 ||
                dest + size - dest_org < back ||
                bytestream2_get_bytes_left(&ctx) < size)
                return;
            bytestream2_get_buffer(&ctx, dest, size);
            dest += size;
            av_memcpy_backptr(dest, back, size2);
            dest += size2;
        } else {
            int finish = opcode >= 0xfc;
            size = finish ? opcode & 3 : ((opcode & 0x1f) << 2) + 4;

            if (dest_end - dest < size || bytestream2_get_bytes_left(&ctx) < size)
                return;
            bytestream2_get_buffer(&ctx, dest, size);
            dest += size;
            if (finish)
                return;
        }
    }
}
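
/*
 * Summary of the xan_unpack() opcode encoding as implemented above (an
 * LZ77-style scheme: each opcode copies "size" literal bytes from the input
 * and then "size2" bytes from "back" bytes behind the output pointer):
 *
 *     0x00-0x7f  size = op & 3, back = ((op & 0x60) << 3) + byte + 1,
 *                size2 = ((op & 0x1c) >> 2) + 3
 *     0x80-0xbf  size = next byte >> 6, back = (be16 & 0x3fff) + 1,
 *                size2 = (op & 0x3f) + 4
 *     0xc0-0xdf  size = op & 3, back = ((op & 0x10) << 12) + be16 + 1,
 *                size2 = ((op & 0x0c) << 6) + byte + 5
 *     0xe0-0xfb  literals only: size = ((op & 0x1f) << 2) + 4
 *     0xfc-0xff  final literals: size = op & 3, then stop
 */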

static void xan_wc3_output_pixel_run(XanContext *s, AVFrame *frame,
                                     const uint8_t *pixel_buffer, int x, int y, int pixel_count)
{
    int stride;
    int line_inc;
    int index;
    int current_x;
    int width = s->avctx->width;
    uint8_t *palette_plane;

    palette_plane = frame->data[0];
    stride        = frame->linesize[0];
    line_inc      = stride - width;
    index         = y * stride + x;
    current_x     = x;
    while (pixel_count && index < s->frame_size) {
        int count = FFMIN(pixel_count, width - current_x);
        memcpy(palette_plane + index, pixel_buffer, count);
        pixel_count  -= count;
        index        += count;
        pixel_buffer += count;
        current_x    += count;

        if (current_x >= width) {
            index += line_inc;
            current_x = 0;
        }
    }
}

static void xan_wc3_copy_pixel_run(XanContext *s, AVFrame *frame,
                                   int x, int y,
                                   int pixel_count, int motion_x,
                                   int motion_y)
{
    int stride;
    int line_inc;
    int curframe_index, prevframe_index;
    int curframe_x, prevframe_x;
    int width = s->avctx->width;
    uint8_t *palette_plane, *prev_palette_plane;

    if (y + motion_y < 0 || y + motion_y >= s->avctx->height ||
        x + motion_x < 0 || x + motion_x >= s->avctx->width)
        return;

    palette_plane = frame->data[0];
    prev_palette_plane = s->last_frame->data[0];
    if (!prev_palette_plane)
        prev_palette_plane = palette_plane;
    stride = frame->linesize[0];
    line_inc = stride - width;
    curframe_index = y * stride + x;
    curframe_x = x;
    prevframe_index = (y + motion_y) * stride + x + motion_x;
    prevframe_x = x + motion_x;

    if (prev_palette_plane == palette_plane && FFABS(motion_x + width*motion_y) < pixel_count) {
        avpriv_request_sample(s->avctx, "Overlapping copy");
        return;
    }

    while (pixel_count &&
           curframe_index  < s->frame_size &&
           prevframe_index < s->frame_size) {
        int count = FFMIN3(pixel_count, width - curframe_x,
                           width - prevframe_x);

        memcpy(palette_plane + curframe_index,
               prev_palette_plane + prevframe_index, count);
        pixel_count     -= count;
        curframe_index  += count;
        prevframe_index += count;
        curframe_x      += count;
        prevframe_x     += count;

        if (curframe_x >= width) {
            curframe_index += line_inc;
            curframe_x = 0;
        }

        if (prevframe_x >= width) {
            prevframe_index += line_inc;
            prevframe_x = 0;
        }
    }
}

static int xan_wc3_decode_frame(XanContext *s, AVFrame *frame)
{

    int width  = s->avctx->width;
    int height = s->avctx->height;
    int total_pixels = width * height;
    uint8_t opcode;
    uint8_t flag = 0;
    int size = 0;
    int motion_x, motion_y;
    int x, y, ret;

    uint8_t *opcode_buffer = s->buffer1;
    uint8_t *opcode_buffer_end = s->buffer1 + s->buffer1_size;
    int opcode_buffer_size = s->buffer1_size;
    const uint8_t *imagedata_buffer = s->buffer2;

    /* pointers to segments inside the compressed chunk */
    const uint8_t *huffman_segment;
    GetByteContext size_segment;
    GetByteContext vector_segment;
    const uint8_t *imagedata_segment;
    int huffman_offset, size_offset, vector_offset, imagedata_offset,
        imagedata_size;

    if (s->size < 8)
        return AVERROR_INVALIDDATA;

    huffman_offset   = AV_RL16(&s->buf[0]);
    size_offset      = AV_RL16(&s->buf[2]);
    vector_offset    = AV_RL16(&s->buf[4]);
    imagedata_offset = AV_RL16(&s->buf[6]);

    if (huffman_offset   >= s->size ||
        size_offset      >= s->size ||
        vector_offset    >= s->size ||
        imagedata_offset >= s->size)
        return AVERROR_INVALIDDATA;

    huffman_segment = s->buf + huffman_offset;
    bytestream2_init(&size_segment,   s->buf + size_offset,   s->size - size_offset);
    bytestream2_init(&vector_segment, s->buf + vector_offset, s->size - vector_offset);
    imagedata_segment = s->buf + imagedata_offset;

    if ((ret = xan_huffman_decode(opcode_buffer, opcode_buffer_size,
                                  huffman_segment, s->size - huffman_offset)) < 0)
        return AVERROR_INVALIDDATA;
    opcode_buffer_end = opcode_buffer + ret;

    if (imagedata_segment[0] == 2) {
        xan_unpack(s->buffer2, s->buffer2_size,
                   &imagedata_segment[1], s->size - imagedata_offset - 1);
        imagedata_size = s->buffer2_size;
    } else {
        imagedata_size = s->size - imagedata_offset - 1;
        imagedata_buffer = &imagedata_segment[1];
    }

    /* use the decoded data segments to build the frame */
    x = y = 0;
    while (total_pixels && opcode_buffer < opcode_buffer_end) {

        opcode = *opcode_buffer++;
        size = 0;

        switch (opcode) {

        case 0:
            flag ^= 1;
            continue;

        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 6:
        case 7:
        case 8:
            size = opcode;
            break;

        case 12:
        case 13:
        case 14:
        case 15:
        case 16:
        case 17:
        case 18:
            size += (opcode - 10);
            break;

        case 9:
        case 19:
            if (bytestream2_get_bytes_left(&size_segment) < 1) {
                av_log(s->avctx, AV_LOG_ERROR, "size_segment overread\n");
                return AVERROR_INVALIDDATA;
            }
            size = bytestream2_get_byte(&size_segment);
            break;

        case 10:
        case 20:
            if (bytestream2_get_bytes_left(&size_segment) < 2) {
                av_log(s->avctx, AV_LOG_ERROR, "size_segment overread\n");
                return AVERROR_INVALIDDATA;
            }
            size = bytestream2_get_be16(&size_segment);
            break;

        case 11:
        case 21:
            if (bytestream2_get_bytes_left(&size_segment) < 3) {
                av_log(s->avctx, AV_LOG_ERROR, "size_segment overread\n");
                return AVERROR_INVALIDDATA;
            }
            size = bytestream2_get_be24(&size_segment);
            break;
        }

        if (size > total_pixels)
            break;

        if (opcode < 12) {
            flag ^= 1;
            if (flag) {
                /* run of (size) pixels is unchanged from last frame */
                xan_wc3_copy_pixel_run(s, frame, x, y, size, 0, 0);
            } else {
                /* output a run of pixels from imagedata_buffer */
                if (imagedata_size < size)
                    break;
                xan_wc3_output_pixel_run(s, frame, imagedata_buffer, x, y, size);
                imagedata_buffer += size;
                imagedata_size -= size;
            }
        } else {
            uint8_t vector;
            if (bytestream2_get_bytes_left(&vector_segment) <= 0) {
                av_log(s->avctx, AV_LOG_ERROR, "vector_segment overread\n");
                return AVERROR_INVALIDDATA;
            }
            /* run-based motion compensation from last frame */
            vector = bytestream2_get_byte(&vector_segment);
            motion_x = sign_extend(vector >> 4,  4);
            motion_y = sign_extend(vector & 0xF, 4);

            /* copy a run of pixels from the previous frame */
            xan_wc3_copy_pixel_run(s, frame, x, y, size, motion_x, motion_y);

            flag = 0;
        }

        /* coordinate accounting */
        total_pixels -= size;
        y += (x + size) / width;
        x  = (x + size) % width;
    }
    return 0;
}
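
/*
 * Summary of the per-frame opcode stream interpreted above, reconstructed
 * from the code rather than from an official specification.  Each opcode
 * yields a run length:
 *
 *     0        toggle "flag" without emitting pixels
 *     1-8      run length = opcode
 *     9, 19    run length = next byte from the size segment
 *     10, 20   run length = next big-endian 16-bit value from the size segment
 *     11, 21   run length = next big-endian 24-bit value from the size segment
 *     12-18    run length = opcode - 10
 *
 * Opcodes 1-11 toggle the flag and then emit the run: when the flag becomes 1
 * the run is copied unchanged from the previous frame, otherwise it is taken
 * from the decoded image data.  Opcodes 12-21 consume one byte from the
 * vector segment holding a pair of 4-bit signed motion components (x in the
 * high nibble, y in the low nibble), copy the run from the previous frame at
 * that offset, and clear the flag.
 */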

#if RUNTIME_GAMMA
static inline unsigned mul(unsigned a, unsigned b)
{
    return (a * b) >> 16;
}

static inline unsigned pow4(unsigned a)
{
    unsigned square = mul(a, a);
    return mul(square, square);
}

static inline unsigned pow5(unsigned a)
{
    return mul(pow4(a), a);
}

static uint8_t gamma_corr(uint8_t in) {
    unsigned lo, hi = 0xff40, target;
    int i = 15;
    in = (in << 2) | (in >> 6);
    /*  equivalent float code:
    if (in >= 252)
        return 253;
    return round(pow(in / 256.0, 0.8) * 256);
    */
    lo = target = in << 8;
    do {
        unsigned mid = (lo + hi) >> 1;
        unsigned pow = pow5(mid);
        if (pow > target) hi = mid;
        else lo = mid;
    } while (--i);
    return (pow4((lo + hi) >> 1) + 0x80) >> 8;
}
#else
/**
 * This is a gamma correction that xan3 applies to all palette entries.
 *
 * There is a peculiarity, namely that the values are clamped to 253 -
 * it seems likely that this table was calculated by a buggy fixed-point
 * implementation, the one above under RUNTIME_GAMMA behaves like this for
 * example.
 * The exponent value of 0.8 can be explained by this as well, since 0.8 = 4/5
 * and thus pow(x, 0.8) is still easy to calculate.
 * Also, the input values are first rotated to the left by 2.
 */
static const uint8_t gamma_lookup[256] = {
    0x00, 0x09, 0x10, 0x16, 0x1C, 0x21, 0x27, 0x2C,
    0x31, 0x35, 0x3A, 0x3F, 0x43, 0x48, 0x4C, 0x50,
    0x54, 0x59, 0x5D, 0x61, 0x65, 0x69, 0x6D, 0x71,
    0x75, 0x79, 0x7D, 0x80, 0x84, 0x88, 0x8C, 0x8F,
    0x93, 0x97, 0x9A, 0x9E, 0xA2, 0xA5, 0xA9, 0xAC,
    0xB0, 0xB3, 0xB7, 0xBA, 0xBE, 0xC1, 0xC5, 0xC8,
    0xCB, 0xCF, 0xD2, 0xD5, 0xD9, 0xDC, 0xDF, 0xE3,
    0xE6, 0xE9, 0xED, 0xF0, 0xF3, 0xF6, 0xFA, 0xFD,
    0x03, 0x0B, 0x12, 0x18, 0x1D, 0x23, 0x28, 0x2D,
    0x32, 0x36, 0x3B, 0x40, 0x44, 0x49, 0x4D, 0x51,
    0x56, 0x5A, 0x5E, 0x62, 0x66, 0x6A, 0x6E, 0x72,
    0x76, 0x7A, 0x7D, 0x81, 0x85, 0x89, 0x8D, 0x90,
    0x94, 0x98, 0x9B, 0x9F, 0xA2, 0xA6, 0xAA, 0xAD,
    0xB1, 0xB4, 0xB8, 0xBB, 0xBF, 0xC2, 0xC5, 0xC9,
    0xCC, 0xD0, 0xD3, 0xD6, 0xDA, 0xDD, 0xE0, 0xE4,
    0xE7, 0xEA, 0xED, 0xF1, 0xF4, 0xF7, 0xFA, 0xFD,
    0x05, 0x0D, 0x13, 0x19, 0x1F, 0x24, 0x29, 0x2E,
    0x33, 0x38, 0x3C, 0x41, 0x45, 0x4A, 0x4E, 0x52,
    0x57, 0x5B, 0x5F, 0x63, 0x67, 0x6B, 0x6F, 0x73,
    0x77, 0x7B, 0x7E, 0x82, 0x86, 0x8A, 0x8D, 0x91,
    0x95, 0x99, 0x9C, 0xA0, 0xA3, 0xA7, 0xAA, 0xAE,
    0xB2, 0xB5, 0xB9, 0xBC, 0xBF, 0xC3, 0xC6, 0xCA,
    0xCD, 0xD0, 0xD4, 0xD7, 0xDA, 0xDE, 0xE1, 0xE4,
    0xE8, 0xEB, 0xEE, 0xF1, 0xF5, 0xF8, 0xFB, 0xFD,
    0x07, 0x0E, 0x15, 0x1A, 0x20, 0x25, 0x2A, 0x2F,
    0x34, 0x39, 0x3D, 0x42, 0x46, 0x4B, 0x4F, 0x53,
    0x58, 0x5C, 0x60, 0x64, 0x68, 0x6C, 0x70, 0x74,
    0x78, 0x7C, 0x7F, 0x83, 0x87, 0x8B, 0x8E, 0x92,
    0x96, 0x99, 0x9D, 0xA1, 0xA4, 0xA8, 0xAB, 0xAF,
    0xB2, 0xB6, 0xB9, 0xBD, 0xC0, 0xC4, 0xC7, 0xCB,
    0xCE, 0xD1, 0xD5, 0xD8, 0xDB, 0xDF, 0xE2, 0xE5,
    0xE9, 0xEC, 0xEF, 0xF2, 0xF6, 0xF9, 0xFC, 0xFD
};
#endif
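
/*
 * Hypothetical stand-alone sketch (not part of FFmpeg) of how a table like
 * gamma_lookup[] can be regenerated from the description above: rotate the
 * input left by 2, apply pow(x, 0.8) and clamp to 253.  Since the original
 * table appears to come from a fixed-point implementation, a few entries may
 * differ by one from this floating-point version.
 *
 *     #include <math.h>
 *     #include <stdio.h>
 *
 *     int main(void)
 *     {
 *         for (int i = 0; i < 256; i++) {
 *             int in = ((i << 2) | (i >> 6)) & 0xFF;
 *             int v  = in >= 252 ? 253 : (int)lrint(256.0 * pow(in / 256.0, 0.8));
 *             printf("0x%02X,%c", v, (i & 7) == 7 ? '\n' : ' ');
 *         }
 *         return 0;
 *     }
 */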

static int xan_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                            int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int ret, buf_size = avpkt->size;
    XanContext *s = avctx->priv_data;
    GetByteContext ctx;
    int tag = 0;

    bytestream2_init(&ctx, buf, buf_size);
    while (bytestream2_get_bytes_left(&ctx) > 8 && tag != VGA__TAG) {
        unsigned *tmpptr;
        uint32_t new_pal;
        int size;
        int i;
        tag  = bytestream2_get_le32(&ctx);
        size = bytestream2_get_be32(&ctx);
        if (size < 0) {
            av_log(avctx, AV_LOG_ERROR, "Invalid tag size %d\n", size);
            return AVERROR_INVALIDDATA;
        }
        size = FFMIN(size, bytestream2_get_bytes_left(&ctx));
        switch (tag) {
        case PALT_TAG:
            if (size < PALETTE_SIZE)
                return AVERROR_INVALIDDATA;
            if (s->palettes_count >= PALETTES_MAX)
                return AVERROR_INVALIDDATA;
            tmpptr = av_realloc_array(s->palettes,
                                      s->palettes_count + 1, AVPALETTE_SIZE);
            if (!tmpptr)
                return AVERROR(ENOMEM);
            s->palettes = tmpptr;
            tmpptr += s->palettes_count * AVPALETTE_COUNT;
            for (i = 0; i < PALETTE_COUNT; i++) {
#if RUNTIME_GAMMA
                int r = gamma_corr(bytestream2_get_byteu(&ctx));
                int g = gamma_corr(bytestream2_get_byteu(&ctx));
                int b = gamma_corr(bytestream2_get_byteu(&ctx));
#else
                int r = gamma_lookup[bytestream2_get_byteu(&ctx)];
                int g = gamma_lookup[bytestream2_get_byteu(&ctx)];
                int b = gamma_lookup[bytestream2_get_byteu(&ctx)];
#endif
                *tmpptr++ = (0xFFU << 24) | (r << 16) | (g << 8) | b;
            }
            s->palettes_count++;
            break;
        case SHOT_TAG:
            if (size < 4)
                return AVERROR_INVALIDDATA;
            new_pal = bytestream2_get_le32(&ctx);
            if (new_pal < s->palettes_count) {
                s->cur_palette = new_pal;
            } else
                av_log(avctx, AV_LOG_ERROR, "Invalid palette selected\n");
            break;
        case VGA__TAG:
            break;
        default:
            bytestream2_skip(&ctx, size);
            break;
        }
    }
    buf_size = bytestream2_get_bytes_left(&ctx);

    if (s->palettes_count <= 0) {
        av_log(s->avctx, AV_LOG_ERROR, "No palette found\n");
        return AVERROR_INVALIDDATA;
    }

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    if (!s->frame_size)
        s->frame_size = frame->linesize[0] * s->avctx->height;

    memcpy(frame->data[1],
           s->palettes + s->cur_palette * AVPALETTE_COUNT, AVPALETTE_SIZE);

    s->buf = ctx.buffer;
    s->size = buf_size;

    if (xan_wc3_decode_frame(s, frame) < 0)
        return AVERROR_INVALIDDATA;

    av_frame_unref(s->last_frame);
    if ((ret = av_frame_ref(s->last_frame, frame)) < 0)
        return ret;

    *got_frame = 1;

    /* always report that the buffer was completely consumed */
    return buf_size;
}
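
/*
 * Layout of an input packet as parsed above, reconstructed from the code: a
 * sequence of chunks, each a little-endian 32-bit tag followed by a
 * big-endian 32-bit size.  'PALT' chunks carry 256 RGB triplets that are run
 * through the gamma table and stored as a new palette; 'SHOT' chunks carry a
 * little-endian 32-bit index selecting one of the stored palettes; the
 * 'VGA ' tag marks the start of the actual video data, which is handed to
 * xan_wc3_decode_frame(); unknown chunks are skipped.
 */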

const FFCodec ff_xan_wc3_decoder = {
    .p.name         = "xan_wc3",
    CODEC_LONG_NAME("Wing Commander III / Xan"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_XAN_WC3,
    .priv_data_size = sizeof(XanContext),
    .init           = xan_decode_init,
    .close          = xan_decode_end,
    FF_CODEC_DECODE_CB(xan_decode_frame),
    .p.capabilities = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
};