a64multienc.c
/*
 * a64 video encoder - multicolor modes
 * Copyright (c) 2009 Tobias Bindhammer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * a64 video encoder - multicolor modes
 */

#include "config_components.h"

#include "a64colors.h"
#include "a64tables.h"
#include "codec_internal.h"
#include "elbg.h"
#include "encode.h"
#include "libavutil/avassert.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mem.h"

#define DITHERSTEPS   8
#define CHARSET_CHARS 256
#define INTERLACED    1
#define CROP_SCREENS  1

#define C64XRES 320
#define C64YRES 200

typedef struct A64Context {
    /* variables for multicolor modes */
    struct ELBGContext *elbg;
    AVLFG randctx;
    int mc_lifetime;
    int mc_use_5col;
    unsigned mc_frame_counter;
    int *mc_meta_charset;
    int *mc_charmap;
    int *mc_best_cb;
    int mc_luma_vals[5];
    uint8_t *mc_colram;
    uint8_t *mc_palette;
    int mc_pal_size;

    /* pts of the next packet that will be output */
    int64_t next_pts;
} A64Context;

/* gray gradient */
static const uint8_t mc_colors[5]={0x0,0xb,0xc,0xf,0x1};

/* other possible gradients - to be tested */
//static const uint8_t mc_colors[5]={0x0,0x8,0xa,0xf,0x7};
//static const uint8_t mc_colors[5]={0x0,0x9,0x8,0xa,0x3};

static void to_meta_with_crop(AVCodecContext *avctx,
                              const AVFrame *p, int *dest)
{
    int blockx, blocky, x, y;
    int luma = 0;
    int height = FFMIN(avctx->height, C64YRES);
    int width  = FFMIN(avctx->width , C64XRES);
    const uint8_t *src = p->data[0];

    for (blocky = 0; blocky < C64YRES; blocky += 8) {
        for (blockx = 0; blockx < C64XRES; blockx += 8) {
            for (y = blocky; y < blocky + 8 && y < C64YRES; y++) {
                for (x = blockx; x < blockx + 8 && x < C64XRES; x += 2) {
                    if(x < width && y < height) {
                        if (x + 1 < width) {
                            /* build average over 2 pixels */
                            luma = (src[(x + 0 + y * p->linesize[0])] +
                                    src[(x + 1 + y * p->linesize[0])]) / 2;
                        } else {
                            luma = src[(x + y * p->linesize[0])];
                        }
                        /* write blocks as linear data now so they are suitable for elbg */
                        dest[0] = luma;
                    }
                    dest++;
                }
            }
        }
    }
}
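
/* Note (not part of the original file): the metadata sizes used throughout
 * this encoder follow directly from the constants above.  to_meta_with_crop()
 * emits 4 double-wide luma samples per row of an 8x8 char, i.e. 8 * 4 = 32
 * ints per char; a C64 screen is (320 / 8) * (200 / 8) = 40 * 25 = 1000 chars,
 * hence 1000 * 32 = 32000 ints per frame.  These are the "32000", "1000" and
 * dimension-32 values used for mc_meta_charset, mc_charmap and the
 * avpriv_elbg_do() call further below. */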

static void render_charset(AVCodecContext *avctx, uint8_t *charset,
                           uint8_t *colrammap)
{
    A64Context *c = avctx->priv_data;
    uint8_t row1, row2;
    int charpos, x, y;
    int a, b;
    uint8_t pix;
    int lowdiff, highdiff;
    int *best_cb = c->mc_best_cb;
    uint8_t index1[256];
    uint8_t index2[256];
    uint8_t dither[256];
    int i;
    int distance;

    /* Generate lookup-tables for dither and index before looping.
     * This code relies on c->mc_luma_vals[c->mc_pal_size - 1] being
     * the maximum of all the mc_luma_vals values and on the minimum
     * being zero; this ensures that dither is properly initialized. */
    i = 0;
    for (a=0; a < 256; a++) {
        if(i < c->mc_pal_size -1 && a == c->mc_luma_vals[i + 1]) {
            distance = c->mc_luma_vals[i + 1] - c->mc_luma_vals[i];
            for(b = 0; b <= distance; b++) {
                dither[c->mc_luma_vals[i] + b] = b * (DITHERSTEPS - 1) / distance;
            }
            i++;
        }
        if(i >= c->mc_pal_size - 1) dither[a] = 0;
        index1[a] = i;
        index2[a] = FFMIN(i + 1, c->mc_pal_size - 1);
    }

    /* and render charset */
    for (charpos = 0; charpos < CHARSET_CHARS; charpos++) {
        lowdiff  = 0;
        highdiff = 0;
        for (y = 0; y < 8; y++) {
            row1 = 0; row2 = 0;
            for (x = 0; x < 4; x++) {
                pix = best_cb[y * 4 + x];

                /* accumulate error for brightest/darkest color */
                if (index1[pix] >= 3)
                    highdiff += pix - c->mc_luma_vals[3];
                if (index1[pix] < 1)
                    lowdiff += c->mc_luma_vals[1] - pix;

                row1 <<= 2;

                if (INTERLACED) {
                    row2 <<= 2;
                    if (interlaced_dither_patterns[dither[pix]][(y & 3) * 2 + 0][x & 3])
                        row1 |= 3-(index2[pix] & 3);
                    else
                        row1 |= 3-(index1[pix] & 3);

                    if (interlaced_dither_patterns[dither[pix]][(y & 3) * 2 + 1][x & 3])
                        row2 |= 3-(index2[pix] & 3);
                    else
                        row2 |= 3-(index1[pix] & 3);
                }
                else {
                    if (multi_dither_patterns[dither[pix]][(y & 3)][x & 3])
                        row1 |= 3-(index2[pix] & 3);
                    else
                        row1 |= 3-(index1[pix] & 3);
                }
            }
            charset[y+0x000] = row1;
            if (INTERLACED) charset[y+0x800] = row2;
        }
        /* do we need to adjust pixels? */
        if (highdiff > 0 && lowdiff > 0 && c->mc_use_5col) {
            if (lowdiff > highdiff) {
                for (x = 0; x < 32; x++)
                    best_cb[x] = FFMIN(c->mc_luma_vals[3], best_cb[x]);
            } else {
                for (x = 0; x < 32; x++)
                    best_cb[x] = FFMAX(c->mc_luma_vals[1], best_cb[x]);
            }
            charpos--;          /* redo now adjusted char */
            /* no adjustment needed, all fine */
        } else {
            /* advance pointers */
            best_cb += 32;
            charset += 8;

            /* remember colorram value */
            colrammap[charpos] = (highdiff > 0);
        }
    }
}

static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
{
    A64Context *c = avctx->priv_data;

    avpriv_elbg_free(&c->elbg);

    av_freep(&c->mc_meta_charset);
    av_freep(&c->mc_best_cb);
    av_freep(&c->mc_charmap);
    av_freep(&c->mc_colram);
    return 0;
}

static av_cold int a64multi_encode_init(AVCodecContext *avctx)
{
    A64Context *c = avctx->priv_data;
    int a;
    av_lfg_init(&c->randctx, 1);

    if (avctx->global_quality < 1) {
        c->mc_lifetime = 4;
    } else {
        c->mc_lifetime = avctx->global_quality / FF_QP2LAMBDA;
    }

    av_log(avctx, AV_LOG_INFO, "charset lifetime set to %d frame(s)\n", c->mc_lifetime);

    c->mc_frame_counter = 0;
    c->mc_use_5col      = avctx->codec->id == AV_CODEC_ID_A64_MULTI5;
    c->mc_pal_size      = 4 + c->mc_use_5col;

    /* precalc luma values for later use */
    for (a = 0; a < c->mc_pal_size; a++) {
        c->mc_luma_vals[a]=a64_palette[mc_colors[a]][0] * 0.30 +
                           a64_palette[mc_colors[a]][1] * 0.59 +
                           a64_palette[mc_colors[a]][2] * 0.11;
    }

    if (!(c->mc_meta_charset = av_calloc(c->mc_lifetime, 32000 * sizeof(int))) ||
        !(c->mc_best_cb      = av_malloc(CHARSET_CHARS * 32 * sizeof(int)))    ||
        !(c->mc_charmap      = av_calloc(c->mc_lifetime, 1000 * sizeof(int)))  ||
        !(c->mc_colram       = av_mallocz(CHARSET_CHARS * sizeof(uint8_t)))) {
        av_log(avctx, AV_LOG_ERROR, "Failed to allocate buffer memory.\n");
        return AVERROR(ENOMEM);
    }

    /* set up extradata */
    if (!(avctx->extradata = av_mallocz(8 * 4 + AV_INPUT_BUFFER_PADDING_SIZE))) {
        av_log(avctx, AV_LOG_ERROR, "Failed to allocate memory for extradata.\n");
        return AVERROR(ENOMEM);
    }
    avctx->extradata_size = 8 * 4;
    AV_WB32(avctx->extradata, c->mc_lifetime);
    AV_WB32(avctx->extradata + 16, INTERLACED);

    if (!avctx->codec_tag)
        avctx->codec_tag = AV_RL32("a64m");

    c->next_pts = AV_NOPTS_VALUE;

    return 0;
}

static void a64_compress_colram(unsigned char *buf, int *charmap, uint8_t *colram)
{
    int a;
    uint8_t temp;
    /* only needs to be done in 5col mode */
    /* XXX could be squeezed to 0x80 bytes */
    for (a = 0; a < 256; a++) {
        temp  = colram[charmap[a + 0x000]] << 0;
        temp |= colram[charmap[a + 0x100]] << 1;
        temp |= colram[charmap[a + 0x200]] << 2;
        if (a < 0xe8) temp |= colram[charmap[a + 0x300]] << 3;
        buf[a] = temp << 2;
    }
}
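
/* Illustration (not part of FFmpeg): a64_compress_colram() above packs one
 * colram flag per screen position into bits 2..5 of a 0x100-byte block, where
 * byte a carries the flags for screen positions a, a + 0x100, a + 0x200 and
 * a + 0x300 (the last one only while a < 0xe8, since a screen holds 1000
 * chars).  A hypothetical reader-side helper could recover the flag for a
 * given position like this:
 *
 * @code
 * static int a64_colram_flag(const uint8_t *buf, int pos)
 * {
 *     // pos is 0..999, buf is the 0x100-byte block written above
 *     return (buf[pos & 0xff] >> (2 + (pos >> 8))) & 1;
 * }
 * @endcode
 */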

static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                 const AVFrame *p, int *got_packet)
{
    A64Context *c = avctx->priv_data;

    int frame;
    int x, y;
    int b_height;
    int b_width;

    int req_size, ret;
    uint8_t *buf = NULL;

    int *charmap     = c->mc_charmap;
    uint8_t *colram  = c->mc_colram;
    int *meta        = c->mc_meta_charset;
    int *best_cb     = c->mc_best_cb;

    int charset_size = 0x800 * (INTERLACED + 1);
    int colram_size  = 0x100 * c->mc_use_5col;
    int screen_size;

    if(CROP_SCREENS) {
        b_height = FFMIN(avctx->height,C64YRES) >> 3;
        b_width  = FFMIN(avctx->width ,C64XRES) >> 3;
        screen_size = b_width * b_height;
    } else {
        b_height = C64YRES >> 3;
        b_width  = C64XRES >> 3;
        screen_size = 0x400;
    }

    /* no data, means end encoding asap */
    if (!p) {
        /* all done, end encoding */
        if (!c->mc_lifetime) return 0;
        /* no more frames in queue, prepare to flush remaining frames */
        if (!c->mc_frame_counter) {
            c->mc_lifetime = 0;
        }
        /* still frames in queue so limit lifetime to remaining frames */
        else c->mc_lifetime = c->mc_frame_counter;
        /* still new data available */
    } else {
        /* fill up mc_meta_charset with data until lifetime exceeds */
        if (c->mc_frame_counter < c->mc_lifetime) {
            to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
            c->mc_frame_counter++;
            if (c->next_pts == AV_NOPTS_VALUE)
                c->next_pts = p->pts;
            /* lifetime is not reached so wait for next frame first */
            return 0;
        }
    }

    /* lifetime reached so now convert X frames at once */
    if (c->mc_frame_counter == c->mc_lifetime) {
        req_size = 0;
        /* any frames to encode? */
        if (c->mc_lifetime) {
            int alloc_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
            if ((ret = ff_get_encode_buffer(avctx, pkt, alloc_size, 0)) < 0)
                return ret;
            buf = pkt->data;

            /* calc optimal new charset + charmaps */
            ret = avpriv_elbg_do(&c->elbg, meta, 32, 1000 * c->mc_lifetime,
                                 best_cb, CHARSET_CHARS, 50, charmap, &c->randctx, 0);
            if (ret < 0)
                return ret;

            /* create colorram map and a c64 readable charset */
            render_charset(avctx, buf, colram);

            /* advance pointers */
            buf      += charset_size;
            req_size += charset_size;
        }

        /* write x frames to buf */
        for (frame = 0; frame < c->mc_lifetime; frame++) {
            /* copy charmap to buf. buf is uchar*, charmap is int*, so no memcpy here, sorry */
            for (y = 0; y < b_height; y++) {
                for (x = 0; x < b_width; x++) {
                    buf[y * b_width + x] = charmap[y * b_width + x];
                }
            }
            /* advance pointers */
            buf += screen_size;
            req_size += screen_size;

            /* compress and copy colram to buf */
            if (c->mc_use_5col) {
                a64_compress_colram(buf, charmap, colram);
                /* advance pointers */
                buf += colram_size;
                req_size += colram_size;
            }

            /* advance to next charmap */
            charmap += 1000;
        }

        AV_WB32(avctx->extradata + 4,  c->mc_frame_counter);
        AV_WB32(avctx->extradata + 8,  charset_size);
        AV_WB32(avctx->extradata + 12, screen_size + colram_size);

        /* reset counter */
        c->mc_frame_counter = 0;

        pkt->pts = pkt->dts = c->next_pts;
        c->next_pts         = AV_NOPTS_VALUE;

        av_assert0(pkt->size == req_size);
        *got_packet = !!req_size;
    }
    return 0;
}

#if CONFIG_A64MULTI_ENCODER
const FFCodec ff_a64multi_encoder = {
    .p.name         = "a64multi",
    CODEC_LONG_NAME("Multicolor charset for Commodore 64"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_A64_MULTI,
    .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
    .priv_data_size = sizeof(A64Context),
    .init           = a64multi_encode_init,
    FF_CODEC_ENCODE_CB(a64multi_encode_frame),
    .close          = a64multi_close_encoder,
    .p.pix_fmts     = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
};
#endif
#if CONFIG_A64MULTI5_ENCODER
const FFCodec ff_a64multi5_encoder = {
    .p.name         = "a64multi5",
    CODEC_LONG_NAME("Multicolor charset for Commodore 64, extended with 5th color (colram)"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_A64_MULTI5,
    .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
    .priv_data_size = sizeof(A64Context),
    .init           = a64multi_encode_init,
    FF_CODEC_ENCODE_CB(a64multi_encode_frame),
    .close          = a64multi_close_encoder,
    .p.pix_fmts     = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
};
#endif
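
/**
 * Minimal usage sketch (not part of this file; error handling abbreviated):
 * opening the "a64multi" encoder through the public libavcodec API and feeding
 * it GRAY8 frames.  The charset lifetime is chosen via global_quality, since
 * a64multi_encode_init() above derives mc_lifetime from it.
 *
 * @code
 * #include <libavcodec/avcodec.h>
 * #include <libavutil/avutil.h>
 * #include <libavutil/frame.h>
 *
 * static int encode_example(void)
 * {
 *     const AVCodec *codec = avcodec_find_encoder_by_name("a64multi");
 *     AVCodecContext *ctx  = avcodec_alloc_context3(codec);
 *     AVFrame *frame       = av_frame_alloc();
 *     AVPacket *pkt        = av_packet_alloc();
 *     int ret;
 *
 *     if (!codec || !ctx || !frame || !pkt)
 *         return AVERROR(ENOMEM);
 *
 *     ctx->width          = 320;
 *     ctx->height         = 200;
 *     ctx->pix_fmt        = AV_PIX_FMT_GRAY8;
 *     ctx->time_base      = (AVRational){ 1, 25 };
 *     ctx->global_quality = 4 * FF_QP2LAMBDA;  // charset lifetime of 4 frames
 *
 *     if ((ret = avcodec_open2(ctx, codec, NULL)) < 0)
 *         return ret;
 *
 *     frame->width  = ctx->width;
 *     frame->height = ctx->height;
 *     frame->format = ctx->pix_fmt;
 *     if ((ret = av_frame_get_buffer(frame, 0)) < 0)
 *         return ret;
 *     // ... fill frame->data[0] with grayscale pixels and set frame->pts ...
 *
 *     // The encoder buffers mc_lifetime frames (AV_CODEC_CAP_DELAY), so
 *     // packets may only appear after several frames and the NULL flush.
 *     avcodec_send_frame(ctx, frame);
 *     avcodec_send_frame(ctx, NULL);            // flush
 *     while (avcodec_receive_packet(ctx, pkt) == 0) {
 *         // ... write pkt->data, pkt->size ...
 *         av_packet_unref(pkt);
 *     }
 *
 *     av_packet_free(&pkt);
 *     av_frame_free(&frame);
 *     avcodec_free_context(&ctx);
 *     return 0;
 * }
 * @endcode
 */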