FFmpeg
utvideoenc.c
1 /*
2  * Ut Video encoder
3  * Copyright (c) 2012 Jan Ekström
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Ut Video encoder
25  */
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/imgutils.h"
29 #include "libavutil/intreadwrite.h"
30 #include "libavutil/mem.h"
31 #include "libavutil/opt.h"
32 
33 #include "avcodec.h"
34 #include "codec_internal.h"
35 #include "encode.h"
36 #include "bswapdsp.h"
37 #include "bytestream.h"
38 #include "lossless_videoencdsp.h"
39 #include "put_bits.h"
40 #include "utvideo.h"
41 #include "huffman.h"
42 
43 typedef struct UtvideoContext {
44  const AVClass *class;
45  BswapDSPContext bdsp;
46  LLVidEncDSPContext llvidencdsp;
47 
48  uint32_t frame_info_size, flags;
49  int planes;
50  int slices;
51  int compression;
52  int frame_pred;
53 
54  ptrdiff_t slice_stride;
55  uint8_t *slice_bits, *slice_buffer[4];
56  int slice_bits_size;
57 } UtvideoContext;
58 
59 typedef struct HuffEntry {
60  uint16_t sym;
61  uint8_t len;
62  uint32_t code;
63 } HuffEntry;
64 
65 /* Compare huffman tree nodes */
66 static int ut_huff_cmp_len(const void *a, const void *b)
67 {
68  const HuffEntry *aa = a, *bb = b;
69  return (aa->len - bb->len)*256 + aa->sym - bb->sym;
70 }
71 
72 /* Compare huffentry symbols */
73 static int huff_cmp_sym(const void *a, const void *b)
74 {
75  const HuffEntry *aa = a, *bb = b;
76  return aa->sym - bb->sym;
77 }
78 
79 static av_cold int utvideo_encode_close(AVCodecContext *avctx)
80 {
81  UtvideoContext *c = avctx->priv_data;
82  int i;
83 
84  av_freep(&c->slice_bits);
85  for (i = 0; i < 4; i++)
86  av_freep(&c->slice_buffer[i]);
87 
88  return 0;
89 }
90 
91 static av_cold int utvideo_encode_init(AVCodecContext *avctx)
92 {
93  UtvideoContext *c = avctx->priv_data;
94  int i, subsampled_height;
95  uint32_t original_format;
96 
97  c->frame_info_size = 4;
98  c->slice_stride = FFALIGN(avctx->width, 32);
99 
100  switch (avctx->pix_fmt) {
101  case AV_PIX_FMT_GBRP:
102  c->planes = 3;
103  avctx->codec_tag = MKTAG('U', 'L', 'R', 'G');
104  original_format = UTVIDEO_RGB;
105  break;
106  case AV_PIX_FMT_GBRAP:
107  c->planes = 4;
108  avctx->codec_tag = MKTAG('U', 'L', 'R', 'A');
109  original_format = UTVIDEO_RGBA;
110  avctx->bits_per_coded_sample = 32;
111  break;
112  case AV_PIX_FMT_YUV420P:
113  if (avctx->width & 1 || avctx->height & 1) {
114  av_log(avctx, AV_LOG_ERROR,
115  "4:2:0 video requires even width and height.\n");
116  return AVERROR_INVALIDDATA;
117  }
118  c->planes = 3;
119  if (avctx->colorspace == AVCOL_SPC_BT709)
120  avctx->codec_tag = MKTAG('U', 'L', 'H', '0');
121  else
122  avctx->codec_tag = MKTAG('U', 'L', 'Y', '0');
123  original_format = UTVIDEO_420;
124  break;
125  case AV_PIX_FMT_YUV422P:
126  if (avctx->width & 1) {
127  av_log(avctx, AV_LOG_ERROR,
128  "4:2:2 video requires even width.\n");
129  return AVERROR_INVALIDDATA;
130  }
131  c->planes = 3;
132  if (avctx->colorspace == AVCOL_SPC_BT709)
133  avctx->codec_tag = MKTAG('U', 'L', 'H', '2');
134  else
135  avctx->codec_tag = MKTAG('U', 'L', 'Y', '2');
136  original_format = UTVIDEO_422;
137  break;
138  case AV_PIX_FMT_YUV444P:
139  c->planes = 3;
140  if (avctx->colorspace == AVCOL_SPC_BT709)
141  avctx->codec_tag = MKTAG('U', 'L', 'H', '4');
142  else
143  avctx->codec_tag = MKTAG('U', 'L', 'Y', '4');
144  original_format = UTVIDEO_444;
145  break;
146  default:
147  av_unreachable("Already checked via CODEC_PIXFMTS");
148  }
149 
150  ff_bswapdsp_init(&c->bdsp);
151  ff_llvidencdsp_init(&c->llvidencdsp);
152 
153  if (c->frame_pred == PRED_GRADIENT) {
154  av_log(avctx, AV_LOG_ERROR, "Gradient prediction is not supported.\n");
155  return AVERROR_PATCHWELCOME;
156  }
157 
158  /*
159  * Check the requested slice count for obviously invalid
160  * values (> 256 or negative).
161  */
162  if (avctx->slices > 256 || avctx->slices < 0) {
163  av_log(avctx, AV_LOG_ERROR,
164  "Slice count %d is not supported in Ut Video (theoretical range is 0-256).\n",
165  avctx->slices);
166  return AVERROR(EINVAL);
167  }
168 
169  /* Check that the slice count is not larger than the subsampled height */
170  subsampled_height = avctx->height >> av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_h;
171  if (avctx->slices > subsampled_height) {
172  av_log(avctx, AV_LOG_ERROR,
173  "Slice count %d is larger than the subsampling-applied height %d.\n",
174  avctx->slices, subsampled_height);
175  return AVERROR(EINVAL);
176  }
177 
178  /* extradata size is 4 * 32 bits */
179  avctx->extradata_size = 16;
180 
181  avctx->extradata = av_mallocz(avctx->extradata_size +
182  AV_INPUT_BUFFER_PADDING_SIZE);
183 
184  if (!avctx->extradata) {
185  av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
186  return AVERROR(ENOMEM);
187  }
188 
189  for (i = 0; i < c->planes; i++) {
190  c->slice_buffer[i] = av_malloc(c->slice_stride * (avctx->height + 2) +
191  AV_INPUT_BUFFER_PADDING_SIZE);
192  if (!c->slice_buffer[i]) {
193  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 1.\n");
194  return AVERROR(ENOMEM);
195  }
196  }
197 
198  /*
199  * Set the version of the encoder.
200  * Last byte is "implementation ID", which is
201  * obtained from the creator of the format.
202  * Libavcodec has been assigned the ID 0xF0.
203  */
204  AV_WB32(avctx->extradata, MKTAG(1, 0, 0, 0xF0));
205 
206  /*
207  * Set the "original format"
208  * Not used for anything during decoding.
209  */
210  AV_WL32(avctx->extradata + 4, original_format);
211 
212  /* Write 4 as the 'frame info size' */
213  AV_WL32(avctx->extradata + 8, c->frame_info_size);
214 
215  /*
216  * Set how many slices are going to be used.
217  * By default, multiple slices are used, depending on the subsampled height.
218  * This enables multithreading in the official decoder.
219  */
220  if (!avctx->slices) {
221  c->slices = subsampled_height / 120;
222 
223  if (!c->slices)
224  c->slices = 1;
225  else if (c->slices > 256)
226  c->slices = 256;
227  } else {
228  c->slices = avctx->slices;
229  }
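 /*
  * For example, with the defaults above, 1920x1080 4:2:0 input has a
  * subsampled height of 540, so 540 / 120 = 4 slices are used unless the
  * caller requested a specific count via avctx->slices.
  */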
230 
231  /* Set compression mode */
232  c->compression = COMP_HUFF;
233 
234  /*
235  * Set the encoding flags:
236  * - Slice count minus 1
237  * - Interlaced encoding mode flag, set to zero for now.
238  * - Compression mode (none/huff)
239  * And write the flags.
240  */
241  c->flags = (c->slices - 1U) << 24;
242  c->flags |= 0 << 11; // bit field to signal interlaced encoding mode
243  c->flags |= c->compression;
244 
245  AV_WL32(avctx->extradata + 12, c->flags);
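 /*
  * Worked example of the 16-byte extradata, assuming 4 slices, progressive
  * input and Huffman compression: bytes 0-3 carry the version/implementation
  * tag written above, bytes 4-7 the original_format as LE32, bytes 8-11 the
  * frame_info_size (04 00 00 00), and bytes 12-15 the flags, i.e.
  * ((4 - 1) << 24) | COMP_HUFF = 0x03000000 | COMP_HUFF, stored LE.
  */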
246 
247  return 0;
248 }
249 
250 static void mangle_rgb_planes(uint8_t *dst[4], ptrdiff_t dst_stride,
251  uint8_t *const src[4], int planes, const int stride[4],
252  int width, int height)
253 {
254  int i, j;
255  int k = 2 * dst_stride;
256  const uint8_t *sg = src[0];
257  const uint8_t *sb = src[1];
258  const uint8_t *sr = src[2];
259  const uint8_t *sa = src[3];
260  unsigned int g;
261 
262  for (j = 0; j < height; j++) {
263  if (planes == 3) {
264  for (i = 0; i < width; i++) {
265  g = sg[i];
266  dst[0][k] = g;
267  g += 0x80;
268  dst[1][k] = sb[i] - g;
269  dst[2][k] = sr[i] - g;
270  k++;
271  }
272  } else {
273  for (i = 0; i < width; i++) {
274  g = sg[i];
275  dst[0][k] = g;
276  g += 0x80;
277  dst[1][k] = sb[i] - g;
278  dst[2][k] = sr[i] - g;
279  dst[3][k] = sa[i];
280  k++;
281  }
282  sa += stride[3];
283  }
284  k += dst_stride - width;
285  sg += stride[0];
286  sb += stride[1];
287  sr += stride[2];
288  }
289 }
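/*
 * Illustration of the mangling above for one pixel with G=100, B=50, R=200
 * and planes == 3: dst[0] stores G (100), dst[1] stores
 * B - (G + 0x80) = 50 - 228 = -178 = 0x4E (mod 256), and dst[2] stores
 * R - (G + 0x80) = 200 - 228 = -28 = 0xE4 (mod 256). With planes == 4 the
 * alpha plane is copied through unchanged. Writing starts at
 * k = 2 * dst_stride, leaving two lines of headroom at the start of each
 * slice_buffer; encode_plane() later reads the mangled data from that offset
 * while writing its prediction output from the start of the same buffer.
 */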
290 
291 #undef A
292 #undef B
293 
294 /* Write data to a plane with median prediction */
295 static void median_predict(UtvideoContext *c, const uint8_t *src, uint8_t *dst,
296  ptrdiff_t stride, int width, int height)
297 {
298  int i, j;
299  int A, B;
300  uint8_t prev;
301 
302  /* First line uses left neighbour prediction */
303  prev = 0x80; /* Set the initial value */
304  for (i = 0; i < width; i++) {
305  *dst++ = src[i] - prev;
306  prev = src[i];
307  }
308 
309  if (height == 1)
310  return;
311 
312  src += stride;
313 
314  /*
315  * Second line uses top prediction for the first sample,
316  * and median for the rest.
317  */
318  A = B = 0;
319 
320  /* Rest of the coded part uses median prediction */
321  for (j = 1; j < height; j++) {
322  c->llvidencdsp.sub_median_pred(dst, src - stride, src, width, &A, &B);
323  dst += width;
324  src += stride;
325  }
326 }
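/*
 * Example for the first (left-predicted) line: with the initial prev of 0x80,
 * the samples {0x80, 0x82, 0x7F} are stored as the residuals
 * {0x00, 0x02, 0xFD}. From the second line on, sub_median_pred() emits the
 * difference against the median of the left, top and (left + top - topleft)
 * predictors, carrying A and B across rows.
 */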
327 
328 /* Count the usage of values in a plane */
329 static void count_usage(uint8_t *src, int width,
330  int height, uint64_t *counts)
331 {
332  int i, j;
333 
334  for (j = 0; j < height; j++) {
335  for (i = 0; i < width; i++) {
336  counts[src[i]]++;
337  }
338  src += width;
339  }
340 }
341 
342 /* Calculate the actual huffman codes from the code lengths */
343 static void calculate_codes(HuffEntry *he)
344 {
345  int last, i;
346  uint32_t code;
347 
348  qsort(he, 256, sizeof(*he), ut_huff_cmp_len);
349 
350  last = 255;
351  while (he[last].len == 255 && last)
352  last--;
353 
354  code = 0;
355  for (i = last; i >= 0; i--) {
356  he[i].code = code >> (32 - he[i].len);
357  code += 0x80000000u >> (he[i].len - 1);
358  }
359 
360  qsort(he, 256, sizeof(*he), huff_cmp_sym);
361 }
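/*
 * Small worked example of the canonical code assignment above: for four used
 * symbols a < b < c < d with lengths 1, 2, 3 and 3, the loop walks from the
 * longest entry upwards and yields d = 000, c = 001, b = 01 and a = 1, i.e.
 * longer codes occupy the numerically smaller end of the code space. Entries
 * with length 255 (symbols that never occur) are excluded via the 'last' scan.
 */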
362 
363 /* Write huffman bit codes to a memory block */
364 static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size,
365  int width, int height, HuffEntry *he)
366 {
367  PutBitContext pb;
368  int i, j;
369  int count;
370 
371  init_put_bits(&pb, dst, dst_size);
372 
373  /* Write the codes */
374  for (j = 0; j < height; j++) {
375  for (i = 0; i < width; i++)
376  put_bits(&pb, he[src[i]].len, he[src[i]].code);
377 
378  src += width;
379  }
380 
381  /* Pad output to a 32-bit boundary */
382  count = put_bits_count(&pb) & 0x1F;
383 
384  if (count)
385  put_bits(&pb, 32 - count, 0);
386 
387  /* Flush the rest with zeroes */
388  flush_put_bits(&pb);
389 
390  /* Return the number of bytes written */
391  return put_bytes_output(&pb);
392 }
393 
394 static int encode_plane(AVCodecContext *avctx, const uint8_t *src,
395  uint8_t *dst, ptrdiff_t stride, int plane_no,
396  int width, int height, PutByteContext *pb)
397 {
398  UtvideoContext *c = avctx->priv_data;
399  uint8_t lengths[256];
400  uint64_t counts[256] = { 0 };
401 
402  HuffEntry he[256];
403 
404  uint32_t offset = 0, slice_len = 0;
405  const int cmask = ~(!plane_no && avctx->pix_fmt == AV_PIX_FMT_YUV420P);
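 /*
  * cmask rounds the slice boundaries: for the luma plane of 4:2:0 input it is
  * ~1, so every slice ends on an even line and stays aligned with the
  * half-height chroma slices; for all other planes and formats it is ~0 and
  * leaves the boundary untouched.
  */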
406  int i, sstart, send = 0;
407  int symbol;
408  int ret;
409 
410  /* Do prediction / make planes */
411  switch (c->frame_pred) {
412  case PRED_NONE:
413  for (i = 0; i < c->slices; i++) {
414  sstart = send;
415  send = height * (i + 1) / c->slices & cmask;
416  av_image_copy_plane(dst + sstart * width, width,
417  src + sstart * stride, stride,
418  width, send - sstart);
419  }
420  break;
421  case PRED_LEFT:
422  for (i = 0; i < c->slices; i++) {
423  sstart = send;
424  send = height * (i + 1) / c->slices & cmask;
425  c->llvidencdsp.sub_left_predict(dst + sstart * width, src + sstart * stride, stride, width, send - sstart);
426  }
427  break;
428  case PRED_MEDIAN:
429  for (i = 0; i < c->slices; i++) {
430  sstart = send;
431  send = height * (i + 1) / c->slices & cmask;
432  median_predict(c, src + sstart * stride, dst + sstart * width,
433  stride, width, send - sstart);
434  }
435  break;
436  default:
437  av_log(avctx, AV_LOG_ERROR, "Unknown prediction mode: %d\n",
438  c->frame_pred);
439  return AVERROR_OPTION_NOT_FOUND;
440  }
441 
442  /* Count the usage of values */
443  count_usage(dst, width, height, counts);
444 
445  /* Check for a special case where only one symbol was used */
446  for (symbol = 0; symbol < 256; symbol++) {
447  /* If a non-zero count is found, see if it matches width * height */
448  if (counts[symbol]) {
449  /* Special case if only one symbol was used */
450  if (counts[symbol] == width * (int64_t)height) {
451  /*
452  * Write a zero for the single symbol
453  * used in the plane, else 0xFF.
454  */
455  for (i = 0; i < 256; i++) {
456  if (i == symbol)
457  bytestream2_put_byte(pb, 0);
458  else
459  bytestream2_put_byte(pb, 0xFF);
460  }
461 
462  /* Write zeroes for lengths */
463  for (i = 0; i < c->slices; i++)
464  bytestream2_put_le32(pb, 0);
465 
466  /* And that's all for that plane folks */
467  return 0;
468  }
469  break;
470  }
471  }
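 /*
  * Example of the short-circuit above: if every sample of the plane is 0x80,
  * the plane header becomes 256 bytes that are all 0xFF except for a single
  * 0x00 at index 0x80, followed by c->slices zeroed LE32 slice-offset
  * entries, and no bitstream data is written for the plane at all.
  */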
472 
473  /* Calculate huffman lengths */
474  if ((ret = ff_huff_gen_len_table(lengths, counts, 256, 1)) < 0)
475  return ret;
476 
477  /*
478  * Write the plane's header into the output packet:
479  * - huffman code lengths (256 bytes)
480  * - slice end offsets (derived from the slice lengths)
481  */
482  for (i = 0; i < 256; i++) {
483  bytestream2_put_byte(pb, lengths[i]);
484 
485  he[i].len = lengths[i];
486  he[i].sym = i;
487  }
488 
489  /* Calculate the huffman codes themselves */
490  calculate_codes(he);
491 
492  send = 0;
493  for (i = 0; i < c->slices; i++) {
494  sstart = send;
495  send = height * (i + 1) / c->slices & cmask;
496 
497  /*
498  * Write the huffman codes to a buffer,
499  * get the offset in bytes.
500  */
501  offset += write_huff_codes(dst + sstart * width, c->slice_bits,
502  width * height + 4, width,
503  send - sstart, he);
504 
505  slice_len = offset - slice_len;
506 
507  /* Byteswap the written huffman codes */
508  c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
509  (uint32_t *) c->slice_bits,
510  slice_len >> 2);
511 
512  /* Write the offset to the stream */
513  bytestream2_put_le32(pb, offset);
514 
515  /* Seek to the data part of the packet */
516  bytestream2_seek_p(pb, 4 * (c->slices - i - 1) +
517  offset - slice_len, SEEK_CUR);
518 
519  /* Write the slices' data into the output packet */
520  bytestream2_put_buffer(pb, c->slice_bits, slice_len);
521 
522  /* Seek back to the slice offsets */
523  bytestream2_seek_p(pb, -4 * (c->slices - i - 1) - offset,
524  SEEK_CUR);
525 
526  slice_len = offset;
527  }
528 
529  /* And at the end seek to the end of written slice(s) */
530  bytestream2_seek_p(pb, offset, SEEK_CUR);
531 
532  return 0;
533 }
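/*
 * To summarize the plane layout produced above: 256 Huffman code lengths,
 * then c->slices little-endian 32-bit cumulative end offsets, then the
 * slices' bitstreams, each padded to a 32-bit boundary and byteswapped into
 * the little-endian 32-bit word order the Ut Video bitstream uses. The seek
 * dance in the loop writes each slice's data past the not-yet-complete offset
 * table and then jumps back to fill in the next offset.
 */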
534 
535 static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
536  const AVFrame *pic, int *got_packet)
537 {
538  UtvideoContext *c = avctx->priv_data;
539  PutByteContext pb;
540 
541  uint32_t frame_info;
542 
543  uint8_t *dst;
544 
545  int width = avctx->width, height = avctx->height;
546  int i, ret = 0;
547 
548  /* Allocate a new packet if needed, and point dst at its data */
549  ret = ff_alloc_packet(avctx, pkt, (256 + 4 * c->slices + width * height)
550  * c->planes + 4);
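 /*
  * The size passed above is a worst-case bound: for each plane, 256 bytes of
  * Huffman code lengths, 4 * c->slices bytes of slice offsets and
  * width * height bytes reserved for the coded samples, plus 4 trailing bytes
  * for the frame_info word.
  */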
551 
552  if (ret < 0)
553  return ret;
554 
555  dst = pkt->data;
556 
557  bytestream2_init_writer(&pb, dst, pkt->size);
558 
559  av_fast_padded_malloc(&c->slice_bits, &c->slice_bits_size, width * height + 4);
560 
561  if (!c->slice_bits) {
562  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 2.\n");
563  return AVERROR(ENOMEM);
564  }
565 
566  /* In case of RGB, mangle the planes to Ut Video's format */
567  if (avctx->pix_fmt == AV_PIX_FMT_GBRAP || avctx->pix_fmt == AV_PIX_FMT_GBRP)
568  mangle_rgb_planes(c->slice_buffer, c->slice_stride, pic->data,
569  c->planes, pic->linesize, width, height);
570 
571  /* Deal with the planes */
572  switch (avctx->pix_fmt) {
573  case AV_PIX_FMT_GBRP:
574  case AV_PIX_FMT_GBRAP:
575  for (i = 0; i < c->planes; i++) {
576  ret = encode_plane(avctx, c->slice_buffer[i] + 2 * c->slice_stride,
577  c->slice_buffer[i], c->slice_stride, i,
578  width, height, &pb);
579 
580  if (ret) {
581  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
582  return ret;
583  }
584  }
585  break;
586  case AV_PIX_FMT_YUV444P:
587  for (i = 0; i < c->planes; i++) {
588  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
589  pic->linesize[i], i, width, height, &pb);
590 
591  if (ret) {
592  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
593  return ret;
594  }
595  }
596  break;
597  case AV_PIX_FMT_YUV422P:
598  for (i = 0; i < c->planes; i++) {
599  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
600  pic->linesize[i], i, width >> !!i, height, &pb);
601 
602  if (ret) {
603  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
604  return ret;
605  }
606  }
607  break;
608  case AV_PIX_FMT_YUV420P:
609  for (i = 0; i < c->planes; i++) {
610  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
611  pic->linesize[i], i, width >> !!i, height >> !!i,
612  &pb);
613 
614  if (ret) {
615  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
616  return ret;
617  }
618  }
619  break;
620  default:
621  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
622  avctx->pix_fmt);
623  return AVERROR_INVALIDDATA;
624  }
625 
626  /*
627  * Write frame information (LE 32-bit unsigned)
628  * into the output packet.
629  * Contains the prediction method.
630  */
631  frame_info = c->frame_pred << 8;
632  bytestream2_put_le32(&pb, frame_info);
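 /*
  * With the default "pred" option this is PRED_LEFT << 8; assuming
  * PRED_LEFT == 1 (its position in the utvideo.h enum), the stored value
  * is 0x00000100.
  */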
633 
634  pkt->size = bytestream2_tell_p(&pb);
635 
636  /* Packet should be done */
637  *got_packet = 1;
638 
639  return 0;
640 }
641 
642 #define OFFSET(x) offsetof(UtvideoContext, x)
643 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
644 static const AVOption options[] = {
645  { "pred", "Prediction method", OFFSET(frame_pred), AV_OPT_TYPE_INT, { .i64 = PRED_LEFT }, PRED_NONE, PRED_MEDIAN, VE, .unit = "pred" },
646  { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_NONE }, INT_MIN, INT_MAX, VE, .unit = "pred" },
647  { "left", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_LEFT }, INT_MIN, INT_MAX, VE, .unit = "pred" },
648  { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_MEDIAN }, INT_MIN, INT_MAX, VE, .unit = "pred" },
649 
650  { NULL},
651 };
652 
653 static const AVClass utvideo_class = {
654  .class_name = "utvideo",
655  .item_name = av_default_item_name,
656  .option = options,
657  .version = LIBAVUTIL_VERSION_INT,
658 };
659 
660 const FFCodec ff_utvideo_encoder = {
661  .p.name = "utvideo",
662  CODEC_LONG_NAME("Ut Video"),
663  .p.type = AVMEDIA_TYPE_VIDEO,
664  .p.id = AV_CODEC_ID_UTVIDEO,
665  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
666  AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
667  .priv_data_size = sizeof(UtvideoContext),
668  .p.priv_class = &utvideo_class,
669  .init = utvideo_encode_init,
670  FF_CODEC_ENCODE_CB(utvideo_encode_frame),
671  .close = utvideo_encode_close,
672  CODEC_PIXFMTS(AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_YUV422P,
673  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P),
674  .color_ranges = AVCOL_RANGE_MPEG,
675  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
676 };