tiff.c
1 /*
2  * Copyright (c) 2006 Konstantin Shishkov
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * TIFF image decoder
24  * @author Konstantin Shishkov
25  */
26 
27 #include "config.h"
28 #if CONFIG_ZLIB
29 #include <zlib.h>
30 #endif
31 #if CONFIG_LZMA
32 #define LZMA_API_STATIC
33 #include <lzma.h>
34 #endif
35 
36 #include <float.h>
37 
38 #include "libavutil/attributes.h"
39 #include "libavutil/avstring.h"
40 #include "libavutil/error.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/mem.h"
43 #include "libavutil/opt.h"
44 #include "libavutil/reverse.h"
45 #include "avcodec.h"
46 #include "bytestream.h"
47 #include "codec_internal.h"
48 #include "decode.h"
49 #include "faxcompr.h"
50 #include "lzw.h"
51 #include "tiff.h"
52 #include "tiff_common.h"
53 #include "tiff_data.h"
54 #include "mjpegdec.h"
55 #include "thread.h"
56 #include "get_bits.h"
57 
58 typedef struct TiffContext {
59  AVClass *class;
60  AVCodecContext *avctx;
61  GetByteContext gb;
62 
63  /* JPEG decoding for DNG */
64  AVCodecContext *avctx_mjpeg; // wrapper context for MJPEG
65  AVPacket *jpkt; // encoded JPEG tile
66  AVFrame *jpgframe; // decoded JPEG tile
67 
68  int get_subimage;
69  uint16_t get_page;
70  int get_thumbnail;
71 
72  enum TiffType tiff_type;
73  int width, height;
74  unsigned int bpp, bppcount;
75  uint32_t palette[256];
76  int palette_is_set;
77  int le;
78  enum TiffCompr compr;
79  enum TiffPhotometric photometric;
80  int planar;
81  int subsampling[2];
82  int fax_opts;
83  int predictor;
84  int fill_order;
85  uint32_t res[4];
86  int is_thumbnail;
87  unsigned last_tag;
88 
89  int is_bayer;
90  int use_color_matrix;
91  uint8_t pattern[4];
92 
93  float analog_balance[4];
94  float as_shot_neutral[4];
95  float as_shot_white[4];
96  float color_matrix[3][4];
97  float camera_calibration[4][4];
98  float premultiply[4];
99  float black_level[4];
100 
101  unsigned white_level;
102  uint16_t dng_lut[65536];
103 
104  uint32_t sub_ifd;
105  uint16_t cur_page;
106 
107  int strips, rps, sstype;
108  int sot;
109  int stripsizesoff, stripsize, stripoff, strippos;
110  LZWState *lzw;
111 
112  /* Tile support */
113  int is_tiled;
114  int tile_byte_counts_offset, tile_offsets_offset;
115  int tile_width, tile_length;
116 
117  int is_jpeg;
118 
119  uint8_t *deinvert_buf;
120  int deinvert_buf_size;
121  uint8_t *yuv_line;
122  unsigned int yuv_line_size;
123 
124  int geotag_count;
125  TiffGeoTag *geotags;
126 } TiffContext;
127 
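/* CIE XYZ coordinates of the D65 reference white; used below to turn the DNG
   AsShotWhiteXY chromaticity into per-channel multipliers. */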
128 static const float d65_white[3] = { 0.950456f, 1.f, 1.088754f };
129 
130 static void tiff_set_type(TiffContext *s, enum TiffType tiff_type) {
131  if (s->tiff_type < tiff_type) // Prioritize higher-valued entries
132  s->tiff_type = tiff_type;
133 }
134 
135 static void free_geotags(TiffContext *const s)
136 {
137  for (int i = 0; i < s->geotag_count; i++)
138  av_freep(&s->geotags[i].val);
139  av_freep(&s->geotags);
140  s->geotag_count = 0;
141 }
142 
143 static const char *get_geokey_name(int key)
144 {
145 #define RET_GEOKEY_STR(TYPE, array)\
146  if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
147  key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_name_type_map))\
148  return tiff_##array##_name_type_string + tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].offset;
149 
150  RET_GEOKEY_STR(VERT, vert);
151  RET_GEOKEY_STR(PROJ, proj);
152  RET_GEOKEY_STR(GEOG, geog);
153  RET_GEOKEY_STR(CONF, conf);
154 
155  return NULL;
156 }
157 
158 static int get_geokey_type(int key)
159 {
160 #define RET_GEOKEY_TYPE(TYPE, array)\
161  if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
162  key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_name_type_map))\
163  return tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].type;
164  RET_GEOKEY_TYPE(VERT, vert);
165  RET_GEOKEY_TYPE(PROJ, proj);
166  RET_GEOKEY_TYPE(GEOG, geog);
167  RET_GEOKEY_TYPE(CONF, conf);
168 
169  return AVERROR_INVALIDDATA;
170 }
171 
172 static int cmp_id_key(const void *id, const void *k)
173 {
174  return *(const int*)id - ((const TiffGeoTagKeyName*)k)->key;
175 }
176 
177 static const char *search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
178 {
179  const TiffGeoTagKeyName *r = bsearch(&id, keys, n, sizeof(keys[0]), cmp_id_key);
180  if(r)
181  return r->name;
182 
183  return NULL;
184 }
185 
186 static const char *get_geokey_val(int key, uint16_t val)
187 {
188  if (val == TIFF_GEO_KEY_UNDEFINED)
189  return "undefined";
190  if (val == TIFF_GEO_KEY_USER_DEFINED)
191  return "User-Defined";
192 
193 #define RET_GEOKEY_VAL(TYPE, array)\
194  if (val >= TIFF_##TYPE##_OFFSET &&\
195  val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_codes))\
196  return tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET];
197 
198  switch (key) {
200  RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
201  break;
203  RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
204  break;
208  RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
209  break;
212  RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
213  break;
215  RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
216  RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
217  break;
219  RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
220  RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
221  break;
223  RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
224  break;
226  RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
227  break;
233  RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
234  break;
236  RET_GEOKEY_VAL(VERT_CS, vert_cs);
237  RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
238  break;
239 
240  }
241 
242  return NULL;
243 }
244 
245 static char *doubles2str(double *dp, int count, const char *sep)
246 {
247  int i;
248  char *ap, *ap0;
249  uint64_t component_len;
250  if (!sep) sep = ", ";
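 /* each value printed with %.15g needs at most about 24 characters, plus the separator */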
251  component_len = 24LL + strlen(sep);
252  if (count >= (INT_MAX - 1)/component_len)
253  return NULL;
254  ap = av_malloc(component_len * count + 1);
255  if (!ap)
256  return NULL;
257  ap0 = ap;
258  ap[0] = '\0';
259  for (i = 0; i < count; i++) {
260  unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
261  if(l >= component_len) {
262  av_free(ap0);
263  return NULL;
264  }
265  ap += l;
266  }
267  ap0[strlen(ap0) - strlen(sep)] = '\0';
268  return ap0;
269 }
270 
271 static int add_metadata(int count, int type,
272  const char *name, const char *sep, TiffContext *s, AVFrame *frame)
273 {
274  switch(type) {
275  case TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, &frame->metadata);
276  case TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, 0, &frame->metadata);
277  case TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, &frame->metadata);
278  default : return AVERROR_INVALIDDATA;
279  };
280 }
281 
282 /**
283  * Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
284  */
284  */
285 static uint16_t av_always_inline dng_process_color16(uint16_t value,
286  const uint16_t *lut,
287  float black_level,
288  float scale_factor)
289 {
290  float value_norm;
291 
292  // Lookup table lookup
293  value = lut[value];
294 
295  // Black level subtraction
296  // Color scaling
297  value_norm = ((float)value - black_level) * scale_factor;
298 
299  value = av_clip_uint16(lrintf(value_norm));
300 
301  return value;
302 }
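/* Example: with an identity LUT and scale_factor = 65535 / (white_level - black_level),
   as computed in dng_blit() below, a stored value equal to white_level maps to 65535 and
   a value equal to black_level maps to 0; the result is clipped to the uint16 range. */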
303 
304 static uint16_t av_always_inline dng_process_color8(uint16_t value,
305  const uint16_t *lut,
306  float black_level,
307  float scale_factor)
308 {
309  return dng_process_color16(value, lut, black_level, scale_factor) >> 8;
310 }
311 
312 static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
313  const uint8_t *src, int src_stride, int width, int height,
314  int is_single_comp, int is_u16, int odd_line)
315 {
316  float scale_factor[4];
317  int line, col;
318 
319  if (s->is_bayer) {
320  for (int i = 0; i < 4; i++)
321  scale_factor[i] = s->premultiply[s->pattern[i]] * 65535.f / (s->white_level - s->black_level[i]);
322  } else {
323  for (int i = 0; i < 4; i++)
324  scale_factor[i] = s->premultiply[ i ] * 65535.f / (s->white_level - s->black_level[i]);
325  }
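 /* scale_factor[] maps each CFA position's usable range [black_level, white_level] onto the
    full 16-bit range, folding in the per-channel premultiplier (white-balance gain). */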
326 
327  if (is_single_comp) {
328  if (!is_u16)
329  return; /* <= 8bpp unsupported */
330 
331  /* The input image is twice the width and half the height of the output; each input row
332  holds two output rows (the frame is split vertically in the middle). */
333  for (line = 0; line < height / 2; line++) {
334  uint16_t *dst_u16 = (uint16_t *)dst;
335  const uint16_t *src_u16 = (const uint16_t *)src;
336 
337  /* Blit the first half of the input row to the current output row */
338  for (col = 0; col < width; col++)
339  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level[col&1], scale_factor[col&1]);
340 
341  /* Advance the destination pointer by a row (source pointer remains in the same place) */
342  dst += dst_stride * sizeof(uint16_t);
343  dst_u16 = (uint16_t *)dst;
344 
345  /* Blit the second half of the input row to the next output row */
346  for (col = 0; col < width; col++)
347  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level[(col&1) + 2], scale_factor[(col&1) + 2]);
348 
349  dst += dst_stride * sizeof(uint16_t);
350  src += src_stride * sizeof(uint16_t);
351  }
352  } else {
353  /* The input and output images are the same size and the MJPEG decoder has already done
354  per-component deinterleaving, so blitting here is straightforward. */
355  if (is_u16) {
356  for (line = 0; line < height; line++) {
357  uint16_t *dst_u16 = (uint16_t *)dst;
358  const uint16_t *src_u16 = (const uint16_t *)src;
359 
360  for (col = 0; col < width; col++)
361  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut,
362  s->black_level[(col&1) + 2 * ((line&1) + odd_line)],
363  scale_factor[(col&1) + 2 * ((line&1) + odd_line)]);
364 
365  dst += dst_stride * sizeof(uint16_t);
366  src += src_stride * sizeof(uint16_t);
367  }
368  } else {
369  for (line = 0; line < height; line++) {
370  uint8_t *dst_u8 = dst;
371  const uint8_t *src_u8 = src;
372 
373  for (col = 0; col < width; col++)
374  *dst_u8++ = dng_process_color8(*src_u8++, s->dng_lut,
375  s->black_level[(col&1) + 2 * ((line&1) + odd_line)],
376  scale_factor[(col&1) + 2 * ((line&1) + odd_line)]);
377 
378  dst += dst_stride;
379  src += src_stride;
380  }
381  }
382  }
383 }
384 
385 static void av_always_inline horizontal_fill(TiffContext *s,
386  unsigned int bpp, uint8_t* dst,
387  int usePtr, const uint8_t *src,
388  uint8_t c, int width, int offset)
389 {
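 /* Expand packed 1-, 2- and 4-bit samples to one value per byte, unpack 10/12/14-bit
    samples into uint16_t (shifted up to 16 bits for non-DNG files), and fall back to
    memcpy/memset for whole-byte depths. */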
390  switch (bpp) {
391  case 1:
392  while (--width >= 0) {
393  dst[(width+offset)*8+7] = (usePtr ? src[width] : c) & 0x1;
394  dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
395  dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
396  dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
397  dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
398  dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
399  dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
400  dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
401  }
402  break;
403  case 2:
404  while (--width >= 0) {
405  dst[(width+offset)*4+3] = (usePtr ? src[width] : c) & 0x3;
406  dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
407  dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
408  dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
409  }
410  break;
411  case 4:
412  while (--width >= 0) {
413  dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
414  dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
415  }
416  break;
417  case 10:
418  case 12:
419  case 14: {
420  uint16_t *dst16 = (uint16_t *)dst;
421  int is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
422  uint8_t shift = is_dng ? 0 : 16 - bpp;
423  GetBitContext gb;
424 
425  init_get_bits8(&gb, src, width);
426  for (int i = 0; i < s->width; i++) {
427  dst16[i] = get_bits(&gb, bpp) << shift;
428  }
429  }
430  break;
431  default:
432  if (usePtr) {
433  memcpy(dst + offset, src, width);
434  } else {
435  memset(dst + offset, c, width);
436  }
437  }
438 }
439 
440 static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
441 {
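 /* FillOrder == 2 means the file stores bits LSB-first; reverse each byte once here so the
    rest of the decoder can assume MSB-first data. */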
442  int i;
443 
444  av_fast_padded_malloc(&s->deinvert_buf, &s->deinvert_buf_size, size);
445  if (!s->deinvert_buf)
446  return AVERROR(ENOMEM);
447  for (i = 0; i < size; i++)
448  s->deinvert_buf[i] = ff_reverse[src[i]];
449 
450  return 0;
451 }
452 
453 static void unpack_gray(TiffContext *s, AVFrame *p,
454  const uint8_t *src, int lnum, int width, int bpp)
455 {
456  GetBitContext gb;
457  uint16_t *dst = (uint16_t *)(p->data[0] + lnum * p->linesize[0]);
458 
459  init_get_bits8(&gb, src, width);
460 
461  for (int i = 0; i < s->width; i++) {
462  dst[i] = get_bits(&gb, bpp);
463  }
464 }
465 
466 static void unpack_yuv(TiffContext *s, AVFrame *p,
467  const uint8_t *src, int lnum)
468 {
469  int i, j, k;
470  int w = (s->width - 1) / s->subsampling[0] + 1;
471  uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
472  uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
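 /* If the image size is not a multiple of the subsampling factors, take the slow path that
    replicates edge pixels; otherwise use the direct copy below. */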
473  if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
474  for (i = 0; i < w; i++) {
475  for (j = 0; j < s->subsampling[1]; j++)
476  for (k = 0; k < s->subsampling[0]; k++)
477  p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
478  FFMIN(i * s->subsampling[0] + k, s->width-1)] = *src++;
479  *pu++ = *src++;
480  *pv++ = *src++;
481  }
482  }else{
483  for (i = 0; i < w; i++) {
484  for (j = 0; j < s->subsampling[1]; j++)
485  for (k = 0; k < s->subsampling[0]; k++)
486  p->data[0][(lnum + j) * p->linesize[0] +
487  i * s->subsampling[0] + k] = *src++;
488  *pu++ = *src++;
489  *pv++ = *src++;
490  }
491  }
492 }
493 
494 #if CONFIG_ZLIB
495 static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
496  int size)
497 {
498  z_stream zstream = { 0 };
499  int zret;
500 
501  zstream.next_in = src;
502  zstream.avail_in = size;
503  zstream.next_out = dst;
504  zstream.avail_out = *len;
505  zret = inflateInit(&zstream);
506  if (zret != Z_OK) {
507  av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
508  return zret;
509  }
510  zret = inflate(&zstream, Z_SYNC_FLUSH);
511  inflateEnd(&zstream);
512  *len = zstream.total_out;
513  return zret == Z_STREAM_END ? Z_OK : zret;
514 }
515 
516 static int tiff_unpack_zlib(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
517  const uint8_t *src, int size, int width, int lines,
518  int strip_start, int is_yuv)
519 {
520  uint8_t *zbuf;
521  unsigned long outlen;
522  int ret, line;
523  outlen = width * lines;
524  zbuf = av_malloc(outlen);
525  if (!zbuf)
526  return AVERROR(ENOMEM);
527  if (s->fill_order) {
528  if ((ret = deinvert_buffer(s, src, size)) < 0) {
529  av_free(zbuf);
530  return ret;
531  }
532  src = s->deinvert_buf;
533  }
534  ret = tiff_uncompress(zbuf, &outlen, src, size);
535  if (ret != Z_OK) {
536  av_log(s->avctx, AV_LOG_ERROR,
537  "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
538  (unsigned long)width * lines, ret);
539  av_free(zbuf);
540  return AVERROR_UNKNOWN;
541  }
542  src = zbuf;
543  for (line = 0; line < lines; line++) {
544  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
545  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
546  } else {
547  memcpy(dst, src, width);
548  }
549  if (is_yuv) {
550  unpack_yuv(s, p, dst, strip_start + line);
551  line += s->subsampling[1] - 1;
552  }
553  dst += stride;
554  src += width;
555  }
556  av_free(zbuf);
557  return 0;
558 }
559 #endif
560 
561 #if CONFIG_LZMA
562 static int tiff_uncompress_lzma(uint8_t *dst, uint64_t *len, const uint8_t *src,
563  int size)
564 {
565  lzma_stream stream = LZMA_STREAM_INIT;
566  lzma_ret ret;
567 
568  stream.next_in = src;
569  stream.avail_in = size;
570  stream.next_out = dst;
571  stream.avail_out = *len;
572  ret = lzma_stream_decoder(&stream, UINT64_MAX, 0);
573  if (ret != LZMA_OK) {
574  av_log(NULL, AV_LOG_ERROR, "LZMA init error: %d\n", ret);
575  return ret;
576  }
577  ret = lzma_code(&stream, LZMA_RUN);
578  lzma_end(&stream);
579  *len = stream.total_out;
580  return ret == LZMA_STREAM_END ? LZMA_OK : ret;
581 }
582 
583 static int tiff_unpack_lzma(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
584  const uint8_t *src, int size, int width, int lines,
585  int strip_start, int is_yuv)
586 {
587  uint64_t outlen = width * (uint64_t)lines;
588  int ret, line;
589  uint8_t *buf = av_malloc(outlen);
590  if (!buf)
591  return AVERROR(ENOMEM);
592  if (s->fill_order) {
593  if ((ret = deinvert_buffer(s, src, size)) < 0) {
594  av_free(buf);
595  return ret;
596  }
597  src = s->deinvert_buf;
598  }
599  ret = tiff_uncompress_lzma(buf, &outlen, src, size);
600  if (ret != LZMA_OK) {
601  av_log(s->avctx, AV_LOG_ERROR,
602  "Uncompressing failed (%"PRIu64" of %"PRIu64") with error %d\n", outlen,
603  (uint64_t)width * lines, ret);
604  av_free(buf);
605  return AVERROR_UNKNOWN;
606  }
607  src = buf;
608  for (line = 0; line < lines; line++) {
609  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
610  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
611  } else {
612  memcpy(dst, src, width);
613  }
614  if (is_yuv) {
615  unpack_yuv(s, p, dst, strip_start + line);
616  line += s->subsampling[1] - 1;
617  }
618  dst += stride;
619  src += width;
620  }
621  av_free(buf);
622  return 0;
623 }
624 #endif
625 
626 static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
627  const uint8_t *src, int size, int width, int lines)
628 {
629  int line;
630  int ret;
631 
632  if (s->fill_order) {
633  if ((ret = deinvert_buffer(s, src, size)) < 0)
634  return ret;
635  src = s->deinvert_buf;
636  }
637  ret = ff_ccitt_unpack(s->avctx, src, size, dst, lines, stride,
638  s->compr, s->fax_opts);
639  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
640  for (line = 0; line < lines; line++) {
641  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
642  dst += stride;
643  }
644  return ret;
645 }
646 
647 static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame,
648  int tile_byte_count, int dst_x, int dst_y, int w, int h)
649 {
650  TiffContext *s = avctx->priv_data;
651  uint8_t *dst_data, *src_data;
652  uint32_t dst_offset; /* offset from dst buffer in pixels */
653  int is_single_comp, is_u16, pixel_size;
654  int ret;
655 
656  if (tile_byte_count < 0 || tile_byte_count > bytestream2_get_bytes_left(&s->gb))
657  return AVERROR_INVALIDDATA;
658 
659  /* Prepare a packet and send to the MJPEG decoder */
660  av_packet_unref(s->jpkt);
661  s->jpkt->data = (uint8_t*)s->gb.buffer;
662  s->jpkt->size = tile_byte_count;
663 
664  if (s->is_bayer) {
665  MJpegDecodeContext *mjpegdecctx = s->avctx_mjpeg->priv_data;
666  /* We have to set this information here; there is no way to tell from a JPEG's own data
667  whether it is a DNG-embedded image or not (and we need that information when decoding it). */
668  mjpegdecctx->bayer = 1;
669  }
670 
671  ret = avcodec_send_packet(s->avctx_mjpeg, s->jpkt);
672  if (ret < 0) {
673  av_log(avctx, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
674  return ret;
675  }
676 
677  ret = avcodec_receive_frame(s->avctx_mjpeg, s->jpgframe);
678  if (ret < 0) {
679  av_log(avctx, AV_LOG_ERROR, "JPEG decoding error: %s.\n", av_err2str(ret));
680 
681  /* Normally skip the broken tile; treat it as a hard error only when AV_EF_EXPLODE is set */
682  if (avctx->err_recognition & AV_EF_EXPLODE)
683  return AVERROR_INVALIDDATA;
684  else
685  return 0;
686  }
687 
688  is_u16 = (s->bpp > 8);
689 
690  /* Copy the outputted tile's pixels from 'jpgframe' to 'frame' (final buffer) */
691 
692  if (s->jpgframe->width != s->avctx_mjpeg->width ||
693  s->jpgframe->height != s->avctx_mjpeg->height ||
694  s->jpgframe->format != s->avctx_mjpeg->pix_fmt)
695  return AVERROR_INVALIDDATA;
696 
697  /* See dng_blit for explanation */
698  if (s->avctx_mjpeg->width == w * 2 &&
699  s->avctx_mjpeg->height == h / 2 &&
700  s->avctx_mjpeg->pix_fmt == AV_PIX_FMT_GRAY16LE) {
701  is_single_comp = 1;
702  } else if (s->avctx_mjpeg->width >= w &&
703  s->avctx_mjpeg->height >= h &&
704  s->avctx_mjpeg->pix_fmt == (is_u16 ? AV_PIX_FMT_GRAY16 : AV_PIX_FMT_GRAY8)
705  ) {
706  is_single_comp = 0;
707  } else
708  return AVERROR_INVALIDDATA;
709 
710  pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
711 
712  if (is_single_comp && !is_u16) {
713  av_log(s->avctx, AV_LOG_ERROR, "DNGs with bpp <= 8 and 1 component are unsupported\n");
714  av_frame_unref(s->jpgframe);
715  return AVERROR_PATCHWELCOME;
716  }
717 
718  dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
719  dst_data = frame->data[0] + dst_offset * pixel_size;
720  src_data = s->jpgframe->data[0];
721 
722  dng_blit(s,
723  dst_data,
724  frame->linesize[0] / pixel_size,
725  src_data,
726  s->jpgframe->linesize[0] / pixel_size,
727  w,
728  h,
729  is_single_comp,
730  is_u16, 0);
731 
732  av_frame_unref(s->jpgframe);
733 
734  return 0;
735 }
736 
737 static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
738  const uint8_t *src, int size, int strip_start, int lines)
739 {
740  PutByteContext pb;
741  int c, line, pixels, code, ret;
742  const uint8_t *ssrc = src;
743  int width = ((s->width * s->bpp) + 7) >> 3;
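 /* 'width' is the number of bytes in one packed input row (bits rounded up to whole bytes);
    for planar data it is divided by the component count below. */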
744  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(p->format);
745  int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
746  (desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
747  desc->nb_components >= 3;
748  int is_dng;
749 
750  if (s->planar)
751  width /= s->bppcount;
752 
753  if (size <= 0)
754  return AVERROR_INVALIDDATA;
755 
756  if (is_yuv) {
757  int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
758  s->subsampling[0] * s->subsampling[1] + 7) >> 3;
759  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
760  if (s->yuv_line == NULL) {
761  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
762  return AVERROR(ENOMEM);
763  }
764  dst = s->yuv_line;
765  stride = 0;
766 
767  width = (s->width - 1) / s->subsampling[0] + 1;
768  width = width * s->subsampling[0] * s->subsampling[1] + 2*width;
769  av_assert0(width <= bytes_per_row);
770  av_assert0(s->bpp == 24);
771  }
772  if (s->is_bayer) {
773  av_assert0(width == (s->bpp * s->width + 7) >> 3);
774  }
775  av_assert0(!(s->is_bayer && is_yuv));
776  if (p->format == AV_PIX_FMT_GRAY12) {
777  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, width);
778  if (s->yuv_line == NULL) {
779  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
780  return AVERROR(ENOMEM);
781  }
782  dst = s->yuv_line;
783  stride = 0;
784  }
785 
786  if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
787 #if CONFIG_ZLIB
788  return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines,
789  strip_start, is_yuv);
790 #else
791  av_log(s->avctx, AV_LOG_ERROR,
792  "zlib support not enabled, "
793  "deflate compression not supported\n");
794  return AVERROR(ENOSYS);
795 #endif
796  }
797  if (s->compr == TIFF_LZMA) {
798 #if CONFIG_LZMA
799  return tiff_unpack_lzma(s, p, dst, stride, src, size, width, lines,
800  strip_start, is_yuv);
801 #else
802  av_log(s->avctx, AV_LOG_ERROR,
803  "LZMA support not enabled\n");
804  return AVERROR(ENOSYS);
805 #endif
806  }
807  if (s->compr == TIFF_LZW) {
808  if (s->fill_order) {
809  if ((ret = deinvert_buffer(s, src, size)) < 0)
810  return ret;
811  ssrc = src = s->deinvert_buf;
812  }
813  if (size > 1 && !src[0] && (src[1]&1)) {
814  av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
815  }
816  if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
817  av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
818  return ret;
819  }
820  for (line = 0; line < lines; line++) {
821  pixels = ff_lzw_decode(s->lzw, dst, width);
822  if (pixels < width) {
823  av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
824  pixels, width);
825  return AVERROR_INVALIDDATA;
826  }
827  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
828  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
829  if (is_yuv) {
830  unpack_yuv(s, p, dst, strip_start + line);
831  line += s->subsampling[1] - 1;
832  } else if (p->format == AV_PIX_FMT_GRAY12) {
833  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
834  }
835  dst += stride;
836  }
837  return 0;
838  }
839  if (s->compr == TIFF_CCITT_RLE ||
840  s->compr == TIFF_G3 ||
841  s->compr == TIFF_G4) {
842  if (is_yuv || p->format == AV_PIX_FMT_GRAY12)
843  return AVERROR_INVALIDDATA;
844 
845  return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
846  }
847 
848  bytestream2_init(&s->gb, src, size);
849  bytestream2_init_writer(&pb, dst, is_yuv ? s->yuv_line_size : (stride * lines));
850 
851  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
852 
853  /* Decode JPEG-encoded DNGs with strips */
854  if (s->compr == TIFF_NEWJPEG && is_dng) {
855  if (s->strips > 1) {
856  av_log(s->avctx, AV_LOG_ERROR, "More than one DNG JPEG strip is unsupported\n");
857  return AVERROR_PATCHWELCOME;
858  }
859  if (!s->is_bayer)
860  return AVERROR_PATCHWELCOME;
861  if ((ret = dng_decode_jpeg(s->avctx, p, s->stripsize, 0, 0, s->width, s->height)) < 0)
862  return ret;
863  return 0;
864  }
865 
866  if (is_dng && stride == 0)
867  return AVERROR_INVALIDDATA;
868 
869  for (line = 0; line < lines; line++) {
870  if (src - ssrc > size) {
871  av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
872  return AVERROR_INVALIDDATA;
873  }
874 
875  if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
876  break;
877  bytestream2_seek_p(&pb, stride * line, SEEK_SET);
878  switch (s->compr) {
879  case TIFF_RAW:
880  if (ssrc + size - src < width)
881  return AVERROR_INVALIDDATA;
882 
883  if (!s->fill_order) {
884  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 || s->is_bayer),
885  dst, 1, src, 0, width, 0);
886  } else {
887  int i;
888  for (i = 0; i < width; i++)
889  dst[i] = ff_reverse[src[i]];
890  }
891 
892  /* Color processing for DNG images with uncompressed strips (non-tiled) */
893  if (is_dng) {
894  int is_u16, pixel_size_bytes, pixel_size_bits, elements;
895 
896  is_u16 = (s->bpp / s->bppcount > 8);
897  pixel_size_bits = (is_u16 ? 16 : 8);
898  pixel_size_bytes = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
899 
900  elements = width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount; // need to account for [1, 16] bpp
901  av_assert0 (elements * pixel_size_bytes <= FFABS(stride));
902  dng_blit(s,
903  dst,
904  0, // no stride, only 1 line
905  dst,
906  0, // no stride, only 1 line
907  elements,
908  1,
909  0, // the single-component variation is only present in JPEG-encoded DNGs
910  is_u16,
911  (line + strip_start)&1);
912  }
913 
914  src += width;
915  break;
916  case TIFF_PACKBITS:
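 /* PackBits RLE: a control byte n in [0,127] means copy the next n+1 literal bytes;
    n in [-127,-1] means repeat the following byte 1-n times; -128 is a no-op. */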
917  for (pixels = 0; pixels < width;) {
918  if (ssrc + size - src < 2) {
919  av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
920  return AVERROR_INVALIDDATA;
921  }
922  code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++;
923  if (code >= 0) {
924  code++;
925  if (pixels + code > width ||
926  ssrc + size - src < code) {
927  av_log(s->avctx, AV_LOG_ERROR,
928  "Copy went out of bounds\n");
929  return AVERROR_INVALIDDATA;
930  }
931  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
932  dst, 1, src, 0, code, pixels);
933  src += code;
934  pixels += code;
935  } else if (code != -128) { // -127..-1
936  code = (-code) + 1;
937  if (pixels + code > width) {
938  av_log(s->avctx, AV_LOG_ERROR,
939  "Run went out of bounds\n");
940  return AVERROR_INVALIDDATA;
941  }
942  c = *src++;
943  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
944  dst, 0, NULL, c, code, pixels);
945  pixels += code;
946  }
947  }
948  if (s->fill_order) {
949  int i;
950  for (i = 0; i < width; i++)
951  dst[i] = ff_reverse[dst[i]];
952  }
953  break;
954  }
955  if (is_yuv) {
956  unpack_yuv(s, p, dst, strip_start + line);
957  line += s->subsampling[1] - 1;
958  } else if (p->format == AV_PIX_FMT_GRAY12) {
959  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
960  }
961  dst += stride;
962  }
963  return 0;
964 }
965 
966 static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame,
967  const AVPacket *avpkt)
968 {
969  TiffContext *s = avctx->priv_data;
970  int tile_idx;
971  int tile_offset_offset, tile_offset;
972  int tile_byte_count_offset, tile_byte_count;
973  int tile_count_x, tile_count_y;
974  int tile_width, tile_length;
975  int has_width_leftover, has_height_leftover;
976  int tile_x = 0, tile_y = 0;
977  int pos_x = 0, pos_y = 0;
978  int ret;
979 
980  if (s->tile_width <= 0 || s->tile_length <= 0)
981  return AVERROR_INVALIDDATA;
982 
983  has_width_leftover = (s->width % s->tile_width != 0);
984  has_height_leftover = (s->height % s->tile_length != 0);
985 
986  /* Calculate tile counts (round up) */
987  tile_count_x = (s->width + s->tile_width - 1) / s->tile_width;
988  tile_count_y = (s->height + s->tile_length - 1) / s->tile_length;
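 /* For example, a 5000x3000 image with 512x512 tiles gives 10x6 tiles; the right-most
    column and bottom row of tiles are partial. */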
989 
990  /* Iterate over the number of tiles */
991  for (tile_idx = 0; tile_idx < tile_count_x * tile_count_y; tile_idx++) {
992  tile_x = tile_idx % tile_count_x;
993  tile_y = tile_idx / tile_count_x;
994 
995  if (has_width_leftover && tile_x == tile_count_x - 1) // If on the right-most tile
996  tile_width = s->width % s->tile_width;
997  else
998  tile_width = s->tile_width;
999 
1000  if (has_height_leftover && tile_y == tile_count_y - 1) // If on the bottom-most tile
1001  tile_length = s->height % s->tile_length;
1002  else
1003  tile_length = s->tile_length;
1004 
1005  /* Read tile offset */
1006  tile_offset_offset = s->tile_offsets_offset + tile_idx * sizeof(int);
1007  bytestream2_seek(&s->gb, tile_offset_offset, SEEK_SET);
1008  tile_offset = ff_tget_long(&s->gb, s->le);
1009 
1010  /* Read tile byte size */
1011  tile_byte_count_offset = s->tile_byte_counts_offset + tile_idx * sizeof(int);
1012  bytestream2_seek(&s->gb, tile_byte_count_offset, SEEK_SET);
1013  tile_byte_count = ff_tget_long(&s->gb, s->le);
1014 
1015  /* Seek to tile data */
1016  bytestream2_seek(&s->gb, tile_offset, SEEK_SET);
1017 
1018  /* Decode JPEG tile and copy it in the reference frame */
1019  ret = dng_decode_jpeg(avctx, frame, tile_byte_count, pos_x, pos_y, tile_width, tile_length);
1020 
1021  if (ret < 0)
1022  return ret;
1023 
1024  /* Advance current positions */
1025  pos_x += tile_width;
1026  if (tile_x == tile_count_x - 1) { // If on the right edge
1027  pos_x = 0;
1028  pos_y += tile_length;
1029  }
1030  }
1031 
1032  /* Frame is ready to be output */
1033  frame->pict_type = AV_PICTURE_TYPE_I;
1034  frame->flags |= AV_FRAME_FLAG_KEY;
1035 
1036  return avpkt->size;
1037 }
1038 
1039 static int init_image(TiffContext *s, AVFrame *frame)
1040 {
1041  int ret;
1042  int create_gray_palette = 0;
1043 
1044  // make sure there is no aliasing in the following switch
1045  if (s->bpp > 128 || s->bppcount >= 10) {
1046  av_log(s->avctx, AV_LOG_ERROR,
1047  "Unsupported image parameters: bpp=%d, bppcount=%d\n",
1048  s->bpp, s->bppcount);
1049  return AVERROR_INVALIDDATA;
1050  }
1051 
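 /* The switch key packs the relevant parameters into one integer:
    is_bayer*100000 + planar*10000 + bpp*10 + bppcount,
    e.g. 8 bpp with 1 component and no planar/Bayer flags gives 81. */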
1052  switch (s->planar * 10000 + s->bpp * 10 + s->bppcount + s->is_bayer * 100000) {
1053  case 11:
1054  if (!s->palette_is_set) {
1055  s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
1056  break;
1057  }
1058  case 21:
1059  case 41:
1060  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
1061  if (!s->palette_is_set) {
1062  create_gray_palette = 1;
1063  }
1064  break;
1065  case 81:
1066  s->avctx->pix_fmt = s->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
1067  break;
1068  case 121:
1069  s->avctx->pix_fmt = AV_PIX_FMT_GRAY12;
1070  break;
1071  case 100081:
1072  switch (AV_RL32(s->pattern)) {
1073  case 0x02010100:
1074  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB8;
1075  break;
1076  case 0x00010102:
1077  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR8;
1078  break;
1079  case 0x01000201:
1080  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG8;
1081  break;
1082  case 0x01020001:
1083  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG8;
1084  break;
1085  default:
1086  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1087  AV_RL32(s->pattern));
1088  return AVERROR_PATCHWELCOME;
1089  }
1090  break;
1091  case 100101:
1092  case 100121:
1093  case 100141:
1094  case 100161:
1095  switch (AV_RL32(s->pattern)) {
1096  case 0x02010100:
1097  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB16;
1098  break;
1099  case 0x00010102:
1100  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR16;
1101  break;
1102  case 0x01000201:
1103  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG16;
1104  break;
1105  case 0x01020001:
1106  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG16;
1107  break;
1108  default:
1109  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1110  AV_RL32(s->pattern));
1111  return AVERROR_PATCHWELCOME;
1112  }
1113  break;
1114  case 243:
1115  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1116  if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
1117  s->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
1118  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
1119  s->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
1120  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
1121  s->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
1122  } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
1123  s->avctx->pix_fmt = AV_PIX_FMT_YUV440P;
1124  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
1125  s->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1126  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
1127  s->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
1128  } else {
1129  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
1130  return AVERROR_PATCHWELCOME;
1131  }
1132  } else
1133  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
1134  break;
1135  case 161:
1136  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
1137  break;
1138  case 162:
1139  s->avctx->pix_fmt = AV_PIX_FMT_YA8;
1140  break;
1141  case 322:
1142  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;
1143  break;
1144  case 324:
1145  s->avctx->pix_fmt = s->photometric == TIFF_PHOTOMETRIC_SEPARATED ? AV_PIX_FMT_RGB0 : AV_PIX_FMT_RGBA;
1146  break;
1147  case 405:
1148  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED)
1149  s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
1150  else {
1151  av_log(s->avctx, AV_LOG_ERROR,
1152  "bpp=40 without PHOTOMETRIC_SEPARATED is unsupported\n");
1153  return AVERROR_PATCHWELCOME;
1154  }
1155  break;
1156  case 483:
1157  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
1158  break;
1159  case 644:
1160  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;
1161  break;
1162  case 10243:
1163  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
1164  break;
1165  case 10324:
1166  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
1167  break;
1168  case 10483:
1169  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
1170  break;
1171  case 10644:
1172  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
1173  break;
1174  case 963:
1175  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBF32LE : AV_PIX_FMT_RGBF32BE;
1176  break;
1177  case 1284:
1178  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBAF32LE : AV_PIX_FMT_RGBAF32BE;
1179  break;
1180  case 10963:
1181  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRPF32LE : AV_PIX_FMT_GBRPF32BE;
1182  break;
1183  case 11284:
1184  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAPF32LE : AV_PIX_FMT_GBRAPF32BE;
1185  break;
1186  default:
1187  av_log(s->avctx, AV_LOG_ERROR,
1188  "This format is not supported (bpp=%d, bppcount=%d)\n",
1189  s->bpp, s->bppcount);
1190  return AVERROR_INVALIDDATA;
1191  }
1192 
1193  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1194  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1195  if((desc->flags & AV_PIX_FMT_FLAG_RGB) ||
1196  !(desc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
1197  desc->nb_components < 3) {
1198  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
1199  return AVERROR_INVALIDDATA;
1200  }
1201  }
1202 
1203  if (s->width != s->avctx->width || s->height != s->avctx->height) {
1204  ret = ff_set_dimensions(s->avctx, s->width, s->height);
1205  if (ret < 0)
1206  return ret;
1207  }
1208 
1209  if (s->avctx->skip_frame >= AVDISCARD_ALL)
1210  return 0;
1211 
1212  if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
1213  return ret;
1214  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1215  if (!create_gray_palette)
1216  memcpy(frame->data[1], s->palette, sizeof(s->palette));
1217  else {
1218  /* make default grayscale pal */
1219  int i;
1220  uint32_t *pal = (uint32_t *)frame->data[1];
1221  for (i = 0; i < 1<<s->bpp; i++)
1222  pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
1223  }
1224  }
1225  return 1;
1226 }
1227 
1228 static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
1229 {
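 /* res[0]/res[1] hold the XResolution rational and res[2]/res[3] the YResolution one;
    once both are known, SAR = YRes / XRes = (res[2]*res[1]) / (res[0]*res[3]). */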
1230  int offset = tag == TIFF_YRES ? 2 : 0;
1231  s->res[offset++] = num;
1232  s->res[offset] = den;
1233  if (s->res[0] && s->res[1] && s->res[2] && s->res[3]) {
1234  uint64_t num = s->res[2] * (uint64_t)s->res[1];
1235  uint64_t den = s->res[0] * (uint64_t)s->res[3];
1236  if (num > INT64_MAX || den > INT64_MAX) {
1237  num = num >> 1;
1238  den = den >> 1;
1239  }
1240  av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
1241  num, den, INT32_MAX);
1242  if (!s->avctx->sample_aspect_ratio.den)
1243  s->avctx->sample_aspect_ratio = (AVRational) {0, 1};
1244  }
1245 }
1246 
1247 static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
1248 {
1249  AVFrameSideData *sd;
1250  GetByteContext gb_temp;
1251  unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. to 1
1252  int i, start;
1253  int pos;
1254  int ret;
1255  double *dp;
1256 
1257  ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start);
1258  if (ret < 0) {
1259  goto end;
1260  }
1261  if (tag <= s->last_tag)
1262  return AVERROR_INVALIDDATA;
1263 
1264  // TIFF_STRIP_SIZE is exempt from the ordering check because some files store it out of order relative to TIFF_STRIP_OFFS
1265  if (tag != TIFF_STRIP_SIZE)
1266  s->last_tag = tag;
1267 
1268  off = bytestream2_tell(&s->gb);
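 /* 'off' records the position of this tag's value/offset field; small values are read
    inline below, larger arrays are referenced through this offset later. */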
1269  if (count == 1) {
1270  switch (type) {
1271  case TIFF_BYTE:
1272  case TIFF_SHORT:
1273  case TIFF_LONG:
1274  value = ff_tget(&s->gb, type, s->le);
1275  break;
1276  case TIFF_RATIONAL:
1277  value = ff_tget_long(&s->gb, s->le);
1278  value2 = ff_tget_long(&s->gb, s->le);
1279  if (!value2) {
1280  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator in rational\n");
1281  value2 = 1;
1282  }
1283 
1284  break;
1285  case TIFF_STRING:
1286  if (count <= 4) {
1287  break;
1288  }
1289  default:
1290  value = UINT_MAX;
1291  }
1292  }
1293 
1294  switch (tag) {
1295  case TIFF_SUBFILE:
1296  s->is_thumbnail = (value != 0);
1297  break;
1298  case TIFF_WIDTH:
1299  s->width = value;
1300  break;
1301  case TIFF_HEIGHT:
1302  s->height = value;
1303  break;
1304  case TIFF_BPP:
1305  if (count > 5 || count <= 0) {
1306  av_log(s->avctx, AV_LOG_ERROR,
1307  "This format is not supported (bpp=%d, %d components)\n",
1308  value, count);
1309  return AVERROR_INVALIDDATA;
1310  }
1311  s->bppcount = count;
1312  if (count == 1)
1313  s->bpp = value;
1314  else {
1315  switch (type) {
1316  case TIFF_BYTE:
1317  case TIFF_SHORT:
1318  case TIFF_LONG:
1319  s->bpp = 0;
1320  if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
1321  return AVERROR_INVALIDDATA;
1322  for (i = 0; i < count; i++)
1323  s->bpp += ff_tget(&s->gb, type, s->le);
1324  break;
1325  default:
1326  s->bpp = -1;
1327  }
1328  }
1329  break;
1330  case TIFF_SAMPLES_PER_PIXEL:
1331  if (count != 1) {
1332  av_log(s->avctx, AV_LOG_ERROR,
1333  "Samples per pixel requires a single value, many provided\n");
1334  return AVERROR_INVALIDDATA;
1335  }
1336  if (value > 5 || value <= 0) {
1337  av_log(s->avctx, AV_LOG_ERROR,
1338  "Invalid samples per pixel %d\n", value);
1339  return AVERROR_INVALIDDATA;
1340  }
1341  if (s->bppcount == 1)
1342  s->bpp *= value;
1343  s->bppcount = value;
1344  break;
1345  case TIFF_COMPR:
1346  s->compr = value;
1347  av_log(s->avctx, AV_LOG_DEBUG, "compression: %d\n", s->compr);
1348  s->predictor = 0;
1349  switch (s->compr) {
1350  case TIFF_RAW:
1351  case TIFF_PACKBITS:
1352  case TIFF_LZW:
1353  case TIFF_CCITT_RLE:
1354  break;
1355  case TIFF_G3:
1356  case TIFF_G4:
1357  s->fax_opts = 0;
1358  break;
1359  case TIFF_DEFLATE:
1360  case TIFF_ADOBE_DEFLATE:
1361 #if CONFIG_ZLIB
1362  break;
1363 #else
1364  av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
1365  return AVERROR(ENOSYS);
1366 #endif
1367  case TIFF_JPEG:
1368  case TIFF_NEWJPEG:
1369  s->is_jpeg = 1;
1370  break;
1371  case TIFF_LZMA:
1372 #if CONFIG_LZMA
1373  break;
1374 #else
1375  av_log(s->avctx, AV_LOG_ERROR, "LZMA not compiled in\n");
1376  return AVERROR(ENOSYS);
1377 #endif
1378  default:
1379  av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
1380  s->compr);
1381  return AVERROR_INVALIDDATA;
1382  }
1383  break;
1384  case TIFF_ROWSPERSTRIP:
1385  if (!value || (type == TIFF_LONG && value == UINT_MAX))
1386  value = s->height;
1387  s->rps = FFMIN(value, s->height);
1388  break;
1389  case TIFF_STRIP_OFFS:
1390  if (count == 1) {
1391  if (value > INT_MAX) {
1392  av_log(s->avctx, AV_LOG_ERROR,
1393  "strippos %u too large\n", value);
1394  return AVERROR_INVALIDDATA;
1395  }
1396  s->strippos = 0;
1397  s->stripoff = value;
1398  } else
1399  s->strippos = off;
1400  s->strips = count;
1401  if (s->strips == s->bppcount)
1402  s->rps = s->height;
1403  s->sot = type;
1404  break;
1405  case TIFF_STRIP_SIZE:
1406  if (count == 1) {
1407  if (value > INT_MAX) {
1408  av_log(s->avctx, AV_LOG_ERROR,
1409  "stripsize %u too large\n", value);
1410  return AVERROR_INVALIDDATA;
1411  }
1412  s->stripsizesoff = 0;
1413  s->stripsize = value;
1414  s->strips = 1;
1415  } else {
1416  s->stripsizesoff = off;
1417  }
1418  s->strips = count;
1419  s->sstype = type;
1420  break;
1421  case TIFF_XRES:
1422  case TIFF_YRES:
1423  set_sar(s, tag, value, value2);
1424  break;
1425  case TIFF_TILE_OFFSETS:
1426  s->tile_offsets_offset = off;
1427  s->is_tiled = 1;
1428  break;
1429  case TIFF_TILE_BYTE_COUNTS:
1430  s->tile_byte_counts_offset = off;
1431  break;
1432  case TIFF_TILE_LENGTH:
1433  s->tile_length = value;
1434  break;
1435  case TIFF_TILE_WIDTH:
1436  s->tile_width = value;
1437  break;
1438  case TIFF_PREDICTOR:
1439  s->predictor = value;
1440  break;
1441  case TIFF_SUB_IFDS:
1442  if (count == 1)
1443  s->sub_ifd = value;
1444  else if (count > 1)
1445  s->sub_ifd = ff_tget_long(&s->gb, s->le); /** Only get the first SubIFD */
1446  break;
1447  case TIFF_GRAY_RESPONSE_CURVE:
1448  case DNG_LINEARIZATION_TABLE:
1449  if (count < 1 || count > FF_ARRAY_ELEMS(s->dng_lut))
1450  return AVERROR_INVALIDDATA;
1451  for (int i = 0; i < count; i++)
1452  s->dng_lut[i] = ff_tget(&s->gb, type, s->le);
1453  s->white_level = s->dng_lut[count-1];
1454  break;
1455  case DNG_BLACK_LEVEL:
1456  if (count > FF_ARRAY_ELEMS(s->black_level))
1457  return AVERROR_INVALIDDATA;
1458  s->black_level[0] = value / (float)value2;
1459  for (int i = 0; i < count && count > 1; i++) {
1460  if (type == TIFF_RATIONAL) {
1461  value = ff_tget_long(&s->gb, s->le);
1462  value2 = ff_tget_long(&s->gb, s->le);
1463  if (!value2) {
1464  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1465  value2 = 1;
1466  }
1467 
1468  s->black_level[i] = value / (float)value2;
1469  } else if (type == TIFF_SRATIONAL) {
1470  int value = ff_tget_long(&s->gb, s->le);
1471  int value2 = ff_tget_long(&s->gb, s->le);
1472  if (!value2) {
1473  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1474  value2 = 1;
1475  }
1476 
1477  s->black_level[i] = value / (float)value2;
1478  } else {
1479  s->black_level[i] = ff_tget(&s->gb, type, s->le);
1480  }
1481  }
1482  for (int i = count; i < 4 && count > 0; i++)
1483  s->black_level[i] = s->black_level[count - 1];
1484  break;
1485  case DNG_WHITE_LEVEL:
1486  s->white_level = value;
1487  break;
1488  case TIFF_CFA_PATTERN_DIM:
1489  if (count != 2 || (ff_tget(&s->gb, type, s->le) != 2 &&
1490  ff_tget(&s->gb, type, s->le) != 2)) {
1491  av_log(s->avctx, AV_LOG_ERROR, "CFA Pattern dimensions are not 2x2\n");
1492  return AVERROR_INVALIDDATA;
1493  }
1494  break;
1495  case TIFF_CFA_PATTERN:
1496  s->is_bayer = 1;
1497  s->pattern[0] = ff_tget(&s->gb, type, s->le);
1498  s->pattern[1] = ff_tget(&s->gb, type, s->le);
1499  s->pattern[2] = ff_tget(&s->gb, type, s->le);
1500  s->pattern[3] = ff_tget(&s->gb, type, s->le);
1501  break;
1502  case TIFF_PHOTOMETRIC:
1503  switch (value) {
1504  case TIFF_PHOTOMETRIC_WHITE_IS_ZERO:
1505  case TIFF_PHOTOMETRIC_BLACK_IS_ZERO:
1506  case TIFF_PHOTOMETRIC_RGB:
1507  case TIFF_PHOTOMETRIC_PALETTE:
1508  case TIFF_PHOTOMETRIC_SEPARATED:
1509  case TIFF_PHOTOMETRIC_YCBCR:
1510  case TIFF_PHOTOMETRIC_CFA:
1511  case TIFF_PHOTOMETRIC_LINEAR_RAW: // Used by DNG images
1512  s->photometric = value;
1513  break;
1514  case TIFF_PHOTOMETRIC_ALPHA_MASK:
1515  case TIFF_PHOTOMETRIC_CIE_LAB:
1516  case TIFF_PHOTOMETRIC_ICC_LAB:
1517  case TIFF_PHOTOMETRIC_ITU_LAB:
1518  case TIFF_PHOTOMETRIC_LOG_L:
1519  case TIFF_PHOTOMETRIC_LOG_LUV:
1520  avpriv_report_missing_feature(s->avctx,
1521  "PhotometricInterpretation 0x%04X",
1522  value);
1523  return AVERROR_PATCHWELCOME;
1524  default:
1525  av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is "
1526  "unknown\n", value);
1527  return AVERROR_INVALIDDATA;
1528  }
1529  break;
1530  case TIFF_FILL_ORDER:
1531  if (value < 1 || value > 2) {
1532  av_log(s->avctx, AV_LOG_ERROR,
1533  "Unknown FillOrder value %d, trying default one\n", value);
1534  value = 1;
1535  }
1536  s->fill_order = value - 1;
1537  break;
1538  case TIFF_PAL: {
1539  GetByteContext pal_gb[3];
1540  off = type_sizes[type];
1541  if (count / 3 > 256 ||
1542  bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
1543  return AVERROR_INVALIDDATA;
1544 
1545  pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
1546  bytestream2_skip(&pal_gb[1], count / 3 * off);
1547  bytestream2_skip(&pal_gb[2], count / 3 * off * 2);
1548 
1549  off = (type_sizes[type] - 1) << 3;
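 /* Palette entries may be stored as 16-bit values; this shift scales each component
    down to the 8 bits used in the AVPalette. */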
1550  if (off > 31U) {
1551  av_log(s->avctx, AV_LOG_ERROR, "palette shift %d is out of range\n", off);
1552  return AVERROR_INVALIDDATA;
1553  }
1554 
1555  for (i = 0; i < count / 3; i++) {
1556  uint32_t p = 0xFF000000;
1557  p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16;
1558  p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8;
1559  p |= ff_tget(&pal_gb[2], type, s->le) >> off;
1560  s->palette[i] = p;
1561  }
1562  s->palette_is_set = 1;
1563  break;
1564  }
1565  case TIFF_PLANAR:
1566  s->planar = value == 2;
1567  break;
1568  case TIFF_YCBCR_SUBSAMPLING:
1569  if (count != 2) {
1570  av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n");
1571  return AVERROR_INVALIDDATA;
1572  }
1573  for (i = 0; i < count; i++) {
1574  s->subsampling[i] = ff_tget(&s->gb, type, s->le);
1575  if (s->subsampling[i] <= 0) {
1576  av_log(s->avctx, AV_LOG_ERROR, "subsampling %d is invalid\n", s->subsampling[i]);
1577  s->subsampling[i] = 1;
1578  return AVERROR_INVALIDDATA;
1579  }
1580  }
1581  break;
1582  case TIFF_T4OPTIONS:
1583  if (s->compr == TIFF_G3)
1584  s->fax_opts = value;
1585  break;
1586  case TIFF_T6OPTIONS:
1587  if (s->compr == TIFF_G4)
1588  s->fax_opts = value;
1589  break;
1590 #define ADD_METADATA(count, name, sep)\
1591  if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
1592  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
1593  goto end;\
1594  }
1595  case TIFF_MODEL_PIXEL_SCALE:
1596  ADD_METADATA(count, "ModelPixelScaleTag", NULL);
1597  break;
1598  case TIFF_MODEL_TRANSFORMATION:
1599  ADD_METADATA(count, "ModelTransformationTag", NULL);
1600  break;
1601  case TIFF_MODEL_TIEPOINT:
1602  ADD_METADATA(count, "ModelTiepointTag", NULL);
1603  break;
1604  case TIFF_GEO_KEY_DIRECTORY:
1605  if (s->geotag_count) {
1606  avpriv_request_sample(s->avctx, "Multiple geo key directories");
1607  return AVERROR_INVALIDDATA;
1608  }
1609  ADD_METADATA(1, "GeoTIFF_Version", NULL);
1610  ADD_METADATA(2, "GeoTIFF_Key_Revision", ".");
1611  s->geotag_count = ff_tget_short(&s->gb, s->le);
1612  if (s->geotag_count > count / 4 - 1) {
1613  s->geotag_count = count / 4 - 1;
1614  av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n");
1615  }
1616  if ( bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4
1617  || s->geotag_count == 0) {
1618  s->geotag_count = 0;
1619  return -1;
1620  }
1621  s->geotags = av_calloc(s->geotag_count, sizeof(*s->geotags));
1622  if (!s->geotags) {
1623  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1624  s->geotag_count = 0;
1625  goto end;
1626  }
1627  for (i = 0; i < s->geotag_count; i++) {
1628  unsigned val;
1629  s->geotags[i].key = ff_tget_short(&s->gb, s->le);
1630  s->geotags[i].type = ff_tget_short(&s->gb, s->le);
1631  s->geotags[i].count = ff_tget_short(&s->gb, s->le);
1632  val = ff_tget_short(&s->gb, s->le);
1633 
1634  if (!s->geotags[i].type) {
1635  const char *str = get_geokey_val(s->geotags[i].key, val);
1636 
1637  s->geotags[i].val = str ? av_strdup(str) : av_asprintf("Unknown-%u", val);
1638  if (!s->geotags[i].val)
1639  return AVERROR(ENOMEM);
1640  } else
1641  s->geotags[i].offset = val;
1642  }
1643  break;
1644  case TIFF_GEO_DOUBLE_PARAMS:
1645  if (count >= INT_MAX / sizeof(int64_t))
1646  return AVERROR_INVALIDDATA;
1647  if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
1648  return AVERROR_INVALIDDATA;
1649  dp = av_malloc_array(count, sizeof(double));
1650  if (!dp) {
1651  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1652  goto end;
1653  }
1654  for (i = 0; i < count; i++)
1655  dp[i] = ff_tget_double(&s->gb, s->le);
1656  for (i = 0; i < s->geotag_count; i++) {
1657  if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) {
1658  if (s->geotags[i].count == 0
1659  || s->geotags[i].offset + s->geotags[i].count > count) {
1660  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1661  } else if (s->geotags[i].val) {
1662  av_log(s->avctx, AV_LOG_WARNING, "Duplicate GeoTIFF key %d\n", s->geotags[i].key);
1663  } else {
1664  char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
1665  if (!ap) {
1666  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1667  av_freep(&dp);
1668  return AVERROR(ENOMEM);
1669  }
1670  s->geotags[i].val = ap;
1671  }
1672  }
1673  }
1674  av_freep(&dp);
1675  break;
1676  case TIFF_GEO_ASCII_PARAMS:
1677  pos = bytestream2_tell(&s->gb);
1678  for (i = 0; i < s->geotag_count; i++) {
1679  if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) {
1680  if (s->geotags[i].count == 0
1681  || s->geotags[i].offset + s->geotags[i].count > count) {
1682  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1683  } else {
1684  char *ap;
1685 
1686  bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
1687  if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
1688  return AVERROR_INVALIDDATA;
1689  if (s->geotags[i].val)
1690  return AVERROR_INVALIDDATA;
1691  ap = av_malloc(s->geotags[i].count);
1692  if (!ap) {
1693  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1694  return AVERROR(ENOMEM);
1695  }
1696  bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count);
1697  ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte
1698  s->geotags[i].val = ap;
1699  }
1700  }
1701  }
1702  break;
1703  case TIFF_ICC_PROFILE:
1704  gb_temp = s->gb;
1705  bytestream2_seek(&gb_temp, off, SEEK_SET);
1706 
1707  if (bytestream2_get_bytes_left(&gb_temp) < count)
1708  return AVERROR_INVALIDDATA;
1709 
1709 
1710  ret = ff_frame_new_side_data(s->avctx, frame, AV_FRAME_DATA_ICC_PROFILE, count, &sd);
1711  if (ret < 0)
1712  return ret;
1713  if (sd)
1714  bytestream2_get_bufferu(&gb_temp, sd->data, count);
1715  break;
1716  case TIFF_ARTIST:
1717  ADD_METADATA(count, "artist", NULL);
1718  break;
1719  case TIFF_COPYRIGHT:
1720  ADD_METADATA(count, "copyright", NULL);
1721  break;
1722  case TIFF_DATE:
1723  ADD_METADATA(count, "date", NULL);
1724  break;
1725  case TIFF_DOCUMENT_NAME:
1726  ADD_METADATA(count, "document_name", NULL);
1727  break;
1728  case TIFF_HOST_COMPUTER:
1729  ADD_METADATA(count, "computer", NULL);
1730  break;
1731  case TIFF_IMAGE_DESCRIPTION:
1732  ADD_METADATA(count, "description", NULL);
1733  break;
1734  case TIFF_MAKE:
1735  ADD_METADATA(count, "make", NULL);
1736  break;
1737  case TIFF_MODEL:
1738  ADD_METADATA(count, "model", NULL);
1739  break;
1740  case TIFF_PAGE_NAME:
1741  ADD_METADATA(count, "page_name", NULL);
1742  break;
1743  case TIFF_PAGE_NUMBER:
1744  ADD_METADATA(count, "page_number", " / ");
1745  // need to seek back to re-read the page number
1746  bytestream2_seek(&s->gb, -count * sizeof(uint16_t), SEEK_CUR);
1747  // read the page number
1748  s->cur_page = ff_tget_short(&s->gb, s->le);
1749  // get back to where we were before the previous seek
1750  bytestream2_seek(&s->gb, count * sizeof(uint16_t) - sizeof(uint16_t), SEEK_CUR);
1751  break;
1752  case TIFF_SOFTWARE_NAME:
1753  ADD_METADATA(count, "software", NULL);
1754  break;
1755  case DNG_VERSION:
1756  if (count == 4) {
1757  unsigned int ver[4];
1758  ver[0] = ff_tget(&s->gb, type, s->le);
1759  ver[1] = ff_tget(&s->gb, type, s->le);
1760  ver[2] = ff_tget(&s->gb, type, s->le);
1761  ver[3] = ff_tget(&s->gb, type, s->le);
1762 
1763  av_log(s->avctx, AV_LOG_DEBUG, "DNG file, version %u.%u.%u.%u\n",
1764  ver[0], ver[1], ver[2], ver[3]);
1765 
1766  tiff_set_type(s, TIFF_TYPE_DNG);
1767  }
1768  break;
1769  case DNG_ANALOG_BALANCE:
1770  if (type != TIFF_RATIONAL)
1771  break;
1772 
1773  for (int i = 0; i < 3; i++) {
1774  value = ff_tget_long(&s->gb, s->le);
1775  value2 = ff_tget_long(&s->gb, s->le);
1776  if (!value2) {
1777  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1778  value2 = 1;
1779  }
1780 
1781  s->analog_balance[i] = value / (float)value2;
1782  }
1783  break;
1784  case DNG_AS_SHOT_NEUTRAL:
1785  if (type != TIFF_RATIONAL)
1786  break;
1787 
1788  for (int i = 0; i < 3; i++) {
1789  value = ff_tget_long(&s->gb, s->le);
1790  value2 = ff_tget_long(&s->gb, s->le);
1791  if (!value2) {
1792  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1793  value2 = 1;
1794  }
1795 
1796  s->as_shot_neutral[i] = value / (float)value2;
1797  }
1798  break;
1799  case DNG_AS_SHOT_WHITE_XY:
1800  if (type != TIFF_RATIONAL)
1801  break;
1802 
1803  for (int i = 0; i < 2; i++) {
1804  value = ff_tget_long(&s->gb, s->le);
1805  value2 = ff_tget_long(&s->gb, s->le);
1806  if (!value2) {
1807  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1808  value2 = 1;
1809  }
1810 
1811  s->as_shot_white[i] = value / (float)value2;
1812  }
1813  s->as_shot_white[2] = 1.f - s->as_shot_white[0] - s->as_shot_white[1];
1814  for (int i = 0; i < 3; i++) {
1815  s->as_shot_white[i] /= d65_white[i];
1816  }
1817  break;
1818  case DNG_COLOR_MATRIX1:
1819  case DNG_COLOR_MATRIX2:
1820  for (int i = 0; i < 3; i++) {
1821  for (int j = 0; j < 3; j++) {
1822  int value = ff_tget_long(&s->gb, s->le);
1823  int value2 = ff_tget_long(&s->gb, s->le);
1824  if (!value2) {
1825  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1826  value2 = 1;
1827  }
1828  s->color_matrix[i][j] = value / (float)value2;
1829  }
1830  s->use_color_matrix = 1;
1831  }
1832  break;
1833  case DNG_CAMERA_CALIBRATION1:
1834  case DNG_CAMERA_CALIBRATION2:
1835  for (int i = 0; i < 3; i++) {
1836  for (int j = 0; j < 3; j++) {
1837  int value = ff_tget_long(&s->gb, s->le);
1838  int value2 = ff_tget_long(&s->gb, s->le);
1839  if (!value2) {
1840  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1841  value2 = 1;
1842  }
1843  s->camera_calibration[i][j] = value / (float)value2;
1844  }
1845  }
1846  break;
1847  case CINEMADNG_TIME_CODES:
1848  case CINEMADNG_FRAME_RATE:
1849  case CINEMADNG_T_STOP:
1850  case CINEMADNG_REEL_NAME:
1851  case CINEMADNG_CAMERA_LABEL:
1852  tiff_set_type(s, TIFF_TYPE_CINEMADNG);
1853  break;
1854  default:
1855  if (s->avctx->err_recognition & AV_EF_EXPLODE) {
1856  av_log(s->avctx, AV_LOG_ERROR,
1857  "Unknown or unsupported tag %d/0x%0X\n",
1858  tag, tag);
1859  return AVERROR_INVALIDDATA;
1860  }
1861  }
1862 end:
1863  if (s->bpp > 128U) {
1864  av_log(s->avctx, AV_LOG_ERROR,
1865  "This format is not supported (bpp=%d, %d components)\n",
1866  s->bpp, count);
1867  s->bpp = 0;
1868  return AVERROR_INVALIDDATA;
1869  }
1870  bytestream2_seek(&s->gb, start, SEEK_SET);
1871  return 0;
1872 }
1873 
1874 static const float xyz2rgb[3][3] = {
1875  { 0.412453f, 0.357580f, 0.180423f },
1876  { 0.212671f, 0.715160f, 0.072169f },
1877  { 0.019334f, 0.119193f, 0.950227f },
1878 };
1879 
1880 static void camera_xyz_coeff(TiffContext *s,
1881  float rgb2cam[3][4],
1882  double cam2xyz[4][3])
1883 {
1884  double cam2rgb[4][3], num;
1885  int i, j, k;
1886 
1887  for (i = 0; i < 3; i++) {
1888  for (j = 0; j < 3; j++) {
1889  cam2rgb[i][j] = 0.;
1890  for (k = 0; k < 3; k++)
1891  cam2rgb[i][j] += cam2xyz[i][k] * xyz2rgb[k][j];
1892  }
1893  }
1894 
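 /* Normalise each camera->RGB row to sum to 1; the reciprocal of the row sum is kept as the
    per-channel premultiplier used later in dng_blit(). */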
1895  for (i = 0; i < 3; i++) {
1896  for (num = j = 0; j < 3; j++)
1897  num += cam2rgb[i][j];
1898  if (!num)
1899  num = 1;
1900  for (j = 0; j < 3; j++)
1901  cam2rgb[i][j] /= num;
1902  s->premultiply[i] = 1.f / num;
1903  }
1904 }
1905 
1906 static int decode_frame(AVCodecContext *avctx, AVFrame *p,
1907  int *got_frame, AVPacket *avpkt)
1908 {
1909  TiffContext *const s = avctx->priv_data;
1910  unsigned off, last_off = 0;
1911  int le, ret, plane, planes;
1912  int i, j, entries, stride;
1913  unsigned soff, ssize;
1914  uint8_t *dst;
1915  GetByteContext stripsizes;
1916  GetByteContext stripdata;
1917  int retry_for_subifd, retry_for_page;
1918  int is_dng;
1919  int has_tile_bits, has_strip_bits;
1920 
1921  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1922 
1923  // parse image header
1924  if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
1925  av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
1926  return ret;
1927  } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1928  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1929  return AVERROR_INVALIDDATA;
1930  }
1931  s->le = le;
1932  // TIFF_BPP is not a required tag and defaults to 1
1933 
1934  s->tiff_type = TIFF_TYPE_TIFF;
1935  s->use_color_matrix = 0;
1936 again:
1937  s->is_thumbnail = 0;
1938  s->bppcount = s->bpp = 1;
1939  s->photometric = TIFF_PHOTOMETRIC_NONE;
1940  s->compr = TIFF_RAW;
1941  s->fill_order = 0;
1942  s->white_level = 0;
1943  s->is_bayer = 0;
1944  s->is_tiled = 0;
1945  s->is_jpeg = 0;
1946  s->cur_page = 0;
1947  s->last_tag = 0;
1948 
1949  for (i = 0; i < 65536; i++)
1950  s->dng_lut[i] = i;
1951 
1952  for (i = 0; i < FF_ARRAY_ELEMS(s->black_level); i++)
1953  s->black_level[i] = 0.f;
1954 
1955  for (i = 0; i < FF_ARRAY_ELEMS(s->as_shot_neutral); i++)
1956  s->as_shot_neutral[i] = 0.f;
1957 
1958  for (i = 0; i < FF_ARRAY_ELEMS(s->as_shot_white); i++)
1959  s->as_shot_white[i] = 1.f;
1960 
1961  for (i = 0; i < FF_ARRAY_ELEMS(s->analog_balance); i++)
1962  s->analog_balance[i] = 1.f;
1963 
1964  for (i = 0; i < FF_ARRAY_ELEMS(s->premultiply); i++)
1965  s->premultiply[i] = 1.f;
1966 
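      /* CameraCalibration defaults to the identity matrix. */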
1967  for (i = 0; i < 4; i++)
1968  for (j = 0; j < 4; j++)
1969  s->camera_calibration[i][j] = i == j;
1970 
1971  free_geotags(s);
1972 
1973  // Reset these offsets so we can tell if they were set this frame
1974  s->stripsizesoff = s->strippos = 0;
1975  /* parse image file directory */
1976  bytestream2_seek(&s->gb, off, SEEK_SET);
1977  entries = ff_tget_short(&s->gb, le);
1978  if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
1979  return AVERROR_INVALIDDATA;
1980  for (i = 0; i < entries; i++) {
1981  if ((ret = tiff_decode_tag(s, p)) < 0)
1982  return ret;
1983  }
1984 
1985  if (s->get_thumbnail && !s->is_thumbnail) {
1986  av_log(avctx, AV_LOG_INFO, "No embedded thumbnail present\n");
1987  return AVERROR_EOF;
1988  }
1989 
1990  /** whether we should process this IFD's SubIFD */
1991  retry_for_subifd = s->sub_ifd && (s->get_subimage || (!s->get_thumbnail && s->is_thumbnail));
1992  /** whether we should process this multi-page IFD's next page */
1993  retry_for_page = s->get_page && s->cur_page + 1 < s->get_page; // get_page is 1-indexed
1994 
1995  if (retry_for_page) {
1996  // set offset to the next IFD
1997  off = ff_tget_long(&s->gb, le);
1998  } else if (retry_for_subifd) {
1999  // set offset to the SubIFD
2000  off = s->sub_ifd;
2001  }
2002 
2003  if (retry_for_subifd || retry_for_page) {
2004  if (!off) {
2005  av_log(avctx, AV_LOG_ERROR, "Requested entry not found\n");
2006  return AVERROR_INVALIDDATA;
2007  }
2008  if (off <= last_off) {
2009  avpriv_request_sample(s->avctx, "non increasing IFD offset");
2010  return AVERROR_INVALIDDATA;
2011  }
2012  last_off = off;
2013  if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
2014  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
2015  return AVERROR_INVALIDDATA;
2016  }
2017  s->sub_ifd = 0;
2018  goto again;
2019  }
2020 
2021  /* At this point we've decided on which (Sub)IFD to process */
2022 
2023  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
2024 
2025  for (i = 0; i<s->geotag_count; i++) {
2026  const char *keyname = get_geokey_name(s->geotags[i].key);
2027  if (!keyname) {
2028  av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
2029  continue;
2030  }
2031  if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
2032  av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
2033  continue;
2034  }
2035  ret = av_dict_set(&p->metadata, keyname, s->geotags[i].val, AV_DICT_DONT_STRDUP_VAL);
2036  s->geotags[i].val = NULL;
2037  if (ret<0) {
2038  av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
2039  return ret;
2040  }
2041  }
2042 
2043  if (is_dng) {
2044  double cam2xyz[4][3];
2045  float cmatrix[3][4];
2046  float pmin = FLT_MAX;
2047  int bps;
2048 
2049  for (i = 0; i < 3; i++) {
2050  for (j = 0; j < 3; j++)
2051  s->camera_calibration[i][j] *= s->analog_balance[i];
2052  }
2053 
2054  if (!s->use_color_matrix) {
2055  for (i = 0; i < 3; i++) {
2056  if (s->camera_calibration[i][i])
2057  s->premultiply[i] /= s->camera_calibration[i][i];
2058  }
2059  } else {
2060  for (int c = 0; c < 3; c++) {
2061  for (i = 0; i < 3; i++) {
2062  cam2xyz[c][i] = 0.;
2063  for (j = 0; j < 3; j++)
2064  cam2xyz[c][i] += s->camera_calibration[c][j] * s->color_matrix[j][i] * s->as_shot_white[i];
2065  }
2066  }
2067 
2068  camera_xyz_coeff(s, cmatrix, cam2xyz);
2069  }
2070 
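      /* Normalize the white-balance gains so the smallest channel multiplier is 1. */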
2071  for (int c = 0; c < 3; c++)
2072  pmin = fminf(pmin, s->premultiply[c]);
2073 
2074  for (int c = 0; c < 3; c++)
2075  s->premultiply[c] /= pmin;
2076 
2077  if (s->bpp % s->bppcount)
2078  return AVERROR_INVALIDDATA;
2079  bps = s->bpp / s->bppcount;
2080  if (bps < 8 || bps > 32)
2081  return AVERROR_INVALIDDATA;
2082 
2083  if (s->white_level == 0)
2084  s->white_level = (1LL << bps) - 1; /* Default value as per the spec */
2085 
2086  if (s->white_level <= s->black_level[0]) {
2087  av_log(avctx, AV_LOG_ERROR, "BlackLevel (%g) must be less than WhiteLevel (%"PRId32")\n",
2088  s->black_level[0], s->white_level);
2089  return AVERROR_INVALIDDATA;
2090  }
2091 
2092  if (s->planar)
2093  return AVERROR_PATCHWELCOME;
2094  }
2095 
2096  if (!s->is_tiled && !s->strippos && !s->stripoff) {
2097  av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
2098  return AVERROR_INVALIDDATA;
2099  }
2100 
2101  has_tile_bits = s->is_tiled || s->tile_byte_counts_offset || s->tile_offsets_offset || s->tile_width || s->tile_length;
2102  has_strip_bits = s->strippos || s->strips || s->stripoff || s->rps || s->sot || s->sstype || s->stripsize || s->stripsizesoff;
2103 
2104  if (has_tile_bits && has_strip_bits) {
2105  int tiled_dng = s->is_tiled && is_dng;
2106  av_log(avctx, tiled_dng ? AV_LOG_WARNING : AV_LOG_ERROR, "Tiled TIFF is not allowed to strip\n");
2107  if (!tiled_dng)
2108  return AVERROR_INVALIDDATA;
2109  }
2110 
2111  /* now we have the data and may start decoding */
2112  if ((ret = init_image(s, p)) <= 0)
2113  return ret;
2114 
2115  if (!s->is_tiled || has_strip_bits) {
2116  if (s->strips == 1 && !s->stripsize) {
2117  av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
2118  s->stripsize = avpkt->size - s->stripoff;
2119  }
2120 
2121  if (s->stripsizesoff) {
2122  if (s->stripsizesoff >= (unsigned)avpkt->size)
2123  return AVERROR_INVALIDDATA;
2124  bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
2125  avpkt->size - s->stripsizesoff);
2126  }
2127  if (s->strippos) {
2128  if (s->strippos >= (unsigned)avpkt->size)
2129  return AVERROR_INVALIDDATA;
2130  bytestream2_init(&stripdata, avpkt->data + s->strippos,
2131  avpkt->size - s->strippos);
2132  }
2133 
2134  if (s->rps <= 0 || s->rps % s->subsampling[1]) {
2135  av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
2136  return AVERROR_INVALIDDATA;
2137  }
2138  }
2139 
2140  if (s->photometric == TIFF_PHOTOMETRIC_LINEAR_RAW ||
2141  s->photometric == TIFF_PHOTOMETRIC_CFA) {
2142  p->color_trc = AVCOL_TRC_LINEAR;
2143  } else if (s->photometric == TIFF_PHOTOMETRIC_BLACK_IS_ZERO) {
2144  p->color_trc = AVCOL_TRC_GAMMA22;
2145  }
2146 
2147  /* Handle DNG images with JPEG-compressed tiles */
2148 
2149  if (is_dng && s->is_tiled) {
2150  if (!s->is_jpeg) {
2151  avpriv_report_missing_feature(avctx, "DNG uncompressed tiled images");
2152  return AVERROR_PATCHWELCOME;
2153  } else if (!s->is_bayer) {
2154  avpriv_report_missing_feature(avctx, "DNG JPG-compressed tiled non-bayer-encoded images");
2155  return AVERROR_PATCHWELCOME;
2156  } else {
2157  if ((ret = dng_decode_tiles(avctx, p, avpkt)) > 0)
2158  *got_frame = 1;
2159  return ret;
2160  }
2161  }
2162 
2163  /* Handle TIFF images and DNG images with uncompressed strips (non-tiled) */
2164 
2165  planes = s->planar ? s->bppcount : 1;
2166  for (plane = 0; plane < planes; plane++) {
2167  uint8_t *five_planes = NULL;
2168  int remaining = avpkt->size;
2169  int decoded_height;
2170  stride = p->linesize[plane];
2171  dst = p->data[plane];
2172  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2173  s->avctx->pix_fmt == AV_PIX_FMT_RGBA) {
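      /* Five-component separated (CMYK + alpha) data is decoded into a
         temporary 5-bytes-per-pixel buffer and converted to RGBA below. */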
2174  stride = stride * 5 / 4;
2175  five_planes =
2176  dst = av_malloc(stride * s->height);
2177  if (!dst)
2178  return AVERROR(ENOMEM);
2179  }
2180  for (i = 0; i < s->height; i += s->rps) {
2181  if (i)
2182  dst += s->rps * stride;
2183  if (s->stripsizesoff)
2184  ssize = ff_tget(&stripsizes, s->sstype, le);
2185  else
2186  ssize = s->stripsize;
2187 
2188  if (s->strippos)
2189  soff = ff_tget(&stripdata, s->sot, le);
2190  else
2191  soff = s->stripoff;
2192 
2193  if (soff > avpkt->size || ssize > avpkt->size - soff || ssize > remaining) {
2194  av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
2195  av_freep(&five_planes);
2196  return AVERROR_INVALIDDATA;
2197  }
2198  remaining -= ssize;
2199  if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,
2200  FFMIN(s->rps, s->height - i))) < 0) {
2201  if (avctx->err_recognition & AV_EF_EXPLODE) {
2202  av_freep(&five_planes);
2203  return ret;
2204  }
2205  break;
2206  }
2207  }
2208  decoded_height = FFMIN(i, s->height);
2209 
2210  if (s->predictor == 2) {
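      /* TIFF predictor 2 (horizontal differencing): each sample stores the
         difference to the sample one pixel to its left, so every row is
         reconstructed with a running sum. */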
2211  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
2212  av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported");
2213  return AVERROR_PATCHWELCOME;
2214  }
2215  dst = five_planes ? five_planes : p->data[plane];
2216  soff = s->bpp >> 3;
2217  if (s->planar)
2218  soff = FFMAX(soff / s->bppcount, 1);
2219  ssize = s->width * soff;
2220  if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
2221  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE ||
2222  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16LE ||
2223  s->avctx->pix_fmt == AV_PIX_FMT_YA16LE ||
2224  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||
2225  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {
2226  for (i = 0; i < decoded_height; i++) {
2227  for (j = soff; j < ssize; j += 2)
2228  AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
2229  dst += stride;
2230  }
2231  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
2232  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE ||
2233  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
2234  s->avctx->pix_fmt == AV_PIX_FMT_YA16BE ||
2235  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||
2236  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {
2237  for (i = 0; i < decoded_height; i++) {
2238  for (j = soff; j < ssize; j += 2)
2239  AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
2240  dst += stride;
2241  }
2242  } else {
2243  for (i = 0; i < decoded_height; i++) {
2244  for (j = soff; j < ssize; j++)
2245  dst[j] += dst[j - soff];
2246  dst += stride;
2247  }
2248  }
2249  }
2250 
2251  /* Floating point predictor
2252  TIFF Technical Note 3 http://chriscox.org/TIFFTN3d1.pdf */
2253  if (s->predictor == 3) {
2254  int channels = s->bppcount;
2255  int group_size;
2256  uint8_t *tmpbuf;
2257  int bpc;
2258 
2259  dst = five_planes ? five_planes : p->data[plane];
2260  soff = s->bpp >> 3;
2261  if (s->planar) {
2262  soff = FFMAX(soff / s->bppcount, 1);
2263  channels = 1;
2264  }
2265  ssize = s->width * soff;
2266  bpc = FFMAX(soff / s->bppcount, 1); /* Bytes per component */
2267  group_size = s->width * channels;
2268 
2269  tmpbuf = av_malloc(ssize);
2270  if (!tmpbuf)
2271  return AVERROR(ENOMEM);
2272 
2273  if (s->avctx->pix_fmt == AV_PIX_FMT_RGBF32LE ||
2274  s->avctx->pix_fmt == AV_PIX_FMT_RGBAF32LE) {
2275  for (i = 0; i < decoded_height; i++) {
2276  /* Copy first sample byte for each channel */
2277  for (j = 0; j < channels; j++)
2278  tmpbuf[j] = dst[j];
2279 
2280  /* Decode horizontal differences */
2281  for (j = channels; j < ssize; j++)
2282  tmpbuf[j] = dst[j] + tmpbuf[j-channels];
2283 
2284  /* Combine shuffled bytes from their separate groups. Each
2285  byte of every floating point value in a row of pixels is
2286  split and combined into separate groups. A group of all
2287  the sign/exponents bytes in the row and groups for each
2288  of the upper, mid, and lower mantissa bytes in the row. */
2289  for (j = 0; j < group_size; j++) {
2290  for (int k = 0; k < bpc; k++) {
2291  dst[bpc * j + k] = tmpbuf[(bpc - k - 1) * group_size + j];
2292  }
2293  }
2294  dst += stride;
2295  }
2296  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGBF32BE ||
2297  s->avctx->pix_fmt == AV_PIX_FMT_RGBAF32BE) {
2298  /* Same as LE only the shuffle at the end is reversed */
2299  for (i = 0; i < decoded_height; i++) {
2300  for (j = 0; j < channels; j++)
2301  tmpbuf[j] = dst[j];
2302 
2303  for (j = channels; j < ssize; j++)
2304  tmpbuf[j] = dst[j] + tmpbuf[j-channels];
2305 
2306  for (j = 0; j < group_size; j++) {
2307  for (int k = 0; k < bpc; k++) {
2308  dst[bpc * j + k] = tmpbuf[k * group_size + j];
2309  }
2310  }
2311  dst += stride;
2312  }
2313  } else {
2314  av_log(s->avctx, AV_LOG_ERROR, "unsupported floating point pixel format\n");
2315  }
2316  av_free(tmpbuf);
2317  }
2318 
2319  if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {
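      /* WhiteIsZero: invert the samples so that zero becomes full brightness. */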
2320  int c = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255);
2321  dst = p->data[plane];
2322  for (i = 0; i < s->height; i++) {
2323  for (j = 0; j < stride; j++)
2324  dst[j] = c - dst[j];
2325  dst += stride;
2326  }
2327  }
2328 
2329  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2330  (s->avctx->pix_fmt == AV_PIX_FMT_RGB0 || s->avctx->pix_fmt == AV_PIX_FMT_RGBA)) {
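      /* Undo the separated (CMYK) representation: r = (255 - C) * (255 - K) / 255,
         with the division by 255 approximated by (v * 257) >> 16. */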
2331  int x = s->avctx->pix_fmt == AV_PIX_FMT_RGB0 ? 4 : 5;
2332  uint8_t *src = five_planes ? five_planes : p->data[plane];
2333  dst = p->data[plane];
2334  for (i = 0; i < s->height; i++) {
2335  for (j = 0; j < s->width; j++) {
2336  int k = 255 - src[x * j + 3];
2337  int r = (255 - src[x * j ]) * k;
2338  int g = (255 - src[x * j + 1]) * k;
2339  int b = (255 - src[x * j + 2]) * k;
2340  dst[4 * j ] = r * 257 >> 16;
2341  dst[4 * j + 1] = g * 257 >> 16;
2342  dst[4 * j + 2] = b * 257 >> 16;
2343  dst[4 * j + 3] = s->avctx->pix_fmt == AV_PIX_FMT_RGBA ? src[x * j + 4] : 255;
2344  }
2345  src += stride;
2346  dst += p->linesize[plane];
2347  }
2348  av_freep(&five_planes);
2349  } else if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2350  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
2351  dst = p->data[plane];
2352  for (i = 0; i < s->height; i++) {
2353  for (j = 0; j < s->width; j++) {
2354  uint64_t k = 65535 - AV_RB16(dst + 8 * j + 6);
2355  uint64_t r = (65535 - AV_RB16(dst + 8 * j )) * k;
2356  uint64_t g = (65535 - AV_RB16(dst + 8 * j + 2)) * k;
2357  uint64_t b = (65535 - AV_RB16(dst + 8 * j + 4)) * k;
2358  AV_WB16(dst + 8 * j , r * 65537 >> 32);
2359  AV_WB16(dst + 8 * j + 2, g * 65537 >> 32);
2360  AV_WB16(dst + 8 * j + 4, b * 65537 >> 32);
2361  AV_WB16(dst + 8 * j + 6, 65535);
2362  }
2363  dst += p->linesize[plane];
2364  }
2365  }
2366  }
2367 
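      /* Planar RGB(A) is decoded as R, G, B planes; reorder them into the
         G, B, R layout used by the planar GBR pixel formats. */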
2368  if (s->planar && s->bppcount > 2) {
2369  FFSWAP(uint8_t*, p->data[0], p->data[2]);
2370  FFSWAP(int, p->linesize[0], p->linesize[2]);
2371  FFSWAP(uint8_t*, p->data[0], p->data[1]);
2372  FFSWAP(int, p->linesize[0], p->linesize[1]);
2373  }
2374 
2375  if (s->is_bayer && s->white_level && s->bpp == 16 && !is_dng) {
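      /* Scale raw Bayer samples so that white_level maps to the full 16-bit range. */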
2376  uint16_t *dst = (uint16_t *)p->data[0];
2377  for (i = 0; i < s->height; i++) {
2378  for (j = 0; j < s->width; j++)
2379  dst[j] = FFMIN((dst[j] / (float)s->white_level) * 65535, 65535);
2380  dst += stride / 2;
2381  }
2382  }
2383 
2384  p->flags |= AV_FRAME_FLAG_KEY;
2385  *got_frame = 1;
2386 
2387  return avpkt->size;
2388 }
2389 
2390 static av_cold int tiff_init(AVCodecContext *avctx)
2391 {
2392  TiffContext *s = avctx->priv_data;
2393  const AVCodec *codec;
2394  int ret;
2395 
2396  s->width = 0;
2397  s->height = 0;
2398  s->subsampling[0] =
2399  s->subsampling[1] = 1;
2400  s->avctx = avctx;
2401  ff_lzw_decode_open(&s->lzw);
2402  if (!s->lzw)
2403  return AVERROR(ENOMEM);
2404  ff_ccitt_unpack_init();
2405 
2406  /* Allocate JPEG frame */
2407  s->jpgframe = av_frame_alloc();
2408  s->jpkt = av_packet_alloc();
2409  if (!s->jpgframe || !s->jpkt)
2410  return AVERROR(ENOMEM);
2411 
2412  /* Prepare everything needed for JPEG decoding */
2413  codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
2414  if (!codec)
2415  return AVERROR_BUG;
2416  s->avctx_mjpeg = avcodec_alloc_context3(codec);
2417  if (!s->avctx_mjpeg)
2418  return AVERROR(ENOMEM);
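      /* The wrapped MJPEG decoder inherits the caller's flags, IDCT selection
         and pixel count limit. */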
2419  s->avctx_mjpeg->flags = avctx->flags;
2420  s->avctx_mjpeg->flags2 = avctx->flags2;
2421  s->avctx_mjpeg->idct_algo = avctx->idct_algo;
2422  s->avctx_mjpeg->max_pixels = avctx->max_pixels;
2423  ret = avcodec_open2(s->avctx_mjpeg, codec, NULL);
2424  if (ret < 0) {
2425  return ret;
2426  }
2427 
2428  return 0;
2429 }
2430 
2431 static av_cold int tiff_end(AVCodecContext *avctx)
2432 {
2433  TiffContext *const s = avctx->priv_data;
2434 
2435  free_geotags(s);
2436 
2437  ff_lzw_decode_close(&s->lzw);
2438  av_freep(&s->deinvert_buf);
2439  s->deinvert_buf_size = 0;
2440  av_freep(&s->yuv_line);
2441  s->yuv_line_size = 0;
2442  av_frame_free(&s->jpgframe);
2443  av_packet_free(&s->jpkt);
2444  avcodec_free_context(&s->avctx_mjpeg);
2445  return 0;
2446 }
2447 
2448 #define OFFSET(x) offsetof(TiffContext, x)
2449 static const AVOption tiff_options[] = {
2450  { "subimage", "decode subimage instead if available", OFFSET(get_subimage), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2451  { "thumbnail", "decode embedded thumbnail subimage instead if available", OFFSET(get_thumbnail), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2452  { "page", "page number of multi-page image to decode (starting from 1)", OFFSET(get_page), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT16_MAX, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2453  { NULL },
2454 };
2455 
2456 static const AVClass tiff_decoder_class = {
2457  .class_name = "TIFF decoder",
2458  .item_name = av_default_item_name,
2459  .option = tiff_options,
2460  .version = LIBAVUTIL_VERSION_INT,
2461 };
2462 
2463 const FFCodec ff_tiff_decoder = {
2464  .p.name = "tiff",
2465  CODEC_LONG_NAME("TIFF image"),
2466  .p.type = AVMEDIA_TYPE_VIDEO,
2467  .p.id = AV_CODEC_ID_TIFF,
2468  .priv_data_size = sizeof(TiffContext),
2469  .init = tiff_init,
2470  .close = tiff_end,
2471  FF_CODEC_DECODE_CB(decode_frame),
2472  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
2473  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_ICC_PROFILES |
2474  FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2475  .p.priv_class = &tiff_decoder_class,
2476 };
TiffContext::tiff_type
enum TiffType tiff_type
Definition: tiff.c:72
AVFrame::color_trc
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:657
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:427
ff_tadd_string_metadata
int ff_tadd_string_metadata(int count, const char *name, GetByteContext *gb, int le, AVDictionary **metadata)
Adds a string of count characters into the metadata dictionary.
Definition: tiff_common.c:209
TiffContext::gb
GetByteContext gb
Definition: tiff.c:61
AVCodec
AVCodec.
Definition: codec.h:187
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
TIFF_GEOG_LINEAR_UNITS_GEOKEY
@ TIFF_GEOG_LINEAR_UNITS_GEOKEY
Definition: tiff.h:147
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
ff_tiff_decoder
const FFCodec ff_tiff_decoder
Definition: tiff.c:2463
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
bytestream2_get_eof
static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p)
Definition: bytestream.h:332
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
DNG_AS_SHOT_WHITE_XY
@ DNG_AS_SHOT_WHITE_XY
Definition: tiff.h:112
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AV_PIX_FMT_YA8
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
Definition: pixfmt.h:140
get_geokey_type
static int get_geokey_type(int key)
Definition: tiff.c:158
tiff_decode_tag
static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
Definition: tiff.c:1247
DNG_COLOR_MATRIX2
@ DNG_COLOR_MATRIX2
Definition: tiff.h:107
elements
static const ElemCat * elements[ELEMENT_COUNT]
Definition: signature.h:566
TIFF_PHOTOMETRIC_ICC_LAB
@ TIFF_PHOTOMETRIC_ICC_LAB
Definition: tiff.h:198
TIFF_JPEG
@ TIFF_JPEG
Definition: tiff.h:131
GetByteContext
Definition: bytestream.h:33
AV_PIX_FMT_GBRP16BE
@ AV_PIX_FMT_GBRP16BE
planar GBR 4:4:4 48bpp, big-endian
Definition: pixfmt.h:171
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2965
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
TiffContext::dng_lut
uint16_t dng_lut[65536]
Definition: tiff.c:102
camera_xyz_coeff
static void camera_xyz_coeff(TiffContext *s, float rgb2cam[3][4], double cam2xyz[4][3])
Definition: tiff.c:1880
AVCOL_TRC_LINEAR
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
Definition: pixfmt.h:589
TiffContext::strippos
int strippos
Definition: tiff.c:109
TIFF_CFA_PATTERN_DIM
@ TIFF_CFA_PATTERN_DIM
Definition: tiff.h:87
TIFF_PROJ_COORD_TRANS_GEOKEY
@ TIFF_PROJ_COORD_TRANS_GEOKEY
Definition: tiff.h:160
OFFSET
#define OFFSET(x)
Definition: tiff.c:2448
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1420
TiffContext::sot
int sot
Definition: tiff.c:108
doubles2str
static char * doubles2str(double *dp, int count, const char *sep)
Definition: tiff.c:245
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
tiff_projection_codes
static const TiffGeoTagKeyName tiff_projection_codes[]
Definition: tiff_data.h:1536
TIFF_CCITT_RLE
@ TIFF_CCITT_RLE
Definition: tiff.h:127
TIFF_GEOG_AZIMUTH_UNITS_GEOKEY
@ TIFF_GEOG_AZIMUTH_UNITS_GEOKEY
Definition: tiff.h:155
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
mjpegdec.h
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
AV_PIX_FMT_RGBA64BE
@ AV_PIX_FMT_RGBA64BE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:202
tiff_end
static av_cold int tiff_end(AVCodecContext *avctx)
Definition: tiff.c:2431
AV_PIX_FMT_GBRAPF32LE
@ AV_PIX_FMT_GBRAPF32LE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian.
Definition: pixfmt.h:344
w
uint8_t w
Definition: llviddspenc.c:38
TiffContext::tile_offsets_offset
int tile_offsets_offset
Definition: tiff.c:114
TIFF_ADOBE_DEFLATE
@ TIFF_ADOBE_DEFLATE
Definition: tiff.h:133
AV_PIX_FMT_GBRPF32BE
@ AV_PIX_FMT_GBRPF32BE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian.
Definition: pixfmt.h:341
TIFF_COPYRIGHT
@ TIFF_COPYRIGHT
Definition: tiff.h:89
AVPacket::data
uint8_t * data
Definition: packet.h:524
TIFF_PHOTOMETRIC_ITU_LAB
@ TIFF_PHOTOMETRIC_ITU_LAB
Definition: tiff.h:199
AVOption
AVOption.
Definition: opt.h:346
TIFF_LONG
@ TIFF_LONG
Definition: tiff_common.h:40
b
#define b
Definition: input.c:41
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
RET_GEOKEY_VAL
#define RET_GEOKEY_VAL(TYPE, array)
TIFF_NEWJPEG
@ TIFF_NEWJPEG
Definition: tiff.h:132
FFCodec
Definition: codec_internal.h:126
float.h
deinvert_buffer
static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
Definition: tiff.c:440
reverse.h
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
ff_lzw_decode
int ff_lzw_decode(LZWState *p, uint8_t *buf, int len)
Decode given number of bytes NOTE: the algorithm here is inspired from the LZW GIF decoder written by...
Definition: lzw.c:169
TIFF_ROWSPERSTRIP
@ TIFF_ROWSPERSTRIP
Definition: tiff.h:58
TiffContext::pattern
uint8_t pattern[4]
Definition: tiff.c:91
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:646
TIFF_GEOG_ELLIPSOID_GEOKEY
@ TIFF_GEOG_ELLIPSOID_GEOKEY
Definition: tiff.h:151
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
TIFF_GEO_KEY_USER_DEFINED
#define TIFF_GEO_KEY_USER_DEFINED
Definition: tiff_data.h:120
TIFF_PROJECTION_GEOKEY
@ TIFF_PROJECTION_GEOKEY
Definition: tiff.h:159
TIFF_PROJ_LINEAR_UNITS_GEOKEY
@ TIFF_PROJ_LINEAR_UNITS_GEOKEY
Definition: tiff.h:161
TIFF_RAW
@ TIFF_RAW
Definition: tiff.h:126
ff_lzw_decode_close
av_cold void ff_lzw_decode_close(LZWState **p)
Definition: lzw.c:118
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
TIFF_GEO_DOUBLE_PARAMS
@ TIFF_GEO_DOUBLE_PARAMS
Definition: tiff.h:95
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
AV_PIX_FMT_BAYER_GRBG16
#define AV_PIX_FMT_BAYER_GRBG16
Definition: pixfmt.h:506
TiffGeoTagKeyName
Definition: tiff.h:220
TIFF_PHOTOMETRIC_WHITE_IS_ZERO
@ TIFF_PHOTOMETRIC_WHITE_IS_ZERO
Definition: tiff.h:190
thread.h
TIFF_PACKBITS
@ TIFF_PACKBITS
Definition: tiff.h:134
TIFF_GEOG_PRIME_MERIDIAN_GEOKEY
@ TIFF_GEOG_PRIME_MERIDIAN_GEOKEY
Definition: tiff.h:146
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
TiffContext::is_jpeg
int is_jpeg
Definition: tiff.c:117
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:395
dng_process_color16
static uint16_t av_always_inline dng_process_color16(uint16_t value, const uint16_t *lut, float black_level, float scale_factor)
Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
Definition: tiff.c:285
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
TIFF_GEO_KEY_UNDEFINED
#define TIFF_GEO_KEY_UNDEFINED
Definition: tiff_data.h:119
tiff_options
static const AVOption tiff_options[]
Definition: tiff.c:2449
TiffContext::get_thumbnail
int get_thumbnail
Definition: tiff.c:70
TIFF_PHOTOMETRIC_LINEAR_RAW
@ TIFF_PHOTOMETRIC_LINEAR_RAW
Definition: tiff.h:203
TIFF_FILL_ORDER
@ TIFF_FILL_ORDER
Definition: tiff.h:51
TIFF_PHOTOMETRIC_ALPHA_MASK
@ TIFF_PHOTOMETRIC_ALPHA_MASK
Definition: tiff.h:194
TiffContext::deinvert_buf_size
int deinvert_buf_size
Definition: tiff.c:120
AV_PIX_FMT_GRAY16BE
@ AV_PIX_FMT_GRAY16BE
Y , 16bpp, big-endian.
Definition: pixfmt.h:104
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
TIFF_DATE
@ TIFF_DATE
Definition: tiff.h:72
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
TIFF_TILE_BYTE_COUNTS
@ TIFF_TILE_BYTE_COUNTS
Definition: tiff.h:80
ff_ccitt_unpack
int ff_ccitt_unpack(AVCodecContext *avctx, const uint8_t *src, int srcsize, uint8_t *dst, int height, int stride, enum TiffCompr compr, int opts)
unpack data compressed with CCITT Group 3 1/2-D or Group 4 method
Definition: faxcompr.c:393
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:130
unpack_yuv
static void unpack_yuv(TiffContext *s, AVFrame *p, const uint8_t *src, int lnum)
Definition: tiff.c:466
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
tiff_set_type
static void tiff_set_type(TiffContext *s, enum TiffType tiff_type)
Definition: tiff.c:130
dng_decode_tiles
static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, const AVPacket *avpkt)
Definition: tiff.c:966
inflate
static void inflate(uint8_t *dst, const uint8_t *p1, int width, int threshold, const uint8_t *coordinates[], int coord, int maxc)
Definition: vf_neighbor.c:194
TIFF_YCBCR_SUBSAMPLING
@ TIFF_YCBCR_SUBSAMPLING
Definition: tiff.h:84
TIFF_MAKE
@ TIFF_MAKE
Definition: tiff.h:54
GetBitContext
Definition: get_bits.h:108
TIFF_GEOG_GEODETIC_DATUM_GEOKEY
@ TIFF_GEOG_GEODETIC_DATUM_GEOKEY
Definition: tiff.h:145
TiffContext::deinvert_buf
uint8_t * deinvert_buf
Definition: tiff.c:119
TiffContext::tile_length
int tile_length
Definition: tiff.c:115
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:502
TIFF_T6OPTIONS
@ TIFF_T6OPTIONS
Definition: tiff.h:68
val
static double val(void *priv, double ch)
Definition: aeval.c:78
horizontal_fill
static void av_always_inline horizontal_fill(TiffContext *s, unsigned int bpp, uint8_t *dst, int usePtr, const uint8_t *src, uint8_t c, int width, int offset)
Definition: tiff.c:385
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
TiffContext::color_matrix
float color_matrix[3][4]
Definition: tiff.c:96
TIFF_VERTICAL_CS_TYPE_GEOKEY
@ TIFF_VERTICAL_CS_TYPE_GEOKEY
Definition: tiff.h:181
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:462
TIFF_SOFTWARE_NAME
@ TIFF_SOFTWARE_NAME
Definition: tiff.h:71
FF_LZW_TIFF
@ FF_LZW_TIFF
Definition: lzw.h:39
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
TiffContext::as_shot_neutral
float as_shot_neutral[4]
Definition: tiff.c:94
AVCOL_TRC_GAMMA22
@ AVCOL_TRC_GAMMA22
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:585
TiffContext::geotags
TiffGeoTag * geotags
Definition: tiff.c:125
DNG_LINEARIZATION_TABLE
@ DNG_LINEARIZATION_TABLE
Definition: tiff.h:103
AV_DICT_DONT_STRDUP_VAL
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:79
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:148
TIFF_SHORT
@ TIFF_SHORT
Definition: tiff_common.h:39
get_geokey_val
static const char * get_geokey_val(int key, uint16_t val)
Definition: tiff.c:186
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
TiffGeoTag
Definition: tiff.h:212
TIFF_GRAY_RESPONSE_CURVE
@ TIFF_GRAY_RESPONSE_CURVE
Definition: tiff.h:66
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
TiffContext::rps
int rps
Definition: tiff.c:107
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
TIFF_SUBFILE
@ TIFF_SUBFILE
Definition: tiff.h:45
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:625
TiffContext::premultiply
float premultiply[4]
Definition: tiff.c:98
TiffContext::camera_calibration
float camera_calibration[4][4]
Definition: tiff.c:97
CINEMADNG_T_STOP
@ CINEMADNG_T_STOP
Definition: tiff.h:119
bytestream2_init_writer
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
float
float
Definition: af_crystalizer.c:121
AV_PIX_FMT_GBRAP16BE
@ AV_PIX_FMT_GBRAP16BE
planar GBRA 4:4:4:4 64bpp, big-endian
Definition: pixfmt.h:213
TiffContext::stripsize
int stripsize
Definition: tiff.c:109
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:286
tiff_proj_cs_type_codes
static const TiffGeoTagKeyName tiff_proj_cs_type_codes[]
Definition: tiff_data.h:559
intreadwrite.h
TIFF_G4
@ TIFF_G4
Definition: tiff.h:129
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_GBRP16LE
@ AV_PIX_FMT_GBRP16LE
planar GBR 4:4:4 48bpp, little-endian
Definition: pixfmt.h:172
TiffContext::width
int width
Definition: tiff.c:73
AV_PIX_FMT_BAYER_BGGR8
@ AV_PIX_FMT_BAYER_BGGR8
bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples
Definition: pixfmt.h:285
g
const char * g
Definition: vf_curves.c:128
TiffType
TiffType
TIFF types in ascenting priority (last in the list is highest)
Definition: tiff.h:34
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:973
ff_lzw_decode_open
av_cold void ff_lzw_decode_open(LZWState **p)
Definition: lzw.c:113
TIFF_STRIP_SIZE
@ TIFF_STRIP_SIZE
Definition: tiff.h:59
fminf
float fminf(float, float)
avcodec_receive_frame
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder or encoder (when the AV_CODEC_FLAG_RECON_FRAME flag is used...
Definition: avcodec.c:696
TiffContext::yuv_line
uint8_t * yuv_line
Definition: tiff.c:121
TIFF_GEOGRAPHIC_TYPE_GEOKEY
@ TIFF_GEOGRAPHIC_TYPE_GEOKEY
Definition: tiff.h:143
dng_decode_jpeg
static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame, int tile_byte_count, int dst_x, int dst_y, int w, int h)
Definition: tiff.c:647
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
TIFF_STRING
@ TIFF_STRING
Definition: tiff_common.h:38
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
TIFF_PHOTOMETRIC_LOG_L
@ TIFF_PHOTOMETRIC_LOG_L
Definition: tiff.h:201
TiffContext::use_color_matrix
int use_color_matrix
Definition: tiff.c:90
ff_tadd_shorts_metadata
int ff_tadd_shorts_metadata(int count, const char *name, const char *sep, GetByteContext *gb, int le, int is_signed, AVDictionary **metadata)
Adds count shorts converted to a string into the metadata dictionary.
Definition: tiff_common.c:166
channels
channels
Definition: aptx.h:31
decode.h
get_bits.h
AV_RL16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:94
TiffContext::get_page
uint16_t get_page
Definition: tiff.c:69
LZWState
Definition: lzw.c:46
TIFF_IMAGE_DESCRIPTION
@ TIFF_IMAGE_DESCRIPTION
Definition: tiff.h:53
AVCodecContext::max_pixels
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:1934
TiffContext::is_bayer
int is_bayer
Definition: tiff.c:89
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
key
const char * key
Definition: hwcontext_opencl.c:189
TiffContext::jpgframe
AVFrame * jpgframe
Definition: tiff.c:66
TiffContext::compr
enum TiffCompr compr
Definition: tiff.c:78
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:271
TiffContext::photometric
enum TiffPhotometric photometric
Definition: tiff.c:79
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:100
search_keyval
static const char * search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
Definition: tiff.c:177
AV_PIX_FMT_BAYER_RGGB8
@ AV_PIX_FMT_BAYER_RGGB8
bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples
Definition: pixfmt.h:286
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:73
AV_PIX_FMT_BAYER_BGGR16
#define AV_PIX_FMT_BAYER_BGGR16
Definition: pixfmt.h:503
if
if(ret)
Definition: filter_design.txt:179
dng_process_color8
static uint16_t av_always_inline dng_process_color8(uint16_t value, const uint16_t *lut, float black_level, float scale_factor)
Definition: tiff.c:304
ff_ccitt_unpack_init
av_cold void ff_ccitt_unpack_init(void)
initialize unpacker code
Definition: faxcompr.c:119
TiffContext::geotag_count
int geotag_count
Definition: tiff.c:124
TiffContext::height
int height
Definition: tiff.c:73
TIFF_PAGE_NAME
@ TIFF_PAGE_NAME
Definition: tiff.h:63
TIFF_VERTICAL_UNITS_GEOKEY
@ TIFF_VERTICAL_UNITS_GEOKEY
Definition: tiff.h:184
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
TIFF_LZW
@ TIFF_LZW
Definition: tiff.h:130
tiff_init
static av_cold int tiff_init(AVCodecContext *avctx)
Definition: tiff.c:2390
TiffContext::as_shot_white
float as_shot_white[4]
Definition: tiff.c:95
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
ff_tget_short
unsigned ff_tget_short(GetByteContext *gb, int le)
Reads a short from the bytestream using given endianness.
Definition: tiff_common.c:45
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
AV_PIX_FMT_GBRAPF32BE
@ AV_PIX_FMT_GBRAPF32BE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian.
Definition: pixfmt.h:343
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
TIFF_PHOTOMETRIC_YCBCR
@ TIFF_PHOTOMETRIC_YCBCR
Definition: tiff.h:196
TiffContext
Definition: tiff.c:58
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:403
TiffContext::is_thumbnail
int is_thumbnail
Definition: tiff.c:86
tiff_data.h
TiffContext::avctx
AVCodecContext * avctx
Definition: tiff.c:60
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AV_PIX_FMT_RGB48LE
@ AV_PIX_FMT_RGB48LE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:110
AV_PIX_FMT_YA16LE
@ AV_PIX_FMT_YA16LE
16 bits gray, 16 bits alpha (little-endian)
Definition: pixfmt.h:210
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:83
tiff.h
TIFF_PHOTOMETRIC_PALETTE
@ TIFF_PHOTOMETRIC_PALETTE
Definition: tiff.h:193
tiff_common.h
TiffContext::get_subimage
int get_subimage
Definition: tiff.c:68
DNG_AS_SHOT_NEUTRAL
@ DNG_AS_SHOT_NEUTRAL
Definition: tiff.h:111
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_PIX_FMT_RGBA64LE
@ AV_PIX_FMT_RGBA64LE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:203
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
TIFF_MODEL_TIEPOINT
@ TIFF_MODEL_TIEPOINT
Definition: tiff.h:90
TIFF_PHOTOMETRIC_CIE_LAB
@ TIFF_PHOTOMETRIC_CIE_LAB
Definition: tiff.h:197
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
TiffContext::black_level
float black_level[4]
Definition: tiff.c:99
AV_PIX_FMT_BAYER_GBRG16
#define AV_PIX_FMT_BAYER_GBRG16
Definition: pixfmt.h:505
MJpegDecodeContext
Definition: mjpegdec.h:54
TIFF_PAL
@ TIFF_PAL
Definition: tiff.h:76
RET_GEOKEY_TYPE
#define RET_GEOKEY_TYPE(TYPE, array)
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:142
TIFF_BYTE
@ TIFF_BYTE
Definition: tiff_common.h:37
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
TIFF_ARTIST
@ TIFF_ARTIST
Definition: tiff.h:73
CINEMADNG_TIME_CODES
@ CINEMADNG_TIME_CODES
Definition: tiff.h:117
TIFF_SAMPLES_PER_PIXEL
@ TIFF_SAMPLES_PER_PIXEL
Definition: tiff.h:57
TIFF_SRATIONAL
@ TIFF_SRATIONAL
Definition: tiff_common.h:46
TIFF_G3
@ TIFF_G3
Definition: tiff.h:128
TIFF_WIDTH
@ TIFF_WIDTH
Definition: tiff.h:46
TIFF_TILE_OFFSETS
@ TIFF_TILE_OFFSETS
Definition: tiff.h:79
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
error.h
TiffContext::palette
uint32_t palette[256]
Definition: tiff.c:75
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
avcodec_find_decoder
const AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: allcodecs.c:973
PutByteContext
Definition: bytestream.h:37
ff_tread_tag
int ff_tread_tag(GetByteContext *gb, int le, unsigned *tag, unsigned *type, unsigned *count, int *next)
Reads the first 3 fields of a TIFF tag, which are the tag id, the tag type and the count of values fo...
Definition: tiff_common.c:254
AV_PIX_FMT_RGBF32BE
@ AV_PIX_FMT_RGBF32BE
IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., big-endian.
Definition: pixfmt.h:420
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:509
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:366
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:525
TIFF_TYPE_CINEMADNG
@ TIFF_TYPE_CINEMADNG
Digital Negative (DNG) image part of an CinemaDNG image sequence.
Definition: tiff.h:40
codec_internal.h
AV_PIX_FMT_FLAG_RGB
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
Definition: pixdesc.h:136
shift
static int shift(int a, int b)
Definition: bonk.c:261
TiffContext::analog_balance
float analog_balance[4]
Definition: tiff.c:93
lzw.h
LZW decoding routines.
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
DNG_CAMERA_CALIBRATION1
@ DNG_CAMERA_CALIBRATION1
Definition: tiff.h:108
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
TIFF_DOUBLE
@ TIFF_DOUBLE
Definition: tiff_common.h:48
bps
unsigned bps
Definition: movenc.c:1788
AV_PIX_FMT_YA16BE
@ AV_PIX_FMT_YA16BE
16 bits gray, 16 bits alpha (big-endian)
Definition: pixfmt.h:209
TIFF_GEO_ASCII_PARAMS
@ TIFF_GEO_ASCII_PARAMS
Definition: tiff.h:96
size
int size
Definition: twinvq_data.h:10344
xyz2rgb
static const float xyz2rgb[3][3]
Definition: tiff.c:1874
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:1933
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
TiffContext::bpp
unsigned int bpp
Definition: tiff.c:74
AVFrameSideData::data
uint8_t * data
Definition: frame.h:252
TIFF_GT_MODEL_TYPE_GEOKEY
@ TIFF_GT_MODEL_TYPE_GEOKEY
Definition: tiff.h:140
TiffContext::jpkt
AVPacket * jpkt
Definition: tiff.c:65
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:461
TIFF_DOCUMENT_NAME
@ TIFF_DOCUMENT_NAME
Definition: tiff.h:52
TiffContext::fill_order
int fill_order
Definition: tiff.c:84
TIFF_MODEL_TRANSFORMATION
@ TIFF_MODEL_TRANSFORMATION
Definition: tiff.h:92
TIFF_TILE_LENGTH
@ TIFF_TILE_LENGTH
Definition: tiff.h:78
TIFF_MODEL
@ TIFF_MODEL
Definition: tiff.h:55
AV_WL16
#define AV_WL16(p, v)
Definition: intreadwrite.h:410
height
#define height
TiffContext::white_level
unsigned white_level
Definition: tiff.c:101
TiffContext::stripsizesoff
int stripsizesoff
Definition: tiff.c:109
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
attributes.h
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:263
TiffContext::planar
int planar
Definition: tiff.c:80
TIFF_COMPR
@ TIFF_COMPR
Definition: tiff.h:49
TIFF_HEIGHT
@ TIFF_HEIGHT
Definition: tiff.h:47
cmp_id_key
static int cmp_id_key(const void *id, const void *k)
Definition: tiff.c:172
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
tiff_decoder_class
static const AVClass tiff_decoder_class
Definition: tiff.c:2456
DNG_BLACK_LEVEL
@ DNG_BLACK_LEVEL
Definition: tiff.h:104
TIFF_T4OPTIONS
@ TIFF_T4OPTIONS
Definition: tiff.h:67
TIFF_PHOTOMETRIC_LOG_LUV
@ TIFF_PHOTOMETRIC_LOG_LUV
Definition: tiff.h:202
TiffContext::le
int le
Definition: tiff.c:77
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
CINEMADNG_REEL_NAME
@ CINEMADNG_REEL_NAME
Definition: tiff.h:120
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:677
TiffContext::subsampling
int subsampling[2]
Definition: tiff.c:81
TIFF_PAGE_NUMBER
@ TIFF_PAGE_NUMBER
Definition: tiff.h:70
decode_frame
static int decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
Definition: tiff.c:1906
AV_PIX_FMT_RGB48BE
@ AV_PIX_FMT_RGB48BE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:109
TIFF_PHOTOMETRIC_CFA
@ TIFF_PHOTOMETRIC_CFA
Definition: tiff.h:200
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
ff_tget_long
unsigned ff_tget_long(GetByteContext *gb, int le)
Reads a long from the bytestream using given endianness.
Definition: tiff_common.c:51
TIFF_PHOTOMETRIC_BLACK_IS_ZERO
@ TIFF_PHOTOMETRIC_BLACK_IS_ZERO
Definition: tiff.h:191
TiffContext::tile_width
int tile_width
Definition: tiff.c:115
TiffContext::fax_opts
int fax_opts
Definition: tiff.c:82
ff_lzw_decode_init
int ff_lzw_decode_init(LZWState *p, int csize, const uint8_t *buf, int buf_size, int mode)
Initialize LZW decoder.
Definition: lzw.c:131
TiffContext::bppcount
unsigned int bppcount
Definition: tiff.c:74
unpack_gray
static void unpack_gray(TiffContext *s, AVFrame *p, const uint8_t *src, int lnum, int width, int bpp)
Definition: tiff.c:453
TiffContext::res
uint32_t res[4]
Definition: tiff.c:85
TIFF_MODEL_PIXEL_SCALE
@ TIFF_MODEL_PIXEL_SCALE
Definition: tiff.h:91
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
TIFF_PLANAR
@ TIFF_PLANAR
Definition: tiff.h:62
AV_PIX_FMT_BAYER_GBRG8
@ AV_PIX_FMT_BAYER_GBRG8
bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples
Definition: pixfmt.h:287
TIFF_TYPE_TIFF
@ TIFF_TYPE_TIFF
TIFF image based on the TIFF 6.0 or TIFF/EP (ISO 12234-2) specifications.
Definition: tiff.h:36
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:52
av_always_inline
#define av_always_inline
Definition: attributes.h:49
value
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
Definition: writing_filters.txt:86
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
MJpegDecodeContext::bayer
int bayer
Definition: mjpegdec.h:75
AV_OPT_FLAG_VIDEO_PARAM
#define AV_OPT_FLAG_VIDEO_PARAM
Definition: opt.h:275
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:606
AVCodecContext::idct_algo
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
Definition: avcodec.h:1547
TIFF_TYPE_DNG
@ TIFF_TYPE_DNG
Digital Negative (DNG) image.
Definition: tiff.h:38
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
DNG_VERSION
@ DNG_VERSION
Definition: tiff.h:101
TiffContext::stripoff
int stripoff
Definition: tiff.c:109
len
int len
Definition: vorbis_enc_data.h:426
AV_PIX_FMT_GBRPF32LE
@ AV_PIX_FMT_GBRPF32LE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian.
Definition: pixfmt.h:342
TIFF_PHOTOMETRIC_NONE
@ TIFF_PHOTOMETRIC_NONE
Definition: tiff.h:189
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
TIFF_CFA_PATTERN
@ TIFF_CFA_PATTERN
Definition: tiff.h:88
TIFF_STRIP_OFFS
@ TIFF_STRIP_OFFS
Definition: tiff.h:56
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:81
TIFF_TILE_WIDTH
@ TIFF_TILE_WIDTH
Definition: tiff.h:77
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
pv
#define pv
Definition: regdef.h:60
AV_PIX_FMT_GBRAP16LE
@ AV_PIX_FMT_GBRAP16LE
planar GBRA 4:4:4:4 64bpp, little-endian
Definition: pixfmt.h:214
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
tag
uint32_t tag
Definition: movenc.c:1787
ret
ret
Definition: filter_design.txt:187
TIFF_HOST_COMPUTER
@ TIFF_HOST_COMPUTER
Definition: tiff.h:74
DNG_WHITE_LEVEL
@ DNG_WHITE_LEVEL
Definition: tiff.h:105
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
TiffContext::palette_is_set
int palette_is_set
Definition: tiff.c:76
TIFF_BPP
@ TIFF_BPP
Definition: tiff.h:48
d65_white
static const float d65_white[3]
Definition: tiff.c:128
pos
unsigned int pos
Definition: spdifenc.c:414
get_geokey_name
static const char * get_geokey_name(int key)
Definition: tiff.c:143
TIFF_PHOTOMETRIC
@ TIFF_PHOTOMETRIC
Definition: tiff.h:50
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
U
#define U(x)
Definition: vpx_arith.h:37
ff_tget_double
double ff_tget_double(GetByteContext *gb, int le)
Reads a double from the bytestream using given endianness.
Definition: tiff_common.c:57
TiffPhotometric
TiffPhotometric
list of TIFF, TIFF/AP and DNG PhotometricInterpretation (TIFF_PHOTOMETRIC) values
Definition: tiff.h:188
TiffContext::last_tag
unsigned last_tag
Definition: tiff.c:87
AVCodecContext
main external API structure.
Definition: avcodec.h:445
ADD_METADATA
#define ADD_METADATA(count, name, sep)
AV_PIX_FMT_RGBAF32BE
@ AV_PIX_FMT_RGBAF32BE
IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., big-endian.
Definition: pixfmt.h:423
TiffContext::sstype
int sstype
Definition: tiff.c:107
again
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining again
Definition: filter_design.txt:25
TIFF_PREDICTOR
@ TIFF_PREDICTOR
Definition: tiff.h:75
TIFF_RATIONAL
@ TIFF_RATIONAL
Definition: tiff_common.h:41
bytestream2_seek_p
static av_always_inline int bytestream2_seek_p(PutByteContext *p, int offset, int whence)
Definition: bytestream.h:236
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:692
TiffContext::lzw
LZWState * lzw
Definition: tiff.c:110
set_sar
static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
Definition: tiff.c:1228
TIFF_LZMA
@ TIFF_LZMA
Definition: tiff.h:136
tiff_unpack_fax
static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride, const uint8_t *src, int size, int width, int lines)
Definition: tiff.c:626
TIFF_GEO_KEY_DIRECTORY
@ TIFF_GEO_KEY_DIRECTORY
Definition: tiff.h:94
CINEMADNG_CAMERA_LABEL
@ CINEMADNG_CAMERA_LABEL
Definition: tiff.h:121
TiffContext::is_tiled
int is_tiled
Definition: tiff.c:113
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:229
AV_PIX_FMT_RGBF32LE
@ AV_PIX_FMT_RGBF32LE
IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., little-endian.
Definition: pixfmt.h:421
RET_GEOKEY_STR
#define RET_GEOKEY_STR(TYPE, array)
TIFF_YRES
@ TIFF_YRES
Definition: tiff.h:61
planes
static const struct @400 planes[]
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
av_clip_uint16
#define av_clip_uint16
Definition: common.h:111
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
TIFF_ICC_PROFILE
@ TIFF_ICC_PROFILE
Definition: tiff.h:93
faxcompr.h
DNG_CAMERA_CALIBRATION2
@ DNG_CAMERA_CALIBRATION2
Definition: tiff.h:109
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
A generic parameter which can be set by the user for demuxing or decoding.
Definition: opt.h:273
desc
const char * desc
Definition: libsvtav1.c:75
AV_PIX_FMT_RGBAF32LE
@ AV_PIX_FMT_RGBAF32LE
IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., little-endian.
Definition: pixfmt.h:424
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:105
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
bytestream2_get_bufferu
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:277
init_image
static int init_image(TiffContext *s, AVFrame *frame)
Definition: tiff.c:1039
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:250
free_geotags
static void free_geotags(TiffContext *const s)
Definition: tiff.c:135
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
Definition: pixdesc.h:69
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
TIFF_DEFLATE
@ TIFF_DEFLATE
Definition: tiff.h:135
TIFF_PHOTOMETRIC_RGB
@ TIFF_PHOTOMETRIC_RGB
Definition: tiff.h:192
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
AVPacket
This structure stores compressed data.
Definition: packet.h:501
TIFF_SUB_IFDS
@ TIFF_SUB_IFDS
Definition: tiff.h:81
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:251
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
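A hedged sketch of attaching one tag to a frame's metadata dictionary, similar in spirit to the decoder's add_metadata() helper; frame and avctx are assumed to be in scope, and the key/value shown are illustrative only:
int ret = av_dict_set(&frame->metadata, "ImageDescription", "example", 0);
if (ret < 0)
    av_log(avctx, AV_LOG_WARNING, "Failed to add metadata entry\n");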
dng_blit
static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int width, int height, int is_single_comp, int is_u16, int odd_line)
Definition: tiff.c:312
src
pixel *src
Definition: h264pred_template.c:418
tiff_unpack_strip
static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride, const uint8_t *src, int size, int strip_start, int lines)
Definition: tiff.c:737
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
DNG_COLOR_MATRIX1
@ DNG_COLOR_MATRIX1
Definition: tiff.h:106
TiffContext::tile_byte_counts_offset
int tile_byte_counts_offset
Definition: tiff.c:114
ff_tadd_doubles_metadata
int ff_tadd_doubles_metadata(int count, const char *name, const char *sep, GetByteContext *gb, int le, AVDictionary **metadata)
Adds count doubles converted to a string into the metadata dictionary.
Definition: tiff_common.c:145
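A hedged call sketch following the signature above; count, gb, le and frame are assumed to come from the surrounding tag-parsing code, and the key name is illustrative:
/* Convert "count" TIFF DOUBLE values into one space-separated string and
 * store it under the given key in the frame's metadata. */
ret = ff_tadd_doubles_metadata(count, "ModelTiePoint", " ", &gb, le, &frame->metadata);
if (ret < 0)
    return ret;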
TiffContext::avctx_mjpeg
AVCodecContext * avctx_mjpeg
Definition: tiff.c:64
TIFF_XRES
@ TIFF_XRES
Definition: tiff.h:60
add_metadata
static int add_metadata(int count, int type, const char *name, const char *sep, TiffContext *s, AVFrame *frame)
Definition: tiff.c:271
bytestream.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
TiffCompr
TiffCompr
list of TIFF, TIFF/EP and DNG compression types
Definition: tiff.h:125
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each picture line.
Definition: frame.h:419
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:79
TIFF_GEOG_ANGULAR_UNITS_GEOKEY
@ TIFF_GEOG_ANGULAR_UNITS_GEOKEY
Definition: tiff.h:149
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
TiffContext::cur_page
uint16_t cur_page
Definition: tiff.c:105
h
h
Definition: vp9dsp_template.c:2038
AV_CODEC_ID_TIFF
@ AV_CODEC_ID_TIFF
Definition: codec_id.h:148
avstring.h
type_sizes
static const uint8_t type_sizes[14]
sizes of various TIFF field types (string size = 100)
Definition: tiff_common.h:53
AV_PIX_FMT_GRAY12
#define AV_PIX_FMT_GRAY12
Definition: pixfmt.h:460
TiffContext::predictor
int predictor
Definition: tiff.c:83
AV_PIX_FMT_BAYER_RGGB16
#define AV_PIX_FMT_BAYER_RGGB16
Definition: pixfmt.h:504
int
int
Definition: ffmpeg_filter.c:424
snprintf
#define snprintf
Definition: snprintf.h:34
ff_tget
unsigned ff_tget(GetByteContext *gb, int type, int le)
Reads a value of the given TIFF field type from the bytestream using the given endianness.
Definition: tiff_common.c:64
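A hedged sketch of reading one 12-byte IFD entry with the ff_tget* helpers; gb and le are assumed to be initialized, and this mirrors the general pattern of a TIFF tag reader rather than the exact code in tiff.c:
unsigned tag   = ff_tget_short(&gb, le);  /* tag id */
unsigned type  = ff_tget_short(&gb, le);  /* TIFF field type (BYTE/SHORT/LONG/...) */
unsigned count = ff_tget_long(&gb, le);   /* number of values */
unsigned value = ff_tget(&gb, type, le);  /* first inline value, read per its type */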
TIFF_PHOTOMETRIC_SEPARATED
@ TIFF_PHOTOMETRIC_SEPARATED
Definition: tiff.h:195
TiffContext::strips
int strips
Definition: tiff.c:107
TIFF_PROJECTED_CS_TYPE_GEOKEY
@ TIFF_PROJECTED_CS_TYPE_GEOKEY
Definition: tiff.h:157
CINEMADNG_FRAME_RATE
@ CINEMADNG_FRAME_RATE
Definition: tiff.h:118
TiffContext::sub_ifd
uint32_t sub_ifd
Definition: tiff.c:104
AV_PIX_FMT_BAYER_GRBG8
@ AV_PIX_FMT_BAYER_GRBG8
bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples
Definition: pixfmt.h:288
line
From the official guide to swscale for confused developers: slices are consecutive non-overlapping rectangles ending at slice_bottom; special converters generally are unscaled converters of common formats; for each output line the vertical scaler pulls lines from a ring buffer, and when the ring buffer does not contain the wanted line, it is produced from the input slice.
Definition: swscale.txt:40
TiffContext::yuv_line_size
unsigned int yuv_line_size
Definition: tiff.c:122
AV_RB16
#define AV_RB16(p)
Definition: bytestream.h:98
DNG_ANALOG_BALANCE
@ DNG_ANALOG_BALANCE
Definition: tiff.h:110
TIFF_GT_RASTER_TYPE_GEOKEY
@ TIFF_GT_RASTER_TYPE_GEOKEY
Definition: tiff.h:141