#include "avcodec.h"
#include "internal.h"
#include "get_bits.h"
#include "put_bits.h"
#include "dsputil.h"
#include "thread.h"
#include "huffman.h"

#define VLC_BITS 11

#if HAVE_BIGENDIAN
#define B 3
#define G 2
#define R 1
#define A 0
#else
#define B 0
#define G 1
#define R 2
#define A 3
#endif

typedef enum Predictor {
    LEFT = 0,
    PLANE,
    MEDIAN,
} Predictor;

typedef struct HYuvContext {
    AVCodecContext *avctx;
    Predictor predictor;
    GetBitContext gb;
    PutBitContext pb;
    int interlaced;
    int decorrelate;
    int bitstream_bpp;
    int version;
    int yuy2;
    int bgr32;
    int width, height;
    int flags;
    int context;
    int picture_number;
    int last_slice_end;
    uint8_t *temp[3];
    uint64_t stats[3][256];
    uint8_t len[3][256];
    uint32_t bits[3][256];
    uint32_t pix_bgr_map[1 << VLC_BITS];
    VLC vlc[6];
    AVFrame picture;
    uint8_t *bitstream_buffer;
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
} HYuvContext;

#define classic_shift_luma_table_size 42
static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
    34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
    16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
    69,68, 0,
    0,0,0,0,0,0,0,0,
};

#define classic_shift_chroma_table_size 59
static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
    66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
    56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
    214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0,
    0,0,0,0,0,0,0,0,
};

static const unsigned char classic_add_luma[256] = {
    3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
    73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
    68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
    35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
    37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
    35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
    27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
    15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
    12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
    12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
    18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
    28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
    28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
    62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
    54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
    46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
};

static const unsigned char classic_add_chroma[256] = {
    3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
    7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
    11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
    43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
    143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
    80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
    17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
    112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
    0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
    135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
    52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
    19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
    7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
    83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
    14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
    6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
};

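/* Left-predict one row: store src[i] - src[i-1] into dst and return the last
 * pixel value (the seed for the next call). The first 16 pixels are handled
 * in plain C before handing the rest to the (possibly SIMD) diff_bytes(). */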
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
                                      const uint8_t *src, int w, int left)
{
    int i;
    if (w < 32) {
        for (i = 0; i < w; i++) {
            const int temp = src[i];
            dst[i] = temp - left;
            left   = temp;
        }
        return left;
    } else {
        for (i = 0; i < 16; i++) {
            const int temp = src[i];
            dst[i] = temp - left;
            left   = temp;
        }
        s->dsp.diff_bytes(dst + 16, src + 16, src + 15, w - 16);
        return src[w - 1];
    }
}

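/* Packed 32-bit BGRA variant of sub_left_prediction(): each of the four
 * channels is left-predicted independently and the components of the last
 * pixel are returned through *red / *green / *blue / *alpha. */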
static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
                                             const uint8_t *src, int w,
                                             int *red, int *green,
                                             int *blue, int *alpha)
{
    int i;
    int r, g, b, a;
    r = *red;
    g = *green;
    b = *blue;
    a = *alpha;
    for (i = 0; i < FFMIN(w, 4); i++) {
        const int rt = src[i * 4 + R];
        const int gt = src[i * 4 + G];
        const int bt = src[i * 4 + B];
        const int at = src[i * 4 + A];
        dst[i * 4 + R] = rt - r;
        dst[i * 4 + G] = gt - g;
        dst[i * 4 + B] = bt - b;
        dst[i * 4 + A] = at - a;
        r = rt;
        g = gt;
        b = bt;
        a = at;
    }

    s->dsp.diff_bytes(dst + 16, src + 16, src + 12, w * 4 - 16);

    *red   = src[(w - 1) * 4 + R];
    *green = src[(w - 1) * 4 + G];
    *blue  = src[(w - 1) * 4 + B];
    *alpha = src[(w - 1) * 4 + A];
}

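/* Packed 24-bit RGB variant: the first 16 pixels are predicted in C, the
 * remaining byte differences are computed by diff_bytes() in one pass. */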
static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst,
                                             const uint8_t *src, int w,
                                             int *red, int *green, int *blue)
{
    int i;
    int r, g, b;
    r = *red;
    g = *green;
    b = *blue;
    for (i = 0; i < FFMIN(w, 16); i++) {
        const int rt = src[i * 3 + 0];
        const int gt = src[i * 3 + 1];
        const int bt = src[i * 3 + 2];
        dst[i * 3 + 0] = rt - r;
        dst[i * 3 + 1] = gt - g;
        dst[i * 3 + 2] = bt - b;
        r = rt;
        g = gt;
        b = bt;
    }

    s->dsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);

    *red   = src[(w - 1) * 3 + 0];
    *green = src[(w - 1) * 3 + 1];
    *blue  = src[(w - 1) * 3 + 2];
}

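/* Read a run-length coded table of 256 Huffman code lengths: each run is a
 * 3-bit repeat count (0 means an explicit 8-bit count follows) and a 5-bit
 * length value. */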
static int read_len_table(uint8_t *dst, GetBitContext *gb)
{
    int i, val, repeat;

    for (i = 0; i < 256;) {
        repeat = get_bits(gb, 3);
        val    = get_bits(gb, 5);
        if (repeat == 0)
            repeat = get_bits(gb, 8);
        if (i + repeat > 256 || get_bits_left(gb) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
            return -1;
        }
        while (repeat--)
            dst[i++] = val;
    }
    return 0;
}

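/* Assign canonical Huffman codes for the given length table, working from the
 * longest code length down; if the running code is odd before the shift the
 * lengths cannot describe a valid prefix code. */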
static int generate_bits_table(uint32_t *dst, const uint8_t *len_table)
{
    int len, index;
    uint32_t bits = 0;

    for (len = 32; len > 0; len--) {
        for (index = 0; index < 256; index++) {
            if (len_table[index] == len)
                dst[index] = bits++;
        }
        if (bits & 1) {
            av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
            return -1;
        }
        bits >>= 1;
    }
    return 0;
}

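/* Build joint VLC tables so that two (YUV) or three (RGB) symbols whose codes
 * together fit into VLC_BITS can be decoded with a single lookup; longer
 * combinations fall back to the per-plane tables at decode time. */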
static void generate_joint_tables(HYuvContext *s)
{
    uint16_t symbols[1 << VLC_BITS];
    uint16_t bits[1 << VLC_BITS];
    uint8_t len[1 << VLC_BITS];
    if (s->bitstream_bpp < 24) {
        int p, i, y, u;
        for (p = 0; p < 3; p++) {
            for (i = y = 0; y < 256; y++) {
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if (limit <= 0)
                    continue;
                for (u = 0; u < 256; u++) {
                    int len1 = s->len[p][u];
                    if (len1 > limit)
                        continue;
                    len[i]     = len0 + len1;
                    bits[i]    = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y << 8) + u;
                    if (symbols[i] != 0xffff)
                        i++;
                }
            }
            ff_free_vlc(&s->vlc[3 + p]);
            ff_init_vlc_sparse(&s->vlc[3 + p], VLC_BITS, i, len, 1, 1,
                               bits, 2, 2, symbols, 2, 2, 0);
        }
    } else {
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;

        for (i = 0, g = -16; g < 16; g++) {
            int len0 = s->len[p0][g & 255];
            int limit0 = VLC_BITS - len0;
            if (limit0 < 2)
                continue;
            for (b = -16; b < 16; b++) {
                int len1 = s->len[p1][b & 255];
                int limit1 = limit0 - len1;
                if (limit1 < 1)
                    continue;
                code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255];
                for (r = -16; r < 16; r++) {
                    int len2 = s->len[2][r & 255];
                    if (len2 > limit1)
                        continue;
                    len[i]  = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r & 255];
                    if (s->decorrelate) {
                        map[i][G] = g;
                        map[i][B] = g + b;
                        map[i][R] = g + r;
                    } else {
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        ff_free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}

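/* Parse the three per-plane length tables from src, rebuild codes and VLCs,
 * and return the number of bytes consumed (with per-frame contexts the tables
 * are stored at the start of every packet). */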
static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
{
    GetBitContext gb;
    int i;

    init_get_bits(&gb, src, length * 8);

    for (i = 0; i < 3; i++) {
        if (read_len_table(s->len[i], &gb) < 0)
            return -1;
        if (generate_bits_table(s->bits[i], s->len[i]) < 0) {
            return -1;
        }
        ff_free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
                 s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return (get_bits_count(&gb) + 7) / 8;
}

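/* huffyuv v1 streams carry no tables in extradata; rebuild the fixed "classic"
 * tables instead (code lengths from classic_shift_*, code values from
 * classic_add_*). */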
static int read_old_huffman_tables(HYuvContext *s)
{
    GetBitContext gb;
    int i;

    init_get_bits(&gb, classic_shift_luma,
                  classic_shift_luma_table_size * 8);
    if (read_len_table(s->len[0], &gb) < 0)
        return -1;

    init_get_bits(&gb, classic_shift_chroma,
                  classic_shift_chroma_table_size * 8);
    if (read_len_table(s->len[1], &gb) < 0)
        return -1;

    for (i = 0; i < 256; i++) s->bits[0][i] = classic_add_luma[i];
    for (i = 0; i < 256; i++) s->bits[1][i] = classic_add_chroma[i];

    if (s->bitstream_bpp >= 24) {
        memcpy(s->bits[1], s->bits[0], 256 * sizeof(uint32_t));
        memcpy(s->len[1],  s->len[0],  256 * sizeof(uint8_t));
    }
    memcpy(s->bits[2], s->bits[1], 256 * sizeof(uint32_t));
    memcpy(s->len[2],  s->len[1],  256 * sizeof(uint8_t));

    for (i = 0; i < 3; i++) {
        ff_free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
                 s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return 0;
}

static av_cold void alloc_temp(HYuvContext *s)
{
    int i;

    if (s->bitstream_bpp < 24) {
        for (i = 0; i < 3; i++) {
            s->temp[i] = av_malloc(s->width + 16);
        }
    } else {
        s->temp[0] = av_mallocz(4 * s->width + 16);
    }
}

static av_cold int common_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    s->avctx = avctx;
    s->flags = avctx->flags;

    ff_dsputil_init(&s->dsp, avctx);

    s->width  = avctx->width;
    s->height = avctx->height;
    av_assert1(s->width > 0 && s->height > 0);

    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
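/* Decoder setup: v2 streams describe themselves in extradata (method byte,
 * bits per pixel, interlace/context flags, then the stored Huffman tables);
 * older streams use the classic tables and derive predictor and decorrelation
 * from bits_per_coded_sample. */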
static av_cold int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    memset(s->vlc, 0, 3 * sizeof(VLC));

    avctx->coded_frame = &s->picture;
    avcodec_get_frame_defaults(&s->picture);
    s->interlaced = s->height > 288;

    s->bgr32 = 1;

    if (avctx->extradata_size) {
        if ((avctx->bits_per_coded_sample & 7) &&
            avctx->bits_per_coded_sample != 12)
            s->version = 1;
        else
            s->version = 2;
    } else
        s->version = 0;

    if (s->version == 2) {
        int method, interlace;

        if (avctx->extradata_size < 4)
            return -1;

        method = ((uint8_t*)avctx->extradata)[0];
        s->decorrelate = method & 64 ? 1 : 0;
        s->predictor = method & 63;
        s->bitstream_bpp = ((uint8_t*)avctx->extradata)[1];
        if (s->bitstream_bpp == 0)
            s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
        interlace = (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced = (interlace == 1) ? 1 : (interlace == 2) ? 0 : s->interlaced;
        s->context = ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        if (read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
                                avctx->extradata_size - 4) < 0)
            return -1;
    } else {
        switch (avctx->bits_per_coded_sample & 7) {
        case 1:
            s->predictor = LEFT;
            s->decorrelate = 0;
            break;
        case 2:
            s->predictor = LEFT;
            s->decorrelate = 1;
            break;
        case 3:
            s->predictor = PLANE;
            s->decorrelate = avctx->bits_per_coded_sample >= 24;
            break;
        case 4:
            s->predictor = MEDIAN;
            s->decorrelate = 0;
            break;
        default:
            s->predictor = LEFT;
            s->decorrelate = 0;
            break;
        }
        s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
        s->context = 0;

        if (read_old_huffman_tables(s) < 0)
            return -1;
    }

    switch (s->bitstream_bpp) {
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if (s->yuy2) {
            avctx->pix_fmt = PIX_FMT_YUYV422;
        } else {
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if (s->bgr32) {
            avctx->pix_fmt = PIX_FMT_RGB32;
        } else {
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        return AVERROR_INVALIDDATA;
    }

    if ((avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P) && avctx->width & 1) {
        av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
        return AVERROR_INVALIDDATA;
    }

    alloc_temp(s);

    return 0;
}

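/* Frame-threading copy: only the VLCs and the scratch buffers are per-thread
 * state that has to be rebuilt for each worker context. */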
static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i;

    avctx->coded_frame = &s->picture;
    alloc_temp(s);

    for (i = 0; i < 6; i++)
        s->vlc[i].table = NULL;

    if (s->version == 2) {
        if (read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
                                avctx->extradata_size - 4) < 0)
            return -1;
    } else {
        if (read_old_huffman_tables(s) < 0)
            return -1;
    }

    return 0;
}
#endif

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
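/* Run-length encode one 256-entry length table into buf using the same
 * 3-bit-repeat / 5-bit-value format that read_len_table() parses; returns the
 * number of bytes written. */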
static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
{
    int i;
    int index = 0;

    for (i = 0; i < 256;) {
        int val = len[i];
        int repeat = 0;

        for (; i < 256 && len[i] == val && repeat < 255; i++)
            repeat++;

        av_assert0(val < 32 && val > 0 && repeat < 256 && repeat > 0);
        if (repeat > 7) {
            buf[index++] = val;
            buf[index++] = repeat;
        } else {
            buf[index++] = val | (repeat << 5);
        }
    }

    return index;
}

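/* Encoder setup: choose the bitstream parameters from the pixel format and
 * flags, write the 4-byte header plus the three length tables into extradata,
 * and seed the symbol statistics (from 2-pass stats_in if present, otherwise
 * with a generic distribution favouring small residuals). */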
static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    common_init(avctx);

    avctx->extradata = av_mallocz(1024 * 30);
    avctx->stats_out = av_mallocz(1024 * 30);
    s->version = 2;

    avctx->coded_frame = &s->picture;

    switch (avctx->pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
        if (s->width & 1) {
            av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
            return AVERROR(EINVAL);
        }
        s->bitstream_bpp = avctx->pix_fmt == PIX_FMT_YUV420P ? 12 : 16;
        break;
    case PIX_FMT_RGB32:
        s->bitstream_bpp = 32;
        break;
    case PIX_FMT_RGB24:
        s->bitstream_bpp = 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_coded_sample = s->bitstream_bpp;
    s->decorrelate = s->bitstream_bpp >= 24;
    s->predictor = avctx->prediction_method;
    s->interlaced = avctx->flags & CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if (avctx->context_model == 1) {
        s->context = avctx->context_model;
        if (s->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
            av_log(avctx, AV_LOG_ERROR,
                   "context=1 is not compatible with "
                   "2 pass huffyuv encoding\n");
            return -1;
        }
    } else
        s->context = 0;

    if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
        if (avctx->pix_fmt == PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: YV12 is not supported by huffyuv; use "
                   "vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if (avctx->context_model) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: per-frame huffman tables are not supported "
                   "by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if (s->interlaced != (s->height > 288))
            av_log(avctx, AV_LOG_INFO,
                   "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN) {
        av_log(avctx, AV_LOG_ERROR,
               "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
    if (s->context)
        ((uint8_t*)avctx->extradata)[2] |= 0x40;
    ((uint8_t*)avctx->extradata)[3] = 0;
    s->avctx->extradata_size = 4;

    if (avctx->stats_in) {
        char *p = avctx->stats_in;

        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] = 1;

        for (;;) {
            for (i = 0; i < 3; i++) {
                char *next;

                for (j = 0; j < 256; j++) {
                    s->stats[i][j] += strtol(p, &next, 0);
                    if (next == p) return -1;
                    p = next;
                }
            }
            if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
        }
    } else {
        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++) {
                int d = FFMIN(j, 256 - j);

                s->stats[i][j] = 100000000 / (d + 1);
            }
    }

    for (i = 0; i < 3; i++) {
        ff_huff_gen_len_table(s->len[i], s->stats[i]);

        if (generate_bits_table(s->bits[i], s->len[i]) < 0) {
            return -1;
        }

        s->avctx->extradata_size +=
            store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if (s->context) {
        for (i = 0; i < 3; i++) {
            int pels = s->width * s->height / (i ? 40 : 10);
            for (j = 0; j < 256; j++) {
                int d = FFMIN(j, 256 - j);
                s->stats[i][j] = pels / (d + 1);
            }
        }
    } else {
        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] = 0;
    }

    alloc_temp(s);

    s->picture_number = 0;

    return 0;
}
#endif

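/* Decode two symbols at once: try the joint table first (0xffff marks an
 * unused entry) and fall back to two single-symbol lookups otherwise. */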
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3 + plane1].table, VLC_BITS, 1);\
    if (code != 0xffff) {\
        dst0 = code >> 8;\
        dst1 = code;\
    } else {\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}

static void decode_422_bitstream(HYuvContext *s, int count)
{
    int i;

    count /= 2;

    if (count >= (get_bits_left(&s->gb)) / (31 * 4)) {
        for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
            READ_2PIX(s->temp[0][2 * i    ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
        }
    } else {
        for (i = 0; i < count; i++) {
            READ_2PIX(s->temp[0][2 * i    ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
        }
    }
}

static void decode_gray_bitstream(HYuvContext *s, int count)
{
    int i;

    count /= 2;

    if (count >= (get_bits_left(&s->gb)) / (31 * 2)) {
        for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
            READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
        }
    } else {
        for (i = 0; i < count; i++) {
            READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
        }
    }
}

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
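/* Entropy-code one row of 4:2:2 samples in Y0 U Y1 V order; during PASS1 only
 * the statistics are gathered, and in context mode statistics and bitstream
 * are updated together. */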
static int encode_422_bitstream(HYuvContext *s, int offset, int count)
{
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset / 2;
    const uint8_t *v = s->temp[2] + offset / 2;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 2 * 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
    int y0 = y[2 * i];\
    int y1 = y[2 * i + 1];\
    int u0 = u[i];\
    int v0 = v[i];

    count /= 2;

    if (s->flags & CODEC_FLAG_PASS1) {
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if (s->context) {
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}

static int encode_gray_bitstream(HYuvContext *s, int count)
{
    int i;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
    int y0 = s->temp[0][2 * i];\
    int y1 = s->temp[0][2 * i + 1];
#define STAT2\
    s->stats[0][y0]++;\
    s->stats[0][y1]++;
#define WRITE2\
    put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
    put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count /= 2;

    if (s->flags & CODEC_FLAG_PASS1) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
        }
    }
    if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if (s->context) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
            WRITE2;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
#endif

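/* Decode count packed BGR(A) pixels: small G/B/R combinations come out of the
 * joint table in one step via pix_bgr_map, everything else is decoded channel
 * by channel, undoing the optional green decorrelation. */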
static av_always_inline void decode_bgr_1(HYuvContext *s, int count,
                                          int decorrelate, int alpha)
{
    int i;
    for (i = 0; i < count; i++) {
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
        if (code != -1) {
            *(uint32_t*)&s->temp[0][4 * i] = s->pix_bgr_map[code];
        } else if (decorrelate) {
            s->temp[0][4 * i + G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4 * i + B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) +
                                    s->temp[0][4 * i + G];
            s->temp[0][4 * i + R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) +
                                    s->temp[0][4 * i + G];
        } else {
            s->temp[0][4 * i + B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4 * i + G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4 * i + R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
        }
        if (alpha)
            s->temp[0][4 * i + A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
    }
}

static void decode_bgr_bitstream(HYuvContext *s, int count)
{
    if (s->decorrelate) {
        if (s->bitstream_bpp == 24)
            decode_bgr_1(s, count, 1, 0);
        else
            decode_bgr_1(s, count, 1, 1);
    } else {
        if (s->bitstream_bpp == 24)
            decode_bgr_1(s, count, 0, 0);
        else
            decode_bgr_1(s, count, 0, 1);
    }
}

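/* Entropy-code count left-predicted RGB(A) pixels; red and blue are coded as
 * differences against green (the & 0xff keeps 8-bit wrap-around). */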
static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
{
    int i;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * planes * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD3\
    int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G];\
    int b = (s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xff;\
    int r = (s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xff;\
    int a = s->temp[0][planes * i + A];
#define STAT3\
    s->stats[0][b]++;\
    s->stats[1][g]++;\
    s->stats[2][r]++;\
    if (planes == 4) s->stats[2][a]++;
#define WRITE3\
    put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]);\
    if (planes == 4) put_bits(&s->pb, s->len[2][a], s->bits[2][a]);

    if ((s->flags & CODEC_FLAG_PASS1) &&
        (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
        for (i = 0; i < count; i++) {
            LOAD3;
            STAT3;
        }
    } else if (s->context || (s->flags & CODEC_FLAG_PASS1)) {
        for (i = 0; i < count; i++) {
            LOAD3;
            STAT3;
            WRITE3;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
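/* Hand a freshly decoded band of rows to the user through draw_horiz_band();
 * last_slice_end remembers how far the previous call got. */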
static void draw_slice(HYuvContext *s, int y)
{
    int h, cy, i;
    int offset[AV_NUM_DATA_POINTERS];

    if (s->avctx->draw_horiz_band == NULL)
        return;

    h = y - s->last_slice_end;
    y -= h;

    if (s->bitstream_bpp == 12) {
        cy = y >> 1;
    } else {
        cy = y;
    }

    offset[0] = s->picture.linesize[0] * y;
    offset[1] = s->picture.linesize[1] * cy;
    offset[2] = s->picture.linesize[2] * cy;
    for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
        offset[i] = 0;
    emms_c();

    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);

    s->last_slice_end = y + h;
}

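/* Decode one frame: the packet is byte-swapped 32 bits at a time into
 * bitstream_buffer, per-frame Huffman tables are re-read in context mode, and
 * the rows are then decoded and the configured predictor (left, plane or
 * median) is undone. */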
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    HYuvContext *s = avctx->priv_data;
    const int width = s->width;
    const int width2 = s->width >> 1;
    const int height = s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    AVFrame * const p = &s->picture;
    int table_size = 0;

    AVFrame *picture = data;

    av_fast_malloc(&s->bitstream_buffer,
                   &s->bitstream_buffer_size,
                   buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!s->bitstream_buffer)
        return AVERROR(ENOMEM);

    memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer,
                     (const uint32_t*)buf, buf_size / 4);

    if (p->data[0])
        ff_thread_release_buffer(avctx, p);

    p->reference = 0;
    if (ff_thread_get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if (s->context) {
        table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
        if (table_size < 0)
            return -1;
    }

    if ((unsigned)(buf_size - table_size) >= INT_MAX / 8)
        return -1;

    init_get_bits(&s->gb, s->bitstream_buffer + table_size,
                  (buf_size - table_size) * 8);

    fake_ystride = s->interlaced ? p->linesize[0] * 2 : p->linesize[0];
    fake_ustride = s->interlaced ? p->linesize[1] * 2 : p->linesize[1];
    fake_vstride = s->interlaced ? p->linesize[2] * 2 : p->linesize[2];

    s->last_slice_end = 0;

    if (s->bitstream_bpp < 24) {
        int y, cy;
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;

        if (s->yuy2) {
            p->data[0][3] = get_bits(&s->gb, 8);
            p->data[0][2] = get_bits(&s->gb, 8);
            p->data[0][1] = get_bits(&s->gb, 8);
            p->data[0][0] = get_bits(&s->gb, 8);

            av_log(avctx, AV_LOG_ERROR,
                   "YUY2 output is not implemented yet\n");
            return -1;
        } else {
            leftv = p->data[2][0] = get_bits(&s->gb, 8);
            lefty = p->data[0][1] = get_bits(&s->gb, 8);
            leftu = p->data[1][0] = get_bits(&s->gb, 8);
            p->data[0][0] = get_bits(&s->gb, 8);

            switch (s->predictor) {
            case LEFT:
            case PLANE:
                decode_422_bitstream(s, width - 2);
                lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width - 2, lefty);
                if (!(s->flags & CODEC_FLAG_GRAY)) {
                    leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
                    leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
                }

                for (cy = y = 1; y < s->height; y++, cy++) {
                    uint8_t *ydst, *udst, *vdst;

                    if (s->bitstream_bpp == 12) {
                        decode_gray_bitstream(s, width);

                        ydst = p->data[0] + p->linesize[0] * y;

                        lefty = s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                        if (s->predictor == PLANE) {
                            if (y > s->interlaced)
                                s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        }
                        y++;
                        if (y >= s->height) break;
                    }

                    draw_slice(s, y);

                    ydst = p->data[0] + p->linesize[0] * y;
                    udst = p->data[1] + p->linesize[1] * cy;
                    vdst = p->data[2] + p->linesize[2] * cy;

                    decode_422_bitstream(s, width);
                    lefty = s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        leftu = s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv = s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
                    if (s->predictor == PLANE) {
                        if (cy > s->interlaced) {
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if (!(s->flags & CODEC_FLAG_GRAY)) {
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                draw_slice(s, height);

                break;
            case MEDIAN:
                decode_422_bitstream(s, width - 2);
                lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width - 2, lefty);
                if (!(s->flags & CODEC_FLAG_GRAY)) {
                    leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
                    leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
                }

                cy = y = 1;

                if (s->interlaced) {
                    decode_422_bitstream(s, width);
                    lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
                        leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
                    }
                    y++; cy++;
                }

                decode_422_bitstream(s, 4);
                lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                if (!(s->flags & CODEC_FLAG_GRAY)) {
                    leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                lefttopy = p->data[0][3];
                decode_422_bitstream(s, width - 4);
                s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride + 4, p->data[0] + 4, s->temp[0], width - 4, &lefty, &lefttopy);
                if (!(s->flags & CODEC_FLAG_GRAY)) {
                    lefttopu = p->data[1][1];
                    lefttopv = p->data[2][1];
                    s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride + 2, p->data[1] + 2, s->temp[1], width2 - 2, &leftu, &lefttopu);
                    s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride + 2, p->data[2] + 2, s->temp[2], width2 - 2, &leftv, &lefttopv);
                }
                y++; cy++;

                for (; y < height; y++, cy++) {
                    uint8_t *ydst, *udst, *vdst;

                    if (s->bitstream_bpp == 12) {
                        while (2 * cy > y) {
                            decode_gray_bitstream(s, width);
                            ydst = p->data[0] + p->linesize[0] * y;
                            s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                            y++;
                        }
                        if (y >= height) break;
                    }
                    draw_slice(s, y);

                    decode_422_bitstream(s, width);

                    ydst = p->data[0] + p->linesize[0] * y;
                    udst = p->data[1] + p->linesize[1] * cy;
                    vdst = p->data[2] + p->linesize[2] * cy;

                    s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }

                draw_slice(s, height);
                break;
            }
        }
    } else {
        int y;
        int leftr, leftg, leftb, lefta;
        const int last_line = (height - 1) * p->linesize[0];

        if (s->bitstream_bpp == 32) {
            lefta = p->data[0][last_line + A] = get_bits(&s->gb, 8);
            leftr = p->data[0][last_line + R] = get_bits(&s->gb, 8);
            leftg = p->data[0][last_line + G] = get_bits(&s->gb, 8);
            leftb = p->data[0][last_line + B] = get_bits(&s->gb, 8);
        } else {
            leftr = p->data[0][last_line + R] = get_bits(&s->gb, 8);
            leftg = p->data[0][last_line + G] = get_bits(&s->gb, 8);
            leftb = p->data[0][last_line + B] = get_bits(&s->gb, 8);
            lefta = p->data[0][last_line + A] = 255;
            skip_bits(&s->gb, 8);
        }

        if (s->bgr32) {
            switch (s->predictor) {
            case LEFT:
            case PLANE:
                decode_bgr_bitstream(s, width - 1);
                s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line + 4, s->temp[0], width - 1, &leftr, &leftg, &leftb, &lefta);

                for (y = s->height - 2; y >= 0; y--) {
                    decode_bgr_bitstream(s, width);

                    s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0] * y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
                    if (s->predictor == PLANE) {
                        if (s->bitstream_bpp != 32) lefta = 0;
                        if ((y & s->interlaced) == 0 &&
                            y < s->height - 1 - s->interlaced) {
                            s->dsp.add_bytes(p->data[0] + p->linesize[0] * y,
                                             p->data[0] + p->linesize[0] * y +
                                             fake_ystride, fake_ystride);
                        }
                    }
                }

                draw_slice(s, height);
                break;
            default:
                av_log(avctx, AV_LOG_ERROR,
                       "prediction type not supported!\n");
            }
        } else {
            av_log(avctx, AV_LOG_ERROR,
                   "BGR24 output is not implemented yet\n");
            return -1;
        }
    }
    emms_c();

    *picture = *p;
    *data_size = sizeof(AVFrame);

    return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size;
}
#endif

static int common_end(HYuvContext *s)
{
    int i;

    for (i = 0; i < 3; i++) {
        av_freep(&s->temp[i]);
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i;

    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    common_end(s);
    av_freep(&s->bitstream_buffer);

    for (i = 0; i < 6; i++) {
        ff_free_vlc(&s->vlc[i]);
    }

    return 0;
}
#endif

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
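/* Encode one frame: in context mode re-derive and store the Huffman tables in
 * the packet, write the raw first pixels, apply the configured predictor row
 * by row, entropy-code the residuals and byte-swap the result 32 bits at a
 * time. PASS1 statistics are written to stats_out on every 32nd frame. */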
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    HYuvContext *s = avctx->priv_data;
    const int width = s->width;
    const int width2 = s->width >> 1;
    const int height = s->height;
    const int fake_ystride = s->interlaced ? pict->linesize[0] * 2 : pict->linesize[0];
    const int fake_ustride = s->interlaced ? pict->linesize[1] * 2 : pict->linesize[1];
    const int fake_vstride = s->interlaced ? pict->linesize[2] * 2 : pict->linesize[2];
    AVFrame * const p = &s->picture;
    int i, j, size = 0, ret;

    if ((ret = ff_alloc_packet2(avctx, pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0)
        return ret;

    *p = *pict;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    if (s->context) {
        for (i = 0; i < 3; i++) {
            ff_huff_gen_len_table(s->len[i], s->stats[i]);
            if (generate_bits_table(s->bits[i], s->len[i]) < 0)
                return -1;
            size += store_table(s, s->len[i], &pkt->data[size]);
        }

        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);

    if (avctx->pix_fmt == PIX_FMT_YUV422P ||
        avctx->pix_fmt == PIX_FMT_YUV420P) {
        int lefty, leftu, leftv, y, cy;

        put_bits(&s->pb, 8, leftv = p->data[2][0]);
        put_bits(&s->pb, 8, lefty = p->data[0][1]);
        put_bits(&s->pb, 8, leftu = p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        lefty = sub_left_prediction(s, s->temp[0], p->data[0], width,  0);
        leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        encode_422_bitstream(s, 2, width - 2);

        if (s->predictor == MEDIAN) {
            int lefttopy, lefttopu, lefttopv;
            cy = y = 1;
            if (s->interlaced) {
                lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width,  lefty);
                leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
                leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
            leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
            leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            lefttopy = p->data[0][3];
            lefttopu = p->data[1][1];
            lefttopv = p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width - 4,  &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width - 4);
            y++; cy++;

            for (; y < height; y++, cy++) {
                uint8_t *ydst, *udst, *vdst;

                if (s->bitstream_bpp == 12) {
                    while (2 * cy > y) {
                        ydst = p->data[0] + p->linesize[0] * y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if (y >= height) break;
                }
                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width,  &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        } else {
            for (cy = y = 1; y < height; y++, cy++) {
                uint8_t *ydst, *udst, *vdst;

                if (s->bitstream_bpp == 12) {
                    ydst = p->data[0] + p->linesize[0] * y;

                    if (s->predictor == PLANE && s->interlaced < y) {
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width, lefty);
                    } else {
                        lefty = sub_left_prediction(s, s->temp[0], ydst, width, lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if (y >= height) break;
                }

                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                if (s->predictor == PLANE && s->interlaced < cy) {
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width,  lefty);
                    leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                } else {
                    lefty = sub_left_prediction(s, s->temp[0], ydst, width,  lefty);
                    leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    } else if (avctx->pix_fmt == PIX_FMT_RGB32) {
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb, lefta;

        put_bits(&s->pb, 8, lefta = data[A]);
        put_bits(&s->pb, 8, leftr = data[R]);
        put_bits(&s->pb, 8, leftg = data[G]);
        put_bits(&s->pb, 8, leftb = data[B]);

        sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1, &leftr, &leftg, &leftb, &lefta);
        encode_bgra_bitstream(s, width - 1, 4);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y * stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb, &lefta);
            } else {
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb, &lefta);
            }
            encode_bgra_bitstream(s, width, 4);
        }
    } else if (avctx->pix_fmt == PIX_FMT_RGB24) {
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr = data[0]);
        put_bits(&s->pb, 8, leftg = data[1]);
        put_bits(&s->pb, 8, leftb = data[2]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1, &leftr, &leftg, &leftb);
        encode_bgra_bitstream(s, width - 1, 3);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y * stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 3);
                sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            } else {
                sub_left_prediction_rgb24(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgra_bitstream(s, width, 3);
        }
    } else {
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    size += (put_bits_count(&s->pb) + 31) / 8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size /= 4;

    if ((s->flags & CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
        int j;
        char *p = avctx->stats_out;
        char *end = p + 1024 * 30;
        for (i = 0; i < 3; i++) {
            for (j = 0; j < 256; j++) {
                snprintf(p, end - p, "%"PRIu64" ", s->stats[i][j]);
                p += strlen(p);
                s->stats[i][j] = 0;
            }
            snprintf(p, end - p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if (!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
        flush_put_bits(&s->pb);
        s->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
    }

    s->picture_number++;

    pkt->size   = size * 4;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}

static av_cold int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    return 0;
}
#endif

#if CONFIG_HUFFYUV_DECODER
AVCodec ff_huffyuv_decoder = {
    .name             = "huffyuv",
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_HUFFYUV,
    .priv_data_size   = sizeof(HYuvContext),
    .init             = decode_init,
    .close            = decode_end,
    .decode           = decode_frame,
    .capabilities     = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
                        CODEC_CAP_FRAME_THREADS,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name        = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_DECODER
AVCodec ff_ffvhuff_decoder = {
    .name             = "ffvhuff",
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_FFVHUFF,
    .priv_data_size   = sizeof(HYuvContext),
    .init             = decode_init,
    .close            = decode_end,
    .decode           = decode_frame,
    .capabilities     = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
                        CODEC_CAP_FRAME_THREADS,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name        = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif

#if CONFIG_HUFFYUV_ENCODER
AVCodec ff_huffyuv_encoder = {
    .name           = "huffyuv",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum PixelFormat[]){
        PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE
    },
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_ENCODER
AVCodec ff_ffvhuff_encoder = {
    .name           = "ffvhuff",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FFVHUFF,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum PixelFormat[]){
        PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE
    },
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif