Libav 0.7.1
/*
 * huffyuv codec for libavcodec
 *
 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
 * the algorithm used
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "get_bits.h"
#include "put_bits.h"
#include "dsputil.h"
#include "thread.h"

#define VLC_BITS 11

#if HAVE_BIGENDIAN
#define B 3
#define G 2
#define R 1
#define A 0
#else
#define B 0
#define G 1
#define R 2
#define A 3
#endif

typedef enum Predictor{
    LEFT= 0,
    PLANE,
    MEDIAN,
} Predictor;

typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;
    GetBitContext gb;
    PutBitContext pb;
    int interlaced;
    int decorrelate;
    int bitstream_bpp;
    int version;
    int yuy2;     //use yuy2 instead of 422P
    int bgr32;    //use bgr32 instead of bgr24
    int width, height;
    int flags;
    int context;
    int picture_number;
    int last_slice_end;
    uint8_t *temp[3];
    uint64_t stats[3][256];
    uint8_t len[3][256];
    uint32_t bits[3][256];
    uint32_t pix_bgr_map[1<<VLC_BITS];
    VLC vlc[6];   //Y,U,V,YY,YU,YV
    AVFrame picture;
    uint8_t *bitstream_buffer;
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
}HYuvContext;

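/* Built-in tables for "classic" (version-1) Huffyuv streams, which carry no
 * Huffman tables in extradata: the shift tables are themselves run-length
 * coded code-length tables and the add tables hold the corresponding code
 * values; they are unpacked by read_old_huffman_tables() below. */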
#define classic_shift_luma_table_size 42
static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
  69,68, 0
};

#define classic_shift_chroma_table_size 59
static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
};

static const unsigned char classic_add_luma[256] = {
     3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
    73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
    68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
    35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
    37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
    35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
    27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
    15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
    12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
    12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
    18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
    28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
    28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
    62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
    54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
    46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};

static const unsigned char classic_add_chroma[256] = {
     3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
     7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
    11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
    43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
   143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
    80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
    17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
   112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
     0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
   135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
    52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
    19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
     7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
    83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
    14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
     6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
};

static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
    int i;
    if(w<32){
        for(i=0; i<w; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        return left;
    }else{
        for(i=0; i<16; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
        return src[w-1];
    }
}

static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;
    for(i=0; i<FFMIN(w,4); i++){
        const int rt= src[i*4+R];
        const int gt= src[i*4+G];
        const int bt= src[i*4+B];
        dst[i*4+R]= rt - r;
        dst[i*4+G]= gt - g;
        dst[i*4+B]= bt - b;
        r = rt;
        g = gt;
        b = bt;
    }
    s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
    *red=   src[(w-1)*4+R];
    *green= src[(w-1)*4+G];
    *blue=  src[(w-1)*4+B];
}

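/* Code-length tables are stored run-length coded: each (3-bit repeat,
 * 5-bit value) pair assigns length "value" to the next "repeat" symbols,
 * and a repeat field of 0 means an explicit 8-bit repeat count follows. */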
static int read_len_table(uint8_t *dst, GetBitContext *gb){
    int i, val, repeat;

    for(i=0; i<256;){
        repeat= get_bits(gb, 3);
        val   = get_bits(gb, 5);
        if(repeat==0)
            repeat= get_bits(gb, 8);
//printf("%d %d\n", val, repeat);
        if(i+repeat > 256 || get_bits_left(gb) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
            return -1;
        }
        while (repeat--)
            dst[i++] = val;
    }
    return 0;
}

static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
    int len, index;
    uint32_t bits=0;

    for(len=32; len>0; len--){
        for(index=0; index<256; index++){
            if(len_table[index]==len)
                dst[index]= bits++;
        }
        if(bits & 1){
            av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
            return -1;
        }
        bits >>= 1;
    }
    return 0;
}

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
typedef struct {
    uint64_t val;
    int name;
} HeapElem;

static void heap_sift(HeapElem *h, int root, int size)
{
    while(root*2+1 < size) {
        int child = root*2+1;
        if(child < size-1 && h[child].val > h[child+1].val)
            child++;
        if(h[root].val > h[child].val) {
            FFSWAP(HeapElem, h[root], h[child]);
            root = child;
        } else
            break;
    }
}

static void generate_len_table(uint8_t *dst, const uint64_t *stats){
    HeapElem h[256];
    int up[2*256];
    int len[2*256];
    int offset, i, next;
    int size = 256;

    for(offset=1; ; offset<<=1){
        for(i=0; i<size; i++){
            h[i].name = i;
            h[i].val = (stats[i] << 8) + offset;
        }
        for(i=size/2-1; i>=0; i--)
            heap_sift(h, i, size);

        for(next=size; next<size*2-1; next++){
            // merge the two smallest entries, and put it back in the heap
            uint64_t min1v = h[0].val;
            up[h[0].name] = next;
            h[0].val = INT64_MAX;
            heap_sift(h, 0, size);
            up[h[0].name] = next;
            h[0].name = next;
            h[0].val += min1v;
            heap_sift(h, 0, size);
        }

        len[2*size-2] = 0;
        for(i=2*size-3; i>=size; i--)
            len[i] = len[up[i]] + 1;
        for(i=0; i<size; i++) {
            dst[i] = len[up[i]] + 1;
            if(dst[i] >= 32) break;
        }
        if(i==size) break;
    }
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

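/* Build combined VLC tables so the decoder can fetch two symbols (planar
 * YUV) or, for RGB, up to three components with a single get_vlc2() lookup
 * whenever their total code length fits in VLC_BITS; longer combinations
 * fall back to the per-plane tables (see READ_2PIX and decode_bgr_1). */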
static void generate_joint_tables(HYuvContext *s){
    uint16_t symbols[1<<VLC_BITS];
    uint16_t bits[1<<VLC_BITS];
    uint8_t len[1<<VLC_BITS];
    if(s->bitstream_bpp < 24){
        int p, i, y, u;
        for(p=0; p<3; p++){
            for(i=y=0; y<256; y++){
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if(limit <= 0)
                    continue;
                for(u=0; u<256; u++){
                    int len1 = s->len[p][u];
                    if(len1 > limit)
                        continue;
                    len[i] = len0 + len1;
                    bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y<<8) + u;
                    if(symbols[i] != 0xffff) // reserved to mean "invalid"
                        i++;
                }
            }
            free_vlc(&s->vlc[3+p]);
            init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
        }
    }else{
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;
        // restrict the range to +/-16 because that's pretty much guaranteed to
        // cover all the combinations that fit in 11 bits total, and it doesn't
        // matter if we miss a few rare codes.
        for(i=0, g=-16; g<16; g++){
            int len0 = s->len[p0][g&255];
            int limit0 = VLC_BITS - len0;
            if(limit0 < 2)
                continue;
            for(b=-16; b<16; b++){
                int len1 = s->len[p1][b&255];
                int limit1 = limit0 - len1;
                if(limit1 < 1)
                    continue;
                code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
                for(r=-16; r<16; r++){
                    int len2 = s->len[2][r&255];
                    if(len2 > limit1)
                        continue;
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r&255];
                    if(s->decorrelate){
                        map[i][G] = g;
                        map[i][B] = g+b;
                        map[i][R] = g+r;
                    }else{
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}

static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
    GetBitContext gb;
    int i;

    init_get_bits(&gb, src, length*8);

    for(i=0; i<3; i++){
        if(read_len_table(s->len[i], &gb)<0)
            return -1;
        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return (get_bits_count(&gb)+7)/8;
}

static int read_old_huffman_tables(HYuvContext *s){
#if 1
    GetBitContext gb;
    int i;

    init_get_bits(&gb, classic_shift_luma, classic_shift_luma_table_size*8);
    if(read_len_table(s->len[0], &gb)<0)
        return -1;
    init_get_bits(&gb, classic_shift_chroma, classic_shift_chroma_table_size*8);
    if(read_len_table(s->len[1], &gb)<0)
        return -1;

    for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma  [i];
    for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];

    if(s->bitstream_bpp >= 24){
        memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
        memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
    }
    memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
    memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));

    for(i=0; i<3; i++){
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return 0;
#else
    av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
    return -1;
#endif
}

static av_cold void alloc_temp(HYuvContext *s){
    int i;

    if(s->bitstream_bpp<24){
        for(i=0; i<3; i++){
            s->temp[i]= av_malloc(s->width + 16);
        }
    }else{
        s->temp[0]= av_mallocz(4*s->width + 16);
    }
}

static av_cold int common_init(AVCodecContext *avctx){
    HYuvContext *s = avctx->priv_data;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx);

    s->width= avctx->width;
    s->height= avctx->height;
    assert(s->width>0 && s->height>0);

    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    memset(s->vlc, 0, 3*sizeof(VLC));

    avctx->coded_frame= &s->picture;
    s->interlaced= s->height > 288;

    s->bgr32=1;
//if(avctx->extradata)
//  printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
    if(avctx->extradata_size){
        if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
            s->version=1; // do such files exist at all?
        else
            s->version=2;
    }else
        s->version=0;

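    /* Version-2 extradata layout (written by encode_init() below):
     *   byte 0: predictor in the low 6 bits, decorrelate flag in bit 6
     *   byte 1: bitstream bits per pixel (0 means "use bits_per_coded_sample")
     *   byte 2: bits 4-5 interlacing override, bit 6 per-frame context flag
     *   byte 3: reserved (0), then the three run-length coded Huffman tables. */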
    if(s->version==2){
        int method, interlace;

        if (avctx->extradata_size < 4)
            return -1;

        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
        interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
        s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
            return -1;
    }else{
        switch(avctx->bits_per_coded_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_coded_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT; //OLD
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
        s->context= 0;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUYV422;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_RGB32;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        return AVERROR_INVALIDDATA;
    }

    alloc_temp(s);

//    av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    return 0;
}

static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i;

    avctx->coded_frame= &s->picture;
    alloc_temp(s);

    for (i = 0; i < 6; i++)
        s->vlc[i].table = NULL;

    if(s->version==2){
        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
            return -1;
    }else{
        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
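/* store_table() below writes the format parsed by read_len_table(): e.g. a
 * run of 12 lengths of 5 becomes the two bytes 0x05 0x0c (zero repeat field,
 * explicit 8-bit count), while a run of 3 packs into the single byte
 * (3<<5)|5 = 0x65. */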
static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
    int i;
    int index= 0;

    for(i=0; i<256;){
        int val= len[i];
        int repeat=0;

        for(; i<256 && len[i]==val && repeat<255; i++)
            repeat++;

        assert(val < 32 && val >0 && repeat<256 && repeat>0);
        if(repeat>7){
            buf[index++]= val;
            buf[index++]= repeat;
        }else{
            buf[index++]= val | (repeat<<5);
        }
    }

    return index;
}

static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    common_init(avctx);

    avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    s->version=2;

    avctx->coded_frame= &s->picture;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    case PIX_FMT_RGB32:
        s->bitstream_bpp= 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_coded_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;
    s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if(avctx->context_model==1){
        s->context= avctx->context_model;
        if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
            av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
            return -1;
        }
    }else s->context= 0;

    if(avctx->codec->id==CODEC_ID_HUFFYUV){
        if(avctx->pix_fmt==PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if(avctx->context_model){
            av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if(s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
        av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
    if(s->context)
        ((uint8_t*)avctx->extradata)[2]|= 0x40;
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;
                    p=next;
                }
            }
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i]);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        s->avctx->extradata_size+=
            store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if(s->context){
        for(i=0; i<3; i++){
            int pels = s->width*s->height / (i?40:10);
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);
                s->stats[i][j]= pels/(d+1);
            }
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 0;
    }

//    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    alloc_temp(s);

    s->picture_number=0;

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

/* TODO instead of restarting the read when the code isn't in the first level
 * of the joint table, jump into the 2nd level of the individual table. */
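/* Decode two symbols at once from the joint table: a successful lookup
 * returns them packed as (sym0<<8)|sym1; the value 0xffff is reserved as an
 * escape (see generate_joint_tables()), in which case the pair is re-read
 * with two ordinary per-plane lookups. */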
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
    if(code != 0xffff){\
        dst0 = code>>8;\
        dst1 = code;\
    }else{\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}

static void decode_422_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    if(count >= (get_bits_left(&s->gb))/(31*4)){
        for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
            READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
        }
    }else{
        for(i=0; i<count; i++){
            READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
        }
    }
}

static void decode_gray_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    if(count >= (get_bits_left(&s->gb))/(31*2)){
        for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
            READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
        }
    }else{
        for(i=0; i<count; i++){
            READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
        }
    }
}

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
static int encode_422_bitstream(HYuvContext *s, int offset, int count){
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset/2;
    const uint8_t *v = s->temp[2] + offset/2;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
    int y0 = y[2*i];\
    int y1 = y[2*i+1];\
    int u0 = u[i];\
    int v0 = v[i];

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if(s->context){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }else{
        for(i=0; i<count; i++){
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}

static int encode_gray_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
    int y0 = s->temp[0][2*i];\
    int y1 = s->temp[0][2*i+1];
#define STAT2\
    s->stats[0][y0]++;\
    s->stats[0][y1]++;
#define WRITE2\
    put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
    put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if(s->context){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
            WRITE2;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
    int i;
    for(i=0; i<count; i++){
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
        if(code != -1){
            *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
        }else if(decorrelate){
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
        }else{
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
        }
        if(alpha)
            s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
    }
}

static void decode_bgr_bitstream(HYuvContext *s, int count){
    if(s->decorrelate){
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 1, 0);
        else
            decode_bgr_1(s, count, 1, 1);
    }else{
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 0, 0);
        else
            decode_bgr_1(s, count, 0, 1);
    }
}

static int encode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD3\
    int g= s->temp[0][4*i+G];\
    int b= (s->temp[0][4*i+B] - g) & 0xff;\
    int r= (s->temp[0][4*i+R] - g) & 0xff;
#define STAT3\
    s->stats[0][b]++;\
    s->stats[1][g]++;\
    s->stats[2][r]++;
#define WRITE3\
    put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]);

    if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
        }
    }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
            WRITE3;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static void draw_slice(HYuvContext *s, int y){
    int h, cy;
    int offset[4];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;

    if(s->bitstream_bpp==12){
        cy= y>>1;
    }else{
        cy= y;
    }

    offset[0] = s->picture.linesize[0]*y;
    offset[1] = s->picture.linesize[1]*cy;
    offset[2] = s->picture.linesize[2]*cy;
    offset[3] = 0;
    emms_c();

    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);

    s->last_slice_end= y + h;
}

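/* Frame decoding: the packet is first byte-swapped in 32-bit words into a
 * padded scratch buffer (the encoder applies the same swap on output), then,
 * when the per-frame context flag is set, updated Huffman tables are read
 * from the start of the frame before the per-predictor decode loops run. */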
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    AVFrame * const p= &s->picture;
    int table_size= 0;

    AVFrame *picture = data;

    av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!s->bitstream_buffer)
        return AVERROR(ENOMEM);

    memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);

    if(p->data[0])
        ff_thread_release_buffer(avctx, p);

    p->reference= 0;
    if(ff_thread_get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if(s->context){
        table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
        if(table_size < 0)
            return -1;
    }

    if((unsigned)(buf_size-table_size) >= INT_MAX/8)
        return -1;

    init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);

    fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
    fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
    fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];

    s->last_slice_end= 0;

    if(s->bitstream_bpp<24){
        int y, cy;
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;

        if(s->yuy2){
            p->data[0][3]= get_bits(&s->gb, 8);
            p->data[0][2]= get_bits(&s->gb, 8);
            p->data[0][1]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);

            av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
            return -1;
        }else{

            leftv= p->data[2][0]= get_bits(&s->gb, 8);
            lefty= p->data[0][1]= get_bits(&s->gb, 8);
            leftu= p->data[1][0]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);

            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_422_bitstream(s, width-2);
                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                for(cy=y=1; y<s->height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        decode_gray_bitstream(s, width);

                        ydst= p->data[0] + p->linesize[0]*y;

                        lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                        if(s->predictor == PLANE){
                            if(y>s->interlaced)
                                s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        }
                        y++;
                        if(y>=s->height) break;
                    }

                    draw_slice(s, y);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    decode_422_bitstream(s, width);
                    lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
                    if(s->predictor == PLANE){
                        if(cy>s->interlaced){
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if(!(s->flags&CODEC_FLAG_GRAY)){
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                draw_slice(s, height);

                break;
            case MEDIAN:
                /* first line except first 2 pixels is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                cy=y=1;

                /* second line is left predicted for interlaced case */
                if(s->interlaced){
                    decode_422_bitstream(s, width);
                    lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
                        leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
                    }
                    y++; cy++;
                }

                /* next 4 pixels are left predicted too */
                decode_422_bitstream(s, 4);
                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                /* next line except the first 4 pixels is median predicted */
                lefttopy= p->data[0][3];
                decode_422_bitstream(s, width-4);
                s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    lefttopu= p->data[1][1];
                    lefttopv= p->data[2][1];
                    s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
                    s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
                }
                y++; cy++;

                for(; y<height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        while(2*cy > y){
                            decode_gray_bitstream(s, width);
                            ydst= p->data[0] + p->linesize[0]*y;
                            s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                            y++;
                        }
                        if(y>=height) break;
                    }
                    draw_slice(s, y);

                    decode_422_bitstream(s, width);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }

                draw_slice(s, height);
                break;
            }
        }
    }else{
        int y;
        int leftr, leftg, leftb, lefta;
        const int last_line= (height-1)*p->linesize[0];

        if(s->bitstream_bpp==32){
            lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
        }else{
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
            lefta= p->data[0][last_line+A]= 255;
            skip_bits(&s->gb, 8);
        }

        if(s->bgr32){
            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_bgr_bitstream(s, width-1);
                s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);

                for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
                    decode_bgr_bitstream(s, width);

                    s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
                    if(s->predictor == PLANE){
                        if(s->bitstream_bpp!=32) lefta=0;
                        if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
                            s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
                                             p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
                        }
                    }
                }
                draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
            }
        }else{

            av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
            return -1;
        }
    }
    emms_c();

    *picture= *p;
    *data_size = sizeof(AVFrame);

    return (get_bits_count(&s->gb)+31)/32*4 + table_size;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

static int common_end(HYuvContext *s){
    int i;

    for(i=0; i<3; i++){
        av_freep(&s->temp[i]);
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i;

    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    common_end(s);
    av_freep(&s->bitstream_buffer);

    for(i=0; i<6; i++){
        free_vlc(&s->vlc[i]);
    }

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

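/* Frame encoding: when the per-frame context flag is set, the adapted Huffman
 * tables are regenerated from the accumulated stats and stored at the start
 * of the packet (the stats are then halved so that older frames weigh less);
 * the coded bitstream follows, padded to a multiple of 4 bytes and
 * byte-swapped in 32-bit words to match the word order the decoder expects. */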
#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    HYuvContext *s = avctx->priv_data;
    AVFrame *pict = data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p= &s->picture;
    int i, j, size=0;

    *p = *pict;
    p->pict_type= AV_PICTURE_TYPE_I;
    p->key_frame= 1;

    if(s->context){
        for(i=0; i<3; i++){
            generate_len_table(s->len[i], s->stats[i]);
            if(generate_bits_table(s->bits[i], s->len[i])<0)
                return -1;
            size+= store_table(s, s->len[i], &buf[size]);
        }

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, buf+size, buf_size-size);

    if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
        int lefty, leftu, leftv, y, cy;

        put_bits(&s->pb, 8, leftv= p->data[2][0]);
        put_bits(&s->pb, 8, lefty= p->data[0][1]);
        put_bits(&s->pb, 8, leftu= p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        encode_422_bitstream(s, 2, width-2);

        if(s->predictor==MEDIAN){
            int lefttopy, lefttopu, lefttopv;
            cy=y=1;
            if(s->interlaced){
                lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            lefttopy= p->data[0][3];
            lefttopu= p->data[1][1];
            lefttopv= p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width-4);
            y++; cy++;

            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                if(s->bitstream_bpp==12){
                    while(2*cy > y){
                        ydst= p->data[0] + p->linesize[0]*y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if(y>=height) break;
                }
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        }else{
            for(cy=y=1; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;

                    if(s->predictor == PLANE && s->interlaced < y){
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    }else{
                        lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if(y>=height) break;
                }

                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                if(s->predictor == PLANE && s->interlaced < cy){
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                }else{
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    }else if(avctx->pix_fmt == PIX_FMT_RGB32){
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr= data[R]);
        put_bits(&s->pb, 8, leftg= data[G]);
        put_bits(&s->pb, 8, leftb= data[B]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
        encode_bgr_bitstream(s, width-1);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            }else{
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgr_bitstream(s, width);
        }
    }else{
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    size+= (put_bits_count(&s->pb)+31)/8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size/= 4;

    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        int j;
        char *p= avctx->stats_out;
        char *end= p + 1024*30;
        for(i=0; i<3; i++){
            for(j=0; j<256; j++){
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p+= strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
    }

    s->picture_number++;

    return size*4;
}

static av_cold int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

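/* Two codec IDs share this implementation: "huffyuv" keeps to what the
 * original Huffyuv codec understands, while "ffvhuff" is the FFmpeg/Libav
 * variant that additionally allows YUV420P input and the context=1
 * per-frame adaptive tables rejected for plain huffyuv in encode_init(). */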
#if CONFIG_HUFFYUV_DECODER
AVCodec ff_huffyuv_decoder = {
    "huffyuv",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
    NULL,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_DECODER
AVCodec ff_ffvhuff_decoder = {
    "ffvhuff",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
    NULL,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif

#if CONFIG_HUFFYUV_ENCODER
AVCodec ff_huffyuv_encoder = {
    "huffyuv",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_ENCODER
AVCodec ff_ffvhuff_encoder = {
    "ffvhuff",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif