Libav 0.7.1
/*
 * huffyuv codec for libavcodec
 *
 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
 * the algorithm used
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "get_bits.h"
#include "put_bits.h"
#include "dsputil.h"
#include "thread.h"

#define VLC_BITS 11

#if HAVE_BIGENDIAN
#define B 3
#define G 2
#define R 1
#define A 0
#else
#define B 0
#define G 1
#define R 2
#define A 3
#endif

typedef enum Predictor{
    LEFT= 0,
    PLANE,
    MEDIAN,
} Predictor;

typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;
    GetBitContext gb;
    PutBitContext pb;
    int interlaced;
    int decorrelate;
    int bitstream_bpp;
    int version;
    int yuy2;                               //use yuy2 instead of 422P
    int bgr32;                              //use bgr32 instead of bgr24
    int width, height;
    int flags;
    int context;
    int picture_number;
    int last_slice_end;
    uint8_t *temp[3];
    uint64_t stats[3][256];
    uint8_t len[3][256];
    uint32_t bits[3][256];
    uint32_t pix_bgr_map[1<<VLC_BITS];
    VLC vlc[6];                             //Y,U,V,YY,YU,YV
    AVFrame picture;
    uint8_t *bitstream_buffer;
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
} HYuvContext;

static const unsigned char classic_shift_luma[] = {
  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
  69,68, 0
};

static const unsigned char classic_shift_chroma[] = {
  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
};

static const unsigned char classic_add_luma[256] = {
    3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
   73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
   68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
   35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
   37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
   35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
   27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
   15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
   12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
   12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
   18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
   28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
   28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
   62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
   54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
   46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};

static const unsigned char classic_add_chroma[256] = {
    3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
    7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
   11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
   43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
   80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
   17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
  112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
    0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
  135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
   52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
   19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
    7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
   83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
   14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
    6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
};

static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
    int i;
    if(w<32){
        for(i=0; i<w; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        return left;
    }else{
        for(i=0; i<16; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
        return src[w-1];
    }
}

static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;
    for(i=0; i<FFMIN(w,4); i++){
        const int rt= src[i*4+R];
        const int gt= src[i*4+G];
        const int bt= src[i*4+B];
        dst[i*4+R]= rt - r;
        dst[i*4+G]= gt - g;
        dst[i*4+B]= bt - b;
        r = rt;
        g = gt;
        b = bt;
    }
    s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
    *red=   src[(w-1)*4+R];
    *green= src[(w-1)*4+G];
    *blue=  src[(w-1)*4+B];
}

static int read_len_table(uint8_t *dst, GetBitContext *gb){
    int i, val, repeat;

    for(i=0; i<256;){
        repeat= get_bits(gb, 3);
        val   = get_bits(gb, 5);
        if(repeat==0)
            repeat= get_bits(gb, 8);
//printf("%d %d\n", val, repeat);
        if(i+repeat > 256) {
            av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
            return -1;
        }
        while (repeat--)
            dst[i++] = val;
    }
    return 0;
}

static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
    int len, index;
    uint32_t bits=0;

    for(len=32; len>0; len--){
        for(index=0; index<256; index++){
            if(len_table[index]==len)
                dst[index]= bits++;
        }
        if(bits & 1){
            av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
            return -1;
        }
        bits >>= 1;
    }
    return 0;
}

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
typedef struct {
    uint64_t val;
    int name;
} HeapElem;

static void heap_sift(HeapElem *h, int root, int size)
{
    while(root*2+1 < size) {
        int child = root*2+1;
        if(child < size-1 && h[child].val > h[child+1].val)
            child++;
        if(h[root].val > h[child].val) {
            FFSWAP(HeapElem, h[root], h[child]);
            root = child;
        } else
            break;
    }
}

/* Build Huffman code lengths from the symbol statistics using a min-heap;
 * if any resulting length reaches 32 bits, retry with a larger bias added
 * to the frequencies until every code fits in 31 bits. */
static void generate_len_table(uint8_t *dst, const uint64_t *stats){
    HeapElem h[256];
    int up[2*256];
    int len[2*256];
    int offset, i, next;
    int size = 256;

    for(offset=1; ; offset<<=1){
        for(i=0; i<size; i++){
            h[i].name = i;
            h[i].val = (stats[i] << 8) + offset;
        }
        for(i=size/2-1; i>=0; i--)
            heap_sift(h, i, size);

        for(next=size; next<size*2-1; next++){
            // merge the two smallest entries, and put it back in the heap
            uint64_t min1v = h[0].val;
            up[h[0].name] = next;
            h[0].val = INT64_MAX;
            heap_sift(h, 0, size);
            up[h[0].name] = next;
            h[0].name = next;
            h[0].val += min1v;
            heap_sift(h, 0, size);
        }

        len[2*size-2] = 0;
        for(i=2*size-3; i>=size; i--)
            len[i] = len[up[i]] + 1;
        for(i=0; i<size; i++) {
            dst[i] = len[up[i]] + 1;
            if(dst[i] >= 32) break;
        }
        if(i==size) break;
    }
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

/* Build joint VLC tables that decode two symbols (a luma/chroma pair) or a
 * whole B,G,R triplet in a single table lookup whenever the combined code
 * length fits in VLC_BITS. */
static void generate_joint_tables(HYuvContext *s){
    uint16_t symbols[1<<VLC_BITS];
    uint16_t bits[1<<VLC_BITS];
    uint8_t len[1<<VLC_BITS];
    if(s->bitstream_bpp < 24){
        int p, i, y, u;
        for(p=0; p<3; p++){
            for(i=y=0; y<256; y++){
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if(limit <= 0)
                    continue;
                for(u=0; u<256; u++){
                    int len1 = s->len[p][u];
                    if(len1 > limit)
                        continue;
                    len[i] = len0 + len1;
                    bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y<<8) + u;
                    if(symbols[i] != 0xffff) // reserved to mean "invalid"
                        i++;
                }
            }
            free_vlc(&s->vlc[3+p]);
            init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
        }
    }else{
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;
        // restrict the range to +/-16 because that's pretty much guaranteed to
        // cover all the combinations that fit in 11 bits total, and it doesn't
        // matter if we miss a few rare codes.
        for(i=0, g=-16; g<16; g++){
            int len0 = s->len[p0][g&255];
            int limit0 = VLC_BITS - len0;
            if(limit0 < 2)
                continue;
            for(b=-16; b<16; b++){
                int len1 = s->len[p1][b&255];
                int limit1 = limit0 - len1;
                if(limit1 < 1)
                    continue;
                code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
                for(r=-16; r<16; r++){
                    int len2 = s->len[2][r&255];
                    if(len2 > limit1)
                        continue;
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r&255];
                    if(s->decorrelate){
                        map[i][G] = g;
                        map[i][B] = g+b;
                        map[i][R] = g+r;
                    }else{
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}

static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
    GetBitContext gb;
    int i;

    init_get_bits(&gb, src, length*8);

    for(i=0; i<3; i++){
        if(read_len_table(s->len[i], &gb)<0)
            return -1;
        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return (get_bits_count(&gb)+7)/8;
}

static int read_old_huffman_tables(HYuvContext *s){
#if 1
    GetBitContext gb;
    int i;

    init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
    if(read_len_table(s->len[0], &gb)<0)
        return -1;
    init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
    if(read_len_table(s->len[1], &gb)<0)
        return -1;

    for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma  [i];
    for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];

    if(s->bitstream_bpp >= 24){
        memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
        memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
    }
    memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
    memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));

    for(i=0; i<3; i++){
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return 0;
#else
    av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
    return -1;
#endif
}

static av_cold void alloc_temp(HYuvContext *s){
    int i;

    if(s->bitstream_bpp<24){
        for(i=0; i<3; i++){
            s->temp[i]= av_malloc(s->width + 16);
        }
    }else{
        s->temp[0]= av_mallocz(4*s->width + 16);
    }
}

static av_cold int common_init(AVCodecContext *avctx){
    HYuvContext *s = avctx->priv_data;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx);

    s->width= avctx->width;
    s->height= avctx->height;
    assert(s->width>0 && s->height>0);

    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    memset(s->vlc, 0, 3*sizeof(VLC));

    avctx->coded_frame= &s->picture;
    s->interlaced= s->height > 288;

    s->bgr32=1;
//if(avctx->extradata)
//  printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
    if(avctx->extradata_size){
        if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
            s->version=1; // do such files exist at all?
        else
            s->version=2;
    }else
        s->version=0;

    if(s->version==2){
        int method, interlace;

        if (avctx->extradata_size < 4)
            return -1;

        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
        interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
        s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
            return -1;
    }else{
        switch(avctx->bits_per_coded_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_coded_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT; //OLD
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
        s->context= 0;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUYV422;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_RGB32;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        assert(0);
    }

    alloc_temp(s);

//    av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    return 0;
}

static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i;

    avctx->coded_frame= &s->picture;
    alloc_temp(s);

    for (i = 0; i < 6; i++)
        s->vlc[i].table = NULL;

    if(s->version==2){
        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
            return -1;
    }else{
        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
    int i;
    int index= 0;

    for(i=0; i<256;){
        int val= len[i];
        int repeat=0;

        for(; i<256 && len[i]==val && repeat<255; i++)
            repeat++;

        assert(val < 32 && val >0 && repeat<256 && repeat>0);
        if(repeat>7){
            buf[index++]= val;
            buf[index++]= repeat;
        }else{
            buf[index++]= val | (repeat<<5);
        }
    }

    return index;
}
static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    common_init(avctx);

    avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    s->version=2;

    avctx->coded_frame= &s->picture;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    case PIX_FMT_RGB32:
        s->bitstream_bpp= 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_coded_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;
    s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if(avctx->context_model==1){
        s->context= avctx->context_model;
        if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
            av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
            return -1;
        }
    }else s->context= 0;

    if(avctx->codec->id==CODEC_ID_HUFFYUV){
        if(avctx->pix_fmt==PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if(avctx->context_model){
            av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if(s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
        av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
    if(s->context)
        ((uint8_t*)avctx->extradata)[2]|= 0x40;
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;
                    p=next;
                }
            }
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i]);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        s->avctx->extradata_size+=
            store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if(s->context){
        for(i=0; i<3; i++){
            int pels = s->width*s->height / (i?40:10);
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);
                s->stats[i][j]= pels/(d+1);
            }
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 0;
    }

//    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    alloc_temp(s);

    s->picture_number=0;

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

/* TODO instead of restarting the read when the code isn't in the first level
 * of the joint table, jump into the 2nd level of the individual table.
 */
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
    if(code != 0xffff){\
        dst0 = code>>8;\
        dst1 = code;\
    }else{\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}

static void decode_422_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    if(count >= (get_bits_left(&s->gb))/(31*4)){
        for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
        }
    }else{
        for(i=0; i<count; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
        }
    }
}

static void decode_gray_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    if(count >= (get_bits_left(&s->gb))/(31*2)){
        for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
        }
    }else{
        for(i=0; i<count; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
        }
    }
}

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
static int encode_422_bitstream(HYuvContext *s, int offset, int count){
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset/2;
    const uint8_t *v = s->temp[2] + offset/2;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
            int y0 = y[2*i];\
            int y1 = y[2*i+1];\
            int u0 = u[i];\
            int v0 = v[i];

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if(s->context){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }else{
        for(i=0; i<count; i++){
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}

static int encode_gray_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];
#define STAT2\
            s->stats[0][y0]++;\
            s->stats[0][y1]++;
#define WRITE2\
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if(s->context){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
            WRITE2;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
    int i;
    for(i=0; i<count; i++){
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
        if(code != -1){
            *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
        }else if(decorrelate){
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
        }else{
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
        }
        if(alpha)
            s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
    }
}

static void decode_bgr_bitstream(HYuvContext *s, int count){
    if(s->decorrelate){
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 1, 0);
        else
            decode_bgr_1(s, count, 1, 1);
    }else{
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 0, 0);
        else
            decode_bgr_1(s, count, 0, 1);
    }
}

static int encode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD3\
            int g= s->temp[0][4*i+G];\
            int b= (s->temp[0][4*i+B] - g) & 0xff;\
            int r= (s->temp[0][4*i+R] - g) & 0xff;
#define STAT3\
            s->stats[0][b]++;\
            s->stats[1][g]++;\
            s->stats[2][r]++;
#define WRITE3\
            put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
            put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
            put_bits(&s->pb, s->len[2][r], s->bits[2][r]);

    if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
        }
    }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
            WRITE3;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static void draw_slice(HYuvContext *s, int y){
    int h, cy;
    int offset[4];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;

    if(s->bitstream_bpp==12){
        cy= y>>1;
    }else{
        cy= y;
    }

    offset[0] = s->picture.linesize[0]*y;
    offset[1] = s->picture.linesize[1]*cy;
    offset[2] = s->picture.linesize[2]*cy;
    offset[3] = 0;
    emms_c();

    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);

    s->last_slice_end= y + h;
}

static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    AVFrame * const p= &s->picture;
    int table_size= 0;

    AVFrame *picture = data;

    av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!s->bitstream_buffer)
        return AVERROR(ENOMEM);

    memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);

    if(p->data[0])
        ff_thread_release_buffer(avctx, p);

    p->reference= 0;
    if(ff_thread_get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if(s->context){
        table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
        if(table_size < 0)
            return -1;
    }

    if((unsigned)(buf_size-table_size) >= INT_MAX/8)
        return -1;

    init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);

    fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
    fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
    fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];

    s->last_slice_end= 0;

    if(s->bitstream_bpp<24){
        int y, cy;
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;

        if(s->yuy2){
            p->data[0][3]= get_bits(&s->gb, 8);
            p->data[0][2]= get_bits(&s->gb, 8);
            p->data[0][1]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);

            av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
            return -1;
        }else{

            leftv= p->data[2][0]= get_bits(&s->gb, 8);
            lefty= p->data[0][1]= get_bits(&s->gb, 8);
            leftu= p->data[1][0]= get_bits(&s->gb, 8);
                   p->data[0][0]= get_bits(&s->gb, 8);

            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_422_bitstream(s, width-2);
                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                for(cy=y=1; y<s->height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        decode_gray_bitstream(s, width);

                        ydst= p->data[0] + p->linesize[0]*y;

                        lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                        if(s->predictor == PLANE){
                            if(y>s->interlaced)
                                s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        }
                        y++;
                        if(y>=s->height) break;
                    }

                    draw_slice(s, y);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    decode_422_bitstream(s, width);
                    lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
                    if(s->predictor == PLANE){
                        if(cy>s->interlaced){
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if(!(s->flags&CODEC_FLAG_GRAY)){
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                draw_slice(s, height);

                break;
            case MEDIAN:
                /* first line except first 2 pixels is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                cy=y=1;

                /* second line is left predicted for interlaced case */
                if(s->interlaced){
                    decode_422_bitstream(s, width);
                    lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
                        leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
                    }
                    y++; cy++;
                }

                /* next 4 pixels are left predicted too */
                decode_422_bitstream(s, 4);
                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                /* next line except the first 4 pixels is median predicted */
                lefttopy= p->data[0][3];
                decode_422_bitstream(s, width-4);
                s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    lefttopu= p->data[1][1];
                    lefttopv= p->data[2][1];
                    s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
                    s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
                }
                y++; cy++;

                for(; y<height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        while(2*cy > y){
                            decode_gray_bitstream(s, width);
                            ydst= p->data[0] + p->linesize[0]*y;
                            s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                            y++;
                        }
                        if(y>=height) break;
                    }
                    draw_slice(s, y);

                    decode_422_bitstream(s, width);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }

                draw_slice(s, height);
                break;
            }
        }
    }else{
        int y;
        int leftr, leftg, leftb, lefta;
        const int last_line= (height-1)*p->linesize[0];

        if(s->bitstream_bpp==32){
            lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
        }else{
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
            lefta= p->data[0][last_line+A]= 255;
            skip_bits(&s->gb, 8);
        }

        if(s->bgr32){
            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_bgr_bitstream(s, width-1);
                s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);

                for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
                    decode_bgr_bitstream(s, width);

                    s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
                    if(s->predictor == PLANE){
                        if(s->bitstream_bpp!=32) lefta=0;
                        if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
                            s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
                                             p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
                        }
                    }
                }
                draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
            }
        }else{

            av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
            return -1;
        }
    }
    emms_c();

    *picture= *p;
    *data_size = sizeof(AVFrame);

    return (get_bits_count(&s->gb)+31)/32*4 + table_size;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

static int common_end(HYuvContext *s){
    int i;

    for(i=0; i<3; i++){
        av_freep(&s->temp[i]);
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i;

    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    common_end(s);
    av_freep(&s->bitstream_buffer);

    for(i=0; i<6; i++){
        free_vlc(&s->vlc[i]);
    }

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    HYuvContext *s = avctx->priv_data;
    AVFrame *pict = data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p= &s->picture;
    int i, j, size=0;

    *p = *pict;
    p->pict_type= AV_PICTURE_TYPE_I;
    p->key_frame= 1;

    if(s->context){
        for(i=0; i<3; i++){
            generate_len_table(s->len[i], s->stats[i]);
            if(generate_bits_table(s->bits[i], s->len[i])<0)
                return -1;
            size+= store_table(s, s->len[i], &buf[size]);
        }

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, buf+size, buf_size-size);

    if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
        int lefty, leftu, leftv, y, cy;

        put_bits(&s->pb, 8, leftv= p->data[2][0]);
        put_bits(&s->pb, 8, lefty= p->data[0][1]);
        put_bits(&s->pb, 8, leftu= p->data[1][0]);
        put_bits(&s->pb, 8,        p->data[0][0]);

        lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        encode_422_bitstream(s, 2, width-2);

        if(s->predictor==MEDIAN){
            int lefttopy, lefttopu, lefttopv;
            cy=y=1;
            if(s->interlaced){
                lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            lefttopy= p->data[0][3];
            lefttopu= p->data[1][1];
            lefttopv= p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width-4);
            y++; cy++;

            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                if(s->bitstream_bpp==12){
                    while(2*cy > y){
                        ydst= p->data[0] + p->linesize[0]*y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if(y>=height) break;
                }
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        }else{
            for(cy=y=1; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;

                    if(s->predictor == PLANE && s->interlaced < y){
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    }else{
                        lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if(y>=height) break;
                }

                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                if(s->predictor == PLANE && s->interlaced < cy){
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                }else{
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    }else if(avctx->pix_fmt == PIX_FMT_RGB32){
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr= data[R]);
        put_bits(&s->pb, 8, leftg= data[G]);
        put_bits(&s->pb, 8, leftb= data[B]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
        encode_bgr_bitstream(s, width-1);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            }else{
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgr_bitstream(s, width);
        }
    }else{
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    size+= (put_bits_count(&s->pb)+31)/8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size/= 4;

    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        int j;
        char *p= avctx->stats_out;
        char *end= p + 1024*30;
        for(i=0; i<3; i++){
            for(j=0; j<256; j++){
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p+= strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
    }

    s->picture_number++;

    return size*4;
}

static av_cold int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

#if CONFIG_HUFFYUV_DECODER
AVCodec ff_huffyuv_decoder = {
    "huffyuv",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
    NULL,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_DECODER
AVCodec ff_ffvhuff_decoder = {
    "ffvhuff",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
    NULL,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif

#if CONFIG_HUFFYUV_ENCODER
AVCodec ff_huffyuv_encoder = {
    "huffyuv",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_ENCODER
AVCodec ff_ffvhuff_encoder = {
    "ffvhuff",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif