Libav 0.7.1
libavcodec/mpegvideo.c
Go to the documentation of this file.
00001 /*
00002  * The simplest mpeg encoder (well, it was the simplest!)
00003  * Copyright (c) 2000,2001 Fabrice Bellard
00004  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
00005  *
00006  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
00007  *
00008  * This file is part of Libav.
00009  *
00010  * Libav is free software; you can redistribute it and/or
00011  * modify it under the terms of the GNU Lesser General Public
00012  * License as published by the Free Software Foundation; either
00013  * version 2.1 of the License, or (at your option) any later version.
00014  *
00015  * Libav is distributed in the hope that it will be useful,
00016  * but WITHOUT ANY WARRANTY; without even the implied warranty of
00017  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00018  * Lesser General Public License for more details.
00019  *
00020  * You should have received a copy of the GNU Lesser General Public
00021  * License along with Libav; if not, write to the Free Software
00022  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
00023  */
00024 
00030 #include "libavutil/intmath.h"
00031 #include "libavutil/imgutils.h"
00032 #include "avcodec.h"
00033 #include "dsputil.h"
00034 #include "internal.h"
00035 #include "mpegvideo.h"
00036 #include "mpegvideo_common.h"
00037 #include "mjpegenc.h"
00038 #include "msmpeg4.h"
00039 #include "faandct.h"
00040 #include "xvmc_internal.h"
00041 #include "thread.h"
00042 #include <limits.h>
00043 
00044 //#undef NDEBUG
00045 //#include <assert.h>
00046 
00047 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
00048                                    DCTELEM *block, int n, int qscale);
00049 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
00050                                    DCTELEM *block, int n, int qscale);
00051 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
00052                                    DCTELEM *block, int n, int qscale);
00053 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
00054                                    DCTELEM *block, int n, int qscale);
00055 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
00056                                    DCTELEM *block, int n, int qscale);
00057 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
00058                                   DCTELEM *block, int n, int qscale);
00059 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
00060                                   DCTELEM *block, int n, int qscale);
00061 
00062 
00063 /* enable all paranoid tests for rounding, overflows, etc... */
00064 //#define PARANOID
00065 
00066 //#define DEBUG
00067 
00068 
/* Default luma->chroma qscale mapping: the identity (chroma qscale equals
 * luma qscale). Codecs with a non-linear mapping install their own table. */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
00073 
/* MPEG-1 DC coefficient scale: constant 8 for every qscale value.
 * Sized 128 so it can be indexed by the larger qscale range of other codecs
 * that reuse it as a default (see MPV_common_defaults). */
const uint8_t ff_mpeg1_dc_scale_table[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
00081 
/* MPEG-2 DC scale, constant 4 — presumably the intra_dc_precision==1 entry
 * of ff_mpeg2_dc_scale_table below (scale halves per precision step). */
static const uint8_t mpeg2_dc_scale_table1[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
00089 
/* MPEG-2 DC scale, constant 2 — third entry of ff_mpeg2_dc_scale_table. */
static const uint8_t mpeg2_dc_scale_table2[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
00097 
/* MPEG-2 DC scale, constant 1 — fourth entry of ff_mpeg2_dc_scale_table. */
static const uint8_t mpeg2_dc_scale_table3[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
00105 
/* DC scale tables selected by index 0..3; scales are 8, 4, 2, 1 respectively
 * (matches MPEG-2 intra_dc_precision semantics — indexed by that field,
 * presumably; confirm against the MPEG-2 decoder). Index 0 reuses the
 * MPEG-1 table since both scale by 8. */
const uint8_t * const ff_mpeg2_dc_scale_table[4]={
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
00112 
/* PIX_FMT_NONE-terminated pixel format list: plain software YUV 4:2:0 only. */
const enum PixelFormat ff_pixfmt_list_420[] = {
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
00117 
/* PIX_FMT_NONE-terminated format list offering hardware-accelerated surfaces
 * (DXVA2, VA-API) before falling back to software YUV 4:2:0. Order matters:
 * earlier entries are preferred during format negotiation. */
const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
    PIX_FMT_DXVA2_VLD,
    PIX_FMT_VAAPI_VLD,
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
00124 
/**
 * Scan [p, end) for an MPEG-style start code (0x000001XX), carrying partial
 * match state across calls in *state (the last 4 bytes seen, big-endian).
 *
 * @param p     current read position
 * @param end   one past the last readable byte
 * @param state in/out: rolling 32-bit window of the most recent bytes;
 *              lets a start code split across two buffers be found
 * @return pointer one past the XX byte of the start code, or end if none
 */
const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
    int i;

    assert(p<=end);
    if(p>=end)
        return end;

    /* First feed up to 3 bytes through *state so a code straddling the
     * previous buffer boundary is detected. */
    for(i=0; i<3; i++){
        uint32_t tmp= *state << 8;
        *state= tmp + *(p++);
        if(tmp == 0x100 || p==end)
            return p;
    }

    /* Skip ahead in strides: the tests on p[-1]/p[-2]/p[-3] prove no
     * 00 00 01 triple can end before the new position, so most bytes are
     * examined only once. */
    while(p<end){
        if     (p[-1] > 1      ) p+= 3;
        else if(p[-2]          ) p+= 2;
        else if(p[-3]|(p[-1]-1)) p++;
        else{
            p++;
            break;
        }
    }

    /* Reload *state from the last 4 bytes so the next call can resume. */
    p= FFMIN(p, end)-4;
    *state= AV_RB32(p);

    return p+4;
}
00154 
00155 /* init common dct for both encoder and decoder */
/**
 * Install the DCT unquantize functions and permuted scan tables shared by
 * the encoder and decoder. The C implementations are set first; an
 * architecture-specific init (selected at build time below) may override
 * them with optimized versions.
 *
 * @return 0 (always succeeds)
 */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    /* bitexact mode needs a variant with reproducible rounding */
    if(s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

#if   HAVE_MMX
    MPV_common_init_mmx(s);
#elif ARCH_ALPHA
    MPV_common_init_axp(s);
#elif CONFIG_MLIB
    MPV_common_init_mlib(s);
#elif HAVE_MMI
    MPV_common_init_mmi(s);
#elif ARCH_ARM
    MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    MPV_common_init_altivec(s);
#elif ARCH_BFIN
    MPV_common_init_bfin(s);
#endif

    /* load & permutate scantables
       note: only wmv uses different ones
    */
    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    }else{
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
00198 
00199 void ff_copy_picture(Picture *dst, Picture *src){
00200     *dst = *src;
00201     dst->type= FF_BUFFER_TYPE_COPY;
00202 }
00203 
/* Release pic's frame buffer back to the (possibly threaded) buffer pool,
 * then free any hwaccel private data attached to it. */
static void free_frame_buffer(MpegEncContext *s, Picture *pic)
{
    ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
    av_freep(&pic->hwaccel_picture_private);
}
00212 
/**
 * Allocate a frame buffer for pic via the (possibly threaded) get_buffer
 * callback, plus hwaccel private data if a hwaccel is active.
 *
 * Also validates what the callback returned: sane age/type/data, strides
 * matching the context's established linesizes, and equal U/V strides.
 *
 * @return 0 on success, -1 on failure (nothing left allocated on pic)
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r;

    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
        }
    }

    r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);

    if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
        av_freep(&pic->hwaccel_picture_private);
        return -1;
    }

    /* user-supplied buffers must keep the stride the codec was set up with */
    if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    if (pic->linesize[1] != pic->linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    return 0;
}
00253 
00258 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
00259     const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
00260     const int mb_array_size= s->mb_stride*s->mb_height;
00261     const int b8_array_size= s->b8_stride*s->mb_height*2;
00262     const int b4_array_size= s->b4_stride*s->mb_height*4;
00263     int i;
00264     int r= -1;
00265 
00266     if(shared){
00267         assert(pic->data[0]);
00268         assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
00269         pic->type= FF_BUFFER_TYPE_SHARED;
00270     }else{
00271         assert(!pic->data[0]);
00272 
00273         if (alloc_frame_buffer(s, pic) < 0)
00274             return -1;
00275 
00276         s->linesize  = pic->linesize[0];
00277         s->uvlinesize= pic->linesize[1];
00278     }
00279 
00280     if(pic->qscale_table==NULL){
00281         if (s->encoding) {
00282             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var   , mb_array_size * sizeof(int16_t)  , fail)
00283             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t)  , fail)
00284             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean  , mb_array_size * sizeof(int8_t )  , fail)
00285         }
00286 
00287         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
00288         FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t)  , fail)
00289         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
00290         pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
00291         pic->qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
00292         if(s->out_format == FMT_H264){
00293             for(i=0; i<2; i++){
00294                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4)  * sizeof(int16_t), fail)
00295                 pic->motion_val[i]= pic->motion_val_base[i]+4;
00296                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
00297             }
00298             pic->motion_subsample_log2= 2;
00299         }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
00300             for(i=0; i<2; i++){
00301                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
00302                 pic->motion_val[i]= pic->motion_val_base[i]+4;
00303                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
00304             }
00305             pic->motion_subsample_log2= 3;
00306         }
00307         if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
00308             FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
00309         }
00310         pic->qstride= s->mb_stride;
00311         FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
00312     }
00313 
00314     /* It might be nicer if the application would keep track of these
00315      * but it would require an API change. */
00316     memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
00317     s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
00318     if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == AV_PICTURE_TYPE_B)
00319         pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
00320     pic->owner2 = NULL;
00321 
00322     return 0;
00323 fail: //for the FF_ALLOCZ_OR_GOTO macro
00324     if(r>=0)
00325         free_frame_buffer(s, pic);
00326     return -1;
00327 }
00328 
00332 static void free_picture(MpegEncContext *s, Picture *pic){
00333     int i;
00334 
00335     if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
00336         free_frame_buffer(s, pic);
00337     }
00338 
00339     av_freep(&pic->mb_var);
00340     av_freep(&pic->mc_mb_var);
00341     av_freep(&pic->mb_mean);
00342     av_freep(&pic->mbskip_table);
00343     av_freep(&pic->qscale_table_base);
00344     av_freep(&pic->mb_type_base);
00345     av_freep(&pic->dct_coeff);
00346     av_freep(&pic->pan_scan);
00347     pic->mb_type= NULL;
00348     for(i=0; i<2; i++){
00349         av_freep(&pic->motion_val_base[i]);
00350         av_freep(&pic->ref_index[i]);
00351     }
00352 
00353     if(pic->type == FF_BUFFER_TYPE_SHARED){
00354         for(i=0; i<4; i++){
00355             pic->base[i]=
00356             pic->data[i]= NULL;
00357         }
00358         pic->type= 0;
00359     }
00360 }
00361 
/* Allocate the per-thread scratch buffers (edge emulation, ME scratchpads,
 * DCT blocks, H.263 AC prediction values) for one MpegEncContext. The
 * non-scratch state is assumed to already be copied from `base`. */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance

     //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t), fail)
    /* the scratchpads deliberately alias one allocation — they are never
     * needed at the same time */
    s->me.temp=         s->me.scratchpad;
    s->rd_scratchpad=   s->me.scratchpad;
    s->b_scratchpad=    s->me.scratchpad;
    s->obmc_scratchpad= s->me.scratchpad + 16;
    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map      , ME_MAP_SIZE*sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
        if(s->avctx->noise_reduction){
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
    s->block= s->blocks[0];

    for(i=0;i<12;i++){
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
        /* +stride+1 offsets allow [-1] neighbour accesses; ac_val[1]/[2] are
         * the two chroma planes laid out after the luma plane */
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; //free() through MPV_common_end()
}
00403 
00404 static void free_duplicate_context(MpegEncContext *s){
00405     if(s==NULL) return;
00406 
00407     av_freep(&s->edge_emu_buffer);
00408     av_freep(&s->me.scratchpad);
00409     s->me.temp=
00410     s->rd_scratchpad=
00411     s->b_scratchpad=
00412     s->obmc_scratchpad= NULL;
00413 
00414     av_freep(&s->dct_error_sum);
00415     av_freep(&s->me.map);
00416     av_freep(&s->me.score_map);
00417     av_freep(&s->blocks);
00418     av_freep(&s->ac_val_base);
00419     s->block= NULL;
00420 }
00421 
/* Copy only the per-thread fields (scratch buffers, slice range, bitstream
 * writer, ME state) from src to bak. Used by ff_update_duplicate_context()
 * to preserve a thread's private state across a whole-struct memcpy.
 * NOTE: this list must stay in sync with init_duplicate_context(). */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
#define COPY(a) bak->a= src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
00447 
00448 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
00449     MpegEncContext bak;
00450     int i;
00451     //FIXME copy only needed parts
00452 //START_TIMER
00453     backup_duplicate_context(&bak, dst);
00454     memcpy(dst, src, sizeof(MpegEncContext));
00455     backup_duplicate_context(dst, &bak);
00456     for(i=0;i<12;i++){
00457         dst->pblocks[i] = &dst->block[i];
00458     }
00459 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
00460 }
00461 
00462 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
00463 {
00464     MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
00465 
00466     if(dst == src || !s1->context_initialized) return 0;
00467 
00468     //FIXME can parameters change on I-frames? in that case dst may need a reinit
00469     if(!s->context_initialized){
00470         memcpy(s, s1, sizeof(MpegEncContext));
00471 
00472         s->avctx                 = dst;
00473         s->picture_range_start  += MAX_PICTURE_COUNT;
00474         s->picture_range_end    += MAX_PICTURE_COUNT;
00475         s->bitstream_buffer      = NULL;
00476         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
00477 
00478         MPV_common_init(s);
00479     }
00480 
00481     s->avctx->coded_height  = s1->avctx->coded_height;
00482     s->avctx->coded_width   = s1->avctx->coded_width;
00483     s->avctx->width         = s1->avctx->width;
00484     s->avctx->height        = s1->avctx->height;
00485 
00486     s->coded_picture_number = s1->coded_picture_number;
00487     s->picture_number       = s1->picture_number;
00488     s->input_picture_number = s1->input_picture_number;
00489 
00490     memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
00491     memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
00492 
00493     s->last_picture_ptr     = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
00494     s->current_picture_ptr  = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
00495     s->next_picture_ptr     = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
00496 
00497     memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
00498 
00499     //Error/bug resilience
00500     s->next_p_frame_damaged = s1->next_p_frame_damaged;
00501     s->workaround_bugs      = s1->workaround_bugs;
00502 
00503     //MPEG4 timing info
00504     memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
00505 
00506     //B-frame info
00507     s->max_b_frames         = s1->max_b_frames;
00508     s->low_delay            = s1->low_delay;
00509     s->dropable             = s1->dropable;
00510 
00511     //DivX handling (doesn't work)
00512     s->divx_packed          = s1->divx_packed;
00513 
00514     if(s1->bitstream_buffer){
00515         if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
00516             av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
00517         s->bitstream_buffer_size  = s1->bitstream_buffer_size;
00518         memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
00519         memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
00520     }
00521 
00522     //MPEG2/interlacing info
00523     memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
00524 
00525     if(!s1->first_field){
00526         s->last_pict_type= s1->pict_type;
00527         if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->quality;
00528 
00529         if(s1->pict_type!=FF_B_TYPE){
00530             s->last_non_b_pict_type= s1->pict_type;
00531         }
00532     }
00533 
00534     return 0;
00535 }
00536 
00541 void MPV_common_defaults(MpegEncContext *s){
00542     s->y_dc_scale_table=
00543     s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
00544     s->chroma_qscale_table= ff_default_chroma_qscale_table;
00545     s->progressive_frame= 1;
00546     s->progressive_sequence= 1;
00547     s->picture_structure= PICT_FRAME;
00548 
00549     s->coded_picture_number = 0;
00550     s->picture_number = 0;
00551     s->input_picture_number = 0;
00552 
00553     s->picture_in_gop_number = 0;
00554 
00555     s->f_code = 1;
00556     s->b_code = 1;
00557 
00558     s->picture_range_start = 0;
00559     s->picture_range_end = MAX_PICTURE_COUNT;
00560 }
00561 
/* Set decoder defaults — currently identical to the common defaults; exists
 * as a separate entry point so decoder-only defaults can be added later. */
void MPV_decode_defaults(MpegEncContext *s){
    MPV_common_defaults(s);
}
00569 
00574 av_cold int MPV_common_init(MpegEncContext *s)
00575 {
00576     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;
00577 
00578     if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
00579         s->mb_height = (s->height + 31) / 32 * 2;
00580     else if (s->codec_id != CODEC_ID_H264)
00581         s->mb_height = (s->height + 15) / 16;
00582 
00583     if(s->avctx->pix_fmt == PIX_FMT_NONE){
00584         av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
00585         return -1;
00586     }
00587 
00588     if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
00589        (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
00590         av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
00591         return -1;
00592     }
00593 
00594     if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
00595         return -1;
00596 
00597     dsputil_init(&s->dsp, s->avctx);
00598     ff_dct_common_init(s);
00599 
00600     s->flags= s->avctx->flags;
00601     s->flags2= s->avctx->flags2;
00602 
00603     if (s->width && s->height) {
00604         s->mb_width  = (s->width  + 15) / 16;
00605         s->mb_stride = s->mb_width + 1;
00606         s->b8_stride = s->mb_width*2 + 1;
00607         s->b4_stride = s->mb_width*4 + 1;
00608         mb_array_size= s->mb_height * s->mb_stride;
00609         mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
00610 
00611         /* set chroma shifts */
00612         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
00613                                       &(s->chroma_y_shift) );
00614 
00615         /* set default edge pos, will be overriden in decode_header if needed */
00616         s->h_edge_pos= s->mb_width*16;
00617         s->v_edge_pos= s->mb_height*16;
00618 
00619         s->mb_num = s->mb_width * s->mb_height;
00620 
00621         s->block_wrap[0]=
00622         s->block_wrap[1]=
00623         s->block_wrap[2]=
00624         s->block_wrap[3]= s->b8_stride;
00625         s->block_wrap[4]=
00626         s->block_wrap[5]= s->mb_stride;
00627 
00628         y_size = s->b8_stride * (2 * s->mb_height + 1);
00629         c_size = s->mb_stride * (s->mb_height + 1);
00630         yc_size = y_size + 2 * c_size;
00631 
00632         /* convert fourcc to upper case */
00633         s->codec_tag = ff_toupper4(s->avctx->codec_tag);
00634 
00635         s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
00636 
00637         s->avctx->coded_frame= (AVFrame*)&s->current_picture;
00638 
00639         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
00640         for(y=0; y<s->mb_height; y++){
00641             for(x=0; x<s->mb_width; x++){
00642                 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
00643             }
00644         }
00645         s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
00646 
00647         if (s->encoding) {
00648             /* Allocate MV tables */
00649             FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
00650             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
00651             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
00652             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
00653             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
00654             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
00655             s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
00656             s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
00657             s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
00658             s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
00659             s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
00660             s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;
00661 
00662             if(s->msmpeg4_version){
00663                 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
00664             }
00665             FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
00666 
00667             /* Allocate MB type table */
00668             FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type  , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
00669 
00670             FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
00671 
00672             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix  , 64*32   * sizeof(int), fail)
00673             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix  , 64*32   * sizeof(int), fail)
00674             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
00675             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
00676             FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
00677             FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
00678 
00679             if(s->avctx->noise_reduction){
00680                 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
00681             }
00682         }
00683     }
00684 
00685     s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
00686     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
00687     for(i = 0; i < s->picture_count; i++) {
00688         avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
00689     }
00690 
00691     if (s->width && s->height) {
00692         FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
00693 
00694         if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
00695             /* interlaced direct mode decoding tables */
00696             for(i=0; i<2; i++){
00697                 int j, k;
00698                 for(j=0; j<2; j++){
00699                     for(k=0; k<2; k++){
00700                         FF_ALLOCZ_OR_GOTO(s->avctx,    s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
00701                         s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
00702                     }
00703                     FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
00704                     FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
00705                     s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
00706                 }
00707                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
00708             }
00709         }
00710         if (s->out_format == FMT_H263) {
00711             /* cbp values */
00712             FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
00713             s->coded_block= s->coded_block_base + s->b8_stride + 1;
00714 
00715             /* cbp, ac_pred, pred_dir */
00716             FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail)
00717             FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
00718         }
00719 
00720         if (s->h263_pred || s->h263_plus || !s->encoding) {
00721             /* dc values */
00722             //MN: we need these for error resilience of intra-frames
00723             FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
00724             s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
00725             s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
00726             s->dc_val[2] = s->dc_val[1] + c_size;
00727             for(i=0;i<yc_size;i++)
00728                 s->dc_val_base[i] = 1024;
00729         }
00730 
00731         /* which mb is a intra block */
00732         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
00733         memset(s->mbintra_table, 1, mb_array_size);
00734 
00735         /* init macroblock skip table */
00736         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
00737         //Note the +1 is for a quicker mpeg4 slice_end detection
00738         FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
00739 
00740         s->parse_context.state= -1;
00741         if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
00742             s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
00743             s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
00744             s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
00745         }
00746     }
00747 
00748     s->context_initialized = 1;
00749     s->thread_context[0]= s;
00750 
00751     if (s->width && s->height) {
00752     if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
00753         threads = s->avctx->thread_count;
00754 
00755         for(i=1; i<threads; i++){
00756             s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
00757             memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
00758         }
00759 
00760         for(i=0; i<threads; i++){
00761             if(init_duplicate_context(s->thread_context[i], s) < 0)
00762                 goto fail;
00763             s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
00764             s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
00765         }
00766     } else {
00767         if(init_duplicate_context(s, s) < 0) goto fail;
00768         s->start_mb_y = 0;
00769         s->end_mb_y   = s->mb_height;
00770     }
00771     }
00772 
00773     return 0;
00774  fail:
00775     MPV_common_end(s);
00776     return -1;
00777 }
00778 
/* free everything allocated by MPV_common_init(); common teardown for both
 * encoder and decoder (the old comment said "init" — this is the counterpart) */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    /* Slice threading / encoding uses per-thread duplicate contexts:
     * release their scratch buffers first, then free the contexts
     * themselves. Index 0 is s itself and must not be freed. */
    if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
        for(i=0; i<s->avctx->thread_count; i++){
            free_duplicate_context(s->thread_context[i]);
        }
        for(i=1; i<s->avctx->thread_count; i++){
            av_freep(&s->thread_context[i]);
        }
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size=0;

    /* Motion vector tables: only the *_base pointers own memory; the
     * non-base pointers are offsets into them and are just cleared. */
    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table= NULL;
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;
    /* same pattern for the field-based MV tables ([2][2][2] resp. [2][2]) */
    for(i=0; i<2; i++){
        for(j=0; j<2; j++){
            for(k=0; k<2; k++){
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k]=NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j]=NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size=0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* Frame-threaded copies (is_copy) do not own the pictures' buffers,
     * so only the original context releases them before freeing the array. */
    if(s->picture && !s->avctx->is_copy){
        for(i=0; i<s->picture_count; i++){
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->last_picture_ptr=
    s->next_picture_ptr=
    s->current_picture_ptr= NULL;
    s->linesize= s->uvlinesize= 0;

    for(i=0; i<3; i++)
        av_freep(&s->visualization_buffer[i]);

    /* with frame threading the buffers are shared across contexts and are
     * released elsewhere */
    if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);
}
00864 
00865 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
00866 {
00867     int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
00868     uint8_t index_run[MAX_RUN+1];
00869     int last, run, level, start, end, i;
00870 
00871     /* If table is static, we can quit if rl->max_level[0] is not NULL */
00872     if(static_store && rl->max_level[0])
00873         return;
00874 
00875     /* compute max_level[], max_run[] and index_run[] */
00876     for(last=0;last<2;last++) {
00877         if (last == 0) {
00878             start = 0;
00879             end = rl->last;
00880         } else {
00881             start = rl->last;
00882             end = rl->n;
00883         }
00884 
00885         memset(max_level, 0, MAX_RUN + 1);
00886         memset(max_run, 0, MAX_LEVEL + 1);
00887         memset(index_run, rl->n, MAX_RUN + 1);
00888         for(i=start;i<end;i++) {
00889             run = rl->table_run[i];
00890             level = rl->table_level[i];
00891             if (index_run[run] == rl->n)
00892                 index_run[run] = i;
00893             if (level > max_level[run])
00894                 max_level[run] = level;
00895             if (run > max_run[level])
00896                 max_run[level] = run;
00897         }
00898         if(static_store)
00899             rl->max_level[last] = static_store[last];
00900         else
00901             rl->max_level[last] = av_malloc(MAX_RUN + 1);
00902         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
00903         if(static_store)
00904             rl->max_run[last] = static_store[last] + MAX_RUN + 1;
00905         else
00906             rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
00907         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
00908         if(static_store)
00909             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
00910         else
00911             rl->index_run[last] = av_malloc(MAX_RUN + 1);
00912         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
00913     }
00914 }
00915 
00916 void init_vlc_rl(RLTable *rl)
00917 {
00918     int i, q;
00919 
00920     for(q=0; q<32; q++){
00921         int qmul= q*2;
00922         int qadd= (q-1)|1;
00923 
00924         if(q==0){
00925             qmul=1;
00926             qadd=0;
00927         }
00928         for(i=0; i<rl->vlc.table_size; i++){
00929             int code= rl->vlc.table[i][0];
00930             int len = rl->vlc.table[i][1];
00931             int level, run;
00932 
00933             if(len==0){ // illegal code
00934                 run= 66;
00935                 level= MAX_LEVEL;
00936             }else if(len<0){ //more bits needed
00937                 run= 0;
00938                 level= code;
00939             }else{
00940                 if(code==rl->n){ //esc
00941                     run= 66;
00942                     level= 0;
00943                 }else{
00944                     run=   rl->table_run  [code] + 1;
00945                     level= rl->table_level[code] * qmul + qadd;
00946                     if(code >= rl->last) run+=192;
00947                 }
00948             }
00949             rl->rl_vlc[q][i].len= len;
00950             rl->rl_vlc[q][i].level= level;
00951             rl->rl_vlc[q][i].run= run;
00952         }
00953     }
00954 }
00955 
00956 void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
00957 {
00958     int i;
00959 
00960     /* release non reference frames */
00961     for(i=0; i<s->picture_count; i++){
00962         if(s->picture[i].data[0] && !s->picture[i].reference
00963            && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
00964            && (remove_current || &s->picture[i] != s->current_picture_ptr)
00965            /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
00966             free_frame_buffer(s, &s->picture[i]);
00967         }
00968     }
00969 }
00970 
00971 int ff_find_unused_picture(MpegEncContext *s, int shared){
00972     int i;
00973 
00974     if(shared){
00975         for(i=s->picture_range_start; i<s->picture_range_end; i++){
00976             if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
00977         }
00978     }else{
00979         for(i=s->picture_range_start; i<s->picture_range_end; i++){
00980             if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
00981         }
00982         for(i=s->picture_range_start; i<s->picture_range_end; i++){
00983             if(s->picture[i].data[0]==NULL) return i;
00984         }
00985     }
00986 
00987     av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
00988     /* We could return -1, but the codec would crash trying to draw into a
00989      * non-existing frame anyway. This is safer than waiting for a random crash.
00990      * Also the return of this is never useful, an encoder must only allocate
00991      * as much as allowed in the specification. This has no relationship to how
00992      * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
00993      * enough for such valid streams).
00994      * Plus, a decoder has to check stream validity and remove frames if too
00995      * many reference frames are around. Waiting for "OOM" is not correct at
00996      * all. Similarly, missing reference frames have to be replaced by
00997      * interpolated/MC frames, anything else is a bug in the codec ...
00998      */
00999     abort();
01000     return -1;
01001 }
01002 
01003 static void update_noise_reduction(MpegEncContext *s){
01004     int intra, i;
01005 
01006     for(intra=0; intra<2; intra++){
01007         if(s->dct_count[intra] > (1<<16)){
01008             for(i=0; i<64; i++){
01009                 s->dct_error_sum[intra][i] >>=1;
01010             }
01011             s->dct_count[intra] >>= 1;
01012         }
01013 
01014         for(i=0; i<64; i++){
01015             s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
01016         }
01017     }
01018 }
01019 
/**
 * Generic per-frame setup, called after the header has been decoded and
 * before a frame is coded/decoded: releases obsolete frames, selects or
 * allocates the current picture (decoder side), rotates last/next picture
 * pointers, allocates dummy reference frames when a stream starts on a
 * non-keyframe, and selects the per-frame dequantizer functions.
 *
 * @return 0 on success, -1 on picture allocation failure
 */
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    Picture *pic;
    s->mb_skipped = 0;

    assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);

    /* mark&release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
      if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
          free_frame_buffer(s, s->last_picture_ptr);

        /* release forgotten pictures */
        /* if(mpeg124/h263) */
        if(!s->encoding){
            for(i=0; i<s->picture_count; i++){
                /* a referenced picture that is neither last nor next should
                 * not exist at this point — release it and warn */
                if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
                    av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
                    free_frame_buffer(s, &s->picture[i]);
                }
            }
        }
      }
    }

    if(!s->encoding){
        ff_release_unused_pictures(s, 1);

        if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
            pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
        else{
            i= ff_find_unused_picture(s, 0);
            pic= &s->picture[i];
        }

        /* reference: 0 = not referenced, 3 = both fields referenced; H.264
         * uses the picture structure as the per-field reference mask */
        pic->reference= 0;
        if (!s->dropable){
            if (s->codec_id == CODEC_ID_H264)
                pic->reference = s->picture_structure;
            else if (s->pict_type != AV_PICTURE_TYPE_B)
                pic->reference = 3;
        }

        pic->coded_picture_number= s->coded_picture_number++;

        if(ff_alloc_picture(s, pic, 0) < 0)
            return -1;

        s->current_picture_ptr= pic;
        //FIXME use only the vars from current_pic
        s->current_picture_ptr->top_field_first= s->top_field_first;
        if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
            /* for field pictures the first coded field determines
             * top_field_first */
            if(s->picture_structure != PICT_FRAME)
                s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
        s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
        s->current_picture_ptr->field_picture= s->picture_structure != PICT_FRAME;
    }

    s->current_picture_ptr->pict_type= s->pict_type;
//    if(s->flags && CODEC_FLAG_QSCALE)
  //      s->current_picture_ptr->quality= s->new_picture_ptr->quality;
    s->current_picture_ptr->key_frame= s->pict_type == AV_PICTURE_TYPE_I;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    /* rotate the reference pictures: non-B frames become the new "next"
     * reference (unless droppable), the old "next" becomes "last" */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr= s->next_picture_ptr;
        if(!s->dropable)
            s->next_picture_ptr= s->current_picture_ptr;
    }
/*    av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
        s->last_picture_ptr    ? s->last_picture_ptr->data[0] : NULL,
        s->next_picture_ptr    ? s->next_picture_ptr->data[0] : NULL,
        s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
        s->pict_type, s->dropable);*/

    if(s->codec_id != CODEC_ID_H264){
        /* a stream starting on a P/B frame (or on a field keyframe) needs
         * reference pictures; allocate dummies so motion compensation has
         * something to read from */
        if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) &&
           (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
            if (s->pict_type != AV_PICTURE_TYPE_I)
                av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
            else if (s->picture_structure != PICT_FRAME)
                av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");

            /* Allocate a dummy frame */
            i= ff_find_unused_picture(s, 0);
            s->last_picture_ptr= &s->picture[i];
            if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
                return -1;
            /* mark both fields complete so frame-threaded consumers never
             * wait on this dummy */
            ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
            ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
        }
        if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==AV_PICTURE_TYPE_B){
            /* Allocate a dummy frame */
            i= ff_find_unused_picture(s, 0);
            s->next_picture_ptr= &s->picture[i];
            if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
                return -1;
            ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
            ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
        }
    }

    if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->data[0]));

    /* field pictures (except H.264): address a single field by doubling
     * the line stride and, for the bottom field, offsetting by one line */
    if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
        int i;
        for(i=0; i<4; i++){
            if(s->picture_structure == PICT_BOTTOM_FIELD){
                 s->current_picture.data[i] += s->current_picture.linesize[i];
            }
            s->current_picture.linesize[i] *= 2;
            s->last_picture.linesize[i] *=2;
            s->next_picture.linesize[i] *=2;
        }
    }

    s->error_recognition= avctx->error_recognition;

    /* set dequantizer, we can't do it during init as it might change for mpeg4
       and we can't do it in the header decode as init is not called for mpeg4 there yet */
    if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    }else{
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    /* dct_error_sum is only allocated when the encoder uses noise reduction */
    if(s->dct_error_sum){
        assert(s->avctx->noise_reduction && s->encoding);

        update_noise_reduction(s);
    }

    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);

    return 0;
}
01171 
/* generic function for encode/decode called after a frame has been coded/decoded:
 * pads the picture edges where needed, releases encoder scratch frames and
 * reports decoding progress to frame-threaded consumers */
void MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* redraw edges for the frame if decoding didn't complete */
    //just to make sure that all data is rendered.
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_field_end(s);
   }else if((s->error_count || s->encoding)
       && !s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv                 /* MVs may point outside the picture */
       && s->current_picture.reference       /* only reference frames need padding */
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
            /* replicate the border pixels into the EDGE_WIDTH margin of each
             * plane; chroma planes use the subsampled sizes */
            int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
            int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
            s->dsp.draw_edges(s->current_picture.data[0], s->linesize  ,
                              s->h_edge_pos             , s->v_edge_pos,
                              EDGE_WIDTH        , EDGE_WIDTH        , EDGE_TOP | EDGE_BOTTOM);
            s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize,
                              s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
                              EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
            s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize,
                              s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
                              EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    /* remember per-type state for rate control / header decisions */
    s->last_pict_type    = s->pict_type;
    s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
    if(s->pict_type!=AV_PICTURE_TYPE_B){
        s->last_non_b_pict_type= s->pict_type;
    }
#if 0
        /* copy back current_picture variables */
    for(i=0; i<MAX_PICTURE_COUNT; i++){
        if(s->picture[i].data[0] == s->current_picture.data[0]){
            s->picture[i]= s->current_picture;
            break;
        }
    }
    assert(i<MAX_PICTURE_COUNT);
#endif

    if(s->encoding){
        /* release non-reference frames */
        for(i=0; i<s->picture_count; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                free_frame_buffer(s, &s->picture[i]);
            }
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;

    /* signal frame-threaded consumers that this reference frame is fully
     * decoded (H.264 reports progress itself) */
    if (s->codec_id != CODEC_ID_H264 && s->current_picture.reference) {
        ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
    }
}
01238 
01246 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
01247     int x, y, fr, f;
01248 
01249     sx= av_clip(sx, 0, w-1);
01250     sy= av_clip(sy, 0, h-1);
01251     ex= av_clip(ex, 0, w-1);
01252     ey= av_clip(ey, 0, h-1);
01253 
01254     buf[sy*stride + sx]+= color;
01255 
01256     if(FFABS(ex - sx) > FFABS(ey - sy)){
01257         if(sx > ex){
01258             FFSWAP(int, sx, ex);
01259             FFSWAP(int, sy, ey);
01260         }
01261         buf+= sx + sy*stride;
01262         ex-= sx;
01263         f= ((ey-sy)<<16)/ex;
01264         for(x= 0; x <= ex; x++){
01265             y = (x*f)>>16;
01266             fr= (x*f)&0xFFFF;
01267             buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
01268             buf[(y+1)*stride + x]+= (color*         fr )>>16;
01269         }
01270     }else{
01271         if(sy > ey){
01272             FFSWAP(int, sx, ex);
01273             FFSWAP(int, sy, ey);
01274         }
01275         buf+= sx + sy*stride;
01276         ey-= sy;
01277         if(ey) f= ((ex-sx)<<16)/ey;
01278         else   f= 0;
01279         for(y= 0; y <= ey; y++){
01280             x = (y*f)>>16;
01281             fr= (y*f)&0xFFFF;
01282             buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;
01283             buf[y*stride + x+1]+= (color*         fr )>>16;
01284         }
01285     }
01286 }
01287 
/**
 * Draw an arrow from (sx,sy) to (ex,ey): the shaft plus, for arrows
 * longer than 3 pixels, two short head strokes at the start point.
 * Endpoints may lie somewhat outside the picture (clipped to +/-100).
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey,
                       int w, int h, int stride, int color)
{
    int dx, dy;

    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    dx = ex - sx;
    dy = ey - sy;

    if (dx * dx + dy * dy > 3 * 3) {
        /* head strokes: +/-45 degree rotations of the direction vector,
         * normalized to a fixed length */
        int rx = dx + dy;
        int ry = dy - dx;
        const int length = ff_sqrt((rx * rx + ry * ry) << 8);

        //FIXME subpixel accuracy
        rx = ROUNDED_DIV(rx * 3 << 4, length);
        ry = ROUNDED_DIV(ry * 3 << 4, length);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
01320 
01324 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
01325 
01326     if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
01327 
01328     if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
01329         int x,y;
01330 
01331         av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
01332         switch (pict->pict_type) {
01333             case AV_PICTURE_TYPE_I: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
01334             case AV_PICTURE_TYPE_P: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
01335             case AV_PICTURE_TYPE_B: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
01336             case AV_PICTURE_TYPE_S: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
01337             case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
01338             case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
01339         }
01340         for(y=0; y<s->mb_height; y++){
01341             for(x=0; x<s->mb_width; x++){
01342                 if(s->avctx->debug&FF_DEBUG_SKIP){
01343                     int count= s->mbskip_table[x + y*s->mb_stride];
01344                     if(count>9) count=9;
01345                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
01346                 }
01347                 if(s->avctx->debug&FF_DEBUG_QP){
01348                     av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
01349                 }
01350                 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
01351                     int mb_type= pict->mb_type[x + y*s->mb_stride];
01352                     //Type & MV direction
01353                     if(IS_PCM(mb_type))
01354                         av_log(s->avctx, AV_LOG_DEBUG, "P");
01355                     else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
01356                         av_log(s->avctx, AV_LOG_DEBUG, "A");
01357                     else if(IS_INTRA4x4(mb_type))
01358                         av_log(s->avctx, AV_LOG_DEBUG, "i");
01359                     else if(IS_INTRA16x16(mb_type))
01360                         av_log(s->avctx, AV_LOG_DEBUG, "I");
01361                     else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
01362                         av_log(s->avctx, AV_LOG_DEBUG, "d");
01363                     else if(IS_DIRECT(mb_type))
01364                         av_log(s->avctx, AV_LOG_DEBUG, "D");
01365                     else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
01366                         av_log(s->avctx, AV_LOG_DEBUG, "g");
01367                     else if(IS_GMC(mb_type))
01368                         av_log(s->avctx, AV_LOG_DEBUG, "G");
01369                     else if(IS_SKIP(mb_type))
01370                         av_log(s->avctx, AV_LOG_DEBUG, "S");
01371                     else if(!USES_LIST(mb_type, 1))
01372                         av_log(s->avctx, AV_LOG_DEBUG, ">");
01373                     else if(!USES_LIST(mb_type, 0))
01374                         av_log(s->avctx, AV_LOG_DEBUG, "<");
01375                     else{
01376                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01377                         av_log(s->avctx, AV_LOG_DEBUG, "X");
01378                     }
01379 
01380                     //segmentation
01381                     if(IS_8X8(mb_type))
01382                         av_log(s->avctx, AV_LOG_DEBUG, "+");
01383                     else if(IS_16X8(mb_type))
01384                         av_log(s->avctx, AV_LOG_DEBUG, "-");
01385                     else if(IS_8X16(mb_type))
01386                         av_log(s->avctx, AV_LOG_DEBUG, "|");
01387                     else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
01388                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01389                     else
01390                         av_log(s->avctx, AV_LOG_DEBUG, "?");
01391 
01392 
01393                     if(IS_INTERLACED(mb_type))
01394                         av_log(s->avctx, AV_LOG_DEBUG, "=");
01395                     else
01396                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01397                 }
01398 //                av_log(s->avctx, AV_LOG_DEBUG, " ");
01399             }
01400             av_log(s->avctx, AV_LOG_DEBUG, "\n");
01401         }
01402     }
01403 
01404     if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
01405         const int shift= 1 + s->quarter_sample;
01406         int mb_y;
01407         uint8_t *ptr;
01408         int i;
01409         int h_chroma_shift, v_chroma_shift, block_height;
01410         const int width = s->avctx->width;
01411         const int height= s->avctx->height;
01412         const int mv_sample_log2= 4 - pict->motion_subsample_log2;
01413         const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
01414         s->low_delay=0; //needed to see the vectors without trashing the buffers
01415 
01416         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
01417         for(i=0; i<3; i++){
01418             memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
01419             pict->data[i]= s->visualization_buffer[i];
01420         }
01421         pict->type= FF_BUFFER_TYPE_COPY;
01422         ptr= pict->data[0];
01423         block_height = 16>>v_chroma_shift;
01424 
01425         for(mb_y=0; mb_y<s->mb_height; mb_y++){
01426             int mb_x;
01427             for(mb_x=0; mb_x<s->mb_width; mb_x++){
01428                 const int mb_index= mb_x + mb_y*s->mb_stride;
01429                 if((s->avctx->debug_mv) && pict->motion_val){
01430                   int type;
01431                   for(type=0; type<3; type++){
01432                     int direction = 0;
01433                     switch (type) {
01434                       case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
01435                                 continue;
01436                               direction = 0;
01437                               break;
01438                       case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
01439                                 continue;
01440                               direction = 0;
01441                               break;
01442                       case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
01443                                 continue;
01444                               direction = 1;
01445                               break;
01446                     }
01447                     if(!USES_LIST(pict->mb_type[mb_index], direction))
01448                         continue;
01449 
01450                     if(IS_8X8(pict->mb_type[mb_index])){
01451                       int i;
01452                       for(i=0; i<4; i++){
01453                         int sx= mb_x*16 + 4 + 8*(i&1);
01454                         int sy= mb_y*16 + 4 + 8*(i>>1);
01455                         int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
01456                         int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
01457                         int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
01458                         draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
01459                       }
01460                     }else if(IS_16X8(pict->mb_type[mb_index])){
01461                       int i;
01462                       for(i=0; i<2; i++){
01463                         int sx=mb_x*16 + 8;
01464                         int sy=mb_y*16 + 4 + 8*i;
01465                         int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
01466                         int mx=(pict->motion_val[direction][xy][0]>>shift);
01467                         int my=(pict->motion_val[direction][xy][1]>>shift);
01468 
01469                         if(IS_INTERLACED(pict->mb_type[mb_index]))
01470                             my*=2;
01471 
01472                         draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
01473                       }
01474                     }else if(IS_8X16(pict->mb_type[mb_index])){
01475                       int i;
01476                       for(i=0; i<2; i++){
01477                         int sx=mb_x*16 + 4 + 8*i;
01478                         int sy=mb_y*16 + 8;
01479                         int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
01480                         int mx=(pict->motion_val[direction][xy][0]>>shift);
01481                         int my=(pict->motion_val[direction][xy][1]>>shift);
01482 
01483                         if(IS_INTERLACED(pict->mb_type[mb_index]))
01484                             my*=2;
01485 
01486                         draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
01487                       }
01488                     }else{
01489                       int sx= mb_x*16 + 8;
01490                       int sy= mb_y*16 + 8;
01491                       int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
01492                       int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
01493                       int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
01494                       draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
01495                     }
01496                   }
01497                 }
01498                 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
01499                     uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
01500                     int y;
01501                     for(y=0; y<block_height; y++){
01502                         *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
01503                         *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
01504                     }
01505                 }
01506                 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
01507                     int mb_type= pict->mb_type[mb_index];
01508                     uint64_t u,v;
01509                     int y;
01510 #define COLOR(theta, r)\
01511 u= (int)(128 + r*cos(theta*3.141592/180));\
01512 v= (int)(128 + r*sin(theta*3.141592/180));
01513 
01514 
01515                     u=v=128;
01516                     if(IS_PCM(mb_type)){
01517                         COLOR(120,48)
01518                     }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
01519                         COLOR(30,48)
01520                     }else if(IS_INTRA4x4(mb_type)){
01521                         COLOR(90,48)
01522                     }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
01523 //                        COLOR(120,48)
01524                     }else if(IS_DIRECT(mb_type)){
01525                         COLOR(150,48)
01526                     }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
01527                         COLOR(170,48)
01528                     }else if(IS_GMC(mb_type)){
01529                         COLOR(190,48)
01530                     }else if(IS_SKIP(mb_type)){
01531 //                        COLOR(180,48)
01532                     }else if(!USES_LIST(mb_type, 1)){
01533                         COLOR(240,48)
01534                     }else if(!USES_LIST(mb_type, 0)){
01535                         COLOR(0,48)
01536                     }else{
01537                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01538                         COLOR(300,48)
01539                     }
01540 
01541                     u*= 0x0101010101010101ULL;
01542                     v*= 0x0101010101010101ULL;
01543                     for(y=0; y<block_height; y++){
01544                         *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
01545                         *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
01546                     }
01547 
01548                     //segmentation
01549                     if(IS_8X8(mb_type) || IS_16X8(mb_type)){
01550                         *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
01551                         *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
01552                     }
01553                     if(IS_8X8(mb_type) || IS_8X16(mb_type)){
01554                         for(y=0; y<16; y++)
01555                             pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
01556                     }
01557                     if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
01558                         int dm= 1 << (mv_sample_log2-2);
01559                         for(i=0; i<4; i++){
01560                             int sx= mb_x*16 + 8*(i&1);
01561                             int sy= mb_y*16 + 8*(i>>1);
01562                             int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
01563                             //FIXME bidir
01564                             int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
01565                             if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
01566                                 for(y=0; y<8; y++)
01567                                     pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
01568                             if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
01569                                 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
01570                         }
01571                     }
01572 
01573                     if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
01574                         // hmm
01575                     }
01576                 }
01577                 s->mbskip_table[mb_index]=0;
01578             }
01579         }
01580     }
01581 }
01582 
01583 static inline int hpel_motion_lowres(MpegEncContext *s,
01584                                   uint8_t *dest, uint8_t *src,
01585                                   int field_based, int field_select,
01586                                   int src_x, int src_y,
01587                                   int width, int height, int stride,
01588                                   int h_edge_pos, int v_edge_pos,
01589                                   int w, int h, h264_chroma_mc_func *pix_op,
01590                                   int motion_x, int motion_y)
01591 {
01592     const int lowres= s->avctx->lowres;
01593     const int op_index= FFMIN(lowres, 2);
01594     const int s_mask= (2<<lowres)-1;
01595     int emu=0;
01596     int sx, sy;
01597 
01598     if(s->quarter_sample){
01599         motion_x/=2;
01600         motion_y/=2;
01601     }
01602 
01603     sx= motion_x & s_mask;
01604     sy= motion_y & s_mask;
01605     src_x += motion_x >> (lowres+1);
01606     src_y += motion_y >> (lowres+1);
01607 
01608     src += src_y * stride + src_x;
01609 
01610     if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - w
01611        || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
01612         s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
01613                             src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
01614         src= s->edge_emu_buffer;
01615         emu=1;
01616     }
01617 
01618     sx= (sx << 2) >> lowres;
01619     sy= (sy << 2) >> lowres;
01620     if(field_select)
01621         src += s->linesize;
01622     pix_op[op_index](dest, src, stride, h, sx, sy);
01623     return emu;
01624 }
01625 
/**
 * Apply one MPEG motion vector to the three components (Y, Cb, Cr)
 * in reduced-resolution (lowres) decoding mode.
 *
 * @param field_based  1 if the prediction is field based (doubles linesize)
 * @param bottom_field 1 if the bottom field of the destination is written
 * @param field_select source field to predict from
 * @param ref_picture  data[] plane pointers of the reference picture
 * @param pix_op       put or avg h264-chroma-style MC function table
 * @param motion_x     horizontal motion vector (half-pel units after the
 *                     quarter_sample correction below)
 * @param motion_y     vertical motion vector, same units as motion_x
 * @param h            height of the predicted area
 * @param mb_y         macroblock row to predict for
 */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y, int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);   // pix_op index is capped at 2
    const int block_s= 8>>lowres;           // lowres block size
    const int s_mask= (2<<lowres)-1;        // sub-pel fraction mask
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.linesize[0] << field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;

    if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
        motion_x/=2;
        motion_y/=2;
    }

    if(field_based){
        // NOTE(review): parity-dependent vertical correction for lowres
        // field prediction; (1<<lowres)-1 compensates the subsampling offset
        motion_y += (bottom_field - field_select)*((1<<lowres)-1);
    }

    /* split the luma vector into an integer position and a sub-pel fraction */
    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x = s->mb_x*2*block_s               + (motion_x >> (lowres+1));
    src_y =(   mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));

    /* derive the chroma position and fraction; the rule depends on the codec */
    if (s->out_format == FMT_H263) {
        uvsx = ((motion_x>>1) & s_mask) | (sx&1);
        uvsy = ((motion_y>>1) & s_mask) | (sy&1);
        uvsrc_x = src_x>>1;
        uvsrc_y = src_y>>1;
    }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2*mx) & s_mask;
        uvsy = (2*my) & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> lowres);
        uvsrc_y =    mb_y*block_s               + (my >> lowres);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
        uvsx = mx & s_mask;
        uvsy = my & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
        uvsrc_y =(   mb_y*block_s>>field_based) + (my >> (lowres+1));
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* use edge emulation when the block reaches outside the padded picture */
    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - 2*block_s
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                             src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
            ptr_y = s->edge_emu_buffer;
            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
                s->dsp.emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                ptr_cb= uvbuf;
                ptr_cr= uvbuf+16;
            }
    }

    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    /* rescale the fractions to the range the chroma MC functions expect */
    sx= (sx << 2) >> lowres;
    sy= (sy << 2) >> lowres;
    // NOTE(review): pix_op[lowres-1] implies lowres >= 1 here; the non-lowres
    // MC path is used for lowres == 0 -- confirm against callers
    pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);

    if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        uvsx= (uvsx << 2) >> lowres;
        uvsy= (uvsy << 2) >> lowres;
        pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
    }
    //FIXME h261 lowres loop filter
}
01723 
01724 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
01725                                      uint8_t *dest_cb, uint8_t *dest_cr,
01726                                      uint8_t **ref_picture,
01727                                      h264_chroma_mc_func *pix_op,
01728                                      int mx, int my){
01729     const int lowres= s->avctx->lowres;
01730     const int op_index= FFMIN(lowres, 2);
01731     const int block_s= 8>>lowres;
01732     const int s_mask= (2<<lowres)-1;
01733     const int h_edge_pos = s->h_edge_pos >> (lowres+1);
01734     const int v_edge_pos = s->v_edge_pos >> (lowres+1);
01735     int emu=0, src_x, src_y, offset, sx, sy;
01736     uint8_t *ptr;
01737 
01738     if(s->quarter_sample){
01739         mx/=2;
01740         my/=2;
01741     }
01742 
01743     /* In case of 8X8, we construct a single chroma motion vector
01744        with a special rounding */
01745     mx= ff_h263_round_chroma(mx);
01746     my= ff_h263_round_chroma(my);
01747 
01748     sx= mx & s_mask;
01749     sy= my & s_mask;
01750     src_x = s->mb_x*block_s + (mx >> (lowres+1));
01751     src_y = s->mb_y*block_s + (my >> (lowres+1));
01752 
01753     offset = src_y * s->uvlinesize + src_x;
01754     ptr = ref_picture[1] + offset;
01755     if(s->flags&CODEC_FLAG_EMU_EDGE){
01756         if(   (unsigned)src_x > h_edge_pos - (!!sx) - block_s
01757            || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
01758             s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
01759             ptr= s->edge_emu_buffer;
01760             emu=1;
01761         }
01762     }
01763     sx= (sx << 2) >> lowres;
01764     sy= (sy << 2) >> lowres;
01765     pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
01766 
01767     ptr = ref_picture[2] + offset;
01768     if(emu){
01769         s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
01770         ptr= s->edge_emu_buffer;
01771     }
01772     pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
01773 }
01774 
/**
 * Motion compensation of a single macroblock in reduced-resolution
 * (lowres) decoding mode, dispatching on s->mv_type.
 *
 * @param s           codec context
 * @param dest_y      luma destination pointer
 * @param dest_cb     chroma Cb destination pointer
 * @param dest_cr     chroma Cr destination pointer
 * @param dir         prediction direction, indexes s->mv / s->field_select
 *                    (0 = forward, 1 = backward)
 * @param ref_picture plane pointers of the reference picture
 * @param pix_op      chroma-style MC function table (put or avg)
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int dir, uint8_t **ref_picture,
                              h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;   /* lowres block size */

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                    0, 0, 0,
                    ref_picture, pix_op,
                    s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four vectors, one per 8x8 luma block; chroma uses their sum */
        mx = 0;
        my = 0;
            for(i=0;i<4;i++) {
                hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
                            ref_picture[0], 0, 0,
                            (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
                            s->width, s->height, s->linesize,
                            s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                            block_s, block_s, pix_op,
                            s->mv[dir][i][0], s->mv[dir][i][1]);

                mx += s->mv[dir][i][0];
                my += s->mv[dir][i][1];
            }

        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 1, s->field_select[dir][1],
                        ref_picture, pix_op,
                        s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
        } else {
            /* field picture: NOTE(review) the current picture appears to be
               selected when the referenced field belongs to the same frame
               being decoded -- verify against the frame/field call sites */
            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
                ref_picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, one for each 16x8 half of the macroblock */
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
                ref2picture= ref_picture;
            }else{
                ref2picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);

            dest_y += 2*block_s*s->linesize;
            dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
            dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual motion vectors: each block is predicted twice and the two
           predictions are combined by switching pix_op from put to avg */
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                1, j, j^i,
                                ref_picture, pix_op,
                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
                }
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        }else{
            for(i=0; i<2; i++){
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                            0, 0, s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);

                // after put we make avg of the same block
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                //opposite parity is always in the same frame if this is second field
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->data;
                }
            }
        }
    break;
    default: assert(0);
    }
}
01900 
01904 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
01905 {
01906     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
01907     int my, off, i, mvs;
01908 
01909     if (s->picture_structure != PICT_FRAME) goto unhandled;
01910 
01911     switch (s->mv_type) {
01912         case MV_TYPE_16X16:
01913             mvs = 1;
01914             break;
01915         case MV_TYPE_16X8:
01916             mvs = 2;
01917             break;
01918         case MV_TYPE_8X8:
01919             mvs = 4;
01920             break;
01921         default:
01922             goto unhandled;
01923     }
01924 
01925     for (i = 0; i < mvs; i++) {
01926         my = s->mv[dir][i][1]<<qpel_shift;
01927         my_max = FFMAX(my_max, my);
01928         my_min = FFMIN(my_min, my);
01929     }
01930 
01931     off = (FFMAX(-my_min, my_max) + 63) >> 6;
01932 
01933     return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
01934 unhandled:
01935     return s->mb_height-1;
01936 }
01937 
/**
 * put block[] to dest[]: dequantize the intra block with the codec's
 * intra unquantizer, then run the IDCT and store the result in dest.
 *
 * @param block     DCT coefficients of the block
 * @param i         block index, forwarded to the unquantizer
 * @param dest      destination pixel pointer
 * @param line_size destination stride
 * @param qscale    quantizer scale used for dequantization
 */
static inline void put_dct(MpegEncContext *s,
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
01945 
01946 /* add block[] to dest[] */
01947 static inline void add_dct(MpegEncContext *s,
01948                            DCTELEM *block, int i, uint8_t *dest, int line_size)
01949 {
01950     if (s->block_last_index[i] >= 0) {
01951         s->dsp.idct_add (dest, line_size, block);
01952     }
01953 }
01954 
01955 static inline void add_dequant_dct(MpegEncContext *s,
01956                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
01957 {
01958     if (s->block_last_index[i] >= 0) {
01959         s->dct_unquantize_inter(s, block, i, qscale);
01960 
01961         s->dsp.idct_add (dest, line_size, block);
01962     }
01963 }
01964 
01968 void ff_clean_intra_table_entries(MpegEncContext *s)
01969 {
01970     int wrap = s->b8_stride;
01971     int xy = s->block_index[0];
01972 
01973     s->dc_val[0][xy           ] =
01974     s->dc_val[0][xy + 1       ] =
01975     s->dc_val[0][xy     + wrap] =
01976     s->dc_val[0][xy + 1 + wrap] = 1024;
01977     /* ac pred */
01978     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
01979     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
01980     if (s->msmpeg4_version>=3) {
01981         s->coded_block[xy           ] =
01982         s->coded_block[xy + 1       ] =
01983         s->coded_block[xy     + wrap] =
01984         s->coded_block[xy + 1 + wrap] = 0;
01985     }
01986     /* chroma */
01987     wrap = s->mb_stride;
01988     xy = s->mb_x + s->mb_y * wrap;
01989     s->dc_val[1][xy] =
01990     s->dc_val[2][xy] = 1024;
01991     /* ac pred */
01992     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
01993     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
01994 
01995     s->mbintra_table[xy]= 0;
01996 }
01997 
01998 /* generic function called after a macroblock has been parsed by the
01999    decoder or after it has been encoded by the encoder.
02000 
02001    Important variables used:
02002    s->mb_intra : true if intra macroblock
02003    s->mv_dir   : motion vector direction
02004    s->mv_type  : motion vector type
02005    s->mv       : motion vector
02006    s->interlaced_dct : true if interlaced dct used (mpeg2)
02007  */
02008 static av_always_inline
02009 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
02010                             int lowres_flag, int is_mpeg12)
02011 {
02012     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
02013     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
02014         ff_xvmc_decode_mb(s);//xvmc uses pblocks
02015         return;
02016     }
02017 
02018     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
02019        /* save DCT coefficients */
02020        int i,j;
02021        DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
02022        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
02023        for(i=0; i<6; i++){
02024            for(j=0; j<64; j++){
02025                *dct++ = block[i][s->dsp.idct_permutation[j]];
02026                av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
02027            }
02028            av_log(s->avctx, AV_LOG_DEBUG, "\n");
02029        }
02030     }
02031 
02032     s->current_picture.qscale_table[mb_xy]= s->qscale;
02033 
02034     /* update DC predictors for P macroblocks */
02035     if (!s->mb_intra) {
02036         if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
02037             if(s->mbintra_table[mb_xy])
02038                 ff_clean_intra_table_entries(s);
02039         } else {
02040             s->last_dc[0] =
02041             s->last_dc[1] =
02042             s->last_dc[2] = 128 << s->intra_dc_precision;
02043         }
02044     }
02045     else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
02046         s->mbintra_table[mb_xy]=1;
02047 
02048     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
02049         uint8_t *dest_y, *dest_cb, *dest_cr;
02050         int dct_linesize, dct_offset;
02051         op_pixels_func (*op_pix)[4];
02052         qpel_mc_func (*op_qpix)[16];
02053         const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
02054         const int uvlinesize= s->current_picture.linesize[1];
02055         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
02056         const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
02057 
02058         /* avoid copy if macroblock skipped in last frame too */
02059         /* skip only during decoding as we might trash the buffers during encoding a bit */
02060         if(!s->encoding){
02061             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
02062             const int age= s->current_picture.age;
02063 
02064             assert(age);
02065 
02066             if (s->mb_skipped) {
02067                 s->mb_skipped= 0;
02068                 assert(s->pict_type!=AV_PICTURE_TYPE_I);
02069 
02070                 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
02071                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
02072 
02073                 /* if previous was skipped too, then nothing to do !  */
02074                 if (*mbskip_ptr >= age && s->current_picture.reference){
02075                     return;
02076                 }
02077             } else if(!s->current_picture.reference){
02078                 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
02079                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
02080             } else{
02081                 *mbskip_ptr = 0; /* not skipped */
02082             }
02083         }
02084 
02085         dct_linesize = linesize << s->interlaced_dct;
02086         dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
02087 
02088         if(readable){
02089             dest_y=  s->dest[0];
02090             dest_cb= s->dest[1];
02091             dest_cr= s->dest[2];
02092         }else{
02093             dest_y = s->b_scratchpad;
02094             dest_cb= s->b_scratchpad+16*linesize;
02095             dest_cr= s->b_scratchpad+32*linesize;
02096         }
02097 
02098         if (!s->mb_intra) {
02099             /* motion handling */
02100             /* decoding or more than one mb_type (MC was already done otherwise) */
02101             if(!s->encoding){
02102 
02103                 if(HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
02104                     if (s->mv_dir & MV_DIR_FORWARD) {
02105                         ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
02106                     }
02107                     if (s->mv_dir & MV_DIR_BACKWARD) {
02108                         ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
02109                     }
02110                 }
02111 
02112                 if(lowres_flag){
02113                     h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
02114 
02115                     if (s->mv_dir & MV_DIR_FORWARD) {
02116                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
02117                         op_pix = s->dsp.avg_h264_chroma_pixels_tab;
02118                     }
02119                     if (s->mv_dir & MV_DIR_BACKWARD) {
02120                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
02121                     }
02122                 }else{
02123                     op_qpix= s->me.qpel_put;
02124                     if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
02125                         op_pix = s->dsp.put_pixels_tab;
02126                     }else{
02127                         op_pix = s->dsp.put_no_rnd_pixels_tab;
02128                     }
02129                     if (s->mv_dir & MV_DIR_FORWARD) {
02130                         MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
02131                         op_pix = s->dsp.avg_pixels_tab;
02132                         op_qpix= s->me.qpel_avg;
02133                     }
02134                     if (s->mv_dir & MV_DIR_BACKWARD) {
02135                         MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
02136                     }
02137                 }
02138             }
02139 
02140             /* skip dequant / idct if we are really late ;) */
02141             if(s->avctx->skip_idct){
02142                 if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
02143                    ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
02144                    || s->avctx->skip_idct >= AVDISCARD_ALL)
02145                     goto skip_idct;
02146             }
02147 
02148             /* add dct residue */
02149             if(s->encoding || !(   s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
02150                                 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
02151                 add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
02152                 add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
02153                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
02154                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02155 
02156                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02157                     if (s->chroma_y_shift){
02158                         add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02159                         add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02160                     }else{
02161                         dct_linesize >>= 1;
02162                         dct_offset >>=1;
02163                         add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
02164                         add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
02165                         add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02166                         add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02167                     }
02168                 }
02169             } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
02170                 add_dct(s, block[0], 0, dest_y                          , dct_linesize);
02171                 add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
02172                 add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
02173                 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
02174 
02175                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02176                     if(s->chroma_y_shift){//Chroma420
02177                         add_dct(s, block[4], 4, dest_cb, uvlinesize);
02178                         add_dct(s, block[5], 5, dest_cr, uvlinesize);
02179                     }else{
02180                         //chroma422
02181                         dct_linesize = uvlinesize << s->interlaced_dct;
02182                         dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
02183 
02184                         add_dct(s, block[4], 4, dest_cb, dct_linesize);
02185                         add_dct(s, block[5], 5, dest_cr, dct_linesize);
02186                         add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
02187                         add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
02188                         if(!s->chroma_x_shift){//Chroma444
02189                             add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
02190                             add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
02191                             add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
02192                             add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
02193                         }
02194                     }
02195                 }//fi gray
02196             }
02197             else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
02198                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
02199             }
02200         } else {
02201             /* dct only in intra block */
02202             if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
02203                 put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
02204                 put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
02205                 put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
02206                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02207 
02208                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02209                     if(s->chroma_y_shift){
02210                         put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02211                         put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02212                     }else{
02213                         dct_offset >>=1;
02214                         dct_linesize >>=1;
02215                         put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
02216                         put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
02217                         put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02218                         put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02219                     }
02220                 }
02221             }else{
02222                 s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
02223                 s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
02224                 s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
02225                 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
02226 
02227                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02228                     if(s->chroma_y_shift){
02229                         s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
02230                         s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
02231                     }else{
02232 
02233                         dct_linesize = uvlinesize << s->interlaced_dct;
02234                         dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
02235 
02236                         s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
02237                         s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
02238                         s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
02239                         s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
02240                         if(!s->chroma_x_shift){//Chroma444
02241                             s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
02242                             s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
02243                             s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
02244                             s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
02245                         }
02246                     }
02247                 }//gray
02248             }
02249         }
02250 skip_idct:
02251         if(!readable){
02252             s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
02253             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
02254             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
02255         }
02256     }
02257 }
02258 
02259 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
02260 #if !CONFIG_SMALL
02261     if(s->out_format == FMT_MPEG1) {
02262         if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
02263         else                 MPV_decode_mb_internal(s, block, 0, 1);
02264     } else
02265 #endif
02266     if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
02267     else                  MPV_decode_mb_internal(s, block, 0, 0);
02268 }
02269 
/**
 * Notify the user/consumer that a horizontal band of the picture has been
 * fully reconstructed, after drawing the replicated edge border needed by
 * motion compensation of following frames.
 * @param y first luma row of the band (field rows for field pictures;
 *          doubled to frame rows below)
 * @param h band height in luma rows
 */
void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
    const int field_pic= s->picture_structure != PICT_FRAME;
    if(field_pic){
        /* field picture: each field row covers two frame rows, so convert
         * the field coordinates to frame coordinates */
        h <<= 1;
        y <<= 1;
    }

    /* Extend the picture border (edge replication) for the finished band of
     * a reference picture, so unrestricted motion vectors of later frames
     * can read beyond the picture edges. Skipped when there are no CPU-side
     * pixels (hwaccel/VDPAU), for intra-only codecs, and when the user
     * buffer has no edge area (CODEC_FLAG_EMU_EDGE). */
    if (!s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
        int sides = 0, edge_h;
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        /* only extend top/bottom when the band actually touches that edge */
        if (y==0) sides |= EDGE_TOP;
        if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;

        /* clip the band against the bottom of the coded area */
        edge_h= FFMIN(h, s->v_edge_pos - y);

        s->dsp.draw_edges(s->current_picture_ptr->data[0] +  y         *s->linesize,
                          s->linesize,           s->h_edge_pos,         edge_h,
                          EDGE_WIDTH,            EDGE_WIDTH,            sides);
        s->dsp.draw_edges(s->current_picture_ptr->data[1] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
        s->dsp.draw_edges(s->current_picture_ptr->data[2] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
    }

    /* clip against the display height (may be smaller than the coded height) */
    h= FFMIN(h, s->avctx->height - y);

    if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;

    if (s->avctx->draw_horiz_band) {
        AVFrame *src;
        int offset[4];

        /* B-frames / low-delay / coded-order output: show the picture just
         * decoded; otherwise show the previous (display-order) picture */
        if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
            src= (AVFrame*)s->current_picture_ptr;
        else if(s->last_picture_ptr)
            src= (AVFrame*)s->last_picture_ptr;
        else
            return;

        if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
            offset[0]=
            offset[1]=
            offset[2]=
            offset[3]= 0;
        }else{
            /* data pointers offset to the first row of the band */
            offset[0]= y * s->linesize;
            offset[1]=
            offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
            offset[3]= 0;
        }

        /* restore the FPU/MMX state before calling back into user code */
        emms_c();

        s->avctx->draw_horiz_band(s->avctx, src, offset,
                                  y, s->picture_structure, h);
    }
}
02339 
02340 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
02341     const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
02342     const int uvlinesize= s->current_picture.linesize[1];
02343     const int mb_size= 4 - s->avctx->lowres;
02344 
02345     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
02346     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
02347     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
02348     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
02349     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02350     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02351     //block_index is not used by mpeg2, so it is not affected by chroma_format
02352 
02353     s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
02354     s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02355     s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02356 
02357     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
02358     {
02359         if(s->picture_structure==PICT_FRAME){
02360         s->dest[0] += s->mb_y *   linesize << mb_size;
02361         s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02362         s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02363         }else{
02364             s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
02365             s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02366             s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02367             assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
02368         }
02369     }
02370 }
02371 
02372 void ff_mpeg_flush(AVCodecContext *avctx){
02373     int i;
02374     MpegEncContext *s = avctx->priv_data;
02375 
02376     if(s==NULL || s->picture==NULL)
02377         return;
02378 
02379     for(i=0; i<s->picture_count; i++){
02380        if(s->picture[i].data[0] && (   s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
02381                                     || s->picture[i].type == FF_BUFFER_TYPE_USER))
02382         free_frame_buffer(s, &s->picture[i]);
02383     }
02384     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
02385 
02386     s->mb_x= s->mb_y= 0;
02387     s->closed_gop= 0;
02388 
02389     s->parse_context.state= -1;
02390     s->parse_context.frame_start_found= 0;
02391     s->parse_context.overread= 0;
02392     s->parse_context.overread_index= 0;
02393     s->parse_context.index= 0;
02394     s->parse_context.last_index= 0;
02395     s->bitstream_buffer_size=0;
02396     s->pp_time=0;
02397 }
02398 
02399 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02400                                    DCTELEM *block, int n, int qscale)
02401 {
02402     int i, level, nCoeffs;
02403     const uint16_t *quant_matrix;
02404 
02405     nCoeffs= s->block_last_index[n];
02406 
02407     if (n < 4)
02408         block[0] = block[0] * s->y_dc_scale;
02409     else
02410         block[0] = block[0] * s->c_dc_scale;
02411     /* XXX: only mpeg1 */
02412     quant_matrix = s->intra_matrix;
02413     for(i=1;i<=nCoeffs;i++) {
02414         int j= s->intra_scantable.permutated[i];
02415         level = block[j];
02416         if (level) {
02417             if (level < 0) {
02418                 level = -level;
02419                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02420                 level = (level - 1) | 1;
02421                 level = -level;
02422             } else {
02423                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02424                 level = (level - 1) | 1;
02425             }
02426             block[j] = level;
02427         }
02428     }
02429 }
02430 
02431 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02432                                    DCTELEM *block, int n, int qscale)
02433 {
02434     int i, level, nCoeffs;
02435     const uint16_t *quant_matrix;
02436 
02437     nCoeffs= s->block_last_index[n];
02438 
02439     quant_matrix = s->inter_matrix;
02440     for(i=0; i<=nCoeffs; i++) {
02441         int j= s->intra_scantable.permutated[i];
02442         level = block[j];
02443         if (level) {
02444             if (level < 0) {
02445                 level = -level;
02446                 level = (((level << 1) + 1) * qscale *
02447                          ((int) (quant_matrix[j]))) >> 4;
02448                 level = (level - 1) | 1;
02449                 level = -level;
02450             } else {
02451                 level = (((level << 1) + 1) * qscale *
02452                          ((int) (quant_matrix[j]))) >> 4;
02453                 level = (level - 1) | 1;
02454             }
02455             block[j] = level;
02456         }
02457     }
02458 }
02459 
02460 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02461                                    DCTELEM *block, int n, int qscale)
02462 {
02463     int i, level, nCoeffs;
02464     const uint16_t *quant_matrix;
02465 
02466     if(s->alternate_scan) nCoeffs= 63;
02467     else nCoeffs= s->block_last_index[n];
02468 
02469     if (n < 4)
02470         block[0] = block[0] * s->y_dc_scale;
02471     else
02472         block[0] = block[0] * s->c_dc_scale;
02473     quant_matrix = s->intra_matrix;
02474     for(i=1;i<=nCoeffs;i++) {
02475         int j= s->intra_scantable.permutated[i];
02476         level = block[j];
02477         if (level) {
02478             if (level < 0) {
02479                 level = -level;
02480                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02481                 level = -level;
02482             } else {
02483                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02484             }
02485             block[j] = level;
02486         }
02487     }
02488 }
02489 
02490 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02491                                    DCTELEM *block, int n, int qscale)
02492 {
02493     int i, level, nCoeffs;
02494     const uint16_t *quant_matrix;
02495     int sum=-1;
02496 
02497     if(s->alternate_scan) nCoeffs= 63;
02498     else nCoeffs= s->block_last_index[n];
02499 
02500     if (n < 4)
02501         block[0] = block[0] * s->y_dc_scale;
02502     else
02503         block[0] = block[0] * s->c_dc_scale;
02504     quant_matrix = s->intra_matrix;
02505     for(i=1;i<=nCoeffs;i++) {
02506         int j= s->intra_scantable.permutated[i];
02507         level = block[j];
02508         if (level) {
02509             if (level < 0) {
02510                 level = -level;
02511                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02512                 level = -level;
02513             } else {
02514                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02515             }
02516             block[j] = level;
02517             sum+=level;
02518         }
02519     }
02520     block[63]^=sum&1;
02521 }
02522 
02523 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02524                                    DCTELEM *block, int n, int qscale)
02525 {
02526     int i, level, nCoeffs;
02527     const uint16_t *quant_matrix;
02528     int sum=-1;
02529 
02530     if(s->alternate_scan) nCoeffs= 63;
02531     else nCoeffs= s->block_last_index[n];
02532 
02533     quant_matrix = s->inter_matrix;
02534     for(i=0; i<=nCoeffs; i++) {
02535         int j= s->intra_scantable.permutated[i];
02536         level = block[j];
02537         if (level) {
02538             if (level < 0) {
02539                 level = -level;
02540                 level = (((level << 1) + 1) * qscale *
02541                          ((int) (quant_matrix[j]))) >> 4;
02542                 level = -level;
02543             } else {
02544                 level = (((level << 1) + 1) * qscale *
02545                          ((int) (quant_matrix[j]))) >> 4;
02546             }
02547             block[j] = level;
02548             sum+=level;
02549         }
02550     }
02551     block[63]^=sum&1;
02552 }
02553 
02554 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02555                                   DCTELEM *block, int n, int qscale)
02556 {
02557     int i, level, qmul, qadd;
02558     int nCoeffs;
02559 
02560     assert(s->block_last_index[n]>=0);
02561 
02562     qmul = qscale << 1;
02563 
02564     if (!s->h263_aic) {
02565         if (n < 4)
02566             block[0] = block[0] * s->y_dc_scale;
02567         else
02568             block[0] = block[0] * s->c_dc_scale;
02569         qadd = (qscale - 1) | 1;
02570     }else{
02571         qadd = 0;
02572     }
02573     if(s->ac_pred)
02574         nCoeffs=63;
02575     else
02576         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02577 
02578     for(i=1; i<=nCoeffs; i++) {
02579         level = block[i];
02580         if (level) {
02581             if (level < 0) {
02582                 level = level * qmul - qadd;
02583             } else {
02584                 level = level * qmul + qadd;
02585             }
02586             block[i] = level;
02587         }
02588     }
02589 }
02590 
02591 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
02592                                   DCTELEM *block, int n, int qscale)
02593 {
02594     int i, level, qmul, qadd;
02595     int nCoeffs;
02596 
02597     assert(s->block_last_index[n]>=0);
02598 
02599     qadd = (qscale - 1) | 1;
02600     qmul = qscale << 1;
02601 
02602     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02603 
02604     for(i=0; i<=nCoeffs; i++) {
02605         level = block[i];
02606         if (level) {
02607             if (level < 0) {
02608                 level = level * qmul - qadd;
02609             } else {
02610                 level = level * qmul + qadd;
02611             }
02612             block[i] = level;
02613         }
02614     }
02615 }
02616 
02620 void ff_set_qscale(MpegEncContext * s, int qscale)
02621 {
02622     if (qscale < 1)
02623         qscale = 1;
02624     else if (qscale > 31)
02625         qscale = 31;
02626 
02627     s->qscale = qscale;
02628     s->chroma_qscale= s->chroma_qscale_table[qscale];
02629 
02630     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
02631     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
02632 }
02633 
02634 void MPV_report_decode_progress(MpegEncContext *s)
02635 {
02636     if (s->pict_type != FF_B_TYPE && !s->partitioned_frame && !s->error_occurred)
02637         ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
02638 }