Libav 0.7.1: libavcodec/vdpau.c
/*
 * Video Decode and Presentation API for UNIX (VDPAU) is used for
 * HW decode acceleration for MPEG-1/2, MPEG-4 ASP, H.264 and VC-1.
 *
 * Copyright (c) 2008 NVIDIA
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <limits.h>
#include "avcodec.h"
#include "h264.h"
#include "vc1.h"

#undef NDEBUG
#include <assert.h>

#include "vdpau.h"
#include "vdpau_internal.h"

void ff_vdpau_h264_set_reference_frames(MpegEncContext *s)
{
    H264Context *h = s->avctx->priv_data;
    struct vdpau_render_state *render, *render_ref;
    VdpReferenceFrameH264 *rf, *rf2;
    Picture *pic;
    int i, list, pic_frame_idx;

    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
    assert(render);

    rf = &render->info.h264.referenceFrames[0];
#define H264_RF_COUNT FF_ARRAY_ELEMS(render->info.h264.referenceFrames)

    for (list = 0; list < 2; ++list) {
        Picture **lp = list ? h->long_ref : h->short_ref;
        int ls = list ? 16 : h->short_ref_count;

        for (i = 0; i < ls; ++i) {
            pic = lp[i];
            if (!pic || !pic->reference)
                continue;
            pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;

            render_ref = (struct vdpau_render_state *)pic->data[0];
            assert(render_ref);

            rf2 = &render->info.h264.referenceFrames[0];
            while (rf2 != rf) {
                if ((rf2->surface      == render_ref->surface)
                    && (rf2->is_long_term == pic->long_ref)
                    && (rf2->frame_idx    == pic_frame_idx))
                    break;
                ++rf2;
            }
            if (rf2 != rf) {
                rf2->top_is_reference    |= (pic->reference & PICT_TOP_FIELD)    ? VDP_TRUE : VDP_FALSE;
                rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
                continue;
            }

            if (rf >= &render->info.h264.referenceFrames[H264_RF_COUNT])
                continue;

            rf->surface             = render_ref->surface;
            rf->is_long_term        = pic->long_ref;
            rf->top_is_reference    = (pic->reference & PICT_TOP_FIELD)    ? VDP_TRUE : VDP_FALSE;
            rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
            rf->field_order_cnt[0]  = pic->field_poc[0];
            rf->field_order_cnt[1]  = pic->field_poc[1];
            rf->frame_idx           = pic_frame_idx;

            ++rf;
        }
    }

    for (; rf < &render->info.h264.referenceFrames[H264_RF_COUNT]; ++rf) {
        rf->surface             = VDP_INVALID_HANDLE;
        rf->is_long_term        = 0;
        rf->top_is_reference    = 0;
        rf->bottom_is_reference = 0;
        rf->field_order_cnt[0]  = 0;
        rf->field_order_cnt[1]  = 0;
        rf->frame_idx           = 0;
    }
}

void ff_vdpau_add_data_chunk(MpegEncContext *s,
                             const uint8_t *buf, int buf_size)
{
    struct vdpau_render_state *render;

    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
    assert(render);

    render->bitstream_buffers = av_fast_realloc(
        render->bitstream_buffers,
        &render->bitstream_buffers_allocated,
        sizeof(*render->bitstream_buffers) * (render->bitstream_buffers_used + 1)
    );

    render->bitstream_buffers[render->bitstream_buffers_used].struct_version  = VDP_BITSTREAM_BUFFER_VERSION;
    render->bitstream_buffers[render->bitstream_buffers_used].bitstream       = buf;
    render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = buf_size;
    render->bitstream_buffers_used++;
}

void ff_vdpau_h264_picture_start(MpegEncContext *s)
{
    H264Context *h = s->avctx->priv_data;
    struct vdpau_render_state *render;
    int i;

    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
    assert(render);

    for (i = 0; i < 2; ++i) {
        int foc = s->current_picture_ptr->field_poc[i];
        if (foc == INT_MAX)
            foc = 0;
        render->info.h264.field_order_cnt[i] = foc;
    }

    render->info.h264.frame_num = h->frame_num;
}

void ff_vdpau_h264_picture_complete(MpegEncContext *s)
{
    H264Context *h = s->avctx->priv_data;
    struct vdpau_render_state *render;

    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
    assert(render);

    render->info.h264.slice_count = h->slice_num;
    if (render->info.h264.slice_count < 1)
        return;

    render->info.h264.is_reference                           = (s->current_picture_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE;
    render->info.h264.field_pic_flag                          = s->picture_structure != PICT_FRAME;
    render->info.h264.bottom_field_flag                       = s->picture_structure == PICT_BOTTOM_FIELD;
    render->info.h264.num_ref_frames                          = h->sps.ref_frame_count;
    render->info.h264.mb_adaptive_frame_field_flag            = h->sps.mb_aff && !render->info.h264.field_pic_flag;
    render->info.h264.constrained_intra_pred_flag             = h->pps.constrained_intra_pred;
    render->info.h264.weighted_pred_flag                      = h->pps.weighted_pred;
    render->info.h264.weighted_bipred_idc                     = h->pps.weighted_bipred_idc;
    render->info.h264.frame_mbs_only_flag                     = h->sps.frame_mbs_only_flag;
    render->info.h264.transform_8x8_mode_flag                 = h->pps.transform_8x8_mode;
    render->info.h264.chroma_qp_index_offset                  = h->pps.chroma_qp_index_offset[0];
    render->info.h264.second_chroma_qp_index_offset           = h->pps.chroma_qp_index_offset[1];
    render->info.h264.pic_init_qp_minus26                     = h->pps.init_qp - 26;
    render->info.h264.num_ref_idx_l0_active_minus1            = h->pps.ref_count[0] - 1;
    render->info.h264.num_ref_idx_l1_active_minus1            = h->pps.ref_count[1] - 1;
    render->info.h264.log2_max_frame_num_minus4               = h->sps.log2_max_frame_num - 4;
    render->info.h264.pic_order_cnt_type                      = h->sps.poc_type;
    render->info.h264.log2_max_pic_order_cnt_lsb_minus4       = h->sps.poc_type ? 0 : h->sps.log2_max_poc_lsb - 4;
    render->info.h264.delta_pic_order_always_zero_flag        = h->sps.delta_pic_order_always_zero_flag;
    render->info.h264.direct_8x8_inference_flag               = h->sps.direct_8x8_inference_flag;
    render->info.h264.entropy_coding_mode_flag                = h->pps.cabac;
    render->info.h264.pic_order_present_flag                  = h->pps.pic_order_present;
    render->info.h264.deblocking_filter_control_present_flag  = h->pps.deblocking_filter_parameters_present;
    render->info.h264.redundant_pic_cnt_present_flag          = h->pps.redundant_pic_cnt_present;
    memcpy(render->info.h264.scaling_lists_4x4,    h->pps.scaling_matrix4,    sizeof(render->info.h264.scaling_lists_4x4));
    memcpy(render->info.h264.scaling_lists_8x8[0], h->pps.scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0]));
    memcpy(render->info.h264.scaling_lists_8x8[1], h->pps.scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0]));

    ff_draw_horiz_band(s, 0, s->avctx->height);
    render->bitstream_buffers_used = 0;
}

void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
                                    int buf_size, int slice_count)
{
    struct vdpau_render_state *render, *last, *next;
    int i;

    if (!s->current_picture_ptr) return;

    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
    assert(render);

    /* fill VdpPictureInfoMPEG1Or2 struct */
    render->info.mpeg.picture_structure          = s->picture_structure;
    render->info.mpeg.picture_coding_type        = s->pict_type;
    render->info.mpeg.intra_dc_precision         = s->intra_dc_precision;
    render->info.mpeg.frame_pred_frame_dct       = s->frame_pred_frame_dct;
    render->info.mpeg.concealment_motion_vectors = s->concealment_motion_vectors;
    render->info.mpeg.intra_vlc_format           = s->intra_vlc_format;
    render->info.mpeg.alternate_scan             = s->alternate_scan;
    render->info.mpeg.q_scale_type               = s->q_scale_type;
    render->info.mpeg.top_field_first            = s->top_field_first;
    render->info.mpeg.full_pel_forward_vector    = s->full_pel[0]; // MPEG-1 only.  Set 0 for MPEG-2
    render->info.mpeg.full_pel_backward_vector   = s->full_pel[1]; // MPEG-1 only.  Set 0 for MPEG-2
    render->info.mpeg.f_code[0][0]               = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert.
    render->info.mpeg.f_code[0][1]               = s->mpeg_f_code[0][1];
    render->info.mpeg.f_code[1][0]               = s->mpeg_f_code[1][0];
    render->info.mpeg.f_code[1][1]               = s->mpeg_f_code[1][1];
    for (i = 0; i < 64; ++i) {
        render->info.mpeg.intra_quantizer_matrix[i]     = s->intra_matrix[i];
        render->info.mpeg.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
    }

    render->info.mpeg.forward_reference  = VDP_INVALID_HANDLE;
    render->info.mpeg.backward_reference = VDP_INVALID_HANDLE;

    switch (s->pict_type) {
    case AV_PICTURE_TYPE_B:
        next = (struct vdpau_render_state *)s->next_picture.data[0];
        assert(next);
        render->info.mpeg.backward_reference = next->surface;
        // no return here, going to set forward prediction
    case AV_PICTURE_TYPE_P:
        last = (struct vdpau_render_state *)s->last_picture.data[0];
        if (!last) // FIXME: Does this test make sense?
            last = render; // predict second field from the first
        render->info.mpeg.forward_reference = last->surface;
    }

    ff_vdpau_add_data_chunk(s, buf, buf_size);

    render->info.mpeg.slice_count = slice_count;

    if (slice_count)
        ff_draw_horiz_band(s, 0, s->avctx->height);
    render->bitstream_buffers_used = 0;
}

void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
                                 int buf_size)
{
    VC1Context *v = s->avctx->priv_data;
    struct vdpau_render_state *render, *last, *next;

    render = (struct vdpau_render_state *)s->current_picture.data[0];
    assert(render);

    /* fill LvPictureInfoVC1 struct */
    render->info.vc1.frame_coding_mode  = v->fcm;
    render->info.vc1.postprocflag       = v->postprocflag;
    render->info.vc1.pulldown           = v->broadcast;
    render->info.vc1.interlace          = v->interlace;
    render->info.vc1.tfcntrflag         = v->tfcntrflag;
    render->info.vc1.finterpflag        = v->finterpflag;
    render->info.vc1.psf                = v->psf;
    render->info.vc1.dquant             = v->dquant;
    render->info.vc1.panscan_flag       = v->panscanflag;
    render->info.vc1.refdist_flag       = v->refdist_flag;
    render->info.vc1.quantizer          = v->quantizer_mode;
    render->info.vc1.extended_mv        = v->extended_mv;
    render->info.vc1.extended_dmv       = v->extended_dmv;
    render->info.vc1.overlap            = v->overlap;
    render->info.vc1.vstransform        = v->vstransform;
    render->info.vc1.loopfilter         = v->s.loop_filter;
    render->info.vc1.fastuvmc           = v->fastuvmc;
    render->info.vc1.range_mapy_flag    = v->range_mapy_flag;
    render->info.vc1.range_mapy         = v->range_mapy;
    render->info.vc1.range_mapuv_flag   = v->range_mapuv_flag;
    render->info.vc1.range_mapuv        = v->range_mapuv;
    /* Specific to simple/main profile only */
    render->info.vc1.multires           = v->multires;
    render->info.vc1.syncmarker         = v->s.resync_marker;
    render->info.vc1.rangered           = v->rangered | (v->rangeredfrm << 1);
    render->info.vc1.maxbframes         = v->s.max_b_frames;

    render->info.vc1.deblockEnable      = v->postprocflag & 1;
    render->info.vc1.pquant             = v->pq;

    render->info.vc1.forward_reference  = VDP_INVALID_HANDLE;
    render->info.vc1.backward_reference = VDP_INVALID_HANDLE;

    if (v->bi_type)
        render->info.vc1.picture_type = 4;
    else
        render->info.vc1.picture_type = s->pict_type - 1 + s->pict_type / 3;

    switch (s->pict_type) {
    case AV_PICTURE_TYPE_B:
        next = (struct vdpau_render_state *)s->next_picture.data[0];
        assert(next);
        render->info.vc1.backward_reference = next->surface;
        // no break here, going to set forward prediction
    case AV_PICTURE_TYPE_P:
        last = (struct vdpau_render_state *)s->last_picture.data[0];
        if (!last) // FIXME: Does this test make sense?
            last = render; // predict second field from the first
        render->info.vc1.forward_reference = last->surface;
    }

    ff_vdpau_add_data_chunk(s, buf, buf_size);

    render->info.vc1.slice_count = 1;

    ff_draw_horiz_band(s, 0, s->avctx->height);
    render->bitstream_buffers_used = 0;
}

void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf,
                                   int buf_size)
{
    struct vdpau_render_state *render, *last, *next;
    int i;

    if (!s->current_picture_ptr) return;

    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
    assert(render);

    /* fill VdpPictureInfoMPEG4Part2 struct */
    render->info.mpeg4.trd[0]                        = s->pp_time;
    render->info.mpeg4.trb[0]                        = s->pb_time;
    render->info.mpeg4.trd[1]                        = s->pp_field_time >> 1;
    render->info.mpeg4.trb[1]                        = s->pb_field_time >> 1;
    render->info.mpeg4.vop_time_increment_resolution = s->avctx->time_base.den;
    render->info.mpeg4.vop_coding_type               = 0;
    render->info.mpeg4.vop_fcode_forward             = s->f_code;
    render->info.mpeg4.vop_fcode_backward            = s->b_code;
    render->info.mpeg4.resync_marker_disable         = !s->resync_marker;
    render->info.mpeg4.interlaced                    = !s->progressive_sequence;
    render->info.mpeg4.quant_type                    = s->mpeg_quant;
    render->info.mpeg4.quarter_sample                = s->quarter_sample;
    render->info.mpeg4.short_video_header            = s->avctx->codec->id == CODEC_ID_H263;
    render->info.mpeg4.rounding_control              = s->no_rounding;
    render->info.mpeg4.alternate_vertical_scan_flag  = s->alternate_scan;
    render->info.mpeg4.top_field_first               = s->top_field_first;
    for (i = 0; i < 64; ++i) {
        render->info.mpeg4.intra_quantizer_matrix[i]     = s->intra_matrix[i];
        render->info.mpeg4.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
    }
    render->info.mpeg4.forward_reference  = VDP_INVALID_HANDLE;
    render->info.mpeg4.backward_reference = VDP_INVALID_HANDLE;

    switch (s->pict_type) {
    case AV_PICTURE_TYPE_B:
        next = (struct vdpau_render_state *)s->next_picture.data[0];
        assert(next);
        render->info.mpeg4.backward_reference = next->surface;
        render->info.mpeg4.vop_coding_type    = 2;
        // no break here, going to set forward prediction
    case AV_PICTURE_TYPE_P:
        last = (struct vdpau_render_state *)s->last_picture.data[0];
        assert(last);
        render->info.mpeg4.forward_reference  = last->surface;
    }

    ff_vdpau_add_data_chunk(s, buf, buf_size);

    ff_draw_horiz_band(s, 0, s->avctx->height);
    render->bitstream_buffers_used = 0;
}

/* @}*/
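The listing above never calls into VDPAU itself: each ff_vdpau_* helper packs picture parameters into render->info and slice data into render->bitstream_buffers, then hands control back to the application through ff_draw_horiz_band(). The following is a minimal sketch, not part of vdpau.c, of how a player built against this API might submit the completed render state to the hardware from its draw_horiz_band callback. The names my_draw_horiz_band and my_vdp_decoder, and the vdp_decoder_render function pointer (obtained once via VdpGetProcAddress during setup), are the application's own and are assumptions made here for illustration.

#include <stdio.h>
#include <vdpau/vdpau.h>
#include <libavcodec/avcodec.h>
#include <libavcodec/vdpau.h>

/* Hypothetical application state, created elsewhere with vdp_decoder_create()
 * and looked up with VdpGetProcAddress(). */
static VdpDecoder        my_vdp_decoder;
static VdpDecoderRender *vdp_decoder_render;

static void my_draw_horiz_band(AVCodecContext *avctx, const AVFrame *frame,
                               int offset[4], int y, int type, int height)
{
    /* For the VDPAU pixel formats, data[0] holds the struct vdpau_render_state
     * that the decoder has just finished filling. */
    struct vdpau_render_state *render =
        (struct vdpau_render_state *)frame->data[0];
    VdpStatus status;

    /* One call submits the picture parameters plus every bitstream chunk
     * queued by ff_vdpau_add_data_chunk(); the band arguments are unused
     * because the decoder reports the whole picture at once. */
    status = vdp_decoder_render(my_vdp_decoder,
                                render->surface,
                                (VdpPictureInfo const *)&render->info,
                                render->bitstream_buffers_used,
                                render->bitstream_buffers);
    if (status != VDP_STATUS_OK)
        fprintf(stderr, "VdpDecoderRender failed: %d\n", (int)status);
}

In such a setup the application would typically install the callback with avctx->draw_horiz_band = my_draw_horiz_band and pick one of the PIX_FMT_VDPAU_* formats from its get_format() callback, so that the hardware path shown above is selected instead of software decoding.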