Go to the documentation of this file.
24 #include <vdpau/vdpau.h>
36 Picture *pic =
s->current_picture_ptr;
43 info->forward_reference = VDP_INVALID_HANDLE;
44 info->backward_reference = VDP_INVALID_HANDLE;
46 switch (
s->pict_type) {
49 assert(
ref != VDP_INVALID_HANDLE);
57 info->slice_count = 0;
58 info->picture_structure =
s->picture_structure;
59 info->picture_coding_type =
s->pict_type;
60 info->intra_dc_precision =
s->intra_dc_precision;
61 info->frame_pred_frame_dct =
s->frame_pred_frame_dct;
62 info->concealment_motion_vectors =
s->concealment_motion_vectors;
63 info->intra_vlc_format =
s->intra_vlc_format;
64 info->alternate_scan =
s->alternate_scan;
65 info->q_scale_type =
s->q_scale_type;
66 info->top_field_first =
s->top_field_first;
68 info->full_pel_forward_vector =
s->full_pel[0];
69 info->full_pel_backward_vector =
s->full_pel[1];
71 info->f_code[0][0] =
s->mpeg_f_code[0][0];
72 info->f_code[0][1] =
s->mpeg_f_code[0][1];
73 info->f_code[1][0] =
s->mpeg_f_code[1][0];
74 info->f_code[1][1] =
s->mpeg_f_code[1][1];
75 for (
i = 0;
i < 64; ++
i) {
76 info->intra_quantizer_matrix[
i] =
s->intra_matrix[
i];
77 info->non_intra_quantizer_matrix[
i] =
s->inter_matrix[
i];
87 Picture *pic =
s->current_picture_ptr;
99 #if CONFIG_MPEG1_VDPAU_HWACCEL
103 VDP_DECODER_LEVEL_MPEG1_NA);
107 .
name =
"mpeg1_vdpau",
115 .
init = vdpau_mpeg1_init,
122 #if CONFIG_MPEG2_VDPAU_HWACCEL
129 profile = VDP_DECODER_PROFILE_MPEG2_MAIN;
132 profile = VDP_DECODER_PROFILE_MPEG2_SIMPLE;
142 .
name =
"mpeg2_vdpau",
150 .
init = vdpau_mpeg2_init,
int ff_vdpau_common_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
static av_cold int init(AVCodecContext *avctx)
#define FF_PROFILE_MPEG2_SIMPLE
Here, the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the list of supported formats is declared — for video that means pixel formats; for audio that means channel layouts and sample formats. These lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection; and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions
static int vdpau_mpeg_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
VdpPictureInfoMPEG1Or2 mpeg
#define FF_PROFILE_MPEG2_MAIN
const AVHWAccel ff_mpeg1_vdpau_hwaccel
static double val(void *priv, double ch)
const AVHWAccel ff_mpeg2_vdpau_hwaccel
int ff_vdpau_add_buffer(struct vdpau_picture_context *pic_ctx, const uint8_t *buf, uint32_t size)
int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile, int level)
int ff_vdpau_common_start_frame(struct vdpau_picture_context *pic_ctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
static uintptr_t ff_vdpau_get_surface_id(AVFrame *pic)
Extract VdpVideoSurface from an AVFrame.
void * hwaccel_picture_private
Hardware accelerator private data.
int ff_vdpau_common_uninit(AVCodecContext *avctx)
int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx)
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
const char * name
Name of the hardware accelerated codec.
#define i(width, name, range_min, range_max)
#define HWACCEL_CAP_ASYNC_SAFE
main external API structure.
union VDPAUPictureInfo info
VDPAU picture information.
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
The frame and frame-reference mechanism is intended to avoid, as much as possible, expensive copies of frame data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures; several references can point to the same frame buffer.
static int ref[MAX_W * MAX_W]
@ AV_PICTURE_TYPE_P
Predicted.
static int vdpau_mpeg_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding