/* vdpau_error(): VDPAU status codes handled when translating a VdpStatus into an AVERROR */
case VDP_STATUS_NO_IMPLEMENTATION:
case VDP_STATUS_DISPLAY_PREEMPTED:
case VDP_STATUS_INVALID_HANDLE:
case VDP_STATUS_INVALID_POINTER:
case VDP_STATUS_RESOURCES:
case VDP_STATUS_HANDLE_DEVICE_MISMATCH:
case VDP_STATUS_ERROR:
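Only the case labels survive in the fragment above. A minimal sketch of such a status-to-error translation; the specific AVERROR codes chosen below are illustrative assumptions, not taken from the listing:

#include <errno.h>
#include <vdpau/vdpau.h>
#include "libavutil/error.h"

/* Sketch: map a VdpStatus to a negative AVERROR code.  The errno chosen for
 * each status is an assumption for illustration. */
static int vdpau_error_sketch(VdpStatus status)
{
    switch (status) {
    case VDP_STATUS_OK:
        return 0;
    case VDP_STATUS_NO_IMPLEMENTATION:
        return AVERROR(ENOTSUP);            /* feature missing in the driver */
    case VDP_STATUS_DISPLAY_PREEMPTED:
        return AVERROR(EIO);                /* display preempted, e.g. a VT switch */
    case VDP_STATUS_INVALID_HANDLE:
    case VDP_STATUS_INVALID_POINTER:
        return AVERROR(EINVAL);             /* bad handle or pointer argument */
    case VDP_STATUS_RESOURCES:
        return AVERROR(ENOMEM);             /* out of surfaces or memory */
    case VDP_STATUS_HANDLE_DEVICE_MISMATCH:
        return AVERROR(ENODEV);             /* handle belongs to another device */
    case VDP_STATUS_ERROR:
    default:
        return AVERROR_UNKNOWN;
    }
}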
/* av_vdpau_get_surface_parameters(): start from the coded dimensions and pick
 * the chroma type from the software pixel format */
uint32_t w = avctx->coded_width;
uint32_t h = avctx->coded_height;

switch (avctx->sw_pix_fmt) {
    /* (case labels elided in this listing) */
    t = VDP_CHROMA_TYPE_420;
    t = VDP_CHROMA_TYPE_422;
    t = VDP_CHROMA_TYPE_444;
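Only the assignments remain in the fragment above; the case labels were lost. A sketch of the full selection, assuming the usual libavcodec/libavutil headers, assuming the JPEG (full-range) pixel formats map to the same chroma type as their limited-range counterparts, and assuming the surface size rounding shown here (the alignment requirements are not stated in the listing):

/* Sketch of the chroma-type selection in av_vdpau_get_surface_parameters(). */
static int get_surface_parameters_sketch(AVCodecContext *avctx, VdpChromaType *type,
                                         uint32_t *width, uint32_t *height)
{
    VdpChromaType t;
    uint32_t w = avctx->coded_width;
    uint32_t h = avctx->coded_height;

    switch (avctx->sw_pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUVJ420P:
        t = VDP_CHROMA_TYPE_420;
        w = (w + 1) & ~1;    /* assumed: luma width rounded up to even */
        h = (h + 3) & ~3;    /* assumed: height rounded up to a multiple of 4 */
        break;
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUVJ422P:
        t = VDP_CHROMA_TYPE_422;
        w = (w + 1) & ~1;
        h = (h + 1) & ~1;
        break;
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUVJ444P:
        t = VDP_CHROMA_TYPE_444;
        h = (h + 1) & ~1;
        break;
    default:
        return AVERROR(ENOSYS);   /* pixel format not covered by this sketch */
    }

    if (type)   *type   = t;
    if (width)  *width  = w;
    if (height) *height = h;
    return 0;
}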
/* ff_vdpau_common_init(): query entry points and limits, then reset the context defaults */
VdpVideoSurfaceQueryCapabilities *surface_query_caps;
VdpDecoderQueryCapabilities *decoder_query_caps;
VdpGetInformationString *info;
const char *info_string;
uint32_t max_level, max_mb, max_width, max_height;

vdctx->width  = UINT32_MAX;
vdctx->height = UINT32_MAX;
vdctx->device = VDP_INVALID_HANDLE;
/* only 4:2:0 chroma is accepted unless AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH is set */
type != VDP_CHROMA_TYPE_420)

/* fetch the driver identification string and parse the NVIDIA driver version */
VDP_FUNC_ID_GET_INFORMATION_STRING,
if (status != VDP_STATUS_OK)
if (status != VDP_STATUS_OK)
int driver_version = 0;
sscanf(info_string, "NVIDIA VDPAU Driver Shared Library %d", &driver_version);
if (driver_version < 410) {
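A sketch of that driver-version gate, assuming the standard <vdpau/vdpau.h> entry points; the 410 threshold and the sscanf() format follow the fragment, while the return conventions and the "NVIDIA " prefix guard are assumptions:

#include <stdio.h>
#include <string.h>
#include <vdpau/vdpau.h>

/* Sketch: fetch the driver identification string through get_proc_address()
 * and extract the NVIDIA driver version with sscanf(), as in the fragment.
 * Returns 1 if the driver is new enough, 0 if too old, -1 on VDPAU failure. */
static int check_driver_version(VdpDevice device, VdpGetProcAddress *get_proc_address)
{
    VdpGetInformationString *info;
    const char *info_string;
    VdpStatus status;
    void *func;

    status = get_proc_address(device, VDP_FUNC_ID_GET_INFORMATION_STRING, &func);
    if (status != VDP_STATUS_OK)
        return -1;
    info = func;

    status = info(&info_string);
    if (status != VDP_STATUS_OK)
        return -1;

    if (!strncmp(info_string, "NVIDIA ", 7)) {       /* assumed guard before parsing */
        int driver_version = 0;
        sscanf(info_string, "NVIDIA VDPAU Driver Shared Library %d", &driver_version);
        if (driver_version < 410)
            return 0;                                /* too old for the feature being probed */
    }
    return 1;
}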
/* query video surface capabilities for the chosen chroma type */
VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES,
if (status != VDP_STATUS_OK)
surface_query_caps = func;
&max_width, &max_height);
if (status != VDP_STATUS_OK)
if (supported != VDP_TRUE ||

/* query decoder capabilities for the requested profile */
VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES,
if (status != VDP_STATUS_OK)
decoder_query_caps = func;
&max_mb, &max_width, &max_height);
/* fall back to the main profile when constrained baseline is not supported */
#ifdef VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE
if ((status != VDP_STATUS_OK || supported != VDP_TRUE) &&
    profile == VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE) {
    profile = VDP_DECODER_PROFILE_H264_MAIN;
    &max_width, &max_height);
if (status != VDP_STATUS_OK)
/* reject the decoder if the reported maximum level is below the stream's level */
if (supported != VDP_TRUE || max_level < level ||
if (status != VDP_STATUS_OK)
if (status != VDP_STATUS_OK)
if (status == VDP_STATUS_OK) {
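Putting the query fragments together, a sketch of the decoder capability check, assuming the standard VdpDecoderQueryCapabilities entry point; the fallback from the H.264 constrained baseline profile to the main profile and the limit comparisons follow the fragments above, while the return conventions are assumptions:

/* Sketch: check that the driver can decode the requested profile at the
 * required level and size.  Returns 0 on success, -1 otherwise (assumed). */
static int query_decoder_caps(VdpDevice device, VdpGetProcAddress *get_proc_address,
                              VdpDecoderProfile profile, uint32_t level,
                              uint32_t width, uint32_t height)
{
    VdpDecoderQueryCapabilities *decoder_query_caps;
    uint32_t max_level, max_mb, max_width, max_height;
    VdpBool supported;
    VdpStatus status;
    void *func;

    status = get_proc_address(device, VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES, &func);
    if (status != VDP_STATUS_OK)
        return -1;
    decoder_query_caps = func;

    status = decoder_query_caps(device, profile, &supported, &max_level,
                                &max_mb, &max_width, &max_height);
#ifdef VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE
    /* retry with the main profile if constrained baseline is not supported */
    if ((status != VDP_STATUS_OK || supported != VDP_TRUE) &&
        profile == VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE) {
        profile = VDP_DECODER_PROFILE_H264_MAIN;
        status = decoder_query_caps(device, profile, &supported, &max_level,
                                    &max_mb, &max_width, &max_height);
    }
#endif
    if (status != VDP_STATUS_OK)
        return -1;

    if (supported != VDP_TRUE || max_level < level ||
        max_width < width || max_height < height)
        return -1;            /* stream exceeds what the driver advertises */

    return 0;
}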
/* ff_vdpau_common_uninit(): nothing to tear down without a device or a created decoder */
if (vdctx->device == VDP_INVALID_HANDLE)
if (vdctx->width == UINT32_MAX && vdctx->height == UINT32_MAX)

VDP_FUNC_ID_DECODER_DESTROY, &func);
if (status != VDP_STATUS_OK)
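A sketch of the teardown implied by that fragment, using a hypothetical context struct (vdpau_ctx_sketch) that mirrors the device, decoder and dimension fields referenced above; <stdint.h> and <vdpau/vdpau.h> are assumed:

/* Hypothetical context mirroring the fields used in the fragment above. */
struct vdpau_ctx_sketch {
    VdpDevice          device;
    VdpDecoder         decoder;
    VdpGetProcAddress *get_proc_address;
    uint32_t           width, height;
};

/* Sketch: destroy the decoder if one was created for this context. */
static int common_uninit_sketch(struct vdpau_ctx_sketch *vdctx)
{
    VdpDecoderDestroy *destroy;
    VdpStatus status;
    void *func;

    if (vdctx->device == VDP_INVALID_HANDLE)
        return 0;                                   /* no device bound */
    if (vdctx->width == UINT32_MAX && vdctx->height == UINT32_MAX)
        return 0;                                   /* decoder was never created */

    status = vdctx->get_proc_address(vdctx->device, VDP_FUNC_ID_DECODER_DESTROY, &func);
    if (status != VDP_STATUS_OK)
        return -1;                                  /* assumed error convention */
    destroy = func;

    return destroy(vdctx->decoder) == VDP_STATUS_OK ? 0 : -1;
}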
/* ff_vdpau_common_reinit(): nothing to do when no device is bound */
if (vdctx->device == VDP_INVALID_HANDLE)
/* ff_vdpau_mpeg_end_frame() is only built for the MPEG-family hwaccels */
#if CONFIG_MPEG1_VDPAU_HWACCEL || \
    CONFIG_MPEG2_VDPAU_HWACCEL || CONFIG_MPEG4_VDPAU_HWACCEL || \
    CONFIG_VC1_VDPAU_HWACCEL || CONFIG_WMV3_VDPAU_HWACCEL
Picture *pic = s->current_picture_ptr;
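A sketch of the end-of-frame handler built under those CONFIG_* guards, assuming the internal mpegvideo and vdpau declarations (MpegEncContext, struct vdpau_picture_context) are available; the error handling is an assumption:

/* Sketch of ff_vdpau_mpeg_end_frame(): render the queued bitstream for the
 * current picture, then report the full frame as drawn. */
static int mpeg_end_frame_sketch(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    Picture *pic = s->current_picture_ptr;
    struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
    int ret;

    ret = ff_vdpau_common_end_frame(avctx, pic->f, pic_ctx);
    if (ret < 0)
        return ret;

    ff_mpeg_draw_horiz_band(s, 0, avctx->height);
    return 0;
}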
/* ff_vdpau_add_buffer(): fill in the new VdpBitstreamBuffer entry */
buffers->struct_version  = VDP_BITSTREAM_BUFFER_VERSION;
buffers->bitstream       = buf;
buffers->bitstream_bytes = size;
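The fragment only shows the new entry being filled in. A sketch of the surrounding table management, assuming the table is grown with av_fast_realloc() and counted by the bitstream_buffers_allocated / bitstream_buffers_used fields listed further down; the pic_ctx_sketch struct here is hypothetical and stands in for the real picture context:

#include <vdpau/vdpau.h>
#include "libavutil/error.h"
#include "libavutil/mem.h"

/* Hypothetical picture context mirroring the buffer-table fields below. */
struct pic_ctx_sketch {
    VdpBitstreamBuffer *bitstream_buffers;           /* table of queued chunks */
    unsigned int        bitstream_buffers_allocated; /* allocated size of the table, in bytes */
    int                 bitstream_buffers_used;      /* entries actually filled */
};

/* Sketch: append one bitstream chunk, growing the table with av_fast_realloc(). */
static int add_buffer_sketch(struct pic_ctx_sketch *pic_ctx,
                             const uint8_t *buf, uint32_t size)
{
    VdpBitstreamBuffer *buffers = pic_ctx->bitstream_buffers;

    buffers = av_fast_realloc(buffers, &pic_ctx->bitstream_buffers_allocated,
                              (pic_ctx->bitstream_buffers_used + 1) * sizeof(*buffers));
    if (!buffers)
        return AVERROR(ENOMEM);

    pic_ctx->bitstream_buffers = buffers;
    buffers += pic_ctx->bitstream_buffers_used++;

    buffers->struct_version  = VDP_BITSTREAM_BUFFER_VERSION;
    buffers->bitstream       = buf;
    buffers->bitstream_bytes = size;
    return 0;
}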
/* av_vdpau_get_profile(): map the codec profile to a VdpDecoderProfile */
#if FF_API_VDPAU_PROFILE
#define PROFILE(prof) \
    *profile = VDP_DECODER_PROFILE_##prof; \
default: return AVERROR(EINVAL);
default: return AVERROR(EINVAL);
#ifdef VDP_DECODER_PROFILE_H264_EXTENDED
default: return AVERROR(EINVAL);
default: return AVERROR(EINVAL);
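A sketch of the mapping this function performs, limited to a few of the FF_PROFILE_* constants that appear in the symbol list below; treating every other codec or profile as unsupported mirrors the default: return AVERROR(EINVAL) branches above and is otherwise an assumption:

/* Sketch: translate avctx->profile into a VdpDecoderProfile for two codecs. */
static int get_profile_sketch(const AVCodecContext *avctx, VdpDecoderProfile *profile)
{
    switch (avctx->codec_id) {
    case AV_CODEC_ID_MPEG2VIDEO:
        switch (avctx->profile) {
        case FF_PROFILE_MPEG2_SIMPLE: *profile = VDP_DECODER_PROFILE_MPEG2_SIMPLE; return 0;
        case FF_PROFILE_MPEG2_MAIN:   *profile = VDP_DECODER_PROFILE_MPEG2_MAIN;   return 0;
        default:                      return AVERROR(EINVAL);
        }
    case AV_CODEC_ID_H264:
        /* the intra flag is masked off so intra-only profiles share the mapping */
        switch (avctx->profile & ~FF_PROFILE_H264_INTRA) {
        case FF_PROFILE_H264_BASELINE: *profile = VDP_DECODER_PROFILE_H264_BASELINE; return 0;
        case FF_PROFILE_H264_MAIN:     *profile = VDP_DECODER_PROFILE_H264_MAIN;     return 0;
        case FF_PROFILE_H264_HIGH:     *profile = VDP_DECODER_PROFILE_H264_HIGH;     return 0;
        default:                       return AVERROR(EINVAL);
        }
    default:
        return AVERROR(EINVAL);   /* codecs not covered by this sketch */
    }
}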
/* av_vdpau_bind_context(): reset the hwaccel context before storing the new device */
VdpGetProcAddress *get_proc, unsigned flags)
memset(hwctx, 0, sizeof(*hwctx));
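A usage sketch for the public av_vdpau_bind_context() entry point documented below; obtaining the VdpDevice and VdpGetProcAddress pointer (for example from vdp_device_create_x11()) is the application's job and is assumed here, as is the <libavcodec/vdpau.h> include:

/* Sketch: let libavcodec drive an application-supplied VDPAU device. */
static int attach_vdpau_sketch(AVCodecContext *avctx, VdpDevice device,
                               VdpGetProcAddress *get_proc_address)
{
    int ret = av_vdpau_bind_context(avctx, device, get_proc_address,
                                    AV_HWACCEL_FLAG_IGNORE_LEVEL);
    if (ret < 0)
        return ret;

    /* once the application's get_format callback selects AV_PIX_FMT_VDPAU,
     * decoded frames carry the VdpVideoSurface handle in data[3] */
    return 0;
}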
void * hwctx
The format-specific data, allocated and freed by libavutil along with this context.
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
void * hwaccel_context
Hardware accelerator context.
int ff_vdpau_common_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx) (see the sketch at the end of this list)
#define FF_PROFILE_MPEG2_SIMPLE
#define FF_PROFILE_H264_INTRA
#define FF_PROFILE_MPEG4_SIMPLE
uint8_t * data
The data buffer.
#define FF_PROFILE_H264_BASELINE
static int ff_vdpau_common_reinit(AVCodecContext *avctx)
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
#define FF_PROFILE_H264_CONSTRAINED_BASELINE
AVFrame
This structure describes decoded (raw) audio or video data.
int bitstream_buffers_used
Useful bitstream buffers in the bitstream buffers table.
int(* init)(AVCodecContext *avctx)
Initialize the hwaccel private data.
AVVDPAUDeviceContext
This struct is allocated as AVHWDeviceContext.hwctx.
#define AV_LOG_VERBOSE
Detailed information.
int width
The allocated dimensions of the frames in this pool.
#define FF_PROFILE_MPEG2_MAIN
VdpDecoderRender * render
VDPAU decoder render callback.
#define AV_HWACCEL_FLAG_IGNORE_LEVEL
Hardware acceleration should be used for decoding even if the codec level used is unknown or higher than the maximum supported level reported by the hardware driver.
AVVDPAUContext
This structure is used to share data between the libavcodec library and the client video application.
#define MAKE_ACCESSORS(str, name, type, field)
int refs
number of reference frames
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
#define FF_PROFILE_H264_HIGH
int ff_vdpau_add_buffer(struct vdpau_picture_context *pic_ctx, const uint8_t *buf, uint32_t size)
int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile, int level)
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
int(* uninit)(AVCodecContext *avctx)
Uninitialize the hwaccel private data.
#define FF_PROFILE_H264_EXTENDED
int av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device, VdpGetProcAddress *get_proc, unsigned flags)
Associate a VDPAU device with a codec context for hardware acceleration.
#define AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH
Hardware acceleration can output YUV pixel formats with a different chroma sampling than 4:2:0 and/or other than 8 bits per component.
int ff_vdpau_common_start_frame(struct vdpau_picture_context *pic_ctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
static uintptr_t ff_vdpau_get_surface_id(AVFrame *pic)
Extract VdpVideoSurface from an AVFrame.
void * hwaccel_picture_private
Hardware accelerator private data.
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range
int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, enum AVHWDeviceType dev_type)
Make sure avctx.hw_frames_ctx is set.
#define FF_PROFILE_VC1_SIMPLE
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
struct AVCodecInternal * internal
Private context used for internal data.
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range
#define FF_PROFILE_VC1_MAIN
int ff_vdpau_common_end_frame(AVCodecContext *avctx, AVFrame *frame, struct vdpau_picture_context *pic_ctx)
#define AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH
Hardware acceleration should still be attempted for decoding when the codec profile does not match the reported capabilities of the hardware.
int av_vdpau_get_surface_parameters(AVCodecContext *avctx, VdpChromaType *type, uint32_t *width, uint32_t *height)
Gets the parameters to create an adequate VDPAU video surface for the codec context using VDPAU hardware decoding.
VdpDevice device
VDPAU device handle.
int ff_vdpau_common_uninit(AVCodecContext *avctx)
VdpDecoder decoder
VDPAU decoder handle.
void * hwaccel_priv_data
hwaccel-specific private data
int av_reallocp(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory through a pointer to a pointer.
int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx)
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
int hwaccel_flags
Bit set of AV_HWACCEL_FLAG_* flags, which affect hardware accelerated decoding (if active).
#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE
VdpBitstreamBuffer * bitstream_buffers
Table of bitstream buffers.
AVVDPAUContext * av_vdpau_alloc_context(void)
Allocate an AVVDPAUContext.
int(* AVVDPAU_Render2)(struct AVCodecContext *, struct AVFrame *, const VdpPictureInfo *, uint32_t, const VdpBitstreamBuffer *)
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e. those with data not located in normal system memory).
AVHWDeviceContext * device_ctx
The parent AVHWDeviceContext.
VdpGetProcAddress * get_proc_address
VDPAU device driver.
int bitstream_buffers_allocated
Allocated size of the bitstream_buffers table.
AVCodecContext
main external API structure.
union VDPAUPictureInfo info
VDPAU picture information.
#define FF_PROFILE_VC1_ADVANCED
int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile)
Get a decoder profile that should be used for initializing a VDPAU decoder.
#define FF_PROFILE_H264_MAIN
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
static int vdpau_error(VdpStatus status)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
AVBufferRef
A reference to a data buffer.
AVVDPAUContext * av_alloc_vdpaucontext(void)
allocation function for AVVDPAUContext
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
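Several of the entries above (format, sw_format, width, av_vdpau_get_surface_parameters) come together in ff_vdpau_common_frame_params(); a sketch of how the hardware frames context is filled, assuming the surface parameters come from the public helper documented above:

/* Sketch: fill an AVHWFramesContext for VDPAU surfaces from the codec context. */
static int frame_params_sketch(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
{
    AVHWFramesContext *hw_frames = (AVHWFramesContext *)hw_frames_ctx->data;
    VdpChromaType type;
    uint32_t width, height;

    if (av_vdpau_get_surface_parameters(avctx, &type, &width, &height))
        return AVERROR(EINVAL);

    hw_frames->format    = AV_PIX_FMT_VDPAU;    /* underlying HW surface type   */
    hw_frames->sw_format = avctx->sw_pix_fmt;   /* data layout of the surfaces  */
    hw_frames->width     = width;               /* allocated pool dimensions    */
    hw_frames->height    = height;
    return 0;
}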