vulkan_vp9.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "vp9dec.h"

#include "vulkan_decode.h"
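/* Vulkan decode descriptor for VP9: selects the video decode queue, the VP9
 * decode codec operation and the Khronos VP9 decode extension this hwaccel
 * depends on. */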
const FFVulkanDecodeDescriptor ff_vk_dec_vp9_desc = {
    .codec_id = AV_CODEC_ID_VP9,
    .decode_extension = FF_VK_EXT_VIDEO_DECODE_VP9,
    .queue_flags = VK_QUEUE_VIDEO_DECODE_BIT_KHR,
    .decode_op = VK_VIDEO_CODEC_OPERATION_DECODE_VP9_BIT_KHR,
    .ext_props = {
        .extensionName = VK_STD_VULKAN_VIDEO_CODEC_VP9_DECODE_EXTENSION_NAME,
        .specVersion = VK_STD_VULKAN_VIDEO_CODEC_VP9_DECODE_SPEC_VERSION,
    },
};

typedef struct VP9VulkanDecodePicture {
    FFVulkanDecodePicture vp;

    /* TODO: investigate if this can be removed to make decoding completely
     * independent. */
    FFVulkanDecodeContext *dec;

    /* Current picture */
    StdVideoVP9ColorConfig color_config;
    StdVideoVP9Segmentation segmentation;
    StdVideoVP9LoopFilter loop_filter;
    StdVideoDecodeVP9PictureInfo std_pic_info;
    VkVideoDecodeVP9PictureInfoKHR vp9_pic_info;

    const VP9Frame *ref_src[8];

    uint8_t frame_id_set;
    uint8_t frame_id;
    uint8_t ref_frame_sign_bias_mask;
} VP9VulkanDecodePicture;
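/* Fill the Vulkan picture resource and reference slot structs for a frame
 * (the current picture or one of its references), using the DPB slot index
 * assigned to it in vk_vp9_start_frame(). */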
static int vk_vp9_fill_pict(AVCodecContext *avctx, const VP9Frame **ref_src,
                            VkVideoReferenceSlotInfoKHR *ref_slot, /* Main structure */
                            VkVideoPictureResourceInfoKHR *ref, /* Goes in ^ */
                            const VP9Frame *pic, int is_current)
{
    FFVulkanDecodeContext *dec = avctx->internal->hwaccel_priv_data;
    FFVulkanDecodeShared *ctx = dec->shared_ctx;
    VP9VulkanDecodePicture *hp = pic->hwaccel_picture_private;
    FFVulkanDecodePicture *vkpic = &hp->vp;

    int err = ff_vk_decode_prepare_frame(dec, pic->tf.f, vkpic, is_current,
                                         dec->dedicated_dpb);
    if (err < 0)
        return err;

    *ref = (VkVideoPictureResourceInfoKHR) {
        .sType = VK_STRUCTURE_TYPE_VIDEO_PICTURE_RESOURCE_INFO_KHR,
        .codedOffset = (VkOffset2D){ 0, 0 },
        .codedExtent = (VkExtent2D){ pic->tf.f->width, pic->tf.f->height },
        .baseArrayLayer = (dec->dedicated_dpb && ctx->common.layered_dpb) ?
                          hp->frame_id : 0,
        .imageViewBinding = vkpic->view.ref[0],
    };

    *ref_slot = (VkVideoReferenceSlotInfoKHR) {
        .sType = VK_STRUCTURE_TYPE_VIDEO_REFERENCE_SLOT_INFO_KHR,
        .slotIndex = hp->frame_id,
        .pPictureResource = ref,
    };

    if (ref_src)
        *ref_src = pic;

    return 0;
}
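/* The 2-bit interpolation filter value read from the bitstream does not use
 * the same ordering as the StdVideoVP9InterpolationFilter enum, so remap it. */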
static enum StdVideoVP9InterpolationFilter remap_interp(uint8_t is_filter_switchable,
                                                        uint8_t raw_interpolation_filter_type)
{
    static const enum StdVideoVP9InterpolationFilter remap[] = {
        STD_VIDEO_VP9_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH,
        STD_VIDEO_VP9_INTERPOLATION_FILTER_EIGHTTAP,
        STD_VIDEO_VP9_INTERPOLATION_FILTER_EIGHTTAP_SHARP,
        STD_VIDEO_VP9_INTERPOLATION_FILTER_BILINEAR,
    };
    if (is_filter_switchable)
        return STD_VIDEO_VP9_INTERPOLATION_FILTER_SWITCHABLE;
    return remap[raw_interpolation_filter_type];
}
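/* start_frame hwaccel callback: picks a free DPB slot for the current picture,
 * fills the reference slot array and translates the parsed frame header into
 * the StdVideo/Vulkan VP9 picture parameter structures. */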
static int vk_vp9_start_frame(AVCodecContext *avctx,
                              av_unused const AVBufferRef *buffer_ref,
                              av_unused const uint8_t *buffer,
                              av_unused uint32_t size)
{
    int err;
    int ref_count = 0;
    const VP9Context *priv = avctx->priv_data;
    const CodedBitstreamVP9Context *vp9 = priv->cbc->priv_data;
    const VP9SharedContext *s = &priv->s;
    uint32_t frame_id_alloc_mask = 0;

    const VP9Frame *pic = &s->frames[CUR_FRAME];
    VP9VulkanDecodePicture *ap = pic->hwaccel_picture_private;
    uint8_t profile = (pic->frame_header->profile_high_bit << 1) | pic->frame_header->profile_low_bit;

    FFVulkanDecodeContext *dec = avctx->internal->hwaccel_priv_data;
    FFVulkanDecodePicture *vp = &ap->vp;
    /* Use the current frame_ids in ref_frames[] to decide occupied frame_ids */
    for (int i = 0; i < STD_VIDEO_VP9_NUM_REF_FRAMES; i++) {
        const VP9VulkanDecodePicture* rp = s->ref_frames[i].hwaccel_picture_private;
        if (rp)
            frame_id_alloc_mask |= 1 << rp->frame_id;
    }

    if (!ap->frame_id_set) {
        unsigned slot_idx = 0;
        for (unsigned i = 0; i < 32; i++) {
            if (!(frame_id_alloc_mask & (1 << i))) {
                slot_idx = i;
                break;
            }
        }
        ap->frame_id = slot_idx;
        ap->frame_id_set = 1;
        frame_id_alloc_mask |= (1 << slot_idx);
    }
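    /* Fill the reference slots for the frame's reference names, skipping
     * duplicates when several names map to the same saved frame, then fill
     * the slot for the current picture itself. */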
    for (int i = 0; i < STD_VIDEO_VP9_REFS_PER_FRAME; i++) {
        const int idx = pic->frame_header->ref_frame_idx[i];
        const VP9Frame *ref_frame = &s->ref_frames[idx];
        VP9VulkanDecodePicture *hp = ref_frame->hwaccel_picture_private;
        int found = 0;

        if (!ref_frame->tf.f)
            continue;

        for (int j = 0; j < ref_count; j++) {
            if (vp->ref_slots[j].slotIndex == hp->frame_id) {
                found = 1;
                break;
            }
        }
        if (found)
            continue;

        err = vk_vp9_fill_pict(avctx, &ap->ref_src[ref_count],
                               &vp->ref_slots[ref_count], &vp->refs[ref_count],
                               ref_frame, 0);
        if (err < 0)
            return err;

        ref_count++;
    }

    err = vk_vp9_fill_pict(avctx, NULL, &vp->ref_slot, &vp->ref,
                           pic, 1);
    if (err < 0)
        return err;
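    /* Translate loop filter, segmentation and color config state from the
     * parsed header into their StdVideo counterparts. */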
    ap->loop_filter = (StdVideoVP9LoopFilter) {
        .flags = (StdVideoVP9LoopFilterFlags) {
            .loop_filter_delta_enabled = pic->frame_header->loop_filter_delta_enabled,
            .loop_filter_delta_update = pic->frame_header->loop_filter_delta_update,
        },
        .loop_filter_level = pic->frame_header->loop_filter_level,
        .loop_filter_sharpness = pic->frame_header->loop_filter_sharpness,
        .update_ref_delta = 0x0,
        .update_mode_delta = 0x0,
    };

    for (int i = 0; i < STD_VIDEO_VP9_MAX_REF_FRAMES; i++) {
        ap->loop_filter.loop_filter_ref_deltas[i] = vp9->loop_filter_ref_deltas[i];
        ap->loop_filter.update_ref_delta |= pic->frame_header->update_ref_delta[i];
    }
    for (int i = 0; i < STD_VIDEO_VP9_LOOP_FILTER_ADJUSTMENTS; i++) {
        ap->loop_filter.loop_filter_mode_deltas[i] = vp9->loop_filter_mode_deltas[i];
        ap->loop_filter.update_mode_delta |= pic->frame_header->update_mode_delta[i];
    }
    ap->segmentation = (StdVideoVP9Segmentation) {
        .flags = (StdVideoVP9SegmentationFlags) {
            .segmentation_update_map = pic->frame_header->segmentation_update_map,
            .segmentation_temporal_update = pic->frame_header->segmentation_temporal_update,
            .segmentation_update_data = pic->frame_header->segmentation_update_data,
            .segmentation_abs_or_delta_update = pic->frame_header->segmentation_abs_or_delta_update,
        },
    };

    for (int i = 0; i < STD_VIDEO_VP9_MAX_SEGMENTATION_TREE_PROBS; i++)
        ap->segmentation.segmentation_tree_probs[i] = vp9->segmentation_tree_probs[i];
    for (int i = 0; i < STD_VIDEO_VP9_MAX_SEGMENTATION_PRED_PROB; i++)
        ap->segmentation.segmentation_pred_prob[i] = vp9->segmentation_pred_prob[i];
    for (int i = 0; i < STD_VIDEO_VP9_MAX_SEGMENTS; i++) {
        ap->segmentation.FeatureEnabled[i] = 0x0;
        for (int j = 0; j < STD_VIDEO_VP9_SEG_LVL_MAX; j++) {
            ap->segmentation.FeatureEnabled[i] |= vp9->feature_enabled[i][j] << j;
            ap->segmentation.FeatureData[i][j] = vp9->feature_sign[i][j] ?
                                                 -vp9->feature_value[i][j] :
                                                 +vp9->feature_value[i][j];
        }
    }
    ap->color_config = (StdVideoVP9ColorConfig) {
        .flags = (StdVideoVP9ColorConfigFlags) {
            .color_range = pic->frame_header->color_range,
        },
        .BitDepth = profile < 2 ? 8 :
                    pic->frame_header->ten_or_twelve_bit ? 12 : 10,
        .subsampling_x = pic->frame_header->subsampling_x,
        .subsampling_y = pic->frame_header->subsampling_y,
        .color_space = pic->frame_header->color_space,
    };
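    /* Core StdVideo picture parameters; ref_frame_sign_bias_mask starts out
     * cleared and is ORed together from the per-reference flags below. */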
    ap->std_pic_info = (StdVideoDecodeVP9PictureInfo) {
        .flags = (StdVideoDecodeVP9PictureInfoFlags) {
            .error_resilient_mode = pic->frame_header->error_resilient_mode,
            .intra_only = pic->frame_header->intra_only,
            .allow_high_precision_mv = pic->frame_header->allow_high_precision_mv,
            .refresh_frame_context = pic->frame_header->refresh_frame_context,
            .frame_parallel_decoding_mode = pic->frame_header->frame_parallel_decoding_mode,
            .segmentation_enabled = pic->frame_header->segmentation_enabled,
            .show_frame = pic->frame_header->show_frame,
            .UsePrevFrameMvs = s->h.use_last_frame_mvs,
        },
        .profile = profile,
        .frame_type = pic->frame_header->frame_type,
        .frame_context_idx = pic->frame_header->frame_context_idx,
        .reset_frame_context = pic->frame_header->reset_frame_context,
        .refresh_frame_flags = pic->frame_header->refresh_frame_flags,
        .ref_frame_sign_bias_mask = 0x0,
        .interpolation_filter = remap_interp(pic->frame_header->is_filter_switchable,
                                             pic->frame_header->raw_interpolation_filter_type),
        .base_q_idx = pic->frame_header->base_q_idx,
        .delta_q_y_dc = pic->frame_header->delta_q_y_dc,
        .delta_q_uv_dc = pic->frame_header->delta_q_uv_dc,
        .delta_q_uv_ac = pic->frame_header->delta_q_uv_ac,
        .tile_cols_log2 = pic->frame_header->tile_cols_log2,
        .tile_rows_log2 = pic->frame_header->tile_rows_log2,
        /* Reserved */
        .pColorConfig = &ap->color_config,
        .pLoopFilter = &ap->loop_filter,
        .pSegmentation = &ap->segmentation,
    };
    for (int i = VP9_LAST_FRAME; i <= VP9_ALTREF_FRAME; i++)
        ap->std_pic_info.ref_frame_sign_bias_mask |= pic->frame_header->ref_frame_sign_bias[i] << i;
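    /* Byte offsets of the compressed header and the tile data within the
     * bitstream buffer handed to the driver; the uncompressed header sits at
     * the start of the buffer. */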
    ap->vp9_pic_info = (VkVideoDecodeVP9PictureInfoKHR) {
        .sType = VK_STRUCTURE_TYPE_VIDEO_DECODE_VP9_PICTURE_INFO_KHR,
        .pStdPictureInfo = &ap->std_pic_info,
        .uncompressedHeaderOffset = 0,
        .compressedHeaderOffset = s->h.uncompressed_header_size,
        .tilesOffset = s->h.uncompressed_header_size +
                       s->h.compressed_header_size,
    };
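    /* Map each VP9 reference name to the DPB slot of the saved frame it
     * refers to, or -1 if that reference is unused. */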
    for (int i = 0; i < STD_VIDEO_VP9_REFS_PER_FRAME; i++) {
        const int idx = pic->frame_header->ref_frame_idx[i];
        const VP9Frame *ref_frame = &s->ref_frames[idx];
        VP9VulkanDecodePicture *hp = ref_frame->hwaccel_picture_private;

        if (!ref_frame->tf.f)
            ap->vp9_pic_info.referenceNameSlotIndices[i] = -1;
        else
            ap->vp9_pic_info.referenceNameSlotIndices[i] = hp->frame_id;
    }
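    /* Final decode parameters: the current picture is both the decode output
     * and the reconstructed reference written into its DPB slot. */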
    vp->decode_info = (VkVideoDecodeInfoKHR) {
        .sType = VK_STRUCTURE_TYPE_VIDEO_DECODE_INFO_KHR,
        .pNext = &ap->vp9_pic_info,
        .flags = 0x0,
        .pSetupReferenceSlot = &vp->ref_slot,
        .referenceSlotCount = ref_count,
        .pReferenceSlots = vp->ref_slots,
        .dstPictureResource = (VkVideoPictureResourceInfoKHR) {
            .sType = VK_STRUCTURE_TYPE_VIDEO_PICTURE_RESOURCE_INFO_KHR,
            .codedOffset = (VkOffset2D){ 0, 0 },
            .codedExtent = (VkExtent2D){ pic->tf.f->width, pic->tf.f->height },
            .baseArrayLayer = 0,
            .imageViewBinding = vp->view.out[0],
        },
    };

    ap->dec = dec;

    return 0;
}
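/* decode_slice hwaccel callback: appends the given buffer of frame data to the
 * bitstream buffer that will be submitted in end_frame. */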
static int vk_vp9_decode_slice(AVCodecContext *avctx,
                               const uint8_t *data,
                               uint32_t size)
{
    int err;
    const VP9SharedContext *s = avctx->priv_data;
    VP9VulkanDecodePicture *ap = s->frames[CUR_FRAME].hwaccel_picture_private;
    FFVulkanDecodePicture *vp = &ap->vp;

    err = ff_vk_decode_add_slice(avctx, vp, data, size, 0, NULL, NULL);
    if (err < 0)
        return err;

    return 0;
}
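/* end_frame hwaccel callback: submits the accumulated bitstream together with
 * the reference frames gathered in start_frame. */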
static int vk_vp9_end_frame(AVCodecContext *avctx)
{
    const VP9SharedContext *s = avctx->priv_data;

    const VP9Frame *pic = &s->frames[CUR_FRAME];
    VP9VulkanDecodePicture *ap = pic->hwaccel_picture_private;
    FFVulkanDecodePicture *vp = &ap->vp;
    FFVulkanDecodePicture *rvp[STD_VIDEO_VP9_REFS_PER_FRAME] = { 0 };
    AVFrame *rav[STD_VIDEO_VP9_REFS_PER_FRAME] = { 0 };

    for (int i = 0; i < vp->decode_info.referenceSlotCount; i++) {
        const VP9Frame *rp = ap->ref_src[i];
        VP9VulkanDecodePicture *rhp = rp->hwaccel_picture_private;

        rvp[i] = &rhp->vp;
        rav[i] = ap->ref_src[i]->tf.f;
    }

    av_log(avctx, AV_LOG_VERBOSE, "Decoding frame, %"SIZE_SPECIFIER" bytes\n",
           vp->slices_size);

    return ff_vk_decode_frame(avctx, pic->tf.f, vp, rav, rvp);
}
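/* free_frame_priv callback, invoked once the reference count of the frame's
 * hwaccel private data drops to zero. */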
static void vk_vp9_free_frame_priv(AVRefStructOpaque _hwctx, void *data)
{
    AVHWDeviceContext *hwctx = _hwctx.nc;
    VP9VulkanDecodePicture *ap = data;

    /* Free frame resources, this also destroys the session parameters. */
    ff_vk_decode_free_frame(hwctx, &ap->vp);
}
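/* Hwaccel entry: the per-codec callbacks above, plus the generic Vulkan decode
 * init/uninit/flush and threading helpers from vulkan_decode.c. */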
const FFHWAccel ff_vp9_vulkan_hwaccel = {
    .p.name = "vp9_vulkan",
    .p.type = AVMEDIA_TYPE_VIDEO,
    .p.id = AV_CODEC_ID_VP9,
    .p.pix_fmt = AV_PIX_FMT_VULKAN,
    .start_frame = &vk_vp9_start_frame,
    .decode_slice = &vk_vp9_decode_slice,
    .end_frame = &vk_vp9_end_frame,
    .free_frame_priv = &vk_vp9_free_frame_priv,
    .frame_priv_data_size = sizeof(VP9VulkanDecodePicture),
    .init = &ff_vk_decode_init,
    .update_thread_context = &ff_vk_update_thread_context,
    .flush = &ff_vk_decode_flush,
    .uninit = &ff_vk_decode_uninit,
    .frame_params = &ff_vk_frame_params,
    .priv_data_size = sizeof(FFVulkanDecodeContext),
    .caps_internal = HWACCEL_CAP_ASYNC_SAFE,
};