70 void *spv_opaque = NULL;
79 spv = ff_vk_spirv_init();
88 VK_SHADER_STAGE_COMPUTE_BIT,
93 GLSLC(0, layout(push_constant, std430) uniform pushConstants { );
94 GLSLC(1,     vec4 color_comp; );
99 VK_SHADER_STAGE_COMPUTE_BIT);
103 .name = "output_img",
104 .type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
106 .mem_quali = "writeonly",
109 .stages = VK_SHADER_STAGE_COMPUTE_BIT,
117 GLSLC(1,     ivec2 pos = ivec2(gl_GlobalInvocationID.xy); );
138 for (int i = 0; i < 4; i++)
139     rgbad[i] = s->color_rgba[i] / 255.0;
144 memcpy(yuvad, rgbad, sizeof(rgbad));
149 for (int i = 0; i < 3; i++) {
152     yuvad[i] *= (chroma ? 224.0 : 219.0) / 255.0;
153     yuvad[i] += (chroma ? 128.0 : 16.0) / 255.0;
161 if (desc->nb_components <= 2)
164 for (int i = 0; i < 4; i++)
165     s->opts.color_comp[i] = yuvad[i];
169 for (int i = 0, c_off = 0; i < planes; i++) {
170     for (int c = 0; c < desc->nb_components; c++) {
171         if (desc->comp[c].plane == i) {
173             GLSLF(1, r[%i] = color_comp[%i]; ,off, c_off++);
206 if (!s->initialized) {
215 if (s->duration >= 0 &&
222 if (s->draw_once_reset) {
224     s->draw_once_reset = 0;
232     VK_NULL_HANDLE, &s->opts, sizeof(s->opts));
248 frame->sample_aspect_ratio = s->sar;
251     VK_NULL_HANDLE, &s->opts, sizeof(s->opts));
271 if (!s->out_format_string) {
294 s->vkctx.output_width = s->w;
295 s->vkctx.output_height = s->h;
320 #define OFFSET(x) offsetof(TestSrcVulkanContext, x)
321 #define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
323 #define COMMON_OPTS \
324 { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, { .str = "1920x1080" }, 0, 0, FLAGS }, \
325 { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, { .str = "1920x1080" }, 0, 0, FLAGS }, \
327 { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, { .str = "60" }, 0, INT_MAX, FLAGS }, \
328 { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, { .str = "60" }, 0, INT_MAX, FLAGS }, \
330 { "duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, { .i64 = -1 }, -1, INT64_MAX, FLAGS }, \
331 { "d", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, { .i64 = -1 }, -1, INT64_MAX, FLAGS }, \
333 { "sar", "set video sample aspect ratio", OFFSET(sar), AV_OPT_TYPE_RATIONAL, { .dbl = 1 }, 0, INT_MAX, FLAGS }, \
335 { "format", "Output video format (software format of hardware frames)", OFFSET(out_format_string), AV_OPT_TYPE_STRING, .flags = FLAGS },
362 .name = "color_vulkan",
372 .priv_class = &color_vulkan_class,
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
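As a hedged illustration of this helper (the wrapper name alloc_out_frame is invented for the sketch, not taken from this file), a video source requests its output buffer from the outlink like so:

    static AVFrame *alloc_out_frame(AVFilterLink *outlink, int w, int h)
    {
        /* Allocate a writable output frame on the link, honouring downstream requirements. */
        AVFrame *frame = ff_get_video_buffer(outlink, w, h);
        if (!frame)
            return NULL;
        frame->sample_aspect_ratio = outlink->sample_aspect_ratio;
        return frame;
    }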
Filter: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions.
void ff_vk_shader_free(FFVulkanContext *s, FFVulkanShader *shd)
Free a shader.
int ff_vk_shader_init(FFVulkanContext *s, FFVulkanShader *shd, const char *name, VkPipelineStageFlags stage, const char *extensions[], int nb_extensions, int lg_x, int lg_y, int lg_z, uint32_t required_subgroup_size)
Initialize a shader object, with a specific set of extensions, type+bind, local group size,...
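Per the signature above, a hedged sketch of how a compute shader for a filter like this might be initialized (the field names s->vkctx / s->shd and the 32x32 workgroup size are assumptions, not taken from this file):

    int err = ff_vk_shader_init(&s->vkctx, &s->shd, "color_vulkan",
                                VK_SHADER_STAGE_COMPUTE_BIT,
                                NULL, 0,        /* no extra GLSL extensions */
                                32, 32, 1,      /* assumed local workgroup size */
                                0);             /* no required subgroup size */
    if (err < 0)
        return err;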
static void testsrc_vulkan_uninit(AVFilterContext *avctx)
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
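For a draw-once source such as this filter, producing output typically means cloning the cached picture and handing it downstream; a minimal sketch assuming picref and a pts counter in the private context:

    AVFrame *frame = av_frame_clone(s->picref);    /* reuse the picture painted once at init */
    if (!frame)
        return AVERROR(ENOMEM);
    frame->pts = s->pts++;                         /* assumed pts counter in the private context */
    return ff_filter_frame(outlink, frame);        /* ownership passes to the next filter */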
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
#define AVERROR_EOF
End of file.
int ff_vk_filter_process_simple(FFVulkanContext *vkctx, FFVkExecPool *e, FFVulkanShader *shd, AVFrame *out_f, AVFrame *in_f, VkSampler sampler, void *push_src, size_t push_size)
Submit a compute shader with zero or one input and a single output for execution.
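The calls at listing lines 232 and 251 follow this helper's pattern; a hedged sketch assuming the exec pool and shader live in the private context as s->e and s->shd (a pure source has no input frame and needs no sampler):

    int err = ff_vk_filter_process_simple(&s->vkctx, &s->e, &s->shd, frame, NULL,
                                          VK_NULL_HANDLE, &s->opts, sizeof(s->opts));
    if (err < 0)
        return err;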
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
int ff_vk_qf_init(FFVulkanContext *s, FFVkQueueFamilyCtx *qf, VkQueueFlagBits dev_family)
Chooses a QF and loads it into a context.
int64_t duration
duration expressed in microseconds
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
This structure describes decoded (raw) audio or video data.
int ff_vk_filter_init(AVFilterContext *avctx)
General lavfi IO functions.
@ AVCOL_RANGE_JPEG
Full range content.
const struct AVLumaCoefficients * av_csp_luma_coeffs_from_avcsp(enum AVColorSpace csp)
Retrieves the Luma coefficients necessary to construct a conversion matrix from an enum constant desc...
static const char rgb2yuv[]
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Struct containing luma coefficients to be used for RGB to YUV/YCoCg, or similar calculations.
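Listing lines 138-153 perform the usual RGB-to-YUV conversion of the color option plus limited-range scaling; a hedged sketch of the same arithmetic using the helpers referenced on this page (color_rgba stands for the uint8_t[4] value of the AV_OPT_TYPE_COLOR option, and BT.601-class coefficients are chosen only as an example):

    double rgb2yuv[3][3], rgbad[4], yuvad[4];
    const AVLumaCoefficients *coeffs =
        av_csp_luma_coeffs_from_avcsp(AVCOL_SPC_SMPTE170M);  /* example colorspace */

    ff_fill_rgb2yuv_table(coeffs, rgb2yuv);        /* 3x3 RGB -> YUV matrix */

    for (int i = 0; i < 4; i++)
        rgbad[i] = color_rgba[i] / 255.0;          /* normalize the 8-bit color option */

    ff_matrix_mul_3x3_vec(yuvad, rgbad, rgb2yuv);  /* convert; alpha is copied through */
    yuvad[3] = rgbad[3];

    for (int i = 0; i < 3; i++) {                  /* MPEG/limited-range scaling and offsets */
        int chroma = i > 0;
        yuvad[i] *= (chroma ? 224.0 : 219.0) / 255.0;
        yuvad[i] += (chroma ? 128.0 :  16.0) / 255.0;
    }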
void ff_vk_uninit(FFVulkanContext *s)
Frees main context.
static av_cold int init_filter(AVFilterContext *ctx, enum TestSrcVulkanMode mode)
void(* uninit)(struct FFVkSPIRVCompiler **ctx)
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
const char * name
Filter name.
AVFILTER_DEFINE_CLASS(color_vulkan)
A link between two filters.
@ AV_PIX_FMT_VULKAN
Vulkan hardware images.
Link properties exposed to filter code, but not external callers.
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
void * priv
private data for use by the filter
int ff_vk_filter_init_context(AVFilterContext *avctx, FFVulkanContext *s, AVBufferRef *frames_ref, int width, int height, enum AVPixelFormat sw_format)
Can be called manually, if not using ff_vk_filter_config_output.
int ff_vk_shader_register_exec(FFVulkanContext *s, FFVkExecPool *pool, FFVulkanShader *shd)
Register a shader with an exec pool.
int ff_vk_shader_add_descriptor_set(FFVulkanContext *s, FFVulkanShader *shd, FFVulkanDescriptorSetBinding *desc, int nb, int singular, int print_to_shader_only)
Add descriptor to a shader.
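The storage-image binding at listing lines 103-109 is registered roughly as below; a sketch assuming a single binding and only the fields shown in the listing (the real filter likely also sets the image format layout and a per-plane element count):

    FFVulkanDescriptorSetBinding desc = {
        .name      = "output_img",
        .type      = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
        .mem_quali = "writeonly",                  /* the shader only writes the image */
        .stages    = VK_SHADER_STAGE_COMPUTE_BIT,
    };
    int err = ff_vk_shader_add_descriptor_set(&s->vkctx, &s->shd, &desc, 1,
                                              0 /* singular */, 0 /* print_to_shader_only */);
    if (err < 0)
        return err;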
int draw_once_reset
draw only the first frame or in case of reset
A filter pad used for either input or output.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
static void ff_outlink_set_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the source filter.
enum AVColorRange out_range
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
AVFrame * picref
cached reference containing the painted picture
AVRational sample_aspect_ratio
agreed upon sample aspect ratio
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
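The duration check around listing line 215 rescales between the output link's time base and AV_TIME_BASE_Q (microseconds); a hedged sketch of that pattern:

    /* s->pts counts output frames in outlink->time_base; s->duration is in microseconds. */
    if (s->duration >= 0 &&
        av_rescale_q(s->pts, outlink->time_base, AV_TIME_BASE_Q) >= s->duration) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
        return 0;
    }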
static int testsrc_vulkan_activate(AVFilterContext *ctx)
void ff_matrix_mul_3x3_vec(double dst[3], const double vec[3], const double mat[3][3])
void ff_vk_exec_pool_free(FFVulkanContext *s, FFVkExecPool *pool)
#define FILTER_OUTPUTS(array)
static const struct @465 planes[]
const AVFilter ff_vsrc_color_vulkan
const char * ff_vk_shader_rep_fmt(enum AVPixelFormat pix_fmt, enum FFVkShaderRepFormat rep_fmt)
TestSrcVulkanPushData opts
Rational number (pair of numerator and denominator).
filter_frame: for filters that do not use the activate() callback, this method is called when a frame is pushed to the filter's input.
@ AV_OPT_TYPE_COLOR
Underlying C type is uint8_t[4].
@ AV_PICTURE_TYPE_I
Intra.
static const AVFilterPad testsrc_vulkan_outputs[]
int ff_vk_exec_pool_init(FFVulkanContext *s, FFVkQueueFamilyCtx *qf, FFVkExecPool *pool, int nb_contexts, int nb_queries, VkQueryType query_type, int query_64bit, const void *query_create_pnext)
Allocates/frees an execution pool.
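Queue-family selection (ff_vk_qf_init above) and execution-pool setup usually pair up; a hedged sketch with an assumed single context and no queries:

    FFVkQueueFamilyCtx qf;
    FFVkExecPool e;

    /* Pick a compute-capable queue family, then build a small execution pool on it. */
    int err = ff_vk_qf_init(&s->vkctx, &qf, VK_QUEUE_COMPUTE_BIT);
    if (err < 0)
        return err;

    err = ff_vk_exec_pool_init(&s->vkctx, &qf, &e, 1,   /* one execution context (assumed) */
                               0, 0, 0, NULL);          /* no queries */
    if (err < 0)
        return err;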
@ AVCOL_RANGE_UNSPECIFIED
Undefined behavior: in the C language some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no undefined behavior occurs, and optimizing code based on wrong assumptions can, and in some cases has, led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c…
static FilterLink * ff_filter_link(AVFilterLink *link)
#define FF_FILTER_FLAG_HWFRAME_AWARE
The filter is aware of hardware frames, and any hardware frame context should not be automatically pr...
int(* init)(AVBSFContext *ctx)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
enum AVPixelFormat output_format
AVFilterContext * src
source filter
int(* compile_shader)(FFVulkanContext *s, struct FFVkSPIRVCompiler *ctx, FFVulkanShader *shd, uint8_t **data, size_t *size, const char *entrypoint, void **opaque)
#define AVERROR_EXTERNAL
Generic error in an external library.
AVRational sar
sample aspect ratio
AVBufferRef * hw_frames_ctx
For hwaccel pixel formats, this should be a reference to the AVHWFramesContext describing the frames.
static const AVOption color_vulkan_options[]
static void uninit(AVBSFContext *ctx)
void ff_fill_rgb2yuv_table(const AVLumaCoefficients *coeffs, double rgb2yuv[3][3])
#define i(width, name, range_min, range_max)
int w
agreed upon image width
AVColorSpace
YUV colorspace type.
int ff_vk_shader_link(FFVulkanContext *s, FFVulkanShader *shd, uint8_t *spirv, size_t spirv_len, const char *entrypoint)
Link a shader into an executable.
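Tying the SPIR-V pieces on this page together, shader creation compiles the GLSL, links the SPIR-V, and registers the shader with the exec pool; a hedged sketch using the compiler interface declared above (s->shd and s->e are assumed field names):

    uint8_t *spv_data;
    size_t   spv_len;
    void    *spv_opaque = NULL;
    int      err;

    FFVkSPIRVCompiler *spv = ff_vk_spirv_init();   /* GLSL -> SPIR-V backend, as at listing line 79 */
    if (!spv)
        return AVERROR_EXTERNAL;

    err = spv->compile_shader(&s->vkctx, spv, &s->shd, &spv_data, &spv_len, "main", &spv_opaque);
    if (err >= 0)
        err = ff_vk_shader_link(&s->vkctx, &s->shd, spv_data, spv_len, "main");
    if (err >= 0)
        err = ff_vk_shader_register_exec(&s->vkctx, &s->e, &s->shd);

    if (spv_opaque)
        spv->free_shader(spv, &spv_opaque);        /* release compiler scratch data */
    spv->uninit(&spv);
    return err;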
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
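In config_props the frame rate option typically fixes the link time base as its inverse; a small sketch:

    FilterLink *l = ff_filter_link(outlink);
    l->frame_rate      = s->frame_rate;            /* e.g. 60/1 from the "rate" option */
    outlink->time_base = av_inv_q(s->frame_rate);  /* one tick per frame, e.g. 1/60 */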
const char * name
Pad name.
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
void(* free_shader)(struct FFVkSPIRVCompiler *ctx, void **opaque)
These buffered frames must be flushed immediately if a new input produces new output. The filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate() callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If the filter should request a frame on one of its inputs, it should do so repeatedly until at least one frame has been pushed. Return, or at least make progress towards, producing a frame.
int ff_vk_shader_add_push_const(FFVulkanShader *shd, int offset, int size, VkShaderStageFlagBits stage)
Add/update push constants for execution.
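The push-constant block at listing lines 93-99 must be mirrored by a C struct registered with the shader; a hedged sketch (std430 layout, struct contents inferred from the GLSL above):

    /* C-side mirror of the GLSL block at listing lines 93-99. */
    typedef struct TestSrcVulkanPushData {
        float color_comp[4];                       /* matches "vec4 color_comp" */
    } TestSrcVulkanPushData;

    /* During shader construction, with shd pointing at the filter's FFVulkanShader: */
    GLSLC(0, layout(push_constant, std430) uniform pushConstants { );
    GLSLC(1,     vec4 color_comp;                                  );
    GLSLC(0, };                                                    );
    int err = ff_vk_shader_add_push_const(shd, 0, sizeof(TestSrcVulkanPushData),
                                          VK_SHADER_STAGE_COMPUTE_BIT);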
enum AVPixelFormat av_get_pix_fmt(const char *name)
Return the pixel format corresponding to name.
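The "format" option checked at listing line 271 is resolved with this helper; a hedged sketch (the YUV444P fallback is an assumption):

    /* Resolve the software format of the hardware frames; fall back if the option is unset. */
    if (s->out_format_string) {
        s->vkctx.output_format = av_get_pix_fmt(s->out_format_string);
        if (s->vkctx.output_format == AV_PIX_FMT_NONE) {
            av_log(avctx, AV_LOG_ERROR, "Invalid output format.\n");
            return AVERROR(EINVAL);
        }
    } else {
        s->vkctx.output_format = AV_PIX_FMT_YUV444P;   /* assumed default */
    }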
int h
agreed upon image height
@ AV_OPT_TYPE_INT
Underlying C type is int.
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link.
int draw_once
draw only the first frame, always put out the same picture
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
static int testsrc_vulkan_config_props(AVFilterLink *outlink)
#define flags(name, subs,...)
The definition of that "something" depends on the semantics of the filter. The callback must examine the status of the filter's links and proceed accordingly. The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function. If this function returns true, …
AVRational frame_rate
Frame rate of the stream on the link, or 1/0 if unknown or variable.
AVColorRange
Visual content value range.
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
#define FILTER_SINGLE_PIXFMT(pix_fmt_)