19 #include "config_components.h"
31 #if CONFIG_LIBLCEVC_DEC
38 return LCEVC_I420_10_LE;
47 return LCEVC_ColorFormat_Unknown;
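/* Not part of the original listing: a hedged sketch of how a caller might
 * guard against pixel formats that map_format() cannot express before
 * building a picture descriptor. Whether this file performs such a check is
 * not preserved here; the helper name and error value are illustrative. */
static int check_mappable_format(const AVFrame *frame)
{
    if (map_format(frame->format) == LCEVC_ColorFormat_Unknown)
        return AVERROR(EINVAL); /* hypothetical error choice */
    return 0;
}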
/* Wrap the decoded base frame in an LCEVC picture: allocate a picture
 * matching the AVFrame, lock it for writing and gather its plane pointers
 * so the frame data can be copied in. */
static int alloc_base_frame(void *logctx, FFLCEVCContext *lcevc,
                            const AVFrame *frame, LCEVC_PictureHandle *picture)
{
    LCEVC_PictureDesc desc;
    LCEVC_ColorFormat fmt = map_format(frame->format);
    LCEVC_PictureLockHandle lock;
    uint8_t *data[4] = { NULL };
    int linesizes[4] = { 0 };
    uint32_t planes;
    LCEVC_ReturnCode res;

    res = LCEVC_DefaultPictureDesc(&desc, fmt, frame->width, frame->height);
    if (res != LCEVC_Success)
        return AVERROR_EXTERNAL;

    /* ... crop fields elided ... */
    desc.sampleAspectRatioNum = frame->sample_aspect_ratio.num;
    desc.sampleAspectRatioDen = frame->sample_aspect_ratio.den;

    res = LCEVC_AllocPicture(lcevc->decoder, &desc, picture);
    if (res != LCEVC_Success) {
        return AVERROR_EXTERNAL;
    }

    res = LCEVC_LockPicture(lcevc->decoder, *picture, LCEVC_Access_Write, &lock);
    if (res != LCEVC_Success)
        return AVERROR_EXTERNAL;

    res = LCEVC_GetPicturePlaneCount(lcevc->decoder, *picture, &planes);
    if (res != LCEVC_Success)
        return AVERROR_EXTERNAL;

    for (uint32_t i = 0; i < planes; i++) {
        LCEVC_PicturePlaneDesc plane;

        res = LCEVC_GetPictureLockPlaneDesc(lcevc->decoder, lock, i, &plane);
        if (res != LCEVC_Success)
            return AVERROR_EXTERNAL;

        data[i] = plane.firstSample;
        linesizes[i] = plane.rowByteStride;
    }

    /* ... copy of the frame into the locked picture and unlock elided ... */
    if (res != LCEVC_Success)
        return AVERROR_EXTERNAL;

    return 0;
}
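/* Not part of the original listing: a hedged sketch of the copy/unlock step
 * elided inside alloc_base_frame() above. av_image_copy2() is referenced by
 * this file; LCEVC_UnlockPicture() and the error handling are assumptions. */
    av_image_copy2(data, linesizes, frame->data, frame->linesize,
                   frame->format, frame->width, frame->height);

    res = LCEVC_UnlockPicture(lcevc->decoder, lock); /* assumed unlock call */
    if (res != LCEVC_Success)
        return AVERROR_EXTERNAL;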
/* Wrap the pre-allocated output frame in an LCEVC picture by describing its
 * data planes to the decoder, so the enhanced output is written in place. */
static int alloc_enhanced_frame(void *logctx, FFLCEVCContext *lcevc,
                                const AVFrame *frame, LCEVC_PictureHandle *picture)
{
    LCEVC_PictureDesc desc;
    LCEVC_PicturePlaneDesc planes[4] = { 0 };
    LCEVC_ReturnCode res;

    /* ... picture descriptor setup elided ... */
    if (res != LCEVC_Success)
        return AVERROR_EXTERNAL;

    for (int i = 0; i < 4; i++) {
        /* ... fill planes[i] from the frame's data and linesize ... */
    }

    /* ... allocation of the picture from the external planes elided ... */
    if (res != LCEVC_Success) {
        return AVERROR_EXTERNAL;
    }

    return 0;
}
/* Feed one frame to the LCEVC decoder: the raw enhancement payload carried
 * as AV_FRAME_DATA_LCEVC side data, the decoded base picture, and the
 * picture that will receive the enhanced output. */
static int lcevc_send_frame(void *logctx, FFLCEVCFrame *frame_ctx, const AVFrame *in)
{
    FFLCEVCContext *lcevc = frame_ctx->lcevc;
    const AVFrameSideData *sd = av_frame_get_side_data(in, AV_FRAME_DATA_LCEVC);
    LCEVC_PictureHandle picture;
    LCEVC_ReturnCode res;

    /* ... */

    res = LCEVC_SendDecoderEnhancementData(lcevc->decoder, in->pts, 0, sd->data, sd->size);
    if (res != LCEVC_Success)
        return AVERROR_EXTERNAL;

    /* ... wrap the base frame (alloc_base_frame()) ... */
    res = LCEVC_SendDecoderBase(lcevc->decoder, in->pts, 0, picture, -1, NULL);
    if (res != LCEVC_Success)
        return AVERROR_EXTERNAL;

    memset(&picture, 0, sizeof(picture));

    /* ... wrap the output frame (alloc_enhanced_frame()) ... */
    res = LCEVC_SendDecoderPicture(lcevc->decoder, picture);
    if (res != LCEVC_Success)
        return AVERROR_EXTERNAL;

    return 0;
}
/* Retrieve the enhanced picture from the LCEVC decoder and propagate its
 * properties (crop, sample aspect ratio, dimensions) to the output frame. */
static int generate_output(void *logctx, FFLCEVCContext *lcevc, AVFrame *out)
{
    LCEVC_PictureDesc desc;
    LCEVC_DecodeInformation info;
    LCEVC_PictureHandle picture;
    LCEVC_ReturnCode res;

    res = LCEVC_ReceiveDecoderPicture(lcevc->decoder, &picture, &info);
    if (res != LCEVC_Success)
        return AVERROR_EXTERNAL;

    res = LCEVC_GetPictureDesc(lcevc->decoder, picture, &desc);
    if (res != LCEVC_Success)
        return AVERROR_EXTERNAL;

    out->crop_top    = desc.cropTop;
    out->crop_bottom = desc.cropBottom;
    out->crop_left   = desc.cropLeft;
    out->crop_right  = desc.cropRight;
    out->sample_aspect_ratio.num = desc.sampleAspectRatioNum;
    out->sample_aspect_ratio.den = desc.sampleAspectRatioDen;

    /* ... */
    out->width  = desc.width + out->crop_left + out->crop_right;
    out->height = desc.height + out->crop_top + out->crop_bottom;

    res = LCEVC_FreePicture(lcevc->decoder, picture);
    if (res != LCEVC_Success)
        return AVERROR_EXTERNAL;

    return 0;
}
/* Reclaim and free the base picture after generating the enhanced output. */
static int lcevc_receive_frame(void *logctx, FFLCEVCFrame *frame_ctx, AVFrame *out)
{
    FFLCEVCContext *lcevc = frame_ctx->lcevc;
    LCEVC_PictureHandle picture;
    LCEVC_ReturnCode res;

    /* ... generate_output() call elided ... */
    res = LCEVC_ReceiveDecoderBase(lcevc->decoder, &picture);
    if (res != LCEVC_Success && res != LCEVC_Again)
        return AVERROR_EXTERNAL;
    if (res == LCEVC_Again)
        return 0;

    res = LCEVC_FreePicture(lcevc->decoder, picture);
    if (res != LCEVC_Success)
        return AVERROR_EXTERNAL;

    return 0;
}
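/* Not part of the original listing: an illustrative summary of the per-frame
 * flow implemented by lcevc_send_frame() and lcevc_receive_frame() above,
 * using only LCEVC_* calls that appear in this file. The helper name and
 * parameters are hypothetical, error handling is simplified, and the
 * LCEVC_Again case handled above is ignored. */
static int lcevc_run_one_frame(LCEVC_DecoderHandle dec, int64_t pts,
                               const uint8_t *enh, uint32_t enh_size,
                               LCEVC_PictureHandle base, LCEVC_PictureHandle output)
{
    LCEVC_DecodeInformation info;
    LCEVC_PictureHandle enhanced, base_back;

    /* 1. Raw enhancement payload for this timestamp. */
    if (LCEVC_SendDecoderEnhancementData(dec, pts, 0, enh, enh_size) != LCEVC_Success)
        return AVERROR_EXTERNAL;
    /* 2. Decoded base picture for the same timestamp. */
    if (LCEVC_SendDecoderBase(dec, pts, 0, base, -1, NULL) != LCEVC_Success)
        return AVERROR_EXTERNAL;
    /* 3. Destination picture for the enhanced output. */
    if (LCEVC_SendDecoderPicture(dec, output) != LCEVC_Success)
        return AVERROR_EXTERNAL;
    /* 4. Collect the enhanced result and release the decoder-side handles. */
    if (LCEVC_ReceiveDecoderPicture(dec, &enhanced, &info) != LCEVC_Success)
        return AVERROR_EXTERNAL;
    if (LCEVC_ReceiveDecoderBase(dec, &base_back) == LCEVC_Success)
        LCEVC_FreePicture(dec, base_back);
    LCEVC_FreePicture(dec, enhanced);

    return 0;
}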
/* Event callback registered with LCEVC_SetDecoderEventCallback(); it forwards
 * the decoder's log events to the logging context. */
static void event_callback(/* ... decoder handle and event id ... */
                           LCEVC_PictureHandle pic, const LCEVC_DecodeInformation *info,
                           const uint8_t *data, uint32_t size, void *logctx)
{
    /* ... */
}
/* Teardown, run from the RefStruct free callback: destroy the decoder
 * instance and clear the context. */
    LCEVC_DestroyDecoder(lcevc->decoder);
    memset(lcevc, 0, sizeof(*lcevc));

#endif /* CONFIG_LIBLCEVC_DEC */
static int lcevc_init(FFLCEVCContext *lcevc, void *logctx)
{
#if CONFIG_LIBLCEVC_DEC
    LCEVC_AccelContextHandle dummy = { 0 };
    const int32_t event = LCEVC_Log;

    if (LCEVC_CreateDecoder(&lcevc->decoder, dummy) != LCEVC_Success) {
        /* ... log the failure ... */
        return AVERROR_EXTERNAL;
    }

    LCEVC_ConfigureDecoderInt(lcevc->decoder, "log_level", 4);
    LCEVC_ConfigureDecoderIntArray(lcevc->decoder, "events", 1, &event);
    LCEVC_SetDecoderEventCallback(lcevc->decoder, event_callback, logctx);

    if (LCEVC_InitializeDecoder(lcevc->decoder) != LCEVC_Success) {
        /* ... log the failure ... */
        LCEVC_DestroyDecoder(lcevc->decoder);
        return AVERROR_EXTERNAL;
    }

#endif
    /* ... */

    return 0;
}
int ff_lcevc_process(void *logctx, AVFrame *frame)
{
    /* ... per-frame context lookup and decoder setup elided ... */
#if CONFIG_LIBLCEVC_DEC
    ret = lcevc_send_frame(logctx, frame_ctx, frame);
    /* ... */
    lcevc_receive_frame(logctx, frame_ctx, frame);
    /* ... */
#endif
    return 0;
}

int ff_lcevc_alloc(FFLCEVCContext **plcevc)
{
#if CONFIG_LIBLCEVC_DEC
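/* Not part of the original listing, which is truncated here: a hedged sketch
 * of how ff_lcevc_alloc() likely continues, based on the RefStruct helpers
 * referenced by this page. "lcevc_free" stands for the teardown callback
 * shown earlier (its real name is not preserved), and the ENOMEM handling is
 * an assumption. */
    FFLCEVCContext *lcevc = av_refstruct_alloc_ext(sizeof(*lcevc), 0, NULL, lcevc_free);
    if (!lcevc)
        return AVERROR(ENOMEM);
    *plcevc = lcevc;
#endif
    return 0;
}
/* Holders later drop their reference with av_refstruct_unref(&lcevc), which
 * runs the teardown callback once the last reference is released. */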