58 "Valid DV profiles are:\n",
75 memset(&fdsp, 0, sizeof(fdsp));
76 memset(&mecc, 0, sizeof(mecc));
77 memset(&pdsp, 0, sizeof(pdsp));
86 s->fdct[0] = fdsp.fdct;
110 *vlc = 0xfe00 | (level << 1) | sign;
180 vlc = av_mod_uintp2(vlc, size);
182 if (pb + 1 >= pb_end) {
226 131072, 257107, 257107, 242189, 252167, 242189, 235923, 237536,
227 237536, 235923, 229376, 231390, 223754, 231390, 229376, 222935,
228 224969, 217965, 217965, 224969, 222935, 200636, 218652, 211916,
229 212325, 211916, 218652, 200636, 188995, 196781, 205965, 206433,
230 206433, 205965, 196781, 188995, 185364, 185364, 200636, 200704,
231 200636, 185364, 185364, 174609, 180568, 195068, 195068, 180568,
232 174609, 170091, 175557, 189591, 175557, 170091, 165371, 170627,
233 170627, 165371, 160727, 153560, 160727, 144651, 144651, 136258,
236 131072, 262144, 257107, 257107, 242189, 242189, 242189, 242189,
237 237536, 237536, 229376, 229376, 200636, 200636, 224973, 224973,
238 223754, 223754, 235923, 235923, 229376, 229376, 217965, 217965,
239 211916, 211916, 196781, 196781, 185364, 185364, 206433, 206433,
240 211916, 211916, 222935, 222935, 200636, 200636, 205964, 205964,
241 200704, 200704, 180568, 180568, 175557, 175557, 195068, 195068,
242 185364, 185364, 188995, 188995, 174606, 174606, 175557, 175557,
243 170627, 170627, 153560, 153560, 165371, 165371, 144651, 144651,
265 static const int classes[] = { 12, 24, 36, 0xffff };
267 static const int classes[] = { -1, -1, 255, 0xffff };
269 int max = classes[0];
271 const unsigned deadzone = s->quant_deadzone;
272 const unsigned threshold = 2 * deadzone;
291 memset(blk, 0, 64 * sizeof(*blk));
299 for (area = 0; area < 4; area++) {
300 bi->prev[area] = prev;
305 if (level + deadzone > threshold) {
333 for (area = 0; area < 4; area++) {
334 bi->prev[area] = prev;
356 int i, j, k, a, prev, a2;
366 for (i = 0; i < 5; i++) {
372 for (j = 0; j < 6; j++, b++) {
373 for (a = 0; a < 4; a++) {
395 b->next[prev] = b->next[k];
398 b->prev[a + 1] = prev;
406 } while (qnos[0] | qnos[1] | qnos[2] | qnos[3] | qnos[4]);
411 for (j = 0; j < 6 * 5; j++, b++) {
413 for (k = b->next[prev]; k < 64; k = b->next[k]) {
414 if (b->mb[k] < a && b->mb[k] > -a) {
415 b->next[prev] = b->next[k];
430 int mb_x, mb_y, c_offset;
431 ptrdiff_t linesize, y_stride;
440 int qnos[5] = { 15, 15, 15, 15, 15 };
441 int *qnosp = &qnos[0];
444 enc_blk = &enc_blks[0];
445 for (mb_index = 0; mb_index < 5; mb_index++) {
451 (s->sys->height >= 720 && mb_y != 134)) {
452 y_stride = s->frame->linesize[0] << 3;
456 y_ptr = s->frame->data[0] +
457         ((mb_y * s->frame->linesize[0] + mb_x) << 3);
458 linesize = s->frame->linesize[0];
460 if (s->sys->video_stype == 4) {
478 for (j = 2; j; j--) {
479 uint8_t *c_ptr = s->frame->data[j] + c_offset;
480 linesize = s->frame->linesize[j];
481 y_stride = (mb_y == 134) ? 8 : (s->frame->linesize[j] << 3);
485 for (i = 0; i < 8; i++) {
486 d = c_ptr + (linesize << 3);
503 if (s->sys->bpm == 8)
513 for (j = 0; j < 5 * s->sys->bpm;) {
520 for (i = 0; i < s->sys->bpm; i++, j++) {
521 int sz = s->sys->block_sizes[i] >> 3;
524 put_sbits(&pbs[j], 9, ((enc_blks[j].mb[0] >> 3) - 1024 + 2) >> 2);
525 put_bits(&pbs[j], 1, enc_blks[j].dct_mode);
526 put_bits(&pbs[j], 2, enc_blks[j].cno);
534 for (i = 0; i < s->sys->bpm; i++)
535 if (enc_blks[start_mb + i].partial_bit_count)
537 &pbs[start_mb + s->sys->bpm]);
542 for (j = 0; j < 5 * s->sys->bpm; j++) {
543 if (enc_blks[j].partial_bit_count)
545 if (enc_blks[j].partial_bit_count)
549 for (j = 0; j < 5 * s->sys->bpm; j++) {
556 "bitstream written beyond buffer size\n");
559 memset(pbs[j].buf + pos, 0xff, size - pos);
587 int fs = c->frame->top_field_first ? 0x00 : 0x40;
590 if ((int)(av_q2d(c->avctx->sample_aspect_ratio) *
591     c->avctx->width / c->avctx->height * 10) >= 17)
647 buf[1] = (seq_num << 4) |
656 if (syb_num == 0 || syb_num == 6) {
660 } else if (syb_num == 11) {
678 for (chan = 0; chan < c->sys->n_difchan; chan++) {
679 for (i = 0; i < c->sys->difseg_size; i++) {
680 memset(buf, 0xff, 80 * 6);
689 for (j = 0; j < 2; j++) {
691 for (k = 0; k < 6; k++)
697 for (j = 0; j < 3; j++) {
708 for (j = 0; j < 135; j++) {
710 memset(buf, 0xff, 80);
733 c->pix_fmt = s->sys->pix_fmt;
735 #if FF_API_CODED_FRAME
737 c->coded_frame->key_frame = 1;
756 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
757 #define OFFSET(x) offsetof(DVVideoContext, x)
759 { "quant_deadzone", "Quantizer dead zone", OFFSET(quant_deadzone), AV_OPT_TYPE_INT, { .i64 = 7 }, 0, 1024, VE },
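The quant_deadzone entry above is a private encoder option. As a hedged illustration (not taken from this file; enc_ctx is a hypothetical AVCodecContext pointer), it could be set through an options dictionary when opening the DV encoder:

    #include <libavcodec/avcodec.h>
    #include <libavutil/dict.h>

    /* Illustrative only: raise the quantizer dead zone from its default of 7
     * (allowed range is 0..1024, per the option table above). */
    AVDictionary *opts = NULL;
    av_dict_set(&opts, "quant_deadzone", "16", 0);
    if (avcodec_open2(enc_ctx, avcodec_find_encoder(AV_CODEC_ID_DVVIDEO), &opts) < 0) {
        /* handle the error */
    }
    av_dict_free(&opts);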
#define AV_CODEC_CAP_INTRA_ONLY
Codec is intra only.
#define FF_ENABLE_DEPRECATION_WARNINGS
static void dv_guess_qnos(EncBlockInfo *blks, int *qnos)
AVPixelFormat
Pixel format.
static av_always_inline int dv_init_enc_block(EncBlockInfo *bi, uint8_t *data, ptrdiff_t linesize, DVVideoContext *s, int bias)
#define DV_MAX_BPM
maximum number of blocks per macroblock in any DV format
static av_cold void dv_vlc_map_tableinit(void)
static av_cold int init(AVCodecContext *avctx)
In libavfilter, the word "frame" indicates either a video frame or a group of audio samples as stored in an AVFrame structure. During format negotiation the framework computes the intersection of the formats supported at each end of a link, and once a single format is chosen all references to the list are updated; a filter that requires the same format on its input and output therefore only has to use a reference to the same list of formats. query_formats() can leave some formats unset and return AVERROR(EAGAIN) to have negotiation run again later, which lets filters with complex requirements use the format negotiated on one link to set the formats supported on another.
void(* fdct248)(int16_t *block)
The official guide to swscale for confused developers.
static av_always_inline int dv_rl2vlc(int run, int l, int sign, uint32_t *vlc)
static void put_sbits(PutBitContext *pb, int n, int32_t value)
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
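A minimal sketch of the PutBitContext pattern this encoder relies on (put_bits.h is an internal libavcodec header; the buffer and field values here are made up for illustration):

    #include "libavcodec/put_bits.h"

    static void write_example(uint8_t *buf, int buf_size)
    {
        PutBitContext pb;
        init_put_bits(&pb, buf, buf_size);  /* attach the bit writer to buf          */
        put_bits(&pb, 4, 0x9);              /* write a 4-bit unsigned field          */
        put_sbits(&pb, 9, -100);            /* write a 9-bit signed field, cf. line 524 */
        flush_put_bits(&pb);                /* pad the final byte with zeros         */
    }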
AVCodec ff_dvvideo_encoder
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
Write the bit val n times.
uint8_t partial_bit_count
static const int mb_area_start[5]
static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
#define DV_VLC_MAP_RUN_SIZE
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
void ff_dv_print_profiles(void *logctx, int loglevel)
Print all allowed DV profiles into logctx at specified logging level.
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
static const int dv_weight_88[64]
void(* get_pixels)(int16_t *av_restrict block, const uint8_t *pixels, ptrdiff_t stride)
static int put_bits_left(PutBitContext *s)
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
static const AVClass dvvideo_encode_class
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
#define LOCAL_ALIGNED_8(t, v,...)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int ff_dv_init_dynamic_tables(DVVideoContext *ctx, const AVDVProfile *d)
static const int dv_weight_bits
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
static double av_q2d(AVRational a)
Convert an AVRational to a double.
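For example, the aspect-ratio test around source lines 590-591 boils down to the following (avctx being an AVCodecContext pointer):

    /* DAR = SAR * width / height; a DAR of roughly 1.7 or more is treated as 16:9. */
    int is_16_9 = (int)(av_q2d(avctx->sample_aspect_ratio) *
                        avctx->width / avctx->height * 10) >= 17;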
static enum AVPixelFormat pix_fmts[]
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
#define LIBAVUTIL_VERSION_INT
const uint8_t ff_dv_quant_shifts[22][4]
Describe the class of an AVClass context structure.
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
#define fs(width, name, subs,...)
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
int ildct_cmp
interlaced DCT comparison function
static av_always_inline PutBitContext * dv_encode_ac(EncBlockInfo *bi, PutBitContext *pb_pool, PutBitContext *pb_end)
Undefined behavior: in C, some operations are undefined, such as signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined behavior must not occur in a C program; it is not safe even if the output of the undefined operation is unused. Optimizing compilers assume that no undefined behavior occurs and have in fact transformed code on that assumption, in some cases with effects beyond the output of the computation. Signed integer overflow is a particular problem in speed-critical code that is highly optimized and works with signed integers.
static int weight(int i, int blen, int offset)
static void dv_format_frame(DVVideoContext *c, uint8_t *buf)
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
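As a hedged example (setup code, not taken from this file), a 25 fps DV encode would typically use a time base that is the inverse of the frame rate:

    avctx->time_base = (AVRational){ 1, 25 };            /* one tick per frame */
    AVRational frame_rate = av_inv_q(avctx->time_base);  /* 25/1 frames per second */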
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static av_cold int dvvideo_encode_init(AVCodecContext *avctx)
const uint8_t ff_dv_zigzag248_direct[64]
uint32_t partial_bit_buffer
static int dvvideo_encode_frame(AVCodecContext *c, AVPacket *pkt, const AVFrame *frame, int *got_packet)
The reader does not expect b to be semantically negative here, and if the code is later changed, maybe by adding a division or another operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a value that is semantically a signed int (to use the same example, SUINT a).
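A toy sketch of that idiom (SUINT itself comes from FFmpeg's internal headers; it is re-declared locally here only for illustration):

    typedef unsigned SUINT;  /* stand-in for the definition in libavutil/internal.h */

    static int scale_by_4(int v)
    {
        SUINT a = v;          /* semantically a signed value                     */
        return (int)(a * 4);  /* unsigned multiply: wraps instead of overflowing */
    }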
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
int flags
A combination of AV_PKT_FLAG values.
static const int vs_total_ac_bits
static int dv_write_pack(enum dv_pack_type pack_id, DVVideoContext *c, uint8_t *buf)
static const AVOption dv_options[]
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
#define i(width, name, range_min, range_max)
static int put_bits_count(PutBitContext *s)
static struct dv_vlc_pair dv_vlc_map[DV_VLC_MAP_RUN_SIZE][DV_VLC_MAP_LEV_SIZE]
static int dv_write_ssyb_id(uint8_t syb_num, uint8_t fr, uint8_t *buf)
#define xf(width, name, var, range_min, range_max, subs,...)
static int dv_work_pool_size(const AVDVProfile *d)
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
const uint8_t ff_zigzag_direct[64]
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more, it must just process the frame or queue it, and the task of requesting more frames is left to the filter's request_frame method or to the application. A filter with several inputs must be ready for frames arriving randomly on any input, which will most likely require some kind of queuing mechanism; it is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame is called when a frame is wanted on an output: a source should directly call filter_frame on the corresponding output, a filter with queued frames should push one of them, and otherwise the filter should request a frame on one of its inputs, repeatedly if needed, until at least one frame has been pushed or progress has been made towards producing a frame.
static void dv_calculate_mb_xy(DVVideoContext *s, DVwork_chunk *work_chunk, int m, int *mb_x, int *mb_y)
static av_always_inline int dv_guess_dct_mode(DVVideoContext *s, uint8_t *data, ptrdiff_t linesize)
main external API structure.
const uint8_t ff_dv_quant_offset[4]
#define FF_DISABLE_DEPRECATION_WARNINGS
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
void(* fdct)(int16_t *block)
static int dv_write_dif_id(enum dv_section_type t, uint8_t chan_num, uint8_t seq_num, uint8_t dif_num, uint8_t *buf)
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
av_cold int ff_dvvideo_init(AVCodecContext *avctx)
#define LOCAL_ALIGNED_16(t, v,...)
This structure stores compressed data.
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
int width
picture width / height.
#define DV_VLC_MAP_LEV_SIZE
static av_always_inline int dv_rl2vlc_size(int run, int l)
static const int dv_weight_248[64]
const AVDVProfile * av_dv_codec_profile2(int width, int height, enum AVPixelFormat pix_fmt, AVRational frame_rate)
Get a DV profile for the provided stream parameters.
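A hedged sketch of the lookup an init function performs (deriving frame_rate from the time base is an assumption here, not copied from this file):

    AVRational frame_rate = av_inv_q(avctx->time_base);  /* assumed source of the rate */
    const AVDVProfile *profile =
        av_dv_codec_profile2(avctx->width, avctx->height, avctx->pix_fmt, frame_rate);
    if (!profile)
        return AVERROR(EINVAL);  /* no matching DV profile */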
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
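Inside an encode callback the output packet is sized from the selected DV profile; a sketch under that assumption (the exact call site in this file may differ):

    int ret = ff_alloc_packet2(avctx, pkt, s->sys->frame_size, 0);
    if (ret < 0)
        return ret;  /* allocation failed or the supplied packet was too small */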
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
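The "Valid DV profiles are:" string at source line 58 belongs to the profile-lookup error path; a sketch of how such a diagnostic is typically emitted (message wording paraphrased, not copied from this file):

    av_log(avctx, AV_LOG_ERROR,
           "No DV profile found for %dx%d %s video. Valid DV profiles are:\n",
           avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
    ff_dv_print_profiles(avctx, AV_LOG_ERROR);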