36 #define BITSTREAM_READER_LE
43 #define SMKTREE_BITS 9
44 #define SMK_NODE 0x80000000
46 #define SMKTREE_DECODE_MAX_RECURSION 32
47 #define SMKTREE_DECODE_BIG_MAX_RECURSION 500
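These two limits cap how deep the recursive tree readers (smacker_decode_tree() and smacker_decode_bigtree()) may descend, so a malformed file cannot exhaust the stack. Below is a minimal plain-C sketch of that kind of depth guard; read_bit() and store_leaf() are hypothetical stand-ins, not the decoder's own helpers.

    #define MAX_TREE_DEPTH 32   /* plays the role of SMKTREE_DECODE_MAX_RECURSION */

    extern int read_bit(void);     /* hypothetical bit reader   */
    extern int store_leaf(void);   /* hypothetical leaf handler */

    /* Walk one Huffman sub-tree; refuse prefixes longer than the cap. */
    static int decode_tree(int depth)
    {
        if (depth > MAX_TREE_DEPTH)
            return -1;                      /* corrupt stream: tree too deep */
        if (read_bit() == 0)                /* a 0 bit marks a leaf */
            return store_leaf();
        if (decode_tree(depth + 1) < 0)     /* "0" branch */
            return -1;
        return decode_tree(depth + 1);      /* "1" branch */
    }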
80 1, 2, 3, 4, 5, 6, 7, 8,
81 9, 10, 11, 12, 13, 14, 15, 16,
82 17, 18, 19, 20, 21, 22, 23, 24,
83 25, 26, 27, 28, 29, 30, 31, 32,
84 33, 34, 35, 36, 37, 38, 39, 40,
85 41, 42, 43, 44, 45, 46, 47, 48,
86 49, 50, 51, 52, 53, 54, 55, 56,
87 57, 58, 59, 128, 256, 512, 1024, 2048 };
152 if (i1 < 0 || i2 < 0)
154 val = ctx->recode1[i1] | (ctx->recode2[i2] << 8);
155 if(val == ctx->escapes[0]) {
158 } else if(val == ctx->escapes[1]) {
161 } else if(val == ctx->escapes[2]) {
192 VLC vlc[2] = { { 0 } };
197 if(size >= UINT_MAX>>4){
230 tmp1.lengths, sizeof(int), sizeof(int),
251 tmp2.lengths, sizeof(int), sizeof(int),
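Both argument lines above sit inside init_vlc() calls that turn the temporary low-byte and high-byte trees into searchable VLC tables. A hedged reconstruction of the surrounding call is shown below; it follows the init_vlc macro signature listed at the bottom of this page, but the exact locals and error path may differ from the real source.

    /* Build a little-endian VLC from the code lengths and code words
     * gathered while reading the low-byte tree. */
    res = init_vlc(&vlc[0], SMKTREE_BITS, tmp1.length,
                   tmp1.lengths, sizeof(int), sizeof(int),
                   tmp1.bits, sizeof(uint32_t), sizeof(uint32_t),
                   INIT_VLC_LE);
    if (res < 0) {
        av_log(smk->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
        return AVERROR_INVALIDDATA;
    }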
268 last[0] = last[1] = last[2] = -1;
270 ctx.escapes[0] = escapes[0];
271 ctx.escapes[1] = escapes[1];
272 ctx.escapes[2] = escapes[2];
321 int mmap_size, mclr_size, full_size, type_size, ret;
385 recode[last[0]] = recode[last[1]] = recode[last[2]] = 0;
390 register int *table = recode;
402 if(v != recode[last[0]]) {
403 recode[last[2]] = recode[last[1]];
404 recode[last[1]] = recode[last[0]];
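smk_get_code() keeps the three most recently decoded values in the table slots recorded by last[], so immediate repeats cost almost nothing. The self-contained toy model below mirrors that move-to-front update; all names and values are illustrative.

    #include <stdio.h>

    int main(void)
    {
        int table[8]  = { 0 };          /* leaf values; 5..7 act as the "last" slots  */
        int last[3]   = { 5, 6, 7 };    /* slot indices recorded via the escape codes */
        int decoded[] = { 42, 42, 17, 42 };

        for (int i = 0; i < 4; i++) {
            int v = decoded[i];
            if (v != table[last[0]]) {              /* only shift on a new value */
                table[last[2]] = table[last[1]];
                table[last[1]] = table[last[0]];
                table[last[0]] = v;
            }
            printf("recent: %d %d %d\n",
                   table[last[0]], table[last[1]], table[last[2]]);
        }
        return 0;
    }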
418 int blocks, blk, bw, bh;
423 if (avpkt->size <= 769)
430 pal = (uint32_t*)smk->pic->data[1];
432 flags = bytestream2_get_byteu(&gb2);
440 for(i = 0; i < 256; i++)
441 *pal++ = 0xFFU << 24 | bytestream2_get_be24u(&gb2);
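Each palette entry is read as a big-endian 24-bit RGB triple and stored as an opaque ARGB word in the PAL8 frame's second plane. A self-contained illustration of that packing follows; the byte source is hard-coded rather than read through bytestream2.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint8_t rgb[3] = { 0x12, 0x34, 0x56 };                   /* R, G, B */
        uint32_t be24 = (uint32_t)rgb[0] << 16 | rgb[1] << 8 | rgb[2];
        uint32_t pal  = 0xFFU << 24 | be24;                            /* force alpha opaque */
        printf("0x%08X\n", (unsigned)pal);                             /* prints 0xFF123456 */
        return 0;
    }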
451 bw = avctx->width >> 2;
455 while(blk < blocks) {
465 while(run-- && blk < blocks){
473 for(i = 0; i < 4; i++) {
474 if(map & 1) out[0] = hi; else out[0] = lo;
475 if(map & 2) out[1] = hi; else out[1] = lo;
476 if(map & 4) out[2] = hi; else out[2] = lo;
477 if(map & 8) out[3] = hi; else out[3] = lo;
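In a two-colour (MONO) block every pixel of the 4x4 block is either the 'hi' or the 'lo' colour; the decoded map supplies one 4-bit mask per row, consumed from the least significant bits as above. A standalone sketch of expanding one colour pair and one 16-bit map into a 4x4 block; the values are made up.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t block[4][4];
        int clr = 0xCC33;               /* one decoded code: hi = 0xCC, lo = 0x33 */
        int map = 0x8421;               /* 4 bits per row, LSB = leftmost pixel   */
        uint8_t hi = clr >> 8, lo = clr & 0xFF;

        for (int y = 0; y < 4; y++) {
            for (int x = 0; x < 4; x++)
                block[y][x] = (map & (1 << x)) ? hi : lo;
            map >>= 4;                  /* advance to the next row's mask */
        }
        for (int y = 0; y < 4; y++)
            printf("%02X %02X %02X %02X\n",
                   block[y][0], block[y][1], block[y][2], block[y][3]);
        return 0;
    }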
492 while(run-- && blk < blocks){
496 for(i = 0; i < 4; i++) {
506 out[0] = out[1] = pix & 0xFF;
507 out[2] = out[3] = pix >> 8;
509 out[0] = out[1] = pix & 0xFF;
510 out[2] = out[3] = pix >> 8;
513 out[0] = out[1] = pix & 0xFF;
514 out[2] = out[3] = pix >> 8;
516 out[0] = out[1] = pix & 0xFF;
517 out[2] = out[3] = pix >> 8;
520 for(i = 0; i < 2; i++) {
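The out[0] = out[1] / out[2] = out[3] pairs above belong to the Smacker v4 "double" FULL-block mode: one decoded colour-pair code is blown up to a 2x2 patch, low byte on the left, high byte on the right. A self-contained sketch of that expansion, with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t patch[2][4];
        int pix = 0xBEEF;                   /* one decoded colour-pair code */

        for (int y = 0; y < 2; y++) {       /* the pair covers two rows */
            patch[y][0] = patch[y][1] = pix & 0xFF;   /* left colour, doubled  */
            patch[y][2] = patch[y][3] = pix >> 8;     /* right colour, doubled */
        }
        printf("%02X %02X %02X %02X\n",
               patch[0][0], patch[0][1], patch[0][2], patch[0][3]);
        return 0;
    }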
537 while(run-- && blk < blocks)
542 while(run-- && blk < blocks){
545 col = mode * 0x01010101U;
546 for(i = 0; i < 4; i++) {
547 *((uint32_t*)out) = col;
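For a FILL block the whole 4x4 block is one colour: multiplying the colour byte by 0x01010101U replicates it into all four lanes of a 32-bit word, so each row can be stored with a single write. The sketch below shows the same trick, using memcpy instead of the pointer cast to keep it strictly defined C.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint8_t  row[4];
        unsigned mode = 0x7F;                  /* fill colour taken from the block code */
        uint32_t col  = mode * 0x01010101U;    /* replicate the byte into every lane    */

        memcpy(row, &col, 4);                  /* one store covers a 4-pixel row */
        printf("%02X %02X %02X %02X\n", row[0], row[1], row[2], row[3]);
        return 0;
    }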
628 int *got_frame_ptr, AVPacket *avpkt)
632 int buf_size = avpkt->size;
635 VLC vlc[4] = { { 0 } };
642 int pred[2] = {0, 0};
651 if (unp_size > (1U<<24)) {
666 if (stereo ^ (avctx->channels != 1)) {
679 "The buffer does not contain an integer number of samples\n");
685 samples8 = frame->data[0];
688 for(i = 0; i < (1 << (bits + stereo)); i++) {
705 if(h[i].current > 1) {
707 h[i].lengths, sizeof(int), sizeof(int),
718 for(i = stereo; i >= 0; i--)
720 for(i = 0; i <= stereo; i++)
722 for(; i < unp_size / 2; i++) {
734 val = h[2].values[res];
743 val |= h[3].values[res] << 8;
755 val = h[0].values[res];
764 val |= h[1].values[res] << 8;
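In the 16-bit audio path each sample is coded as a delta split over two byte trees: one leaf gives the low byte, the other the high byte, and the reassembled value is sign-extended and added to the running predictor. A self-contained model of that step; sign_extend16() stands in for FFmpeg's sign_extend().

    #include <stdint.h>
    #include <stdio.h>

    /* Treat the low 16 bits of v as a signed quantity. */
    static int sign_extend16(int v) { return (int16_t)v; }

    int main(void)
    {
        int pred  = 1000;                  /* running predictor for one channel    */
        int lo    = 0xF4, hi = 0xFF;       /* leaves from the low/high byte trees  */
        int delta = lo | hi << 8;          /* reassemble the 16-bit delta (0xFFF4) */

        pred += sign_extend16(delta);      /* 0xFFF4 is -12, so the predictor drops */
        printf("%d\n", pred);              /* prints 988 */
        return 0;
    }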
770 for(i = stereo; i >= 0; i--)
772 for(i = 0; i <= stereo; i++)
773 *samples8++ = pred[i];
774 for(; i < unp_size; i++) {
787 *samples8++ = pred[1];
798 *samples8++ = pred[0];
807 for(i = 0; i < 4; i++) {
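Line 807 opens the cleanup loop that runs before the audio decoder returns: each of the four byte trees gets its VLC and its temporary HuffContext arrays released. A hedged reconstruction of that loop body (the field names follow the h[i].bits/lengths/values usage visible earlier in the listing; the real code may free in a slightly different order):

    for (i = 0; i < 4; i++) {
        if (vlc[i].table)
            ff_free_vlc(&vlc[i]);      /* VLC built for tree i, if any */
        av_free(h[i].bits);            /* temporary code words         */
        av_free(h[i].lengths);         /* temporary code lengths       */
        av_free(h[i].values);          /* decoded leaf values          */
    }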
static av_cold int init(AVCodecContext *avctx)
static int get_bits_left(GetBitContext *gb)
uint64_t channel_layout
Audio channel layout.
Context used for code reconstruction.
#define MKTAG(a, b, c, d)
#define AV_CH_LAYOUT_MONO
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static const int block_runs[64]
This structure describes decoded (raw) audio or video data.
#define init_vlc(vlc, nb_bits, nb_codes, bits, bits_wrap, bits_size, codes, codes_wrap, codes_size, flags)
static const uint16_t table[]
void * av_mallocz_array(size_t nmemb, size_t size)
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
Identical in function to av_frame_make_writable(), except it uses ff_get_buffer() to allocate the buf...
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
#define SMKTREE_DECODE_BIG_MAX_RECURSION
int key_frame
1 -> keyframe, 0 -> not
#define AV_CH_LAYOUT_STEREO
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static av_cold int decode_init(AVCodecContext *avctx)
AVCodec ff_smacker_decoder
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
static av_cold int smka_decode_init(AVCodecContext *avctx)
static int smka_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
Decode Smacker audio data.
void ff_free_vlc(VLC *vlc)
static int smacker_decode_tree(GetBitContext *gb, HuffContext *hc, uint32_t prefix, int length)
Decode local frame tree.
@ AV_PICTURE_TYPE_I
Intra.
static unsigned int get_bits1(GetBitContext *s)
AVCodec ff_smackaud_decoder
#define SMKTREE_DECODE_MAX_RECURSION
static void error(const char *err)
static int smacker_decode_header_tree(SmackVContext *smk, GetBitContext *gb, int **recodes, int *last, int size)
Store large tree as FFmpeg's vlc codes.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
enum AVPictureType pict_type
Picture type of the frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
static av_cold int decode_end(AVCodecContext *avctx)
enum AVSampleFormat sample_fmt
audio sample format
static void skip_bits1(GetBitContext *s)
#define AV_LOG_INFO
Standard information.
int channels
number of audio channels
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
#define i(width, name, range_min, range_max)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
@ AV_SAMPLE_FMT_S16
signed 16 bits
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
static const float pred[4]
static av_always_inline void last_reset(int *recode, int *last)
main external API structure.
int palette_has_changed
Tell user application that palette has changed from previous frame.
static av_const int sign_extend(int val, unsigned bits)
@ AV_PICTURE_TYPE_P
Predicted.
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
const VDPAUPixFmtMap * map
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
This structure stores compressed data.
static int decode_header_trees(SmackVContext *smk)
int width
picture width / height.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
#define flags(name, subs,...)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int smacker_decode_bigtree(GetBitContext *gb, HuffContext *hc, DBCtx *ctx, int length)
Decode header tree.
static av_always_inline int smk_get_code(GetBitContext *gb, int *recode, int *last)