FFmpeg
roqvideoenc.c
1 /*
2  * RoQ Video Encoder.
3  *
4  * Copyright (C) 2007 Vitor Sessak <vitor1001@gmail.com>
5  * Copyright (C) 2004-2007 Eric Lasota
6  * Based on RoQ specs (C) 2001 Tim Ferguson
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * id RoQ encoder by Vitor. Based on the Switchblade3 library and the
28  * Switchblade3 FFmpeg glue by Eric Lasota.
29  */
30 
31 /*
32  * COSTS:
33  * Level 1:
34  * SKIP - 2 bits
35  * MOTION - 2 + 8 bits
36  * CODEBOOK - 2 + 8 bits
37  * SUBDIVIDE - 2 + combined subcel cost
38  *
39  * Level 2:
40  * SKIP - 2 bits
41  * MOTION - 2 + 8 bits
42  * CODEBOOK - 2 + 8 bits
43  * SUBDIVIDE - 2 + 4*8 bits
44  *
45  * Maximum cost: 138 bits per cel
46  *
47  * Proper evaluation requires LCD fraction comparison, which requires
48  * Squared Error (SE) loss * savings increase
49  *
50  * Maximum savings increase: 136 bits
51  * Maximum SE loss without overflow: 31580641
52  * Components in 8x8 supercel: 192
53  * Maximum SE precision per component: 164482
54  * >65025, so no truncation is needed (phew)
55  */
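/*
 * In other words: a fully subdivided cel costs 2 (SUBDIVIDE) plus four
 * level-2 subcels of at most 2 + 4*8 = 34 bits each, i.e. 2 + 4*34 = 138
 * bits, while the cheapest cel (a single SKIP) costs 2 bits, hence the
 * maximum savings increase of 138 - 2 = 136 bits. An 8x8 supercel holds
 * 8*8*3 = 192 Y/U/V components, so the per-component SE budget is
 * 31580641 / 192 = 164482, comfortably above the worst single-component
 * error of 255^2 = 65025.
 */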
56 
57 #include <string.h>
58 
59 #include "libavutil/attributes.h"
60 #include "libavutil/lfg.h"
61 #include "libavutil/mem.h"
62 #include "libavutil/opt.h"
63 #include "roqvideo.h"
64 #include "bytestream.h"
65 #include "codec_internal.h"
66 #include "elbg.h"
67 #include "encode.h"
68 #include "mathops.h"
69 
70 #define CHROMA_BIAS 1
71 
72 /**
73  * Maximum number of generated 4x4 codebooks. Only 255 of them are used when
74  * quake3_compat is set, to work around a Quake 3 bug.
75  */
76 #define MAX_CBS_4x4 256
77 
78 #define MAX_CBS_2x2 256 ///< Maximum number of 2x2 codebooks.
79 
80 /* The cast is useful when multiplying it by INT_MAX */
81 #define ROQ_LAMBDA_SCALE ((uint64_t) FF_LAMBDA_SCALE)
82 
83 typedef struct RoqCodebooks {
84  int numCB4;
85  int numCB2;
86  int usedCB2[MAX_CBS_2x2];
87  int usedCB4[MAX_CBS_4x4];
88  uint8_t unpacked_cb2[MAX_CBS_2x2*2*2*3];
89  uint8_t unpacked_cb4[MAX_CBS_4x4*4*4*3];
90  uint8_t unpacked_cb4_enlarged[MAX_CBS_4x4*8*8*3];
91 } RoqCodebooks;
92 
93 /**
94  * Temporary vars
95  */
96 typedef struct RoqTempData
97 {
98  int f2i4[MAX_CBS_4x4];
99  int i2f4[MAX_CBS_4x4];
100  int f2i2[MAX_CBS_2x2];
101  int i2f2[MAX_CBS_2x2];
102 
103  int mainChunkSize;
104 
105  int numCB4;
106  int numCB2;
107 
108  RoqCodebooks codebooks;
109 
110  int used_option[4];
111 } RoqTempData;
112 
113 typedef struct SubcelEvaluation {
114  int eval_dist[4];
115  int best_bit_use;
116  int best_coding;
117 
118  int subCels[4];
119  motion_vect motion;
120  int cbEntry;
121 } SubcelEvaluation;
122 
123 typedef struct CelEvaluation {
124  int eval_dist[4];
125  int best_coding;
126 
127  SubcelEvaluation subCels[4];
128 
129  motion_vect motion;
130  int cbEntry;
131 
132  int sourceX, sourceY;
133 } CelEvaluation;
134 
135 typedef struct RoqEncContext {
136  RoqContext common;
137  struct ELBGContext *elbg;
138  AVLFG randctx;
139  uint64_t lambda;
140 
141  motion_vect *this_motion4;
142  motion_vect *last_motion4;
143 
144  motion_vect *this_motion8;
145  motion_vect *last_motion8;
146 
147  unsigned int framesSinceKeyframe;
148 
149  const AVFrame *frame_to_enc;
150  uint8_t *out_buf;
151  RoqTempData tmp_data;
152  roq_cell results4[4*MAX_CBS_4x4];
153  int tmp_codebook_buf[FFMAX(24 * MAX_CBS_4x4, 6 * MAX_CBS_2x2)];
154 
155  CelEvaluation *cel_evals;
156  int *closest_cb;
157  int *points; // Allocated together with closest_cb
158 
159  int first_frame;
160  int quake3_compat; // Quake 3 compatibility option
161 } RoqEncContext;
162 
163 /* Macroblock support functions */
164 static void unpack_roq_cell(roq_cell *cell, uint8_t u[4*3])
165 {
166  memcpy(u , cell->y, 4);
167  memset(u+4, cell->u, 4);
168  memset(u+8, cell->v, 4);
169 }
170 
171 static void unpack_roq_qcell(uint8_t cb2[], roq_qcell *qcell, uint8_t u[4*4*3])
172 {
173  int i,cp;
174  static const int offsets[4] = {0, 2, 8, 10};
175 
176  for (cp=0; cp<3; cp++)
177  for (i=0; i<4; i++) {
178  u[4*4*cp + offsets[i] ] = cb2[qcell->idx[i]*2*2*3 + 4*cp ];
179  u[4*4*cp + offsets[i]+1] = cb2[qcell->idx[i]*2*2*3 + 4*cp+1];
180  u[4*4*cp + offsets[i]+4] = cb2[qcell->idx[i]*2*2*3 + 4*cp+2];
181  u[4*4*cp + offsets[i]+5] = cb2[qcell->idx[i]*2*2*3 + 4*cp+3];
182  }
183 }
184 
185 
186 static void enlarge_roq_mb4(uint8_t base[3*16], uint8_t u[3*64])
187 {
188  int x,y,cp;
189 
190  for(cp=0; cp<3; cp++)
191  for(y=0; y<8; y++)
192  for(x=0; x<8; x++)
193  *u++ = base[(y/2)*4 + (x/2) + 16*cp];
194 }
195 
196 static inline int square(int x)
197 {
198  return x*x;
199 }
200 
201 static inline int eval_sse(const uint8_t *a, const uint8_t *b, int count)
202 {
203  int diff=0;
204 
205  while(count--)
206  diff += square(*b++ - *a++);
207 
208  return diff;
209 }
210 
211 // FIXME Could use DSPContext.sse, but it is not so speed critical (used
212 // just for motion estimation).
213 static int block_sse(uint8_t * const *buf1, uint8_t * const *buf2, int x1, int y1,
214  int x2, int y2, const int *stride1, const int *stride2, int size)
215 {
216  int i, k;
217  int sse=0;
218 
219  for (k=0; k<3; k++) {
220  int bias = (k ? CHROMA_BIAS : 4);
221  for (i=0; i<size; i++)
222  sse += bias*eval_sse(buf1[k] + (y1+i)*stride1[k] + x1,
223  buf2[k] + (y2+i)*stride2[k] + x2, size);
224  }
225 
226  return sse;
227 }
228 
229 static int eval_motion_dist(RoqEncContext *enc, int x, int y, motion_vect vect,
230  int size)
231 {
232  RoqContext *const roq = &enc->common;
233  int mx=vect.d[0];
234  int my=vect.d[1];
235 
236  if (mx < -7 || mx > 7)
237  return INT_MAX;
238 
239  if (my < -7 || my > 7)
240  return INT_MAX;
241 
242  mx += x;
243  my += y;
244 
245  if ((unsigned) mx > roq->width-size || (unsigned) my > roq->height-size)
246  return INT_MAX;
247 
248  return block_sse(enc->frame_to_enc->data, roq->last_frame->data, x, y,
249  mx, my,
250  enc->frame_to_enc->linesize, roq->last_frame->linesize,
251  size);
252 }
253 
254 /**
255  * @return distortion between two macroblocks
256  */
257 static inline int squared_diff_macroblock(uint8_t a[], uint8_t b[], int size)
258 {
259  int cp, sdiff=0;
260 
261  for(cp=0;cp<3;cp++) {
262  int bias = (cp ? CHROMA_BIAS : 4);
263  sdiff += bias*eval_sse(a, b, size*size);
264  a += size*size;
265  b += size*size;
266  }
267 
268  return sdiff;
269 }
270 
271 /**
272  * Initialize cel evaluators and set their source coordinates
273  */
274 static int create_cel_evals(RoqEncContext *enc)
275 {
276  RoqContext *const roq = &enc->common;
277 
278  enc->cel_evals = av_malloc_array(roq->width * roq->height / 64, sizeof(CelEvaluation));
279  if (!enc->cel_evals)
280  return AVERROR(ENOMEM);
281 
282  /* Map to the ROQ quadtree order */
283  for (int y = 0, n = 0; y < roq->height; y += 16)
284  for (int x = 0; x < roq->width; x += 16)
285  for(int i = 0; i < 4; i++) {
286  enc->cel_evals[n ].sourceX = x + (i&1)*8;
287  enc->cel_evals[n++].sourceY = y + (i&2)*4;
288  }
289 
290  return 0;
291 }
292 
293 /**
294  * Get macroblocks from parts of the image
295  */
296 static void get_frame_mb(const AVFrame *frame, int x, int y, uint8_t mb[], int dim)
297 {
298  int i, j, cp;
299 
300  for (cp=0; cp<3; cp++) {
301  int stride = frame->linesize[cp];
302  for (i=0; i<dim; i++)
303  for (j=0; j<dim; j++)
304  *mb++ = frame->data[cp][(y+i)*stride + x + j];
305  }
306 }
307 
308 /**
309  * Find the codebook with the lowest distortion from an image
310  */
311 static int index_mb(uint8_t cluster[], uint8_t cb[], int numCB,
312  int *outIndex, int dim)
313 {
314  int i, lDiff = INT_MAX, pick=0;
315 
316  /* Diff against the others */
317  for (i=0; i<numCB; i++) {
318  int diff = squared_diff_macroblock(cluster, cb + i*dim*dim*3, dim);
319  if (diff < lDiff) {
320  lDiff = diff;
321  pick = i;
322  }
323  }
324 
325  *outIndex = pick;
326  return lDiff;
327 }
328 
329 #define EVAL_MOTION(MOTION) \
330  do { \
331  diff = eval_motion_dist(enc, j, i, MOTION, blocksize); \
332  \
333  if (diff < lowestdiff) { \
334  lowestdiff = diff; \
335  bestpick = MOTION; \
336  } \
337  } while(0)
338 
339 static void motion_search(RoqEncContext *enc, int blocksize)
340 {
341  static const motion_vect offsets[8] = {
342  {{ 0,-1}},
343  {{ 0, 1}},
344  {{-1, 0}},
345  {{ 1, 0}},
346  {{-1, 1}},
347  {{ 1,-1}},
348  {{-1,-1}},
349  {{ 1, 1}},
350  };
351 
352  RoqContext *const roq = &enc->common;
353  int diff, lowestdiff, oldbest;
354  int off[3];
355  motion_vect bestpick = {{0,0}};
356  int i, j, k, offset;
357 
358  motion_vect *last_motion;
359  motion_vect *this_motion;
360  motion_vect vect, vect2;
361  const int max = (roq->width / blocksize) * roq->height / blocksize;
362 
363  if (blocksize == 4) {
364  last_motion = enc->last_motion4;
365  this_motion = enc->this_motion4;
366  } else {
367  last_motion = enc->last_motion8;
368  this_motion = enc->this_motion8;
369  }
370 
371  for (i = 0; i< roq->height; i += blocksize)
372  for (j = 0; j < roq->width; j += blocksize) {
373  lowestdiff = eval_motion_dist(enc, j, i, (motion_vect) {{0,0}},
374  blocksize);
375  bestpick.d[0] = 0;
376  bestpick.d[1] = 0;
377 
378  if (blocksize == 4)
379  EVAL_MOTION(enc->this_motion8[(i/8) * (roq->width/8) + j/8]);
380 
381  offset = (i/blocksize) * roq->width / blocksize + j / blocksize;
382  if (offset < max && offset >= 0)
383  EVAL_MOTION(last_motion[offset]);
384 
385  offset++;
386  if (offset < max && offset >= 0)
387  EVAL_MOTION(last_motion[offset]);
388 
389  offset = (i/blocksize + 1) * roq->width / blocksize + j / blocksize;
390  if (offset < max && offset >= 0)
391  EVAL_MOTION(last_motion[offset]);
392 
393  off[0]= (i/blocksize) * roq->width / blocksize + j/blocksize - 1;
394  off[1]= off[0] - roq->width / blocksize + 1;
395  off[2]= off[1] + 1;
396 
397  if (i) {
398 
399  for(k=0; k<2; k++)
400  vect.d[k]= mid_pred(this_motion[off[0]].d[k],
401  this_motion[off[1]].d[k],
402  this_motion[off[2]].d[k]);
403 
404  EVAL_MOTION(vect);
405  for(k=0; k<3; k++)
406  EVAL_MOTION(this_motion[off[k]]);
407  } else if(j)
408  EVAL_MOTION(this_motion[off[0]]);
409 
410  vect = bestpick;
411 
412  oldbest = -1;
413  while (oldbest != lowestdiff) {
414  oldbest = lowestdiff;
415  for (k=0; k<8; k++) {
416  vect2 = vect;
417  vect2.d[0] += offsets[k].d[0];
418  vect2.d[1] += offsets[k].d[1];
419  EVAL_MOTION(vect2);
420  }
421  vect = bestpick;
422  }
423  offset = (i/blocksize) * roq->width / blocksize + j/blocksize;
424  this_motion[offset] = bestpick;
425  }
426 }
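/*
 * The search above seeds each block with a few cheap predictors (the zero
 * vector, the co-located 8x8 vector when searching 4x4 blocks, the previous
 * frame's vectors at and around this position, and the left/up/up-right
 * median of vectors already decided in this frame) and then greedily refines
 * the best candidate one pixel at a time in the eight compass directions
 * until the weighted SSE stops improving.
 */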
427 
428 /**
429  * Get distortion for all options available to a subcel
430  */
431 static void gather_data_for_subcel(SubcelEvaluation *subcel, int x,
432  int y, RoqEncContext *enc)
433 {
434  RoqContext *const roq = &enc->common;
435  RoqTempData *const tempData = &enc->tmp_data;
436  uint8_t mb4[4*4*3];
437  uint8_t mb2[2*2*3];
438  int cluster_index;
439  int i, best_dist;
440 
441  static const int bitsUsed[4] = {2, 10, 10, 34};
442 
443  if (enc->framesSinceKeyframe >= 1) {
444  subcel->motion = enc->this_motion4[y * roq->width / 16 + x / 4];
445 
446  subcel->eval_dist[RoQ_ID_FCC] =
447  eval_motion_dist(enc, x, y,
448  enc->this_motion4[y * roq->width / 16 + x / 4], 4);
449  } else
450  subcel->eval_dist[RoQ_ID_FCC] = INT_MAX;
451 
452  if (enc->framesSinceKeyframe >= 2)
453  subcel->eval_dist[RoQ_ID_MOT] = block_sse(enc->frame_to_enc->data,
454  roq->current_frame->data, x,
455  y, x, y,
456  enc->frame_to_enc->linesize,
457  roq->current_frame->linesize,
458  4);
459  else
460  subcel->eval_dist[RoQ_ID_MOT] = INT_MAX;
461 
462  cluster_index = y * roq->width / 16 + x / 4;
463 
464  get_frame_mb(enc->frame_to_enc, x, y, mb4, 4);
465 
466  subcel->eval_dist[RoQ_ID_SLD] = index_mb(mb4,
467  tempData->codebooks.unpacked_cb4,
468  tempData->codebooks.numCB4,
469  &subcel->cbEntry, 4);
470 
471  subcel->eval_dist[RoQ_ID_CCC] = 0;
472 
473  for(i=0;i<4;i++) {
474  subcel->subCels[i] = enc->closest_cb[cluster_index*4+i];
475 
476  get_frame_mb(enc->frame_to_enc, x+2*(i&1),
477  y+(i&2), mb2, 2);
478 
479  subcel->eval_dist[RoQ_ID_CCC] +=
480  squared_diff_macroblock(tempData->codebooks.unpacked_cb2 + subcel->subCels[i]*2*2*3, mb2, 2);
481  }
482 
483  best_dist = INT_MAX;
484  for (i=0; i<4; i++)
485  if (ROQ_LAMBDA_SCALE*subcel->eval_dist[i] + enc->lambda*bitsUsed[i] <
486  best_dist) {
487  subcel->best_coding = i;
488  subcel->best_bit_use = bitsUsed[i];
489  best_dist = ROQ_LAMBDA_SCALE*subcel->eval_dist[i] +
490  enc->lambda*bitsUsed[i];
491  }
492 }
493 
494 /**
495  * Get distortion for all options available to a cel
496  */
497 static void gather_data_for_cel(CelEvaluation *cel, RoqEncContext *enc)
498 {
499  RoqContext *const roq = &enc->common;
500  RoqTempData *const tempData = &enc->tmp_data;
501  uint8_t mb8[8*8*3];
502  int index = cel->sourceY * roq->width / 64 + cel->sourceX/8;
503  int i, j, best_dist, divide_bit_use;
504 
505  int bitsUsed[4] = {2, 10, 10, 0};
506 
507  if (enc->framesSinceKeyframe >= 1) {
508  cel->motion = enc->this_motion8[index];
509 
510  cel->eval_dist[RoQ_ID_FCC] =
511  eval_motion_dist(enc, cel->sourceX, cel->sourceY,
512  enc->this_motion8[index], 8);
513  } else
514  cel->eval_dist[RoQ_ID_FCC] = INT_MAX;
515 
516  if (enc->framesSinceKeyframe >= 2)
517  cel->eval_dist[RoQ_ID_MOT] = block_sse(enc->frame_to_enc->data,
518  roq->current_frame->data,
519  cel->sourceX, cel->sourceY,
520  cel->sourceX, cel->sourceY,
521  enc->frame_to_enc->linesize,
522  roq->current_frame->linesize,8);
523  else
524  cel->eval_dist[RoQ_ID_MOT] = INT_MAX;
525 
526  get_frame_mb(enc->frame_to_enc, cel->sourceX, cel->sourceY, mb8, 8);
527 
528  cel->eval_dist[RoQ_ID_SLD] =
529  index_mb(mb8, tempData->codebooks.unpacked_cb4_enlarged,
530  tempData->codebooks.numCB4, &cel->cbEntry, 8);
531 
532  gather_data_for_subcel(cel->subCels + 0, cel->sourceX+0, cel->sourceY+0, enc);
533  gather_data_for_subcel(cel->subCels + 1, cel->sourceX+4, cel->sourceY+0, enc);
534  gather_data_for_subcel(cel->subCels + 2, cel->sourceX+0, cel->sourceY+4, enc);
535  gather_data_for_subcel(cel->subCels + 3, cel->sourceX+4, cel->sourceY+4, enc);
536 
537  cel->eval_dist[RoQ_ID_CCC] = 0;
538  divide_bit_use = 0;
539  for (i=0; i<4; i++) {
540  cel->eval_dist[RoQ_ID_CCC] +=
541  cel->subCels[i].eval_dist[cel->subCels[i].best_coding];
542  divide_bit_use += cel->subCels[i].best_bit_use;
543  }
544 
545  best_dist = INT_MAX;
546  bitsUsed[3] = 2 + divide_bit_use;
547 
548  for (i=0; i<4; i++)
549  if (ROQ_LAMBDA_SCALE*cel->eval_dist[i] + enc->lambda*bitsUsed[i] <
550  best_dist) {
551  cel->best_coding = i;
552  best_dist = ROQ_LAMBDA_SCALE*cel->eval_dist[i] +
553  enc->lambda*bitsUsed[i];
554  }
555 
556  tempData->used_option[cel->best_coding]++;
557  tempData->mainChunkSize += bitsUsed[cel->best_coding];
558 
559  if (cel->best_coding == RoQ_ID_SLD)
560  tempData->codebooks.usedCB4[cel->cbEntry]++;
561 
562  if (cel->best_coding == RoQ_ID_CCC)
563  for (i=0; i<4; i++) {
564  if (cel->subCels[i].best_coding == RoQ_ID_SLD)
565  tempData->codebooks.usedCB4[cel->subCels[i].cbEntry]++;
566  else if (cel->subCels[i].best_coding == RoQ_ID_CCC)
567  for (j=0; j<4; j++)
568  tempData->codebooks.usedCB2[cel->subCels[i].subCels[j]]++;
569  }
570 }
571 
572 static void remap_codebooks(RoqEncContext *enc)
573 {
574  RoqContext *const roq = &enc->common;
575  RoqTempData *const tempData = &enc->tmp_data;
576  int i, j, idx=0;
577 
578  /* Make remaps for the final codebook usage */
579  for (i=0; i<(enc->quake3_compat ? MAX_CBS_4x4-1 : MAX_CBS_4x4); i++) {
580  if (tempData->codebooks.usedCB4[i]) {
581  tempData->i2f4[i] = idx;
582  tempData->f2i4[idx] = i;
583  for (j=0; j<4; j++)
584  tempData->codebooks.usedCB2[roq->cb4x4[i].idx[j]]++;
585  idx++;
586  }
587  }
588 
589  tempData->numCB4 = idx;
590 
591  idx = 0;
592  for (i=0; i<MAX_CBS_2x2; i++) {
593  if (tempData->codebooks.usedCB2[i]) {
594  tempData->i2f2[i] = idx;
595  tempData->f2i2[idx] = i;
596  idx++;
597  }
598  }
599  tempData->numCB2 = idx;
600 
601 }
602 
603 /**
604  * Write codebook chunk
605  */
606 static void write_codebooks(RoqEncContext *enc)
607 {
608  RoqContext *const roq = &enc->common;
609  RoqTempData *const tempData = &enc->tmp_data;
610  int i, j;
611  uint8_t **outp= &enc->out_buf;
612 
613  if (tempData->numCB2) {
614  bytestream_put_le16(outp, RoQ_QUAD_CODEBOOK);
615  bytestream_put_le32(outp, tempData->numCB2*6 + tempData->numCB4*4);
616  bytestream_put_byte(outp, tempData->numCB4);
617  bytestream_put_byte(outp, tempData->numCB2);
618 
619  for (i=0; i<tempData->numCB2; i++) {
620  bytestream_put_buffer(outp, roq->cb2x2[tempData->f2i2[i]].y, 4);
621  bytestream_put_byte(outp, roq->cb2x2[tempData->f2i2[i]].u);
622  bytestream_put_byte(outp, roq->cb2x2[tempData->f2i2[i]].v);
623  }
624 
625  for (i=0; i<tempData->numCB4; i++)
626  for (j=0; j<4; j++)
627  bytestream_put_byte(outp, tempData->i2f2[roq->cb4x4[tempData->f2i4[i]].idx[j]]);
628 
629  }
630 }
631 
632 static inline uint8_t motion_arg(motion_vect mot)
633 {
634  uint8_t ax = 8 - ((uint8_t) mot.d[0]);
635  uint8_t ay = 8 - ((uint8_t) mot.d[1]);
636  return ((ax&15)<<4) | (ay&15);
637 }
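/*
 * The argument byte holds the negated vector, one nibble per axis: a zero
 * vector encodes as (8<<4)|8 = 0x88 and d = {7,-7} as (1<<4)|15 = 0x1f, so a
 * decoder recovers each component as 8 minus the nibble value.
 */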
638 
639 typedef struct CodingSpool {
640  int typeSpool;
641  int typeSpoolLength;
642  uint8_t argumentSpool[64];
643  uint8_t *args;
644  uint8_t **pout;
645 } CodingSpool;
646 
647 /* NOTE: Typecodes must be spooled AFTER arguments!! */
648 static void write_typecode(CodingSpool *s, uint8_t type)
649 {
650  s->typeSpool |= (type & 3) << (14 - s->typeSpoolLength);
651  s->typeSpoolLength += 2;
652  if (s->typeSpoolLength == 16) {
653  bytestream_put_le16(s->pout, s->typeSpool);
654  bytestream_put_buffer(s->pout, s->argumentSpool,
655  s->args - s->argumentSpool);
656  s->typeSpoolLength = 0;
657  s->typeSpool = 0;
658  s->args = s->argumentSpool;
659  }
660 }
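/*
 * A VQ chunk body is therefore a sequence of little-endian 16-bit words, each
 * holding eight 2-bit typecodes (most significant pair first) and each
 * followed by the argument bytes spooled for those eight codes.
 * write_typecode() flushes one such group whenever 16 bits of typecodes have
 * accumulated; the zero-padding loop at the end of
 * reconstruct_and_encode_image() flushes any partial group.
 */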
661 
662 static void reconstruct_and_encode_image(RoqEncContext *enc,
663  int w, int h, int numBlocks)
664 {
665  RoqContext *const roq = &enc->common;
666  RoqTempData *const tempData = &enc->tmp_data;
667  int i, j, k;
668  int x, y;
669  int subX, subY;
670 
671  roq_qcell *qcell;
672  CelEvaluation *eval;
673 
674  CodingSpool spool;
675 
676  spool.typeSpool=0;
677  spool.typeSpoolLength=0;
678  spool.args = spool.argumentSpool;
679  spool.pout = &enc->out_buf;
680 
681  if (tempData->used_option[RoQ_ID_CCC]%2)
682  tempData->mainChunkSize+=8; //FIXME
683 
684  /* Write the video chunk header */
685  bytestream_put_le16(&enc->out_buf, RoQ_QUAD_VQ);
686  bytestream_put_le32(&enc->out_buf, tempData->mainChunkSize/8);
687  bytestream_put_byte(&enc->out_buf, 0x0);
688  bytestream_put_byte(&enc->out_buf, 0x0);
689 
690  for (i=0; i<numBlocks; i++) {
691  eval = enc->cel_evals + i;
692 
693  x = eval->sourceX;
694  y = eval->sourceY;
695 
696  switch (eval->best_coding) {
697  case RoQ_ID_MOT:
698  write_typecode(&spool, RoQ_ID_MOT);
699  break;
700 
701  case RoQ_ID_FCC:
702  bytestream_put_byte(&spool.args, motion_arg(eval->motion));
703 
704  write_typecode(&spool, RoQ_ID_FCC);
705  ff_apply_motion_8x8(roq, x, y,
706  eval->motion.d[0], eval->motion.d[1]);
707  break;
708 
709  case RoQ_ID_SLD:
710  bytestream_put_byte(&spool.args, tempData->i2f4[eval->cbEntry]);
711  write_typecode(&spool, RoQ_ID_SLD);
712 
713  qcell = roq->cb4x4 + eval->cbEntry;
714  ff_apply_vector_4x4(roq, x , y , roq->cb2x2 + qcell->idx[0]);
715  ff_apply_vector_4x4(roq, x+4, y , roq->cb2x2 + qcell->idx[1]);
716  ff_apply_vector_4x4(roq, x , y+4, roq->cb2x2 + qcell->idx[2]);
717  ff_apply_vector_4x4(roq, x+4, y+4, roq->cb2x2 + qcell->idx[3]);
718  break;
719 
720  case RoQ_ID_CCC:
721  write_typecode(&spool, RoQ_ID_CCC);
722 
723  for (j=0; j<4; j++) {
724  subX = x + 4*(j&1);
725  subY = y + 2*(j&2);
726 
727  switch(eval->subCels[j].best_coding) {
728  case RoQ_ID_MOT:
729  break;
730 
731  case RoQ_ID_FCC:
732  bytestream_put_byte(&spool.args,
733  motion_arg(eval->subCels[j].motion));
734 
735  ff_apply_motion_4x4(roq, subX, subY,
736  eval->subCels[j].motion.d[0],
737  eval->subCels[j].motion.d[1]);
738  break;
739 
740  case RoQ_ID_SLD:
741  bytestream_put_byte(&spool.args,
742  tempData->i2f4[eval->subCels[j].cbEntry]);
743 
744  qcell = roq->cb4x4 + eval->subCels[j].cbEntry;
745 
746  ff_apply_vector_2x2(roq, subX , subY ,
747  roq->cb2x2 + qcell->idx[0]);
748  ff_apply_vector_2x2(roq, subX+2, subY ,
749  roq->cb2x2 + qcell->idx[1]);
750  ff_apply_vector_2x2(roq, subX , subY+2,
751  roq->cb2x2 + qcell->idx[2]);
752  ff_apply_vector_2x2(roq, subX+2, subY+2,
753  roq->cb2x2 + qcell->idx[3]);
754  break;
755 
756  case RoQ_ID_CCC:
757  for (k=0; k<4; k++) {
758  int cb_idx = eval->subCels[j].subCels[k];
759  bytestream_put_byte(&spool.args,
760  tempData->i2f2[cb_idx]);
761 
762  ff_apply_vector_2x2(roq, subX + 2*(k&1), subY + (k&2),
763  roq->cb2x2 + cb_idx);
764  }
765  break;
766  }
767  write_typecode(&spool, eval->subCels[j].best_coding);
768  }
769  break;
770  }
771  }
772 
773  /* Flush the remainder of the argument/type spool */
774  while (spool.typeSpoolLength)
775  write_typecode(&spool, 0x0);
776 }
777 
778 
779 /**
780  * Create a single YUV cell from a 2x2 section of the image
781  */
782 static inline void frame_block_to_cell(int *block, uint8_t * const *data,
783  int top, int left, const int *stride)
784 {
785  int i, j, u=0, v=0;
786 
787  for (i=0; i<2; i++)
788  for (j=0; j<2; j++) {
789  int x = (top+i)*stride[0] + left + j;
790  *block++ = data[0][x];
791  x = (top+i)*stride[1] + left + j;
792  u += data[1][x];
793  v += data[2][x];
794  }
795 
796  *block++ = (u + 2) / 4 * CHROMA_BIAS;
797  *block++ = (v + 2) / 4 * CHROMA_BIAS;
798 }
799 
800 /**
801  * Create YUV clusters for the entire image
802  */
803 static void create_clusters(const AVFrame *frame, int w, int h, int *points)
804 {
805  int i, j, k, l;
806 
807  for (i=0; i<h; i+=4)
808  for (j=0; j<w; j+=4) {
809  for (k=0; k < 2; k++)
810  for (l=0; l < 2; l++)
811  frame_block_to_cell(points + (l + 2*k)*6, frame->data,
812  i+2*k, j+2*l, frame->linesize);
813  points += 24;
814  }
815 }
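/*
 * The resulting layout is one 6-int cell (four luma samples plus the
 * averaged, CHROMA_BIAS-scaled U and V) per 2x2 block, ordered top-left,
 * top-right, bottom-left, bottom-right inside each 4x4 block, i.e. 24 ints
 * per 4x4 block. This is exactly the 6- and 24-dimensional vector layout
 * handed to avpriv_elbg_do() by generate_codebook().
 */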
816 
817 static int generate_codebook(RoqEncContext *enc,
818  int *points, int inputCount, roq_cell *results,
819  int size, int cbsize)
820 {
821  int i, j, k, ret = 0;
822  int c_size = size*size/4;
823  int *buf;
824  int *codebook = enc->tmp_codebook_buf;
825  int *closest_cb = enc->closest_cb;
826 
827  ret = avpriv_elbg_do(&enc->elbg, points, 6 * c_size, inputCount, codebook,
828  cbsize, 1, closest_cb, &enc->randctx, 0);
829  if (ret < 0)
830  return ret;
831 
832  buf = codebook;
833  for (i=0; i<cbsize; i++)
834  for (k=0; k<c_size; k++) {
835  for(j=0; j<4; j++)
836  results->y[j] = *buf++;
837 
838  results->u = (*buf++ + CHROMA_BIAS/2)/CHROMA_BIAS;
839  results->v = (*buf++ + CHROMA_BIAS/2)/CHROMA_BIAS;
840  results++;
841  }
842  return 0;
843 }
844 
845 static int generate_new_codebooks(RoqEncContext *enc)
846 {
847  int i, j, ret = 0;
848  RoqCodebooks *const codebooks = &enc->tmp_data.codebooks;
849  RoqContext *const roq = &enc->common;
850  int max = roq->width * roq->height / 16;
851  uint8_t mb2[3*4];
852  int *points = enc->points;
853 
854  /* Subsample YUV data */
855  create_clusters(enc->frame_to_enc, roq->width, roq->height, points);
856 
857  codebooks->numCB4 = (enc->quake3_compat ? MAX_CBS_4x4-1 : MAX_CBS_4x4);
858 
859  /* Create 4x4 codebooks */
860  if ((ret = generate_codebook(enc, points, max, enc->results4,
861  4, codebooks->numCB4)) < 0)
862  return ret;
863 
864  /* Create 2x2 codebooks */
865  if ((ret = generate_codebook(enc, points, max * 4,
866  roq->cb2x2, 2, MAX_CBS_2x2)) < 0)
867  return ret;
868 
869  codebooks->numCB2 = MAX_CBS_2x2;
870 
871  /* Unpack 2x2 codebook clusters */
872  for (i=0; i<codebooks->numCB2; i++)
873  unpack_roq_cell(roq->cb2x2 + i, codebooks->unpacked_cb2 + i*2*2*3);
874 
875  /* Index all 4x4 entries to the 2x2 entries, unpack, and enlarge */
876  for (i=0; i<codebooks->numCB4; i++) {
877  for (j=0; j<4; j++) {
878  unpack_roq_cell(&enc->results4[4*i + j], mb2);
879  index_mb(mb2, codebooks->unpacked_cb2, codebooks->numCB2,
880  &roq->cb4x4[i].idx[j], 2);
881  }
882  unpack_roq_qcell(codebooks->unpacked_cb2, roq->cb4x4 + i,
883  codebooks->unpacked_cb4 + i*4*4*3);
884  enlarge_roq_mb4(codebooks->unpacked_cb4 + i*4*4*3,
885  codebooks->unpacked_cb4_enlarged + i*8*8*3);
886  }
887 
888  return 0;
889 }
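/*
 * ELBG is thus run twice over the same cluster buffer: once with
 * 24-dimensional vectors (one per 4x4 block) to build the 4x4 codebook,
 * stored as four 2x2 cells per entry in results4, and once with
 * 6-dimensional vectors (one per 2x2 block) for the 2x2 codebook. Each 4x4
 * entry is then re-expressed as four indices into the 2x2 codebook, which is
 * the form the RoQ codebook chunk transmits.
 */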
890 
891 static int roq_encode_video(RoqEncContext *enc)
892 {
893  RoqTempData *const tempData = &enc->tmp_data;
894  RoqContext *const roq = &enc->common;
895  int ret;
896 
897  memset(tempData, 0, sizeof(*tempData));
898 
899  ret = generate_new_codebooks(enc);
900  if (ret < 0)
901  return ret;
902 
903  if (enc->framesSinceKeyframe >= 1) {
904  motion_search(enc, 8);
905  motion_search(enc, 4);
906  }
907 
908  retry_encode:
909  for (int i = 0; i < roq->width * roq->height / 64; i++)
910  gather_data_for_cel(enc->cel_evals + i, enc);
911 
912  /* Quake 3 can't handle chunks bigger than 65535 bytes */
913  if (tempData->mainChunkSize/8 > 65535 && enc->quake3_compat) {
914  if (enc->lambda > 100000) {
915  av_log(roq->logctx, AV_LOG_ERROR, "Cannot encode video in Quake compatible form\n");
916  return AVERROR(EINVAL);
917  }
918  av_log(roq->logctx, AV_LOG_ERROR,
919  "Warning, generated a frame too big for Quake (%d > 65535), "
920  "now switching to a bigger qscale value.\n",
921  tempData->mainChunkSize/8);
922  enc->lambda *= 1.5;
923  tempData->mainChunkSize = 0;
924  memset(tempData->used_option, 0, sizeof(tempData->used_option));
925  memset(tempData->codebooks.usedCB4, 0,
926  sizeof(tempData->codebooks.usedCB4));
927  memset(tempData->codebooks.usedCB2, 0,
928  sizeof(tempData->codebooks.usedCB2));
929 
930  goto retry_encode;
931  }
932 
933  remap_codebooks(enc);
934 
935  write_codebooks(enc);
936 
937  reconstruct_and_encode_image(enc, roq->width, roq->height,
938  roq->width * roq->height / 64);
939 
940  /* Rotate frame history */
941  FFSWAP(AVFrame *, roq->current_frame, roq->last_frame);
942  FFSWAP(motion_vect *, enc->last_motion4, enc->this_motion4);
943  FFSWAP(motion_vect *, enc->last_motion8, enc->this_motion8);
944 
945  enc->framesSinceKeyframe++;
946 
947  return 0;
948 }
949 
950 static av_cold int roq_encode_end(AVCodecContext *avctx)
951 {
952  RoqEncContext *const enc = avctx->priv_data;
953 
954  av_frame_free(&enc->common.current_frame);
955  av_frame_free(&enc->common.last_frame);
956 
957  av_freep(&enc->cel_evals);
958  av_freep(&enc->closest_cb);
959  av_freep(&enc->this_motion4);
960  av_freep(&enc->last_motion4);
961  av_freep(&enc->this_motion8);
962  av_freep(&enc->last_motion8);
963 
964  avpriv_elbg_free(&enc->elbg);
965 
966  return 0;
967 }
968 
969 static av_cold int roq_encode_init(AVCodecContext *avctx)
970 {
971  RoqEncContext *const enc = avctx->priv_data;
972  RoqContext *const roq = &enc->common;
973 
974  av_lfg_init(&enc->randctx, 1);
975 
976  roq->logctx = avctx;
977 
978  enc->framesSinceKeyframe = 0;
979  if ((avctx->width & 0xf) || (avctx->height & 0xf)) {
980  av_log(avctx, AV_LOG_ERROR, "Dimensions must be divisible by 16\n");
981  return AVERROR(EINVAL);
982  }
983 
984  if (avctx->width > 65535 || avctx->height > 65535) {
985  av_log(avctx, AV_LOG_ERROR, "Dimensions are max %d\n", enc->quake3_compat ? 32768 : 65535);
986  return AVERROR(EINVAL);
987  }
988 
989  if (((avctx->width)&(avctx->width-1))||((avctx->height)&(avctx->height-1)))
990  av_log(avctx, AV_LOG_ERROR, "Warning: dimensions not power of two, this is not supported by quake\n");
991 
992  roq->width = avctx->width;
993  roq->height = avctx->height;
994 
995  enc->framesSinceKeyframe = 0;
996  enc->first_frame = 1;
997 
998  roq->last_frame = av_frame_alloc();
999  roq->current_frame = av_frame_alloc();
1000  if (!roq->last_frame || !roq->current_frame)
1001  return AVERROR(ENOMEM);
1002 
1003  enc->this_motion4 =
1004  av_calloc(roq->width * roq->height / 16, sizeof(*enc->this_motion4));
1005 
1006  enc->last_motion4 =
1007  av_malloc_array (roq->width * roq->height / 16, sizeof(motion_vect));
1008 
1009  enc->this_motion8 =
1010  av_calloc(roq->width * roq->height / 64, sizeof(*enc->this_motion8));
1011 
1012  enc->last_motion8 =
1013  av_malloc_array (roq->width * roq->height / 64, sizeof(motion_vect));
1014 
1015  /* 4x4 codebook needs 6 * 4 * 4 / 4 * width * height / 16 * sizeof(int);
1016  * and so does the points buffer. */
1017  enc->closest_cb =
1018  av_malloc_array(roq->width * roq->height, 3 * sizeof(int));
1019 
1020  if (!enc->this_motion4 || !enc->last_motion4 ||
1021  !enc->this_motion8 || !enc->last_motion8 || !enc->closest_cb)
1022  return AVERROR(ENOMEM);
1023 
1024  enc->points = enc->closest_cb + roq->width * roq->height * 3 / 2;
1025 
1026  return create_cel_evals(enc);
1027 }
1028 
1029 static void roq_write_video_info_chunk(RoqEncContext *enc)
1030 {
1031  /* ROQ info chunk */
1032  bytestream_put_le16(&enc->out_buf, RoQ_INFO);
1033 
1034  /* Size: 8 bytes */
1035  bytestream_put_le32(&enc->out_buf, 8);
1036 
1037  /* Unused argument */
1038  bytestream_put_byte(&enc->out_buf, 0x00);
1039  bytestream_put_byte(&enc->out_buf, 0x00);
1040 
1041  /* Width */
1042  bytestream_put_le16(&enc->out_buf, enc->common.width);
1043 
1044  /* Height */
1045  bytestream_put_le16(&enc->out_buf, enc->common.height);
1046 
1047  /* Unused in Quake 3, mimics the output of the real encoder */
1048  bytestream_put_byte(&enc->out_buf, 0x08);
1049  bytestream_put_byte(&enc->out_buf, 0x00);
1050  bytestream_put_byte(&enc->out_buf, 0x04);
1051  bytestream_put_byte(&enc->out_buf, 0x00);
1052 }
1053 
1054 static int roq_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1055  const AVFrame *frame, int *got_packet)
1056 {
1057  RoqEncContext *const enc = avctx->priv_data;
1058  RoqContext *const roq = &enc->common;
1059  int size, ret;
1060 
1061  enc->frame_to_enc = frame;
1062 
1063  if (frame->quality)
1064  enc->lambda = frame->quality - 1;
1065  else
1066  enc->lambda = 2*ROQ_LAMBDA_SCALE;
1067 
1068  /* 138 bits max per 8x8 block +
1069  * 256 codebooks*(6 bytes 2x2 + 4 bytes 4x4) + 8 bytes frame header */
1070  size = ((roq->width * roq->height / 64) * 138 + 7) / 8 + 256 * (6 + 4) + 8;
1071  if ((ret = ff_alloc_packet(avctx, pkt, size)) < 0)
1072  return ret;
1073  enc->out_buf = pkt->data;
1074 
1075  /* Check for I-frame */
1076  if (enc->framesSinceKeyframe == avctx->gop_size)
1077  enc->framesSinceKeyframe = 0;
1078 
1079  if (enc->first_frame) {
1080  /* Alloc memory for the reconstruction data (we must know the stride
1081  for that) */
1082  if ((ret = ff_encode_alloc_frame(avctx, roq->current_frame)) < 0 ||
1083  (ret = ff_encode_alloc_frame(avctx, roq->last_frame )) < 0)
1084  return ret;
1085 
1086  /* Before the first video frame, write a "video info" chunk */
1087  roq_write_video_info_chunk(enc);
1088 
1089  enc->first_frame = 0;
1090  }
1091 
1092  /* Encode the actual frame */
1093  ret = roq_encode_video(enc);
1094  if (ret < 0)
1095  return ret;
1096 
1097  pkt->size = enc->out_buf - pkt->data;
1098  if (enc->framesSinceKeyframe == 1)
1099  pkt->flags |= AV_PKT_FLAG_KEY;
1100  *got_packet = 1;
1101 
1102  return 0;
1103 }
1104 
1105 #define OFFSET(x) offsetof(RoqEncContext, x)
1106 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
1107 static const AVOption options[] = {
1108  { "quake3_compat", "Whether to respect known limitations in Quake 3 decoder", OFFSET(quake3_compat), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE },
1109  { NULL },
1110 };
1111 
1112 static const AVClass roq_class = {
1113  .class_name = "RoQ",
1114  .item_name = av_default_item_name,
1115  .option = options,
1116  .version = LIBAVUTIL_VERSION_INT,
1117 };
1118 
1120  .p.name = "roqvideo",
1121  CODEC_LONG_NAME("id RoQ video"),
1122  .p.type = AVMEDIA_TYPE_VIDEO,
1123  .p.id = AV_CODEC_ID_ROQ,
1124  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
1125  .priv_data_size = sizeof(RoqEncContext),
1126  .init = roq_encode_init,
1127  FF_CODEC_ENCODE_CB(roq_encode_frame),
1128  .close = roq_encode_end,
1129  .p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUVJ444P,
1130  AV_PIX_FMT_NONE },
1131  .p.priv_class = &roq_class,
1132  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1133 };
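/*
 * For reference, a typical invocation of this encoder through the ffmpeg CLI
 * (the 30 fps rate and the .roq output container are assumptions based on
 * what the Quake 3 engine expects, not something this file enforces):
 *
 *     ffmpeg -i input.mov -c:v roqvideo -pix_fmt yuvj444p -r 30 output.roq
 *
 * quake3_compat is on by default (see the AVOption table above) and keeps
 * chunk sizes and codebook counts within the limits of the original decoder.
 */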