/*
 * Microsoft Screen 4 (aka Microsoft Expression Encoder Screen) decoder
 * Copyright (c) 2012 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Microsoft Screen 4 (aka Microsoft Titanium Screen 2,
 * aka Microsoft Expression Encoder Screen) decoder
 */

#include "libavutil/thread.h"

#include "avcodec.h"
#include "bytestream.h"
#include "get_bits.h"
#include "internal.h"
#include "jpegtables.h"
#include "mss34dsp.h"
#include "unary.h"

#define HEADER_SIZE 8
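
/* Every frame begins with an 8-byte header: big-endian 16-bit width and
 * height, two bytes that are skipped, one byte of quality (1-100) and one
 * byte of frame type (see FrameType below); mss4_decode_frame() parses it
 * before reading the bitstream payload. */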

enum FrameType {
    INTRA_FRAME = 0,
    INTER_FRAME,
    SKIP_FRAME
};

enum BlockType {
    SKIP_BLOCK = 0,
    DCT_BLOCK,
    IMAGE_BLOCK,
};

enum CachePos {
    LEFT = 0,
    TOP_LEFT,
    TOP,
};
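
/* Number of codes of each length for the DC VLCs: entry i is the count of
 * codes that are i + 1 bits long, exactly as expanded by mss4_init_vlc()
 * below. mss4_vec_entry_vlc_lens follows the same convention. */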

static const uint8_t mss4_dc_vlc_lens[2][16] = {
    { 0, 1, 5, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0 },
    { 0, 3, 1, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0 }
};

static const uint8_t vec_len_syms[2][4] = {
    { 4, 2, 3, 1 },
    { 4, 1, 2, 3 }
};

static const uint8_t mss4_vec_entry_vlc_lens[2][16] = {
    { 0, 2, 2, 3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    { 0, 1, 5, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};

static const uint8_t mss4_vec_entry_vlc_syms[2][9] = {
    { 0, 7, 6, 5, 8, 4, 3, 1, 2 },
    { 0, 2, 3, 4, 5, 6, 7, 1, 8 }
};

#define MAX_ENTRIES 162

typedef struct MSS4Context {
    AVFrame *pic;

    int block[64];
    uint8_t imgbuf[3][16 * 16];

    int quality;
    uint16_t quant_mat[2][64];

    int *prev_dc[3];
    ptrdiff_t dc_stride[3];
    int dc_cache[4][4];

    int prev_vec[3][4];
} MSS4Context;

static VLC dc_vlc[2], ac_vlc[2];
static VLC vec_entry_vlc[2];
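
/* Expand a table of per-length code counts into an array of code lengths
 * and build a static VLC from it; mss4_dc_vlc_lens[0], for example, yields
 * one 2-bit code followed by five 3-bit codes, and so on. */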
static av_cold void mss4_init_vlc(VLC *vlc, unsigned *offset,
                                  const uint8_t *lens, const uint8_t *syms)
{
    static VLC_TYPE vlc_buf[2146][2];
    uint8_t bits[MAX_ENTRIES];
    int i, j;
    int idx = 0;

    for (i = 0; i < 16; i++) {
        for (j = 0; j < lens[i]; j++) {
            bits[idx] = i + 1;
            idx++;
        }
    }

    vlc->table           = &vlc_buf[*offset];
    vlc->table_allocated = FF_ARRAY_ELEMS(vlc_buf) - *offset;
    ff_init_vlc_from_lengths(vlc, FFMIN(bits[idx - 1], 9), idx,
                             bits, 1, syms, 1, 1,
                             0, INIT_VLC_STATIC_OVERLONG, NULL);
    *offset += vlc->table_size;
}

static av_cold void mss4_init_vlcs(void)
{
    for (unsigned i = 0, offset = 0; i < 2; i++) {
        mss4_init_vlc(&dc_vlc[i], &offset, mss4_dc_vlc_lens[i], NULL);
        mss4_init_vlc(&ac_vlc[i], &offset,
                      i ? avpriv_mjpeg_bits_ac_chrominance + 1
                        : avpriv_mjpeg_bits_ac_luminance   + 1,
                      i ? avpriv_mjpeg_val_ac_chrominance
                        : avpriv_mjpeg_val_ac_luminance);
        mss4_init_vlc(&vec_entry_vlc[i], &offset, mss4_vec_entry_vlc_lens[i],
                      mss4_vec_entry_vlc_syms[i]);
    }
}

/* This function returns values in the range
 * [-range + 1; -range/2] U [range/2; range - 1]
 * where range = 1 << nbits, i.e.
 * nbits = 0 -> 0
 * nbits = 1 -> -1, 1
 * nbits = 2 -> -3, -2, 2, 3
 */
static av_always_inline int get_coeff_bits(GetBitContext *gb, int nbits)
{
    int val;

    if (!nbits)
        return 0;

    val = get_bits(gb, nbits);
    if (val < (1 << (nbits - 1)))
        val -= (1 << nbits) - 1;

    return val;
}
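
/* Read one coefficient: the VLC symbol gives the number of significant
 * bits, get_coeff_bits() then reads the value itself. */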
static inline int get_coeff(GetBitContext *gb, VLC *vlc)
{
    int val = get_vlc2(gb, vlc->table, vlc->bits, 2);

    return get_coeff_bits(gb, val);
}

static int mss4_decode_dct(GetBitContext *gb, VLC *dc_vlc, VLC *ac_vlc,
                           int *block, int *dc_cache,
                           int bx, int by, uint16_t *quant_mat)
{
    int skip, val, pos = 1, zz_pos, dc;

    memset(block, 0, sizeof(*block) * 64);

    dc = get_coeff(gb, dc_vlc);
    // DC prediction is the same as in MSS3
    if (by) {
        if (bx) {
            int l, tl, t;

            l  = dc_cache[LEFT];
            tl = dc_cache[TOP_LEFT];
            t  = dc_cache[TOP];

            if (FFABS(t - tl) <= FFABS(l - tl))
                dc += l;
            else
                dc += t;
        } else {
            dc += dc_cache[TOP];
        }
    } else if (bx) {
        dc += dc_cache[LEFT];
    }
    dc_cache[LEFT] = dc;
    block[0] = dc * quant_mat[0];
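
    /* AC coefficients are coded MJPEG-style: each VLC symbol packs a zero
     * run in its high nibble and the coefficient size in bits in its low
     * nibble; 0x00 means end of block and 0xF0 a run of 16 zeros. */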
    while (pos < 64) {
        val = get_vlc2(gb, ac_vlc->table, 9, 2);
        if (!val)
            return 0;
        if (val == -1)
            return -1;
        if (val == 0xF0) {
            pos += 16;
            continue;
        }
        skip = val >> 4;
        val  = get_coeff_bits(gb, val & 0xF);
        pos += skip;
        if (pos >= 64)
            return -1;

        zz_pos = ff_zigzag_direct[pos];
        block[zz_pos] = val * quant_mat[zz_pos];
        pos++;
    }

    return pos == 64 ? 0 : -1;
}
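
/* Decode one 16x16 macroblock in DCT mode: four 8x8 luma blocks plus one
 * 8x8 block per chroma plane; the chroma block is then doubled in both
 * directions to fill the 4:4:4 output frame. */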
static int mss4_decode_dct_block(MSS4Context *c, GetBitContext *gb,
                                 uint8_t *dst[3], int mb_x, int mb_y)
{
    int i, j, k, ret;
    uint8_t *out = dst[0];

    for (j = 0; j < 2; j++) {
        for (i = 0; i < 2; i++) {
            int xpos = mb_x * 2 + i;
            c->dc_cache[j][TOP_LEFT] = c->dc_cache[j][TOP];
            c->dc_cache[j][TOP]      = c->prev_dc[0][mb_x * 2 + i];
            ret = mss4_decode_dct(gb, &dc_vlc[0], &ac_vlc[0], c->block,
                                  c->dc_cache[j],
                                  xpos, mb_y * 2 + j, c->quant_mat[0]);
            if (ret)
                return ret;
            c->prev_dc[0][mb_x * 2 + i] = c->dc_cache[j][LEFT];

            ff_mss34_dct_put(out + xpos * 8, c->pic->linesize[0],
                             c->block);
        }
        out += 8 * c->pic->linesize[0];
    }

    for (i = 1; i < 3; i++) {
        c->dc_cache[i + 1][TOP_LEFT] = c->dc_cache[i + 1][TOP];
        c->dc_cache[i + 1][TOP]      = c->prev_dc[i][mb_x];
        ret = mss4_decode_dct(gb, &dc_vlc[1], &ac_vlc[1],
                              c->block, c->dc_cache[i + 1], mb_x, mb_y,
                              c->quant_mat[1]);
        if (ret)
            return ret;
        c->prev_dc[i][mb_x] = c->dc_cache[i + 1][LEFT];

        ff_mss34_dct_put(c->imgbuf[i], 8, c->block);
        out = dst[i] + mb_x * 16;
        // Since the DCT block is coded as YUV420 and the whole frame as YUV444,
        // we need to scale chroma.
        for (j = 0; j < 16; j++) {
            for (k = 0; k < 8; k++)
                AV_WN16A(out + k * 2, c->imgbuf[i][k + (j & ~1) * 4] * 0x101);
            out += c->pic->linesize[i];
        }
    }

    return 0;
}

static void read_vec_pos(GetBitContext *gb, int *vec_pos, int *sel_flag,
                         int *sel_len, int *prev)
{
    int i, y_flag = 0;

    for (i = 2; i >= 0; i--) {
        if (!sel_flag[i]) {
            vec_pos[i] = 0;
            continue;
        }
        if ((!i && !y_flag) || get_bits1(gb)) {
            if (sel_len[i] > 0) {
                int pval = prev[i];
                vec_pos[i] = get_bits(gb, sel_len[i]);
                if (vec_pos[i] >= pval)
                    vec_pos[i]++;
            } else {
                vec_pos[i] = !prev[i];
            }
            y_flag = 1;
        } else {
            vec_pos[i] = prev[i];
        }
    }
}
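
/* Fetch one pixel value for image mode: positions inside the per-block
 * vector return the cached vector entry, positions past its end either
 * reuse the previous escape value or read a fresh raw value from the
 * bitstream. */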
static int get_value_cached(GetBitContext *gb, int vec_pos, uint8_t *vec,
                            int vec_size, int component, int shift, int *prev)
{
    if (vec_pos < vec_size)
        return vec[vec_pos];
    if (!get_bits1(gb))
        return prev[component];
    prev[component] = get_bits(gb, 8 - shift) << shift;
    return prev[component];
}

#define MKVAL(vals) ((vals)[0] | ((vals)[1] << 3) | ((vals)[2] << 6))
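
/* Pack the three per-component vector positions into a single integer so
 * that a whole pixel state fits into prev_mode[] and prev_vec1. */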

/* Image mode - the hardest to comprehend MSS4 coding mode.
 *
 * In this mode all three 16x16 blocks are coded together with a method
 * remotely similar to the methods employed in MSS1-MSS3.
 * The idea is that every component has a vector of 1-4 most common symbols
 * and an escape mode for reading a new value from the bitstream. Decoding
 * consists of retrieving pixel values from the vector or reading new ones
 * from the bitstream; depending on flags read from the bitstream, these vector
 * positions can be updated or reused from the state of the previous line
 * or previous pixel.
 */
static int mss4_decode_image_block(MSS4Context *ctx, GetBitContext *gb,
                                   uint8_t *picdst[3], int mb_x, int mb_y)
{
    uint8_t vec[3][4];
    int vec_len[3];
    int sel_len[3], sel_flag[3];
    int i, j, k, mode, split;
    int prev_vec1 = 0, prev_split = 0;
    int vals[3] = { 0 };
    int prev_pix[3] = { 0 };
    int prev_mode[16] = { 0 };
    uint8_t *dst[3];

    const int val_shift = ctx->quality == 100 ? 0 : 2;

    for (i = 0; i < 3; i++)
        dst[i] = ctx->imgbuf[i];

    for (i = 0; i < 3; i++) {
        vec_len[i] = vec_len_syms[!!i][get_unary(gb, 0, 3)];
        for (j = 0; j < vec_len[i]; j++) {
            vec[i][j]  = get_coeff(gb, &vec_entry_vlc[!!i]);
            vec[i][j] += ctx->prev_vec[i][j];
            ctx->prev_vec[i][j] = vec[i][j];
        }
        sel_flag[i] = vec_len[i] > 1;
        sel_len[i]  = vec_len[i] > 2 ? vec_len[i] - 2 : 0;
    }

    for (j = 0; j < 16; j++) {
        if (get_bits1(gb)) {
            split = 0;
            if (get_bits1(gb)) {
                prev_mode[0] = 0;
                vals[0] = vals[1] = vals[2] = 0;
                mode = 2;
            } else {
                mode = get_bits1(gb);
                if (mode)
                    split = get_bits(gb, 4);
            }
            for (i = 0; i < 16; i++) {
                if (mode <= 1) {
                    vals[0] = prev_mode[i] & 7;
                    vals[1] = (prev_mode[i] >> 3) & 7;
                    vals[2] = prev_mode[i] >> 6;
                    if (mode == 1 && i == split) {
                        read_vec_pos(gb, vals, sel_flag, sel_len, vals);
                    }
                } else if (mode == 2) {
                    if (get_bits1(gb))
                        read_vec_pos(gb, vals, sel_flag, sel_len, vals);
                }
                for (k = 0; k < 3; k++)
                    *dst[k]++ = get_value_cached(gb, vals[k], vec[k],
                                                 vec_len[k], k,
                                                 val_shift, prev_pix);
                prev_mode[i] = MKVAL(vals);
            }
        } else {
            if (get_bits1(gb)) {
                split = get_bits(gb, 4);
                if (split >= prev_split)
                    split++;
                prev_split = split;
            } else {
                split = prev_split;
            }
            if (split) {
                vals[0] = prev_mode[0] & 7;
                vals[1] = (prev_mode[0] >> 3) & 7;
                vals[2] = prev_mode[0] >> 6;
                for (i = 0; i < 3; i++) {
                    for (k = 0; k < split; k++) {
                        *dst[i]++ = get_value_cached(gb, vals[i], vec[i],
                                                     vec_len[i], i, val_shift,
                                                     prev_pix);
                        prev_mode[k] = MKVAL(vals);
                    }
                }
            }

            if (split != 16) {
                vals[0] = prev_vec1 & 7;
                vals[1] = (prev_vec1 >> 3) & 7;
                vals[2] = prev_vec1 >> 6;
                if (get_bits1(gb)) {
                    read_vec_pos(gb, vals, sel_flag, sel_len, vals);
                    prev_vec1 = MKVAL(vals);
                }
                for (i = 0; i < 3; i++) {
                    for (k = 0; k < 16 - split; k++) {
                        *dst[i]++ = get_value_cached(gb, vals[i], vec[i],
                                                     vec_len[i], i, val_shift,
                                                     prev_pix);
                        prev_mode[split + k] = MKVAL(vals);
                    }
                }
            }
        }
    }

    for (i = 0; i < 3; i++)
        for (j = 0; j < 16; j++)
            memcpy(picdst[i] + mb_x * 16 + j * ctx->pic->linesize[i],
                   ctx->imgbuf[i] + j * 16, 16);

    return 0;
}
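
/* A macroblock that is not coded in DCT mode still has to keep the DC
 * predictors consistent: the values stored for this macroblock position
 * are reset so that later DCT blocks predict against zero here. */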
static inline void mss4_update_dc_cache(MSS4Context *c, int mb_x)
{
    int i;

    c->dc_cache[0][TOP]  = c->prev_dc[0][mb_x * 2 + 1];
    c->dc_cache[0][LEFT] = 0;
    c->dc_cache[1][TOP]  = 0;
    c->dc_cache[1][LEFT] = 0;

    for (i = 0; i < 2; i++)
        c->prev_dc[0][mb_x * 2 + i] = 0;

    for (i = 1; i < 3; i++) {
        c->dc_cache[i + 1][TOP]  = c->prev_dc[i][mb_x];
        c->dc_cache[i + 1][LEFT] = 0;
        c->prev_dc[i][mb_x]      = 0;
    }
}
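
/* Top-level frame decoding: parse the 8-byte header, then walk the frame in
 * 16x16 macroblocks; each macroblock starts with a block type coded by
 * decode012() that selects skip, DCT or image mode. */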
static int mss4_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MSS4Context *c = avctx->priv_data;
    GetBitContext gb;
    GetByteContext bc;
    uint8_t *dst[3];
    int width, height, quality, frame_type;
    int x, y, i, mb_width, mb_height, blk_type;
    int ret;

    if (buf_size < HEADER_SIZE) {
        av_log(avctx, AV_LOG_ERROR,
               "Frame should have at least %d bytes, got %d instead\n",
               HEADER_SIZE, buf_size);
        return AVERROR_INVALIDDATA;
    }

    bytestream2_init(&bc, buf, buf_size);
    width      = bytestream2_get_be16(&bc);
    height     = bytestream2_get_be16(&bc);
    bytestream2_skip(&bc, 2);
    quality    = bytestream2_get_byte(&bc);
    frame_type = bytestream2_get_byte(&bc);

    if (width > avctx->width ||
        height != avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d\n",
               width, height);
        return AVERROR_INVALIDDATA;
    }
    if (quality < 1 || quality > 100) {
        av_log(avctx, AV_LOG_ERROR, "Invalid quality setting %d\n", quality);
        return AVERROR_INVALIDDATA;
    }
    if ((frame_type & ~3) || frame_type == 3) {
        av_log(avctx, AV_LOG_ERROR, "Invalid frame type %d\n", frame_type);
        return AVERROR_INVALIDDATA;
    }

    if (frame_type != SKIP_FRAME && !bytestream2_get_bytes_left(&bc)) {
        av_log(avctx, AV_LOG_ERROR,
               "Empty frame found but it is not a skip frame.\n");
        return AVERROR_INVALIDDATA;
    }
    mb_width  = FFALIGN(width,  16) >> 4;
    mb_height = FFALIGN(height, 16) >> 4;

    if (frame_type != SKIP_FRAME && 8*buf_size < 8*HEADER_SIZE + mb_width*mb_height)
        return AVERROR_INVALIDDATA;

    if ((ret = ff_reget_buffer(avctx, c->pic, 0)) < 0)
        return ret;
    c->pic->key_frame = (frame_type == INTRA_FRAME);
    c->pic->pict_type = (frame_type == INTRA_FRAME) ? AV_PICTURE_TYPE_I
                                                    : AV_PICTURE_TYPE_P;
    if (frame_type == SKIP_FRAME) {
        *got_frame = 1;
        if ((ret = av_frame_ref(data, c->pic)) < 0)
            return ret;

        return buf_size;
    }

    if (c->quality != quality) {
        c->quality = quality;
        for (i = 0; i < 2; i++)
            ff_mss34_gen_quant_mat(c->quant_mat[i], quality, !i);
    }

    if ((ret = init_get_bits8(&gb, buf + HEADER_SIZE, buf_size - HEADER_SIZE)) < 0)
        return ret;
    dst[0] = c->pic->data[0];
    dst[1] = c->pic->data[1];
    dst[2] = c->pic->data[2];

    memset(c->prev_vec, 0, sizeof(c->prev_vec));
    for (y = 0; y < mb_height; y++) {
        memset(c->dc_cache, 0, sizeof(c->dc_cache));
        for (x = 0; x < mb_width; x++) {
            blk_type = decode012(&gb);
            switch (blk_type) {
            case DCT_BLOCK:
                if (mss4_decode_dct_block(c, &gb, dst, x, y) < 0) {
                    av_log(avctx, AV_LOG_ERROR,
                           "Error decoding DCT block %d,%d\n",
                           x, y);
                    return AVERROR_INVALIDDATA;
                }
                break;
            case IMAGE_BLOCK:
                if (mss4_decode_image_block(c, &gb, dst, x, y) < 0) {
                    av_log(avctx, AV_LOG_ERROR,
                           "Error decoding VQ block %d,%d\n",
                           x, y);
                    return AVERROR_INVALIDDATA;
                }
                break;
            case SKIP_BLOCK:
                if (frame_type == INTRA_FRAME) {
                    av_log(avctx, AV_LOG_ERROR, "Skip block in intra frame\n");
                    return AVERROR_INVALIDDATA;
                }
                break;
            }
            if (blk_type != DCT_BLOCK)
                mss4_update_dc_cache(c, x);
        }
        dst[0] += c->pic->linesize[0] * 16;
        dst[1] += c->pic->linesize[1] * 16;
        dst[2] += c->pic->linesize[2] * 16;
    }

    if ((ret = av_frame_ref(data, c->pic)) < 0)
        return ret;

    *got_frame = 1;

    return buf_size;
}

static av_cold int mss4_decode_end(AVCodecContext *avctx)
{
    MSS4Context * const c = avctx->priv_data;
    int i;

    av_frame_free(&c->pic);
    for (i = 0; i < 3; i++)
        av_freep(&c->prev_dc[i]);

    return 0;
}

static av_cold int mss4_decode_init(AVCodecContext *avctx)
{
    static AVOnce init_static_once = AV_ONCE_INIT;
    MSS4Context * const c = avctx->priv_data;
    int i;

    for (i = 0; i < 3; i++) {
        c->dc_stride[i] = FFALIGN(avctx->width, 16) >> (2 + !!i);
        c->prev_dc[i] = av_malloc_array(c->dc_stride[i], sizeof(**c->prev_dc));
        if (!c->prev_dc[i]) {
            av_log(avctx, AV_LOG_ERROR, "Cannot allocate buffer\n");
            return AVERROR(ENOMEM);
        }
    }

    c->pic = av_frame_alloc();
    if (!c->pic)
        return AVERROR(ENOMEM);

    avctx->pix_fmt = AV_PIX_FMT_YUV444P;

    ff_thread_once(&init_static_once, mss4_init_vlcs);

    return 0;
}

AVCodec ff_mts2_decoder = {
    .name           = "mts2",
    .long_name      = NULL_IF_CONFIG_SMALL("MS Expression Encoder Screen"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MTS2,
    .priv_data_size = sizeof(MSS4Context),
    .init           = mss4_decode_init,
    .close          = mss4_decode_end,
    .decode         = mss4_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};