svq3.c
1 /*
2  * Copyright (c) 2003 The Libav Project
3  *
4  * This file is part of Libav.
5  *
6  * Libav is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * Libav is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with Libav; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
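/*
 * Illustrative sketch only (not part of the original source): a calling app
 * that has already located the ImageDescription payload in the stsd atom
 * might hand it to the decoder roughly like this. "imagedesc" and
 * "imagedesc_size" are hypothetical variables pointing at the atom contents
 * starting at the 'SVQ3' fourcc.
 *
 *     avctx->extradata = av_mallocz(imagedesc_size + AV_INPUT_BUFFER_PADDING_SIZE);
 *     if (!avctx->extradata)
 *         return AVERROR(ENOMEM);
 *     memcpy(avctx->extradata, imagedesc, imagedesc_size);
 *     avctx->extradata_size = imagedesc_size;
 */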
42 
43 #include <inttypes.h>
44 
45 #include "libavutil/attributes.h"
46 #include "internal.h"
47 #include "avcodec.h"
48 #include "mpegutils.h"
49 #include "h264dec.h"
50 #include "h264data.h"
51 #include "golomb.h"
52 #include "hpeldsp.h"
53 #include "mathops.h"
54 #include "rectangle.h"
55 #include "tpeldsp.h"
56 
57 #if CONFIG_ZLIB
58 #include <zlib.h>
59 #endif
60 
61 #include "svq1.h"
62 
68 typedef struct SVQ3Frame {
69  AVFrame *f;
70 
71  AVBufferRef *motion_val_buf[2];
72  int16_t (*motion_val[2])[2];
73 
74  AVBufferRef *mb_type_buf;
75  uint32_t *mb_type;
76 
77 
78  AVBufferRef *ref_index_buf[2];
79  int8_t *ref_index[2];
80 } SVQ3Frame;
81 
82 typedef struct SVQ3Context {
83  AVCodecContext *avctx;
84 
85  H264DSPContext h264dsp;
86  H264PredContext hpc;
87  HpelDSPContext hdsp;
88  TpelDSPContext tdsp;
89  VideoDSPContext vdsp;
90 
91  SVQ3Frame *cur_pic;
92  SVQ3Frame *next_pic;
93  SVQ3Frame *last_pic;
94  GetBitContext gb;
95  GetBitContext gb_slice;
96  uint8_t *slice_buf;
97  int slice_size;
98  int halfpel_flag;
99  int thirdpel_flag;
100  int unknown_flag;
101  uint32_t watermark_key;
102  int adaptive_quant;
103  int next_p_frame_damaged;
104  int h_edge_pos;
105  int v_edge_pos;
106  int last_frame_output;
107  int slice_num;
108  int qscale;
109  int cbp;
110  int frame_num;
111  int frame_num_offset;
112  int prev_frame_num_offset;
113  int prev_frame_num;
114 
115  enum AVPictureType pict_type;
116  int low_delay;
117 
118  int mb_x, mb_y;
119  int mb_xy;
120  int mb_width, mb_height;
121  int mb_stride, mb_num;
122  int b_stride;
123 
124  uint32_t *mb2br_xy;
125 
126  int chroma_pred_mode;
127  int intra16x16_pred_mode;
128 
129  int8_t intra4x4_pred_mode_cache[5 * 8];
130  int8_t (*intra4x4_pred_mode);
131 
132  unsigned int top_samples_available;
133  unsigned int topright_samples_available;
134  unsigned int left_samples_available;
135 
136  uint8_t *edge_emu_buffer;
137 
138  DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
139  DECLARE_ALIGNED(8, int8_t, ref_cache)[2][5 * 8];
140  DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];
141  DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];
142  DECLARE_ALIGNED(8, uint8_t, non_zero_count_cache)[15 * 8];
143  uint32_t dequant4_coeff[QP_MAX_NUM + 1][16];
144  int block_offset[2 * (16 * 3)];
145 } SVQ3Context;
146 
147 #define FULLPEL_MODE 1
148 #define HALFPEL_MODE 2
149 #define THIRDPEL_MODE 3
150 #define PREDICT_MODE 4
151 
152 /* dual scan (from some older H.264 draft)
153  * o-->o-->o   o
154  *         |  /|
155  * o   o   o / o
156  * | / |   |/  |
157  * o   o   o   o
158  *   /
159  * o-->o-->o-->o
160  */
161 static const uint8_t svq3_scan[16] = {
162  0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
163  2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
164  0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
165  0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
166 };
167 
168 static const uint8_t luma_dc_zigzag_scan[16] = {
169  0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
170  3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
171  1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
172  3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
173 };
174 
175 static const uint8_t svq3_pred_0[25][2] = {
176  { 0, 0 },
177  { 1, 0 }, { 0, 1 },
178  { 0, 2 }, { 1, 1 }, { 2, 0 },
179  { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
180  { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
181  { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
182  { 2, 4 }, { 3, 3 }, { 4, 2 },
183  { 4, 3 }, { 3, 4 },
184  { 4, 4 }
185 };
186 
187 static const int8_t svq3_pred_1[6][6][5] = {
188  { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
189  { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
190  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
191  { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
192  { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
193  { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
194  { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
195  { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
196  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
197  { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
198  { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
199  { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
200 };
201 
202 static const struct {
203  uint8_t run;
204  uint8_t level;
205 } svq3_dct_tables[2][16] = {
206  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
207  { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
208  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
209  { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
210 };
211 
212 static const uint32_t svq3_dequant_coeff[32] = {
213  3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
214  9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
215  24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
216  61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
217 };
218 
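/* Dequantize and inverse-transform the 4x4 array of luma DC coefficients.
 * SVQ3 uses a 13/17/7-weighted integer transform here instead of the H.264
 * luma DC Hadamard; each result is scaled by svq3_dequant_coeff[qp] and
 * rounded with (+ 0x80000 >> 20) before being written to the DC position of
 * the corresponding 4x4 block in the output array. */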
219 static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
220 {
221  const int qmul = svq3_dequant_coeff[qp];
222 #define stride 16
223  int i;
224  int temp[16];
225  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
226 
227  for (i = 0; i < 4; i++) {
228  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
229  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
230  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
231  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
232 
233  temp[4 * i + 0] = z0 + z3;
234  temp[4 * i + 1] = z1 + z2;
235  temp[4 * i + 2] = z1 - z2;
236  temp[4 * i + 3] = z0 - z3;
237  }
238 
239  for (i = 0; i < 4; i++) {
240  const int offset = x_offset[i];
241  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
242  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
243  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
244  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
245 
246  output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
247  output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
248  output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
249  output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
250  }
251 }
252 #undef stride
253 
254 static void svq3_add_idct_c(uint8_t *dst, int16_t *block,
255  int stride, int qp, int dc)
256 {
257  const int qmul = svq3_dequant_coeff[qp];
258  int i;
259 
260  if (dc) {
261  dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
262  : qmul * (block[0] >> 3) / 2);
263  block[0] = 0;
264  }
265 
266  for (i = 0; i < 4; i++) {
267  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
268  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
269  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
270  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
271 
272  block[0 + 4 * i] = z0 + z3;
273  block[1 + 4 * i] = z1 + z2;
274  block[2 + 4 * i] = z1 - z2;
275  block[3 + 4 * i] = z0 - z3;
276  }
277 
278  for (i = 0; i < 4; i++) {
279  const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
280  const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
281  const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
282  const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
283  const int rr = (dc + 0x80000);
284 
285  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
286  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
287  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
288  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
289  }
290 
291  memset(block, 0, 16 * sizeof(int16_t));
292 }
293 
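/* Decode one block of run/level coefficients from the slice bitstream.
 * Each interleaved Exp-Golomb code carries the sign in its lowest bit; the
 * remaining value selects a (run, level) pair from svq3_dct_tables or, for
 * large codes, from an escape formula. The type argument picks the scan
 * pattern and table set, and a run that steps past the current limit is
 * reported as an error. */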
294 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
295  int index, const int type)
296 {
297  static const uint8_t *const scan_patterns[4] = {
298  luma_dc_zigzag_scan, ff_zigzag_scan, svq3_scan, ff_h264_chroma_dc_scan
299  };
300 
301  int run, level, limit;
302  unsigned vlc;
303  const int intra = 3 * type >> 2;
304  const uint8_t *const scan = scan_patterns[type];
305 
306  for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
307  for (; (vlc = get_interleaved_ue_golomb(gb)) != 0; index++) {
308  int sign = (vlc & 1) ? 0 : -1;
309  vlc = vlc + 1 >> 1;
310 
311  if (type == 3) {
312  if (vlc < 3) {
313  run = 0;
314  level = vlc;
315  } else if (vlc < 4) {
316  run = 1;
317  level = 1;
318  } else {
319  run = vlc & 0x3;
320  level = (vlc + 9 >> 2) - run;
321  }
322  } else {
323  if (vlc < 16) {
324  run = svq3_dct_tables[intra][vlc].run;
325  level = svq3_dct_tables[intra][vlc].level;
326  } else if (intra) {
327  run = vlc & 0x7;
328  level = (vlc >> 3) +
329  ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
330  } else {
331  run = vlc & 0xF;
332  level = (vlc >> 4) +
333  ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
334  }
335  }
336 
337  if ((index += run) >= limit)
338  return -1;
339 
340  block[scan[index]] = (level ^ sign) - sign;
341  }
342 
343  if (type != 2) {
344  break;
345  }
346  }
347 
348  return 0;
349 }
350 
351 static av_always_inline int
352 svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C,
353  int i, int list, int part_width)
354 {
355  const int topright_ref = s->ref_cache[list][i - 8 + part_width];
356 
357  if (topright_ref != PART_NOT_AVAILABLE) {
358  *C = s->mv_cache[list][i - 8 + part_width];
359  return topright_ref;
360  } else {
361  *C = s->mv_cache[list][i - 8 - 1];
362  return s->ref_cache[list][i - 8 - 1];
363  }
364 }
365 
366 /**
367  * Get the predicted MV.
368  * @param n the block index
369  * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
370  * @param mx the x component of the predicted motion vector
371  * @param my the y component of the predicted motion vector
372  */
373 static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n,
374  int part_width, int list,
375  int ref, int *const mx, int *const my)
376 {
377  const int index8 = scan8[n];
378  const int top_ref = s->ref_cache[list][index8 - 8];
379  const int left_ref = s->ref_cache[list][index8 - 1];
380  const int16_t *const A = s->mv_cache[list][index8 - 1];
381  const int16_t *const B = s->mv_cache[list][index8 - 8];
382  const int16_t *C;
383  int diagonal_ref, match_count;
384 
385 /* mv_cache
386  * B . . A T T T T
387  * U . . L . . , .
388  * U . . L . . . .
389  * U . . L . . , .
390  * . . . L . . . .
391  */
392 
393  diagonal_ref = svq3_fetch_diagonal_mv(s, &C, index8, list, part_width);
394  match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
395  if (match_count > 1) { //most common
396  *mx = mid_pred(A[0], B[0], C[0]);
397  *my = mid_pred(A[1], B[1], C[1]);
398  } else if (match_count == 1) {
399  if (left_ref == ref) {
400  *mx = A[0];
401  *my = A[1];
402  } else if (top_ref == ref) {
403  *mx = B[0];
404  *my = B[1];
405  } else {
406  *mx = C[0];
407  *my = C[1];
408  }
409  } else {
410  if (top_ref == PART_NOT_AVAILABLE &&
411  diagonal_ref == PART_NOT_AVAILABLE &&
412  left_ref != PART_NOT_AVAILABLE) {
413  *mx = A[0];
414  *my = A[1];
415  } else {
416  *mx = mid_pred(A[0], B[0], C[0]);
417  *my = mid_pred(A[1], B[1], C[1]);
418  }
419  }
420 }
421 
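/* Motion-compensate one partition of the current macroblock from the forward
 * (dir == 0) or backward reference picture. mx/my are full-pel offsets with
 * the sub-pel phase passed in dxy; thirdpel selects the tpel rather than the
 * hpel DSP routines, and edge emulation is used whenever the source block
 * reaches outside the reference frame. */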
422 static inline void svq3_mc_dir_part(SVQ3Context *s,
423  int x, int y, int width, int height,
424  int mx, int my, int dxy,
425  int thirdpel, int dir, int avg)
426 {
427  const SVQ3Frame *pic = (dir == 0) ? s->last_pic : s->next_pic;
428  uint8_t *src, *dest;
429  int i, emu = 0;
430  int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
431  int linesize = s->cur_pic->f->linesize[0];
432  int uvlinesize = s->cur_pic->f->linesize[1];
433 
434  mx += x;
435  my += y;
436 
437  if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
438  my < 0 || my >= s->v_edge_pos - height - 1) {
439  emu = 1;
440  mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
441  my = av_clip(my, -16, s->v_edge_pos - height + 15);
442  }
443 
444  /* form component predictions */
445  dest = s->cur_pic->f->data[0] + x + y * linesize;
446  src = pic->f->data[0] + mx + my * linesize;
447 
448  if (emu) {
449  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
450  linesize, linesize,
451  width + 1, height + 1,
452  mx, my, s->h_edge_pos, s->v_edge_pos);
453  src = s->edge_emu_buffer;
454  }
455  if (thirdpel)
456  (avg ? s->tdsp.avg_tpel_pixels_tab
457  : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, linesize,
458  width, height);
459  else
460  (avg ? s->hdsp.avg_pixels_tab
461  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, linesize,
462  height);
463 
464  if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
465  mx = mx + (mx < (int) x) >> 1;
466  my = my + (my < (int) y) >> 1;
467  width = width >> 1;
468  height = height >> 1;
469  blocksize++;
470 
471  for (i = 1; i < 3; i++) {
472  dest = s->cur_pic->f->data[i] + (x >> 1) + (y >> 1) * uvlinesize;
473  src = pic->f->data[i] + mx + my * uvlinesize;
474 
475  if (emu) {
476  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
477  uvlinesize, uvlinesize,
478  width + 1, height + 1,
479  mx, my, (s->h_edge_pos >> 1),
480  s->v_edge_pos >> 1);
481  src = s->edge_emu_buffer;
482  }
483  if (thirdpel)
484  (avg ? s->tdsp.avg_tpel_pixels_tab
485  : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
486  uvlinesize,
487  width, height);
488  else
489  (avg ? s->hdsp.avg_pixels_tab
490  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
491  uvlinesize,
492  height);
493  }
494  }
495 }
496 
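/* Predict, decode and apply the motion vectors for all partitions of the
 * current macroblock. Vectors are handled internally in 1/6-pel units; the
 * 0x3000/0x6000 offsets below bias the values positive so that the unsigned
 * divisions by 3 and 6 round consistently for negative vectors before each
 * vector is split into an integer part and a dxy sub-pel index. */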
497 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
498  int dir, int avg)
499 {
500  int i, j, k, mx, my, dx, dy, x, y;
501  const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
502  const int part_height = 16 >> ((unsigned)(size + 1) / 3);
503  const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
504  const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
505  const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
506 
507  for (i = 0; i < 16; i += part_height)
508  for (j = 0; j < 16; j += part_width) {
509  const int b_xy = (4 * s->mb_x + (j >> 2)) +
510  (4 * s->mb_y + (i >> 2)) * s->b_stride;
511  int dxy;
512  x = 16 * s->mb_x + j;
513  y = 16 * s->mb_y + i;
514  k = (j >> 2 & 1) + (i >> 1 & 2) +
515  (j >> 1 & 4) + (i & 8);
516 
517  if (mode != PREDICT_MODE) {
518  svq3_pred_motion(s, k, part_width >> 2, dir, 1, &mx, &my);
519  } else {
520  mx = s->next_pic->motion_val[0][b_xy][0] << 1;
521  my = s->next_pic->motion_val[0][b_xy][1] << 1;
522 
523  if (dir == 0) {
524  mx = mx * s->frame_num_offset /
525  s->prev_frame_num_offset + 1 >> 1;
526  my = my * s->frame_num_offset /
527  s->prev_frame_num_offset + 1 >> 1;
528  } else {
529  mx = mx * (s->frame_num_offset - s->prev_frame_num_offset) /
530  s->prev_frame_num_offset + 1 >> 1;
531  my = my * (s->frame_num_offset - s->prev_frame_num_offset) /
532  s->prev_frame_num_offset + 1 >> 1;
533  }
534  }
535 
536  /* clip motion vector prediction to frame border */
537  mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
538  my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
539 
540  /* get (optional) motion vector differential */
541  if (mode == PREDICT_MODE) {
542  dx = dy = 0;
543  } else {
544  dy = get_interleaved_se_golomb(&s->gb_slice);
545  dx = get_interleaved_se_golomb(&s->gb_slice);
546 
547  if (dx == INVALID_VLC || dy == INVALID_VLC) {
548  av_log(s->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
549  return -1;
550  }
551  }
552 
553  /* compute motion vector */
554  if (mode == THIRDPEL_MODE) {
555  int fx, fy;
556  mx = (mx + 1 >> 1) + dx;
557  my = (my + 1 >> 1) + dy;
558  fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
559  fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
560  dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
561 
562  svq3_mc_dir_part(s, x, y, part_width, part_height,
563  fx, fy, dxy, 1, dir, avg);
564  mx += mx;
565  my += my;
566  } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
567  mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
568  my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
569  dxy = (mx & 1) + 2 * (my & 1);
570 
571  svq3_mc_dir_part(s, x, y, part_width, part_height,
572  mx >> 1, my >> 1, dxy, 0, dir, avg);
573  mx *= 3;
574  my *= 3;
575  } else {
576  mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
577  my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
578 
579  svq3_mc_dir_part(s, x, y, part_width, part_height,
580  mx, my, 0, 0, dir, avg);
581  mx *= 6;
582  my *= 6;
583  }
584 
585  /* update mv_cache */
586  if (mode != PREDICT_MODE) {
587  int32_t mv = pack16to32(mx, my);
588 
589  if (part_height == 8 && i < 8) {
590  AV_WN32A(s->mv_cache[dir][scan8[k] + 1 * 8], mv);
591 
592  if (part_width == 8 && j < 8)
593  AV_WN32A(s->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
594  }
595  if (part_width == 8 && j < 8)
596  AV_WN32A(s->mv_cache[dir][scan8[k] + 1], mv);
597  if (part_width == 4 || part_height == 4)
598  AV_WN32A(s->mv_cache[dir][scan8[k]], mv);
599  }
600 
601  /* write back motion vectors */
602  fill_rectangle(s->cur_pic->motion_val[dir][b_xy],
603  part_width >> 2, part_height >> 2, s->b_stride,
604  pack16to32(mx, my), 4);
605  }
606 
607  return 0;
608 }
609 
610 static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s,
611  int mb_type, const int *block_offset,
612  int linesize, uint8_t *dest_y)
613 {
614  int i;
615  if (!IS_INTRA4x4(mb_type)) {
616  for (i = 0; i < 16; i++)
617  if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
618  uint8_t *const ptr = dest_y + block_offset[i];
619  svq3_add_idct_c(ptr, s->mb + i * 16, linesize,
620  s->qscale, IS_INTRA(mb_type) ? 1 : 0);
621  }
622  }
623 }
624 
625 static av_always_inline int dctcoef_get(int16_t *mb, int index)
626 {
627  return AV_RN16A(mb + index);
628 }
629 
630 static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s,
631  int mb_type,
632  const int *block_offset,
633  int linesize,
634  uint8_t *dest_y)
635 {
636  int i;
637  int qscale = s->qscale;
638 
639  if (IS_INTRA4x4(mb_type)) {
640  for (i = 0; i < 16; i++) {
641  uint8_t *const ptr = dest_y + block_offset[i];
642  const int dir = s->intra4x4_pred_mode_cache[scan8[i]];
643 
644  uint8_t *topright;
645  int nnz, tr;
646  if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
647  const int topright_avail = (s->topright_samples_available << i) & 0x8000;
648  assert(s->mb_y || linesize <= block_offset[i]);
649  if (!topright_avail) {
650  tr = ptr[3 - linesize] * 0x01010101u;
651  topright = (uint8_t *)&tr;
652  } else
653  topright = ptr + 4 - linesize;
654  } else
655  topright = NULL;
656 
657  s->hpc.pred4x4[dir](ptr, topright, linesize);
658  nnz = s->non_zero_count_cache[scan8[i]];
659  if (nnz) {
660  svq3_add_idct_c(ptr, s->mb + i * 16, linesize, qscale, 0);
661  }
662  }
663  } else {
664  s->hpc.pred16x16[s->intra16x16_pred_mode](dest_y, linesize);
665  svq3_luma_dc_dequant_idct_c(s->mb, s->mb_luma_dc[0], qscale);
666  }
667 }
668 
669 static void hl_decode_mb(SVQ3Context *s)
670 {
671  const int mb_x = s->mb_x;
672  const int mb_y = s->mb_y;
673  const int mb_xy = s->mb_xy;
674  const int mb_type = s->cur_pic->mb_type[mb_xy];
675  uint8_t *dest_y, *dest_cb, *dest_cr;
676  int linesize, uvlinesize;
677  int i, j;
678  const int *block_offset = &s->block_offset[0];
679  const int block_h = 16 >> 1;
680 
681  linesize = s->cur_pic->f->linesize[0];
682  uvlinesize = s->cur_pic->f->linesize[1];
683 
684  dest_y = s->cur_pic->f->data[0] + (mb_x + mb_y * linesize) * 16;
685  dest_cb = s->cur_pic->f->data[1] + mb_x * 8 + mb_y * uvlinesize * block_h;
686  dest_cr = s->cur_pic->f->data[2] + mb_x * 8 + mb_y * uvlinesize * block_h;
687 
688  s->vdsp.prefetch(dest_y + (s->mb_x & 3) * 4 * linesize + 64, linesize, 4);
689  s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * uvlinesize + 64, dest_cr - dest_cb, 2);
690 
691  if (IS_INTRA(mb_type)) {
692  s->hpc.pred8x8[s->chroma_pred_mode](dest_cb, uvlinesize);
693  s->hpc.pred8x8[s->chroma_pred_mode](dest_cr, uvlinesize);
694 
695  hl_decode_mb_predict_luma(s, mb_type, block_offset, linesize, dest_y);
696  }
697 
698  hl_decode_mb_idct_luma(s, mb_type, block_offset, linesize, dest_y);
699 
700  if (s->cbp & 0x30) {
701  uint8_t *dest[2] = { dest_cb, dest_cr };
702  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 1,
703  s->dequant4_coeff[4][0]);
704  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 2,
705  s->dequant4_coeff[4][0]);
706  for (j = 1; j < 3; j++) {
707  for (i = j * 16; i < j * 16 + 4; i++)
708  if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
709  uint8_t *const ptr = dest[j - 1] + block_offset[i];
710  svq3_add_idct_c(ptr, s->mb + i * 16,
711  uvlinesize, ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
712  }
713  }
714  }
715 }
716 
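/* Decode a single macroblock. The mb_type values follow the SVQ3 bitstream
 * mapping used by the caller: 0 is a skip block, 1..7 are inter modes,
 * 8 and 33 are intra 4x4 (33 forcing DC_128 prediction), and the remaining
 * values up to 32 map onto H.264-style intra 16x16 modes through
 * ff_h264_i_mb_type_info. */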
717 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
718 {
719  int i, j, k, m, dir, mode;
720  int cbp = 0;
721  uint32_t vlc;
722  int8_t *top, *left;
723  const int mb_xy = s->mb_xy;
724  const int b_xy = 4 * s->mb_x + 4 * s->mb_y * s->b_stride;
725 
726  s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
727  s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
728  s->topright_samples_available = 0xFFFF;
729 
730  if (mb_type == 0) { /* SKIP */
731  if (s->pict_type == AV_PICTURE_TYPE_P ||
732  s->next_pic->mb_type[mb_xy] == -1) {
733  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
734  0, 0, 0, 0, 0, 0);
735 
736  if (s->pict_type == AV_PICTURE_TYPE_B)
737  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
738  0, 0, 0, 0, 1, 1);
739 
740  mb_type = MB_TYPE_SKIP;
741  } else {
742  mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
743  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
744  return -1;
745  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
746  return -1;
747 
748  mb_type = MB_TYPE_16x16;
749  }
750  } else if (mb_type < 8) { /* INTER */
751  if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&s->gb_slice))
752  mode = THIRDPEL_MODE;
753  else if (s->halfpel_flag &&
754  s->thirdpel_flag == !get_bits1(&s->gb_slice))
755  mode = HALFPEL_MODE;
756  else
757  mode = FULLPEL_MODE;
758 
759  /* fill caches */
760  /* note ref_cache should contain here:
761  * ????????
762  * ???11111
763  * N??11111
764  * N??11111
765  * N??11111
766  */
767 
768  for (m = 0; m < 2; m++) {
769  if (s->mb_x > 0 && s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6] != -1) {
770  for (i = 0; i < 4; i++)
771  AV_COPY32(s->mv_cache[m][scan8[0] - 1 + i * 8],
772  s->cur_pic->motion_val[m][b_xy - 1 + i * s->b_stride]);
773  } else {
774  for (i = 0; i < 4; i++)
775  AV_ZERO32(s->mv_cache[m][scan8[0] - 1 + i * 8]);
776  }
777  if (s->mb_y > 0) {
778  memcpy(s->mv_cache[m][scan8[0] - 1 * 8],
779  s->cur_pic->motion_val[m][b_xy - s->b_stride],
780  4 * 2 * sizeof(int16_t));
781  memset(&s->ref_cache[m][scan8[0] - 1 * 8],
782  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
783 
784  if (s->mb_x < s->mb_width - 1) {
785  AV_COPY32(s->mv_cache[m][scan8[0] + 4 - 1 * 8],
786  s->cur_pic->motion_val[m][b_xy - s->b_stride + 4]);
787  s->ref_cache[m][scan8[0] + 4 - 1 * 8] =
788  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||
789  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
790  } else
791  s->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
792  if (s->mb_x > 0) {
793  AV_COPY32(s->mv_cache[m][scan8[0] - 1 - 1 * 8],
794  s->cur_pic->motion_val[m][b_xy - s->b_stride - 1]);
795  s->ref_cache[m][scan8[0] - 1 - 1 * 8] =
796  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
797  } else
798  s->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
799  } else
800  memset(&s->ref_cache[m][scan8[0] - 1 * 8 - 1],
801  PART_NOT_AVAILABLE, 8);
802 
803  if (s->pict_type != AV_PICTURE_TYPE_B)
804  break;
805  }
806 
807  /* decode motion vector(s) and form prediction(s) */
808  if (s->pict_type == AV_PICTURE_TYPE_P) {
809  if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
810  return -1;
811  } else { /* AV_PICTURE_TYPE_B */
812  if (mb_type != 2) {
813  if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
814  return -1;
815  } else {
816  for (i = 0; i < 4; i++)
817  memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
818  0, 4 * 2 * sizeof(int16_t));
819  }
820  if (mb_type != 1) {
821  if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
822  return -1;
823  } else {
824  for (i = 0; i < 4; i++)
825  memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
826  0, 4 * 2 * sizeof(int16_t));
827  }
828  }
829 
830  mb_type = MB_TYPE_16x16;
831  } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
832  int8_t *i4x4 = s->intra4x4_pred_mode + s->mb2br_xy[s->mb_xy];
833  int8_t *i4x4_cache = s->intra4x4_pred_mode_cache;
834 
835  memset(s->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
836 
837  if (mb_type == 8) {
838  if (s->mb_x > 0) {
839  for (i = 0; i < 4; i++)
840  s->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6 - i];
841  if (s->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
842  s->left_samples_available = 0x5F5F;
843  }
844  if (s->mb_y > 0) {
845  s->intra4x4_pred_mode_cache[4 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 0];
846  s->intra4x4_pred_mode_cache[5 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 1];
847  s->intra4x4_pred_mode_cache[6 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 2];
848  s->intra4x4_pred_mode_cache[7 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 3];
849 
850  if (s->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
851  s->top_samples_available = 0x33FF;
852  }
853 
854  /* decode prediction codes for luma blocks */
855  for (i = 0; i < 16; i += 2) {
856  vlc = get_interleaved_ue_golomb(&s->gb_slice);
857 
858  if (vlc >= 25) {
859  av_log(s->avctx, AV_LOG_ERROR,
860  "luma prediction:%"PRIu32"\n", vlc);
861  return -1;
862  }
863 
864  left = &s->intra4x4_pred_mode_cache[scan8[i] - 1];
865  top = &s->intra4x4_pred_mode_cache[scan8[i] - 8];
866 
867  left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
868  left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
869 
870  if (left[1] == -1 || left[2] == -1) {
871  av_log(s->avctx, AV_LOG_ERROR, "weird prediction\n");
872  return -1;
873  }
874  }
875  } else { /* mb_type == 33, DC_128_PRED block type */
876  for (i = 0; i < 4; i++)
877  memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
878  }
879 
880  AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
881  i4x4[4] = i4x4_cache[7 + 8 * 3];
882  i4x4[5] = i4x4_cache[7 + 8 * 2];
883  i4x4[6] = i4x4_cache[7 + 8 * 1];
884 
885  if (mb_type == 8) {
886  ff_h264_check_intra4x4_pred_mode(s->intra4x4_pred_mode_cache,
887  s->avctx, s->top_samples_available,
888  s->left_samples_available);
889 
890  s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
891  s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
892  } else {
893  for (i = 0; i < 4; i++)
894  memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
895 
896  s->top_samples_available = 0x33FF;
897  s->left_samples_available = 0x5F5F;
898  }
899 
900  mb_type = MB_TYPE_INTRA4x4;
901  } else { /* INTRA16x16 */
902  dir = ff_h264_i_mb_type_info[mb_type - 8].pred_mode;
903  dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
904 
906  s->left_samples_available, dir, 0)) < 0) {
907  av_log(s->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
908  return s->intra16x16_pred_mode;
909  }
910 
911  cbp = ff_h264_i_mb_type_info[mb_type - 8].cbp;
912  mb_type = MB_TYPE_INTRA16x16;
913  }
914 
915  if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
916  for (i = 0; i < 4; i++)
917  memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
918  0, 4 * 2 * sizeof(int16_t));
919  if (s->pict_type == AV_PICTURE_TYPE_B) {
920  for (i = 0; i < 4; i++)
921  memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
922  0, 4 * 2 * sizeof(int16_t));
923  }
924  }
925  if (!IS_INTRA4x4(mb_type)) {
926  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy], DC_PRED, 8);
927  }
928  if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
929  memset(s->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
930  }
931 
932  if (!IS_INTRA16x16(mb_type) &&
933  (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
934  if ((vlc = get_interleaved_ue_golomb(&s->gb_slice)) >= 48) {
935  av_log(s->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
936  return -1;
937  }
938 
939  cbp = IS_INTRA(mb_type) ? ff_h264_golomb_to_intra4x4_cbp[vlc]
940  : ff_h264_golomb_to_inter_cbp[vlc];
941  }
942  if (IS_INTRA16x16(mb_type) ||
943  (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
944  s->qscale += get_interleaved_se_golomb(&s->gb_slice);
945 
946  if (s->qscale > 31u) {
947  av_log(s->avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
948  return -1;
949  }
950  }
951  if (IS_INTRA16x16(mb_type)) {
952  AV_ZERO128(s->mb_luma_dc[0] + 0);
953  AV_ZERO128(s->mb_luma_dc[0] + 8);
954  if (svq3_decode_block(&s->gb_slice, s->mb_luma_dc[0], 0, 1)) {
955  av_log(s->avctx, AV_LOG_ERROR,
956  "error while decoding intra luma dc\n");
957  return -1;
958  }
959  }
960 
961  if (cbp) {
962  const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
963  const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
964 
965  for (i = 0; i < 4; i++)
966  if ((cbp & (1 << i))) {
967  for (j = 0; j < 4; j++) {
968  k = index ? (1 * (j & 1) + 2 * (i & 1) +
969  2 * (j & 2) + 4 * (i & 2))
970  : (4 * i + j);
971  s->non_zero_count_cache[scan8[k]] = 1;
972 
973  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], index, type)) {
974  av_log(s->avctx, AV_LOG_ERROR,
975  "error while decoding block\n");
976  return -1;
977  }
978  }
979  }
980 
981  if ((cbp & 0x30)) {
982  for (i = 1; i < 3; ++i)
983  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * 16 * i], 0, 3)) {
984  av_log(s->avctx, AV_LOG_ERROR,
985  "error while decoding chroma dc block\n");
986  return -1;
987  }
988 
989  if ((cbp & 0x20)) {
990  for (i = 1; i < 3; i++) {
991  for (j = 0; j < 4; j++) {
992  k = 16 * i + j;
993  s->non_zero_count_cache[scan8[k]] = 1;
994 
995  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], 1, 1)) {
996  av_log(s->avctx, AV_LOG_ERROR,
997  "error while decoding chroma ac block\n");
998  return -1;
999  }
1000  }
1001  }
1002  }
1003  }
1004  }
1005 
1006  s->cbp = cbp;
1007  s->cur_pic->mb_type[mb_xy] = mb_type;
1008 
1009  if (IS_INTRA(mb_type))
1010  s->chroma_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
1011  s->left_samples_available, DC_PRED8x8, 1);
1012 
1013  return 0;
1014 }
1015 
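/* Parse an SVQ3 slice header. The first byte carries the slice start code in
 * its low bits and the width of the following size field in bits 5-6; the
 * slice payload is copied into a separate buffer read through gb_slice and,
 * for watermarked streams, four bytes at offset 1 of that copy are
 * descrambled with watermark_key before the slice id, slice_num, qscale and
 * adaptive_quant fields are read. */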
1016 static int svq3_decode_slice_header(AVCodecContext *avctx)
1017 {
1018  SVQ3Context *s = avctx->priv_data;
1019  const int mb_xy = s->mb_xy;
1020  int i, header;
1021  unsigned slice_id;
1022 
1023  header = get_bits(&s->gb, 8);
1024 
1025  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
1026  /* TODO: what? */
1027  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
1028  return -1;
1029  } else {
1030  int slice_bits, slice_bytes, slice_length;
1031  int length = header >> 5 & 3;
1032 
1033  slice_length = show_bits(&s->gb, 8 * length);
1034  slice_bits = slice_length * 8;
1035  slice_bytes = slice_length + length - 1;
1036 
1037  if (slice_bytes > get_bits_left(&s->gb)) {
1038  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
1039  return -1;
1040  }
1041 
1042  skip_bits(&s->gb, 8);
1043 
1044  av_fast_malloc(&s->slice_buf, &s->slice_size, slice_bytes + AV_INPUT_BUFFER_PADDING_SIZE);
1045  if (!s->slice_buf)
1046  return AVERROR(ENOMEM);
1047 
1048  memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);
1049 
1050  init_get_bits(&s->gb_slice, s->slice_buf, slice_bits);
1051 
1052  if (s->watermark_key) {
1053  uint32_t header = AV_RL32(&s->gb_slice.buffer[1]);
1054  AV_WL32(&s->gb_slice.buffer[1], header ^ s->watermark_key);
1055  }
1056  if (length > 0) {
1057  memcpy(s->slice_buf, &s->slice_buf[slice_length], length - 1);
1058  }
1059  skip_bits_long(&s->gb, slice_bytes * 8);
1060  }
1061 
1062  if ((slice_id = get_interleaved_ue_golomb(&s->gb_slice)) >= 3) {
1063  av_log(s->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
1064  return -1;
1065  }
1066 
1067  s->pict_type = ff_h264_golomb_to_pict_type[slice_id];
1068 
1069  if ((header & 0x9F) == 2) {
1070  i = (s->mb_num < 64) ? 6 : (1 + av_log2(s->mb_num - 1));
1071  get_bits(&s->gb_slice, i);
1072  } else {
1073  skip_bits1(&s->gb_slice);
1074  }
1075 
1076  s->slice_num = get_bits(&s->gb_slice, 8);
1077  s->qscale = get_bits(&s->gb_slice, 5);
1078  s->adaptive_quant = get_bits1(&s->gb_slice);
1079 
1080  /* unknown fields */
1081  skip_bits1(&s->gb_slice);
1082 
1083  if (s->unknown_flag)
1084  skip_bits1(&s->gb_slice);
1085 
1086  skip_bits1(&s->gb_slice);
1087  skip_bits(&s->gb_slice, 2);
1088 
1089  while (get_bits1(&s->gb_slice))
1090  skip_bits(&s->gb_slice, 8);
1091 
1092  /* reset intra predictors and invalidate motion vector references */
1093  if (s->mb_x > 0) {
1094  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - 1] + 3,
1095  -1, 4 * sizeof(int8_t));
1096  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_x],
1097  -1, 8 * sizeof(int8_t) * s->mb_x);
1098  }
1099  if (s->mb_y > 0) {
1100  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_stride],
1101  -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));
1102 
1103  if (s->mb_x > 0)
1104  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
1105  }
1106 
1107  return 0;
1108 }
1109 
1110 static void init_dequant4_coeff_table(SVQ3Context *s)
1111 {
1112  int q, x;
1113  const int max_qp = 51;
1114 
1115  for (q = 0; q < max_qp + 1; q++) {
1116  int shift = ff_h264_quant_div6[q] + 2;
1117  int idx = ff_h264_quant_rem6[q];
1118  for (x = 0; x < 16; x++)
1119  s->dequant4_coeff[q][(x >> 2) | ((x << 2) & 0xF)] =
1120  ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * 16) << shift;
1121  }
1122 }
1123 
1124 static av_cold int svq3_decode_init(AVCodecContext *avctx)
1125 {
1126  SVQ3Context *s = avctx->priv_data;
1127  int m, x, y;
1128  unsigned char *extradata;
1129  unsigned char *extradata_end;
1130  unsigned int size;
1131  int marker_found = 0;
1132 
1133  s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
1134  s->last_pic = av_mallocz(sizeof(*s->last_pic));
1135  s->next_pic = av_mallocz(sizeof(*s->next_pic));
1136  if (!s->next_pic || !s->last_pic || !s->cur_pic) {
1137  av_freep(&s->cur_pic);
1138  av_freep(&s->last_pic);
1139  av_freep(&s->next_pic);
1140  return AVERROR(ENOMEM);
1141  }
1142 
1143  s->cur_pic->f = av_frame_alloc();
1144  s->last_pic->f = av_frame_alloc();
1145  s->next_pic->f = av_frame_alloc();
1146  if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
1147  return AVERROR(ENOMEM);
1148 
1149  ff_h264dsp_init(&s->h264dsp, 8, 1);
1150  ff_h264_pred_init(&s->hpc, AV_CODEC_ID_SVQ3, 8, 1);
1151  ff_videodsp_init(&s->vdsp, 8);
1152 
1153  ff_hpeldsp_init(&s->hdsp, avctx->flags);
1154  ff_tpeldsp_init(&s->tdsp);
1155 
1156  avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
1157  avctx->color_range = AVCOL_RANGE_JPEG;
1158 
1159  s->avctx = avctx;
1160  s->halfpel_flag = 1;
1161  s->thirdpel_flag = 1;
1162  s->unknown_flag = 0;
1163 
1164  /* prowl for the "SEQH" marker in the extradata */
1165  extradata = (unsigned char *)avctx->extradata;
1166  extradata_end = avctx->extradata + avctx->extradata_size;
1167  if (extradata) {
1168  for (m = 0; m + 8 < avctx->extradata_size; m++) {
1169  if (!memcmp(extradata, "SEQH", 4)) {
1170  marker_found = 1;
1171  break;
1172  }
1173  extradata++;
1174  }
1175  }
1176 
1177  /* if a match was found, parse the extra data */
1178  if (marker_found) {
1179  GetBitContext gb;
1180  int frame_size_code;
1181 
1182  size = AV_RB32(&extradata[4]);
1183  if (size > extradata_end - extradata - 8)
1184  return AVERROR_INVALIDDATA;
1185  init_get_bits(&gb, extradata + 8, size * 8);
1186 
1187  /* 'frame size code' and optional 'width, height' */
1188  frame_size_code = get_bits(&gb, 3);
1189  switch (frame_size_code) {
1190  case 0:
1191  avctx->width = 160;
1192  avctx->height = 120;
1193  break;
1194  case 1:
1195  avctx->width = 128;
1196  avctx->height = 96;
1197  break;
1198  case 2:
1199  avctx->width = 176;
1200  avctx->height = 144;
1201  break;
1202  case 3:
1203  avctx->width = 352;
1204  avctx->height = 288;
1205  break;
1206  case 4:
1207  avctx->width = 704;
1208  avctx->height = 576;
1209  break;
1210  case 5:
1211  avctx->width = 240;
1212  avctx->height = 180;
1213  break;
1214  case 6:
1215  avctx->width = 320;
1216  avctx->height = 240;
1217  break;
1218  case 7:
1219  avctx->width = get_bits(&gb, 12);
1220  avctx->height = get_bits(&gb, 12);
1221  break;
1222  }
1223 
1224  s->halfpel_flag = get_bits1(&gb);
1225  s->thirdpel_flag = get_bits1(&gb);
1226 
1227  /* unknown fields */
1228  skip_bits1(&gb);
1229  skip_bits1(&gb);
1230  skip_bits1(&gb);
1231  skip_bits1(&gb);
1232 
1233  s->low_delay = get_bits1(&gb);
1234 
1235  /* unknown field */
1236  skip_bits1(&gb);
1237 
1238  while (get_bits1(&gb))
1239  skip_bits(&gb, 8);
1240 
1241  s->unknown_flag = get_bits1(&gb);
1242  avctx->has_b_frames = !s->low_delay;
1243  if (s->unknown_flag) {
1244 #if CONFIG_ZLIB
1245  unsigned watermark_width = get_interleaved_ue_golomb(&gb);
1246  unsigned watermark_height = get_interleaved_ue_golomb(&gb);
1247  int u1 = get_interleaved_ue_golomb(&gb);
1248  int u2 = get_bits(&gb, 8);
1249  int u3 = get_bits(&gb, 2);
1250  int u4 = get_interleaved_ue_golomb(&gb);
1251  unsigned long buf_len = watermark_width *
1252  watermark_height * 4;
1253  int offset = get_bits_count(&gb) + 7 >> 3;
1254  uint8_t *buf;
1255 
1256  if (watermark_height > 0 &&
1257  (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
1258  return -1;
1259 
1260  buf = av_malloc(buf_len);
1261  av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1262  watermark_width, watermark_height);
1263  av_log(avctx, AV_LOG_DEBUG,
1264  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1265  u1, u2, u3, u4, offset);
1266  if (uncompress(buf, &buf_len, extradata + 8 + offset,
1267  size - offset) != Z_OK) {
1268  av_log(avctx, AV_LOG_ERROR,
1269  "could not uncompress watermark logo\n");
1270  av_free(buf);
1271  return -1;
1272  }
1273  s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1274  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1275  av_log(avctx, AV_LOG_DEBUG,
1276  "watermark key %#"PRIx32"\n", s->watermark_key);
1277  av_free(buf);
1278 #else
1279  av_log(avctx, AV_LOG_ERROR,
1280  "this svq3 file contains watermark which need zlib support compiled in\n");
1281  return -1;
1282 #endif
1283  }
1284  }
1285 
1286  s->mb_width = (avctx->width + 15) / 16;
1287  s->mb_height = (avctx->height + 15) / 16;
1288  s->mb_stride = s->mb_width + 1;
1289  s->mb_num = s->mb_width * s->mb_height;
1290  s->b_stride = 4 * s->mb_width;
1291  s->h_edge_pos = s->mb_width * 16;
1292  s->v_edge_pos = s->mb_height * 16;
1293 
1294  s->intra4x4_pred_mode = av_mallocz(s->mb_stride * 2 * 8);
1295  if (!s->intra4x4_pred_mode)
1296  return AVERROR(ENOMEM);
1297 
1298  s->mb2br_xy = av_mallocz(s->mb_stride * (s->mb_height + 1) *
1299  sizeof(*s->mb2br_xy));
1300  if (!s->mb2br_xy)
1301  return AVERROR(ENOMEM);
1302 
1303  for (y = 0; y < s->mb_height; y++)
1304  for (x = 0; x < s->mb_width; x++) {
1305  const int mb_xy = x + y * s->mb_stride;
1306 
1307  s->mb2br_xy[mb_xy] = 8 * (mb_xy % (2 * s->mb_stride));
1308  }
1309 
1310  init_dequant4_coeff_table(s);
1311 
1312  return 0;
1313 }
1314 
1315 static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
1316 {
1317  int i;
1318  for (i = 0; i < 2; i++) {
1319  av_buffer_unref(&pic->motion_val_buf[i]);
1320  av_buffer_unref(&pic->ref_index_buf[i]);
1321  }
1322  av_buffer_unref(&pic->mb_type_buf);
1323 
1324  av_frame_unref(pic->f);
1325 }
1326 
1327 static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
1328 {
1329  SVQ3Context *s = avctx->priv_data;
1330  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
1331  const int mb_array_size = s->mb_stride * s->mb_height;
1332  const int b4_stride = s->mb_width * 4 + 1;
1333  const int b4_array_size = b4_stride * s->mb_height * 4;
1334  int ret;
1335 
1336  if (!pic->motion_val_buf[0]) {
1337  int i;
1338 
1339  pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) * sizeof(uint32_t));
1340  if (!pic->mb_type_buf)
1341  return AVERROR(ENOMEM);
1342  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
1343 
1344  for (i = 0; i < 2; i++) {
1345  pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1346  pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1347  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1348  ret = AVERROR(ENOMEM);
1349  goto fail;
1350  }
1351 
1352  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1353  pic->ref_index[i] = pic->ref_index_buf[i]->data;
1354  }
1355  }
1356 
1357  ret = ff_get_buffer(avctx, pic->f,
1358  (s->pict_type != AV_PICTURE_TYPE_B) ?
1359  AV_GET_BUFFER_FLAG_REF : 0);
1360  if (ret < 0)
1361  goto fail;
1362 
1363  if (!s->edge_emu_buffer) {
1364  s->edge_emu_buffer = av_mallocz(pic->f->linesize[0] * 17);
1365  if (!s->edge_emu_buffer)
1366  return AVERROR(ENOMEM);
1367  }
1368 
1369  return 0;
1370 fail:
1371  free_picture(avctx, pic);
1372  return ret;
1373 }
1374 
1375 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1376  int *got_frame, AVPacket *avpkt)
1377 {
1378  const uint8_t *buf = avpkt->data;
1379  SVQ3Context *s = avctx->priv_data;
1380  int buf_size = avpkt->size;
1381  int ret, m, i;
1382 
1383  /* special case for last picture */
1384  if (buf_size == 0) {
1385  if (s->next_pic->f->data[0] && !s->low_delay && !s->last_frame_output) {
1386  ret = av_frame_ref(data, s->next_pic->f);
1387  if (ret < 0)
1388  return ret;
1389  s->last_frame_output = 1;
1390  *got_frame = 1;
1391  }
1392  return 0;
1393  }
1394 
1395  ret = init_get_bits(&s->gb, buf, 8 * buf_size);
1396  if (ret < 0)
1397  return ret;
1398 
1399  s->mb_x = s->mb_y = s->mb_xy = 0;
1400 
1401  if (svq3_decode_slice_header(avctx))
1402  return -1;
1403 
1404  if (s->pict_type != AV_PICTURE_TYPE_B)
1405  FFSWAP(SVQ3Frame*, s->next_pic, s->last_pic);
1406 
1407  av_frame_unref(s->cur_pic->f);
1408 
1409  /* for skipping the frame */
1410  s->cur_pic->f->pict_type = s->pict_type;
1411  s->cur_pic->f->key_frame = (s->pict_type == AV_PICTURE_TYPE_I);
1412 
1413  ret = get_buffer(avctx, s->cur_pic);
1414  if (ret < 0)
1415  return ret;
1416 
1417  for (i = 0; i < 16; i++) {
1418  s->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1419  s->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1420  }
1421  for (i = 0; i < 16; i++) {
1422  s->block_offset[16 + i] =
1423  s->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1424  s->block_offset[48 + 16 + i] =
1425  s->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1426  }
1427 
1428  if (s->pict_type != AV_PICTURE_TYPE_I) {
1429  if (!s->last_pic->f->data[0]) {
1430  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1431  ret = get_buffer(avctx, s->last_pic);
1432  if (ret < 0)
1433  return ret;
1434  memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
1435  memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
1436  s->last_pic->f->linesize[1]);
1437  memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
1438  s->last_pic->f->linesize[2]);
1439  }
1440 
1441  if (s->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
1442  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1443  ret = get_buffer(avctx, s->next_pic);
1444  if (ret < 0)
1445  return ret;
1446  memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
1447  memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
1448  s->next_pic->f->linesize[1]);
1449  memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
1450  s->next_pic->f->linesize[2]);
1451  }
1452  }
1453 
1454  if (avctx->debug & FF_DEBUG_PICT_INFO)
1455  av_log(s->avctx, AV_LOG_DEBUG,
1456  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1457  av_get_picture_type_char(s->pict_type),
1458  s->halfpel_flag, s->thirdpel_flag,
1459  s->adaptive_quant, s->qscale, s->slice_num);
1460 
1461  if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B ||
1462  avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I ||
1463  avctx->skip_frame >= AVDISCARD_ALL)
1464  return 0;
1465 
1466  if (s->next_p_frame_damaged) {
1467  if (s->pict_type == AV_PICTURE_TYPE_B)
1468  return 0;
1469  else
1470  s->next_p_frame_damaged = 0;
1471  }
1472 
1473  if (s->pict_type == AV_PICTURE_TYPE_B) {
1474  s->frame_num_offset = s->slice_num - s->prev_frame_num;
1475 
1476  if (s->frame_num_offset < 0)
1477  s->frame_num_offset += 256;
1478  if (s->frame_num_offset == 0 ||
1479  s->frame_num_offset >= s->prev_frame_num_offset) {
1480  av_log(s->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1481  return -1;
1482  }
1483  } else {
1484  s->prev_frame_num = s->frame_num;
1485  s->frame_num = s->slice_num;
1486  s->prev_frame_num_offset = s->frame_num - s->prev_frame_num;
1487 
1488  if (s->prev_frame_num_offset < 0)
1489  s->prev_frame_num_offset += 256;
1490  }
1491 
1492  for (m = 0; m < 2; m++) {
1493  int i;
1494  for (i = 0; i < 4; i++) {
1495  int j;
1496  for (j = -1; j < 4; j++)
1497  s->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1498  if (i < 3)
1499  s->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1500  }
1501  }
1502 
1503  for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
1504  for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
1505  unsigned mb_type;
1506  s->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
1507 
1508  if ((get_bits_left(&s->gb_slice)) <= 7) {
1509  if (((get_bits_count(&s->gb_slice) & 7) == 0 ||
1510  show_bits(&s->gb_slice, get_bits_left(&s->gb_slice) & 7) == 0)) {
1511 
1512  if (svq3_decode_slice_header(avctx))
1513  return -1;
1514  }
1515  /* TODO: support s->mb_skip_run */
1516  }
1517 
1518  mb_type = get_interleaved_ue_golomb(&s->gb_slice);
1519 
1520  if (s->pict_type == AV_PICTURE_TYPE_I)
1521  mb_type += 8;
1522  else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1523  mb_type += 4;
1524  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1525  av_log(s->avctx, AV_LOG_ERROR,
1526  "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
1527  return -1;
1528  }
1529 
1530  if (mb_type != 0)
1531  hl_decode_mb(s);
1532 
1533  if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay)
1534  s->cur_pic->mb_type[s->mb_x + s->mb_y * s->mb_stride] =
1535  (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1536  }
1537 
1538  ff_draw_horiz_band(avctx, s->cur_pic->f,
1539  s->last_pic->f->data[0] ? s->last_pic->f : NULL,
1540  16 * s->mb_y, 16, PICT_FRAME, 0,
1541  s->low_delay);
1542  }
1543 
1544  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
1545  ret = av_frame_ref(data, s->cur_pic->f);
1546  else if (s->last_pic->f->data[0])
1547  ret = av_frame_ref(data, s->last_pic->f);
1548  if (ret < 0)
1549  return ret;
1550 
1551  /* Do not output the last pic after seeking. */
1552  if (s->last_pic->f->data[0] || s->low_delay)
1553  *got_frame = 1;
1554 
1555  if (s->pict_type != AV_PICTURE_TYPE_B) {
1556  FFSWAP(SVQ3Frame*, s->cur_pic, s->next_pic);
1557  } else {
1558  av_frame_unref(s->cur_pic->f);
1559  }
1560 
1561  return buf_size;
1562 }
1563 
1564 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1565 {
1566  SVQ3Context *s = avctx->priv_data;
1567 
1568  free_picture(avctx, s->cur_pic);
1569  free_picture(avctx, s->next_pic);
1570  free_picture(avctx, s->last_pic);
1571  av_frame_free(&s->cur_pic->f);
1572  av_frame_free(&s->next_pic->f);
1573  av_frame_free(&s->last_pic->f);
1574  av_freep(&s->cur_pic);
1575  av_freep(&s->next_pic);
1576  av_freep(&s->last_pic);
1577  av_freep(&s->slice_buf);
1578  av_freep(&s->intra4x4_pred_mode);
1579  av_freep(&s->edge_emu_buffer);
1580  av_freep(&s->mb2br_xy);
1581 
1582  return 0;
1583 }
1584 
1585 AVCodec ff_svq3_decoder = {
1586  .name = "svq3",
1587  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1588  .type = AVMEDIA_TYPE_VIDEO,
1589  .id = AV_CODEC_ID_SVQ3,
1590  .priv_data_size = sizeof(SVQ3Context),
1591  .init = svq3_decode_init,
1592  .close = svq3_decode_end,
1593  .decode = svq3_decode_frame,
1594  .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
1595  AV_CODEC_CAP_DR1 |
1596  AV_CODEC_CAP_DELAY,
1597  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1598  AV_PIX_FMT_NONE},
1599 };
#define MB_TYPE_INTRA16x16
Definition: avcodec.h:1083
uint8_t pred_mode
Definition: h264data.h:35
#define MB_TYPE_SKIP
Definition: avcodec.h:1093
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:62
discard all frames except keyframes
Definition: avcodec.h:688
void(* h264_chroma_dc_dequant_idct)(int16_t *block, int qmul)
Definition: h264dsp.h:103
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:54
int cbp
Definition: svq3.c:109
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:106
int size
This structure describes decoded (raw) audio or video data.
Definition: frame.h:140
HpelDSPContext hdsp
Definition: svq3.c:87
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
const uint8_t ff_zigzag_scan[16]
Definition: mathtables.c:126
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:228
uint16_t ff_svq1_packet_checksum(const uint8_t *data, const int length, int value)
Definition: svq13.c:60
static int svq3_decode_block(GetBitContext *gb, int16_t *block, int index, const int type)
Definition: svq3.c:294
static void skip_bits_long(GetBitContext *s, int n)
Definition: get_bits.h:187
int prev_frame_num
Definition: svq3.c:113
static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n, int part_width, int list, int ref, int *const mx, int *const my)
Get the predicted MV.
Definition: svq3.c:373
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2127
int size
Definition: avcodec.h:1347
#define MB_TYPE_INTRA4x4
Definition: avcodec.h:1082
int mb_xy
Definition: svq3.c:119
av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (%s)\, len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic ? ac->func_descr_generic :ac->func_descr)
const uint8_t * buffer
Definition: get_bits.h:55
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:58
uint8_t * slice_buf
Definition: svq3.c:96
#define INVALID_VLC
Definition: golomb.h:38
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1621
int v_edge_pos
Definition: svq3.c:105
const uint8_t ff_h264_quant_rem6[QP_MAX_NUM+1]
Definition: h264data.c:174
discard all
Definition: avcodec.h:689
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:65
uint8_t run
Definition: svq3.c:203
#define FULLPEL_MODE
Definition: svq3.c:147
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:30
AVCodec.
Definition: avcodec.h:3120
#define AV_WN32A(p, v)
Definition: intreadwrite.h:469
#define AV_COPY32(d, s)
Definition: intreadwrite.h:517
int16_t mb[16 *48 *2]
Definition: svq3.c:140
Macro definitions for various function/variable attributes.
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:202
static int svq3_mc_dir(SVQ3Context *s, int size, int mode, int dir, int avg)
Definition: svq3.c:497
enum AVDiscard skip_frame
Definition: avcodec.h:2989
static int16_t block[64]
Definition: dct.c:97
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:863
int thirdpel_flag
Definition: svq3.c:99
int mb_num
Definition: svq3.c:121
const uint8_t ff_h264_dequant4_coeff_init[6][3]
Definition: h264data.c:152
static const uint8_t luma_dc_zigzag_scan[16]
Definition: svq3.c:168
uint8_t
static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:610
#define av_cold
Definition: attributes.h:66
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:68
#define DC_PRED8x8
Definition: h264pred.h:68
Definition: vf_drawbox.c:37
int block_offset[2 *(16 *3)]
Definition: svq3.c:144
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:2627
#define AV_RB32
Definition: intreadwrite.h:130
static av_always_inline int svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C, int i, int list, int part_width)
Definition: svq3.c:352
static av_always_inline int dctcoef_get(int16_t *mb, int index)
Definition: svq3.c:625
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:199
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1523
const uint8_t ff_h264_chroma_dc_scan[4]
Definition: h264data.c:54
Context for storing H.264 prediction functions.
Definition: h264pred.h:92
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
const char data[16]
Definition: mxf.c:70
uint8_t * data
Definition: avcodec.h:1346
thirdpel DSP context
Definition: tpeldsp.h:42
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:182
const IMbInfo ff_h264_i_mb_type_info[26]
Definition: h264data.c:66
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:50
AVBufferRef * ref_index_buf[2]
Definition: svq3.c:78
const uint8_t ff_h264_golomb_to_inter_cbp[48]
Definition: h264data.c:48
int ff_h264_check_intra4x4_pred_mode(int8_t *pred_mode_cache, void *logctx, int top_samples_available, int left_samples_available)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:103
thirdpel DSP functions
#define B
Definition: huffyuv.h:49
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:763
int prev_frame_num_offset
Definition: svq3.c:112
int low_delay
Definition: svq3.c:116
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
Definition: svq3.c:717
#define src
Definition: vp8dsp.c:254
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:526
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:124
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1715
void av_free(void *ptr)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc(). ...
Definition: mem.c:190
#define HALFPEL_MODE
Definition: svq3.c:148
AVCodecContext * avctx
Definition: svq3.c:83
int8_t * intra4x4_pred_mode
Definition: svq3.c:130
#define AVERROR(e)
Definition: error.h:43
uint8_t * edge_emu_buffer
Definition: svq3.c:136
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:80
av_cold void ff_tpeldsp_init(TpelDSPContext *c)
Definition: tpeldsp.c:312
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:148
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:145
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1503
int frame_num
Definition: svq3.c:110
int mb_x
Definition: svq3.c:118
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:221
unsigned int left_samples_available
Definition: svq3.c:134
const char * name
Name of the codec implementation.
Definition: avcodec.h:3127
#define IS_SKIP(a)
Definition: mpegutils.h:83
int chroma_pred_mode
Definition: svq3.c:126
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
#define PREDICT_MODE
Definition: svq3.c:150
#define fail()
Definition: checkasm.h:80
unsigned int topright_samples_available
Definition: svq3.c:133
Sorenson Vector Quantizer #1 (SVQ1) video codec.
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
Definition: svq3.c:68
useful rectangle filling function
AVBufferRef * motion_val_buf[2]
Definition: svq3.c:71
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:37
tpel_mc_func avg_tpel_pixels_tab[11]
Definition: tpeldsp.h:54
Half-pel DSP context.
Definition: hpeldsp.h:45
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: avcodec.h:832
SVQ3Frame * cur_pic
Definition: svq3.c:91
Context for storing H.264 DSP functions.
Definition: h264dsp.h:41
uint32_t dequant4_coeff[QP_MAX_NUM+1][16]
Definition: svq3.c:143
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:201
int16_t(*[2] motion_val)[2]
Definition: svq3.c:72
#define FFMIN(a, b)
Definition: common.h:66
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range.
Definition: pixfmt.h:71
int width
picture width / height.
Definition: avcodec.h:1580
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: avconv.c:1288
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t buf_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:52
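A hedged sketch of the usual call pattern around this hook when a motion-compensated read would cross the picture border (field names follow SVQ3Context; v_edge_pos is assumed to exist alongside h_edge_pos, and the exact bounds test in svq3_mc_dir_part() may differ):

    if (mx < 0 || my < 0 ||
        mx + width  > s->h_edge_pos ||
        my + height > s->v_edge_pos) {
        /* Rebuild the block in a scratch buffer with replicated border
         * samples, then source the interpolation from the copy. */
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                 linesize, linesize,
                                 width + 1, height + 1,
                                 mx, my, s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;
    }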
int32_t
GetBitContext gb_slice
Definition: svq3.c:95
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:250
static av_cold int svq3_decode_init(AVCodecContext *avctx)
Definition: svq3.c:1124
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color)
Definition: avplay.c:392
tpel_mc_func put_tpel_pixels_tab[11]
Thirdpel motion compensation with rounding (a + b + 1) >> 1.
Definition: tpeldsp.h:53
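The "(a + b + 1) >> 1" quoted above is the round-half-up average of two samples; a small self-contained illustration of the difference it makes:

    #include <stdint.h>

    /* avg_round(3, 4) == 4, while the truncating (a + b) >> 1 gives 3;
     * the +1 biases ties upward, matching the comment above. */
    static inline uint8_t avg_round(uint8_t a, uint8_t b)
    {
        return (uint8_t)((a + b + 1) >> 1);
    }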
H.264 / AVC / MPEG-4 part10 codec.
int b_stride
Definition: svq3.c:122
#define AV_RL32
Definition: intreadwrite.h:146
H264PredContext hpc
Definition: svq3.c:86
int last_frame_output
Definition: svq3.c:106
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, const int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:402
int next_p_frame_damaged
Definition: svq3.c:103
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:365
#define IS_INTRA16x16(a)
Definition: mpegutils.h:78
static const int8_t mv[256][2]
Definition: 4xm.c:75
H264DSPContext h264dsp
Definition: svq3.c:85
NULL
Definition: eval.c:55
Half-pel DSP functions.
AVCodec ff_svq3_decoder
Definition: svq3.c:1585
GetBitContext gb
Definition: svq3.c:94
static int width
Definition: utils.c:156
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:158
int debug
debug
Definition: avcodec.h:2626
int intra16x16_pred_mode
Definition: svq3.c:127
main external API structure.
Definition: avcodec.h:1409
uint8_t * data
The data buffer.
Definition: buffer.h:89
#define QP_MAX_NUM
Definition: h264.h:27
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: utils.c:589
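A hedged sketch of how a decoder typically requests a frame through this call (error handling trimmed; the real allocation path for SVQ3 lives in get_buffer() at svq3.c:1327):

    int ret = ff_get_buffer(avctx, pic->f, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0)
        return ret;   /* frame buffers could not be allocated */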
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
static av_always_inline uint32_t pack16to32(int a, int b)
Definition: h264dec.h:638
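pack16to32() lets two 16-bit motion-vector components be written as one 32-bit store into mv_cache; a hedged note on the layout (on little-endian builds the first argument ends up in the low half, big-endian builds swap the halves):

    /* Illustrative: mx and my land as two consecutive int16_t values. */
    uint32_t packed = pack16to32(mx, my);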
static const uint8_t scan8[16 *3+3]
Definition: h264dec.h:622
int extradata_size
Definition: avcodec.h:1524
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
int qscale
Definition: svq3.c:108
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:82
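A hedged sketch of the allocation pattern used for the per-frame side buffers above (mb_array_size is an illustrative local; the real sizes and pointer offsets are set up in get_buffer() at svq3.c:1327 and may differ):

    pic->mb_type_buf = av_buffer_allocz(mb_array_size * sizeof(uint32_t));
    if (!pic->mb_type_buf)
        return AVERROR(ENOMEM);          /* buffer is zero-initialized on success */
    pic->mb_type = (uint32_t *)pic->mb_type_buf->data;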
AVBufferRef * mb_type_buf
Definition: svq3.c:74
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:267
const uint8_t ff_h264_golomb_to_intra4x4_cbp[48]
Definition: h264data.c:42
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:292
int mb_height
Definition: svq3.c:120
enum AVPictureType pict_type
Definition: svq3.c:115
const uint8_t ff_h264_quant_div6[QP_MAX_NUM+1]
Definition: h264data.c:180
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:259
int index
Definition: gxfenc.c:72
static void svq3_mc_dir_part(SVQ3Context *s, int x, int y, int width, int height, int mx, int my, int dxy, int thirdpel, int dir, int avg)
Definition: svq3.c:422
uint32_t * mb_type
Definition: svq3.c:75
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:362
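A hedged, illustrative round trip through the bit-reader helpers listed on this page (buf, buf_size and the bit layout are assumptions, not the SVQ3 slice syntax):

    GetBitContext gb;
    int flag, remaining;

    if (init_get_bits(&gb, buf, 8 * buf_size) < 0)   /* size is given in bits */
        return AVERROR_INVALIDDATA;

    flag      = get_bits1(&gb);      /* consume a single flag bit       */
    skip_bits(&gb, 2);               /* discard two bits                */
    remaining = get_bits_left(&gb);  /* bits still unread in the buffer */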
static void init_dequant4_coeff_table(SVQ3Context *s)
Definition: svq3.c:1110
#define MB_TYPE_16x16
Definition: avcodec.h:1085
static av_cold int svq3_decode_end(AVCodecContext *avctx)
Definition: svq3.c:1564
#define mid_pred
Definition: mathops.h:99
int8_t ref_cache[2][5 *8]
Definition: svq3.c:139
static const uint8_t svq3_pred_0[25][2]
Definition: svq3.c:175
int mb_y
Definition: svq3.c:118
AVPictureType
Definition: avutil.h:259
#define IS_INTER(a)
Definition: mpegutils.h:81
int slice_num
Definition: svq3.c:107
#define u(width,...)
AVFrame * f
Definition: svq3.c:69
int unknown_flag
Definition: svq3.c:100
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:257
SVQ3Frame * last_pic
Definition: svq3.c:93
VideoDSPContext vdsp
Definition: svq3.c:89
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:302
static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1315
uint32_t * mb2br_xy
Definition: svq3.c:124
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:146
uint8_t level
Definition: svq3.c:204
Definition: vp9.h:58
#define AV_ZERO128(d)
Definition: intreadwrite.h:553
int height
Definition: gxfenc.c:72
static const struct @78 svq3_dct_tables[2][16]
A reference to a data buffer.
Definition: buffer.h:81
discard all non-reference frames
Definition: avcodec.h:686
static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:630
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:394
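A hedged sketch of the grow-only reuse pattern this helper enables (scratch, scratch_size and the 17 * linesize bound are illustrative, not the fields svq3.c actually tracks):

    /* Reallocates only when 17 * linesize exceeds the tracked capacity;
     * otherwise the existing buffer is kept as-is. */
    uint8_t *scratch      = NULL;
    unsigned scratch_size = 0;

    av_fast_malloc(&scratch, &scratch_size, 17 * linesize);
    if (!scratch)
        return AVERROR(ENOMEM);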
uint8_t non_zero_count_cache[15 *8]
Definition: svq3.c:142
uint8_t cbp
Definition: h264data.h:36
common internal api header.
static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1327
int mb_stride
Definition: svq3.c:121
int ff_h264_check_intra_pred_mode(void *logctx, int top_samples_available, int left_samples_available, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the available blocks.
Definition: h264_parse.c:151
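A hedged paraphrase of how this check is typically applied to a decoded 16x16 intra mode (the exact call site in svq3_decode_mb() may differ):

    int mode = ff_h264_check_intra_pred_mode(avctx,
                                             s->top_samples_available,
                                             s->left_samples_available,
                                             s->intra16x16_pred_mode, 0);
    if (mode < 0)
        return -1;                    /* mode unusable with these neighbours */
    s->intra16x16_pred_mode = mode;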
int16_t mb_luma_dc[3][16 *2]
Definition: svq3.c:141
int h_edge_pos
Definition: svq3.c:104
Bi-dir predicted.
Definition: avutil.h:262
static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
Definition: svq3.c:219
#define stride
int frame_num_offset
Definition: svq3.c:111
static av_cold int init(AVCodecParserContext *s)
Definition: h264_parser.c:582
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:638
#define IS_INTRA(x, y)
static const uint32_t svq3_dequant_coeff[32]
Definition: svq3.c:212
void * priv_data
Definition: avcodec.h:1451
#define THIRDPEL_MODE
Definition: svq3.c:149
#define PICT_FRAME
Definition: mpegutils.h:39
unsigned int top_samples_available
Definition: svq3.c:132
#define IS_INTRA4x4(a)
Definition: mpegutils.h:77
static void hl_decode_mb(SVQ3Context *s)
Definition: svq3.c:669
const uint8_t ff_h264_chroma_qp[3][QP_MAX_NUM+1]
Definition: h264data.c:199
#define av_log2
Definition: intmath.h:85
static int svq3_decode_slice_header(AVCodecContext *avctx)
Definition: svq3.c:1016
#define PART_NOT_AVAILABLE
Definition: h264dec.h:382
int slice_size
Definition: svq3.c:97
int key_frame
1 -> keyframe, 0 -> not
Definition: frame.h:196
#define AV_ZERO32(d)
Definition: intreadwrite.h:545
TpelDSPContext tdsp
Definition: svq3.c:88
static const uint8_t svq3_scan[16]
Definition: svq3.c:161
#define AV_RN16A(p)
Definition: intreadwrite.h:453
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: svq3.c:129
int mb_width
Definition: svq3.c:120
#define AV_WL32(p, val)
Definition: intreadwrite.h:263
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:57
static const int8_t svq3_pred_1[6][6][5]
Definition: svq3.c:187
static void svq3_add_idct_c(uint8_t *dst, int16_t *block, int stride, int qp, int dc)
Definition: svq3.c:254
uint32_t watermark_key
Definition: svq3.c:101
#define av_always_inline
Definition: attributes.h:40
SVQ3Frame * next_pic
Definition: svq3.c:92
#define FFSWAP(type, a, b)
Definition: common.h:69
static unsigned get_interleaved_ue_golomb(GetBitContext *gb)
Definition: golomb.h:111
exp golomb vlc stuff
AVPixelFormat
Pixel format.
Definition: pixfmt.h:57
This structure stores compressed data.
Definition: avcodec.h:1323
static int svq3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: svq3.c:1375
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1183
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:211
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:838
Predicted.
Definition: avutil.h:261
int halfpel_flag
Definition: svq3.c:98
int adaptive_quant
Definition: svq3.c:102
int8_t * ref_index[2]
Definition: svq3.c:79
int16_t mv_cache[2][5 *8][2]
Definition: svq3.c:138