mpegvideo.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
35 #include "avcodec.h"
36 #include "blockdsp.h"
37 #include "idctdsp.h"
38 #include "internal.h"
39 #include "mathops.h"
40 #include "mpeg_er.h"
41 #include "mpegutils.h"
42 #include "mpegvideo.h"
43 #include "mpegvideodata.h"
44 #include "mjpegenc.h"
45 #include "msmpeg4.h"
46 #include "qpeldsp.h"
47 #include "xvmc_internal.h"
48 #include "thread.h"
49 #include "wmv2.h"
50 #include <limits.h>
51 
52 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
53  int16_t *block, int n, int qscale)
54 {
55  int i, level, nCoeffs;
56  const uint16_t *quant_matrix;
57 
58  nCoeffs= s->block_last_index[n];
59 
60  if (n < 4)
61  block[0] = block[0] * s->y_dc_scale;
62  else
63  block[0] = block[0] * s->c_dc_scale;
64  /* XXX: only MPEG-1 */
65  quant_matrix = s->intra_matrix;
66  for(i=1;i<=nCoeffs;i++) {
67  int j= s->intra_scantable.permutated[i];
68  level = block[j];
69  if (level) {
70  if (level < 0) {
71  level = -level;
72  level = (int)(level * qscale * quant_matrix[j]) >> 3;
73  level = (level - 1) | 1;
74  level = -level;
75  } else {
76  level = (int)(level * qscale * quant_matrix[j]) >> 3;
77  level = (level - 1) | 1;
78  }
79  block[j] = level;
80  }
81  }
82 }
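/* Editorial aside, not part of the original file: a minimal, self-contained
 * sketch of the per-coefficient rule implemented above, with a worked number.
 * The helper name is hypothetical. */
static inline int mpeg1_intra_dequant_one(int level, int qscale, int matrix)
{
    int neg = level < 0;
    int v   = neg ? -level : level;
    v = (v * qscale * matrix) >> 3; /* scale by quantizer step and intra matrix */
    v = (v - 1) | 1;                /* mismatch control: force the result odd */
    return neg ? -v : v;            /* e.g. level=3, qscale=8, matrix=16 -> 47 */
}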
83 
84 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
85  int16_t *block, int n, int qscale)
86 {
87  int i, level, nCoeffs;
88  const uint16_t *quant_matrix;
89 
90  nCoeffs= s->block_last_index[n];
91 
92  quant_matrix = s->inter_matrix;
93  for(i=0; i<=nCoeffs; i++) {
94  int j= s->intra_scantable.permutated[i];
95  level = block[j];
96  if (level) {
97  if (level < 0) {
98  level = -level;
99  level = (((level << 1) + 1) * qscale *
100  ((int) (quant_matrix[j]))) >> 4;
101  level = (level - 1) | 1;
102  level = -level;
103  } else {
104  level = (((level << 1) + 1) * qscale *
105  ((int) (quant_matrix[j]))) >> 4;
106  level = (level - 1) | 1;
107  }
108  block[j] = level;
109  }
110  }
111 }
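/* Editorial aside, not part of the original file: the inter path above adds the
 * half-step offset of the MPEG-1 inter quantizer, ((2*|level|+1)*qscale*matrix)>>4,
 * before the same odd-forcing step. Hedged sketch, hypothetical helper name: */
static inline int mpeg1_inter_dequant_one(int level, int qscale, int matrix)
{
    int neg = level < 0;
    int v   = neg ? -level : level;
    v = (((v << 1) + 1) * qscale * matrix) >> 4; /* half-step offset, then scale */
    v = (v - 1) | 1;                             /* mismatch control: force odd */
    return neg ? -v : v;                         /* e.g. level=2, qscale=8, matrix=16 -> 39 */
}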
112 
113 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
114  int16_t *block, int n, int qscale)
115 {
116  int i, level, nCoeffs;
117  const uint16_t *quant_matrix;
118 
119  if(s->alternate_scan) nCoeffs= 63;
120  else nCoeffs= s->block_last_index[n];
121 
122  if (n < 4)
123  block[0] = block[0] * s->y_dc_scale;
124  else
125  block[0] = block[0] * s->c_dc_scale;
126  quant_matrix = s->intra_matrix;
127  for(i=1;i<=nCoeffs;i++) {
128  int j= s->intra_scantable.permutated[i];
129  level = block[j];
130  if (level) {
131  if (level < 0) {
132  level = -level;
133  level = (int)(level * qscale * quant_matrix[j]) >> 3;
134  level = -level;
135  } else {
136  level = (int)(level * qscale * quant_matrix[j]) >> 3;
137  }
138  block[j] = level;
139  }
140  }
141 }
142 
143 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
144  int16_t *block, int n, int qscale)
145 {
146  int i, level, nCoeffs;
147  const uint16_t *quant_matrix;
148  int sum=-1;
149 
150  if(s->alternate_scan) nCoeffs= 63;
151  else nCoeffs= s->block_last_index[n];
152 
153  if (n < 4)
154  block[0] = block[0] * s->y_dc_scale;
155  else
156  block[0] = block[0] * s->c_dc_scale;
157  quant_matrix = s->intra_matrix;
158  for(i=1;i<=nCoeffs;i++) {
159  int j= s->intra_scantable.permutated[i];
160  level = block[j];
161  if (level) {
162  if (level < 0) {
163  level = -level;
164  level = (int)(level * qscale * quant_matrix[j]) >> 3;
165  level = -level;
166  } else {
167  level = (int)(level * qscale * quant_matrix[j]) >> 3;
168  }
169  block[j] = level;
170  sum+=level;
171  }
172  }
173  block[63]^=sum&1;
174 }
175 
176 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
177  int16_t *block, int n, int qscale)
178 {
179  int i, level, nCoeffs;
180  const uint16_t *quant_matrix;
181  int sum=-1;
182 
183  if(s->alternate_scan) nCoeffs= 63;
184  else nCoeffs= s->block_last_index[n];
185 
186  quant_matrix = s->inter_matrix;
187  for(i=0; i<=nCoeffs; i++) {
188  int j= s->intra_scantable.permutated[i];
189  level = block[j];
190  if (level) {
191  if (level < 0) {
192  level = -level;
193  level = (((level << 1) + 1) * qscale *
194  ((int) (quant_matrix[j]))) >> 4;
195  level = -level;
196  } else {
197  level = (((level << 1) + 1) * qscale *
198  ((int) (quant_matrix[j]))) >> 4;
199  }
200  block[j] = level;
201  sum+=level;
202  }
203  }
204  block[63]^=sum&1;
205 }
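/* Editorial aside, not part of the original file: the MPEG-2 dequantizers above
 * replace MPEG-1's per-coefficient odd-forcing with a single parity correction --
 * if the sum of all reconstructed coefficients is even, the LSB of the last
 * coefficient is toggled so the block sum becomes odd. Sketch of that final
 * step in isolation (hypothetical helper name): */
static inline void mpeg2_mismatch_control(int16_t block[64], int sum)
{
    /* sum is seeded with -1 in the loops above, so (sum & 1) is 1 exactly when
     * the coefficient sum is even; the XOR then flips the parity via block[63]. */
    block[63] ^= sum & 1;
}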
206 
207 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
208  int16_t *block, int n, int qscale)
209 {
210  int i, level, qmul, qadd;
211  int nCoeffs;
212 
213  assert(s->block_last_index[n]>=0);
214 
215  qmul = qscale << 1;
216 
217  if (!s->h263_aic) {
218  if (n < 4)
219  block[0] = block[0] * s->y_dc_scale;
220  else
221  block[0] = block[0] * s->c_dc_scale;
222  qadd = (qscale - 1) | 1;
223  }else{
224  qadd = 0;
225  }
226  if(s->ac_pred)
227  nCoeffs=63;
228  else
229  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
230 
231  for(i=1; i<=nCoeffs; i++) {
232  level = block[i];
233  if (level) {
234  if (level < 0) {
235  level = level * qmul - qadd;
236  } else {
237  level = level * qmul + qadd;
238  }
239  block[i] = level;
240  }
241  }
242 }
243 
244 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
245  int16_t *block, int n, int qscale)
246 {
247  int i, level, qmul, qadd;
248  int nCoeffs;
249 
250  assert(s->block_last_index[n]>=0);
251 
252  qadd = (qscale - 1) | 1;
253  qmul = qscale << 1;
254 
255  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
256 
257  for(i=0; i<=nCoeffs; i++) {
258  level = block[i];
259  if (level) {
260  if (level < 0) {
261  level = level * qmul - qadd;
262  } else {
263  level = level * qmul + qadd;
264  }
265  block[i] = level;
266  }
267  }
268 }
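/* Editorial aside, not part of the original file: the H.263-family dequantizers
 * above are a plain multiply-add, level' = level*qmul +/- qadd, with
 * qmul = 2*qscale and qadd = (qscale - 1) | 1 (qadd is 0 for AIC intra blocks).
 * Hedged sketch with a hypothetical helper name: */
static inline int h263_dequant_one(int level, int qscale, int aic_intra)
{
    int qmul = qscale << 1;
    int qadd = aic_intra ? 0 : ((qscale - 1) | 1);
    if (!level)
        return 0;                       /* zero coefficients stay zero */
    return level < 0 ? level * qmul - qadd
                     : level * qmul + qadd;
    /* e.g. level=4, qscale=6 -> qmul=12, qadd=5 -> 53 */
}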
269 
270 /* init common dct for both encoder and decoder */
271 static av_cold int dct_init(MpegEncContext *s)
272 {
273  ff_blockdsp_init(&s->bdsp, s->avctx);
274  ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
275  ff_mpegvideodsp_init(&s->mdsp);
276  ff_mpegvideoencdsp_init(&s->mpvencdsp, s->avctx);
277 
278  s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
279  s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
280  s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
281  s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
282  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
283  if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
284  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
285  s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
286 
287  if (HAVE_INTRINSICS_NEON)
288  ff_mpv_common_init_neon(s);
289 
290  if (ARCH_ARM)
291  ff_mpv_common_init_arm(s);
292  if (ARCH_PPC)
293  ff_mpv_common_init_ppc(s);
294  if (ARCH_X86)
295  ff_mpv_common_init_x86(s);
296 
297  return 0;
298 }
299 
300 av_cold void ff_mpv_idct_init(MpegEncContext *s)
301 {
302  ff_idctdsp_init(&s->idsp, s->avctx);
303 
304  /* load & permutate scantables
305  * note: only wmv uses different ones
306  */
307  if (s->alternate_scan) {
308  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
309  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
310  } else {
311  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
312  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
313  }
314  ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
315  ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
316 }
317 
318 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
319 {
320  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 0,
322  s->mb_stride, s->mb_height, s->b8_stride,
323  &s->linesize, &s->uvlinesize);
324 }
325 
326 static int init_duplicate_context(MpegEncContext *s)
327 {
328  int y_size = s->b8_stride * (2 * s->mb_height + 1);
329  int c_size = s->mb_stride * (s->mb_height + 1);
330  int yc_size = y_size + 2 * c_size;
331  int i;
332 
333  s->sc.edge_emu_buffer =
334  s->me.scratchpad =
335  s->me.temp =
336  s->sc.rd_scratchpad =
337  s->sc.b_scratchpad =
338  s->sc.obmc_scratchpad = NULL;
339 
340  if (s->encoding) {
341  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
342  ME_MAP_SIZE * sizeof(uint32_t), fail)
344  ME_MAP_SIZE * sizeof(uint32_t), fail)
345  if (s->noise_reduction) {
347  2 * 64 * sizeof(int), fail)
348  }
349  }
350  FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
351  s->block = s->blocks[0];
352 
353  for (i = 0; i < 12; i++) {
354  s->pblocks[i] = &s->block[i];
355  }
356  if (s->avctx->codec_tag == AV_RL32("VCR2")) {
357  // exchange uv
358  int16_t (*tmp)[64];
359  tmp = s->pblocks[4];
360  s->pblocks[4] = s->pblocks[5];
361  s->pblocks[5] = tmp;
362  }
363 
364  if (s->out_format == FMT_H263) {
365  /* ac values */
367  yc_size * sizeof(int16_t) * 16, fail);
368  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
369  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
370  s->ac_val[2] = s->ac_val[1] + c_size;
371  }
372 
373  return 0;
374 fail:
375  return -1; // free() through ff_mpv_common_end()
376 }
377 
378 static void free_duplicate_context(MpegEncContext *s)
379 {
380  if (!s)
381  return;
382 
384  av_freep(&s->me.scratchpad);
385  s->me.temp =
386  s->sc.rd_scratchpad =
387  s->sc.b_scratchpad =
388  s->sc.obmc_scratchpad = NULL;
389 
390  av_freep(&s->dct_error_sum);
391  av_freep(&s->me.map);
392  av_freep(&s->me.score_map);
393  av_freep(&s->blocks);
394  av_freep(&s->ac_val_base);
395  s->block = NULL;
396 }
397 
398 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
399 {
400 #define COPY(a) bak->a = src->a
401  COPY(sc.edge_emu_buffer);
402  COPY(me.scratchpad);
403  COPY(me.temp);
404  COPY(sc.rd_scratchpad);
405  COPY(sc.b_scratchpad);
406  COPY(sc.obmc_scratchpad);
407  COPY(me.map);
408  COPY(me.score_map);
409  COPY(blocks);
410  COPY(block);
411  COPY(start_mb_y);
412  COPY(end_mb_y);
413  COPY(me.map_generation);
414  COPY(pb);
415  COPY(dct_error_sum);
416  COPY(dct_count[0]);
417  COPY(dct_count[1]);
418  COPY(ac_val_base);
419  COPY(ac_val[0]);
420  COPY(ac_val[1]);
421  COPY(ac_val[2]);
422 #undef COPY
423 }
424 
425 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
426 {
427  MpegEncContext bak;
428  int i, ret;
429  // FIXME copy only needed parts
430  // START_TIMER
431  backup_duplicate_context(&bak, dst);
432  memcpy(dst, src, sizeof(MpegEncContext));
433  backup_duplicate_context(dst, &bak);
434  for (i = 0; i < 12; i++) {
435  dst->pblocks[i] = &dst->block[i];
436  }
437  if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
438  // exchange uv
439  int16_t (*tmp)[64];
440  tmp = dst->pblocks[4];
441  dst->pblocks[4] = dst->pblocks[5];
442  dst->pblocks[5] = tmp;
443  }
444  if (!dst->sc.edge_emu_buffer &&
445  (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
446  &dst->sc, dst->linesize)) < 0) {
447  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
448  "scratch buffers.\n");
449  return ret;
450  }
451  // STOP_TIMER("update_duplicate_context")
452  // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
453  return 0;
454 }
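/* Editorial aside, not part of the original file: ff_update_duplicate_context()
 * above uses a backup/copy/restore idiom -- per-thread pointers are saved, the
 * whole context is memcpy'd from the source, and the saved fields are written
 * back so each slice thread keeps its own scratch buffers and MB-row range.
 * Generic sketch of the same pattern (all names hypothetical): */
typedef struct { void *scratch; int start_row, end_row; int shared_state; } demo_ctx;
static void demo_update_duplicate(demo_ctx *dst, const demo_ctx *src)
{
    demo_ctx bak = *dst;           /* 1. back up the per-thread fields        */
    *dst = *src;                   /* 2. bulk-copy everything from the source */
    dst->scratch   = bak.scratch;  /* 3. restore what must stay thread-local  */
    dst->start_row = bak.start_row;
    dst->end_row   = bak.end_row;
}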
455 
456 int ff_mpeg_update_thread_context(AVCodecContext *dst,
457  const AVCodecContext *src)
458 {
459  int i, ret;
460  MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
461 
462  if (dst == src || !s1->context_initialized)
463  return 0;
464 
465  // FIXME can parameters change on I-frames?
466  // in that case dst may need a reinit
467  if (!s->context_initialized) {
468  int err;
469  memcpy(s, s1, sizeof(MpegEncContext));
470 
471  s->avctx = dst;
472  s->bitstream_buffer = NULL;
474 
475  ff_mpv_idct_init(s);
476  if ((err = ff_mpv_common_init(s)) < 0)
477  return err;
478  }
479 
480  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
481  int err;
482  s->context_reinit = 0;
483  s->height = s1->height;
484  s->width = s1->width;
485  if ((err = ff_mpv_common_frame_size_change(s)) < 0)
486  return err;
487  }
488 
489  s->avctx->coded_height = s1->avctx->coded_height;
490  s->avctx->coded_width = s1->avctx->coded_width;
491  s->avctx->width = s1->avctx->width;
492  s->avctx->height = s1->avctx->height;
493 
494  s->coded_picture_number = s1->coded_picture_number;
495  s->picture_number = s1->picture_number;
496 
497  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
498  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
499  if (s1->picture[i].f->buf[0] &&
500  (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
501  return ret;
502  }
503 
504 #define UPDATE_PICTURE(pic)\
505 do {\
506  ff_mpeg_unref_picture(s->avctx, &s->pic);\
507  if (s1->pic.f->buf[0])\
508  ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
509  else\
510  ret = ff_update_picture_tables(&s->pic, &s1->pic);\
511  if (ret < 0)\
512  return ret;\
513 } while (0)
514 
515  UPDATE_PICTURE(current_picture);
517  UPDATE_PICTURE(next_picture);
518 
519 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
520  ((pic && pic >= old_ctx->picture && \
521  pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
522  &new_ctx->picture[pic - old_ctx->picture] : NULL)
523 
524  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
525  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
526  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
527 
528  // Error/bug resilience
529  s->next_p_frame_damaged = s1->next_p_frame_damaged;
530  s->workaround_bugs = s1->workaround_bugs;
531 
532  // MPEG-4 timing info
533  memcpy(&s->last_time_base, &s1->last_time_base,
534  (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
535  (char *) &s1->last_time_base);
536 
537  // B-frame info
538  s->max_b_frames = s1->max_b_frames;
539  s->low_delay = s1->low_delay;
540  s->droppable = s1->droppable;
541 
542  // DivX handling (doesn't work)
543  s->divx_packed = s1->divx_packed;
544 
545  if (s1->bitstream_buffer) {
546  if (s1->bitstream_buffer_size +
550  s1->allocated_bitstream_buffer_size);
551  s->bitstream_buffer_size = s1->bitstream_buffer_size;
552  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
553  s1->bitstream_buffer_size);
554  memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
556  }
557 
558  // linesize-dependent scratch buffer allocation
559  if (!s->sc.edge_emu_buffer)
560  if (s1->linesize) {
561  if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
562  &s->sc, s1->linesize) < 0) {
563  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
564  "scratch buffers.\n");
565  return AVERROR(ENOMEM);
566  }
567  } else {
568  av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
569  "be allocated due to unknown size.\n");
570  return AVERROR_BUG;
571  }
572 
573  // MPEG-2/interlacing info
574  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
575  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
576 
577  if (!s1->first_field) {
578  s->last_pict_type = s1->pict_type;
579  if (s1->current_picture_ptr)
580  s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
581  }
582 
583  return 0;
584 }
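/* Editorial aside, not part of the original file: REBASE_PICTURE above maps a
 * Picture pointer that points into the source context's picture[] array onto
 * the element with the same index in the destination context, and yields NULL
 * for anything outside that array. The underlying pointer arithmetic, sketched
 * with hypothetical names: */
static Picture *demo_rebase_picture(Picture *pic, Picture *new_base,
                                    Picture *old_base, int count)
{
    if (pic && pic >= old_base && pic < old_base + count)
        return new_base + (pic - old_base); /* same index, other array */
    return NULL;                            /* not an element of picture[] */
}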
585 
592 void ff_mpv_common_defaults(MpegEncContext *s)
593 {
594  s->y_dc_scale_table =
595  s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
596  s->chroma_qscale_table = ff_default_chroma_qscale_table;
597  s->progressive_frame = 1;
598  s->progressive_sequence = 1;
599  s->picture_structure = PICT_FRAME;
600 
601  s->coded_picture_number = 0;
602  s->picture_number = 0;
603 
604  s->f_code = 1;
605  s->b_code = 1;
606 
607  s->slice_context_count = 1;
608 }
609 
615 void ff_mpv_decode_defaults(MpegEncContext *s)
616 {
617  ff_mpv_common_defaults(s);
618 }
619 
623 static int init_context_frame(MpegEncContext *s)
624 {
625  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
626 
627  s->mb_width = (s->width + 15) / 16;
628  s->mb_stride = s->mb_width + 1;
629  s->b8_stride = s->mb_width * 2 + 1;
630  mb_array_size = s->mb_height * s->mb_stride;
631  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
632 
633  /* set default edge pos, will be overridden
634  * in decode_header if needed */
635  s->h_edge_pos = s->mb_width * 16;
636  s->v_edge_pos = s->mb_height * 16;
637 
638  s->mb_num = s->mb_width * s->mb_height;
639 
640  s->block_wrap[0] =
641  s->block_wrap[1] =
642  s->block_wrap[2] =
643  s->block_wrap[3] = s->b8_stride;
644  s->block_wrap[4] =
645  s->block_wrap[5] = s->mb_stride;
646 
647  y_size = s->b8_stride * (2 * s->mb_height + 1);
648  c_size = s->mb_stride * (s->mb_height + 1);
649  yc_size = y_size + 2 * c_size;
650 
651  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
652  fail); // error resilience code looks cleaner with this
653  for (y = 0; y < s->mb_height; y++)
654  for (x = 0; x < s->mb_width; x++)
655  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
656 
657  s->mb_index2xy[s->mb_height * s->mb_width] =
658  (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
659 
660  if (s->encoding) {
661  /* Allocate MV tables */
663  mv_table_size * 2 * sizeof(int16_t), fail);
665  mv_table_size * 2 * sizeof(int16_t), fail);
667  mv_table_size * 2 * sizeof(int16_t), fail);
669  mv_table_size * 2 * sizeof(int16_t), fail);
671  mv_table_size * 2 * sizeof(int16_t), fail);
673  mv_table_size * 2 * sizeof(int16_t), fail);
674  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
678  s->mb_stride + 1;
680  s->mb_stride + 1;
682 
683  /* Allocate MB type table */
684  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
685  sizeof(uint16_t), fail); // needed for encoding
686 
687  FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
688  sizeof(int), fail);
689 
691  mb_array_size * sizeof(float), fail);
693  mb_array_size * sizeof(float), fail);
694 
695  }
696 
697  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
699  /* interlaced direct mode decoding tables */
700  for (i = 0; i < 2; i++) {
701  int j, k;
702  for (j = 0; j < 2; j++) {
703  for (k = 0; k < 2; k++) {
705  s->b_field_mv_table_base[i][j][k],
706  mv_table_size * 2 * sizeof(int16_t),
707  fail);
708  s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
709  s->mb_stride + 1;
710  }
712  mb_array_size * 2 * sizeof(uint8_t), fail);
714  mv_table_size * 2 * sizeof(int16_t), fail);
715  s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
716  + s->mb_stride + 1;
717  }
719  mb_array_size * 2 * sizeof(uint8_t), fail);
720  }
721  }
722  if (s->out_format == FMT_H263) {
723  /* cbp values */
724  FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
725  s->coded_block = s->coded_block_base + s->b8_stride + 1;
726 
727  /* cbp, ac_pred, pred_dir */
729  mb_array_size * sizeof(uint8_t), fail);
731  mb_array_size * sizeof(uint8_t), fail);
732  }
733 
734  if (s->h263_pred || s->h263_plus || !s->encoding) {
735  /* dc values */
736  // MN: we need these for error resilience of intra-frames
738  yc_size * sizeof(int16_t), fail);
739  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
740  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
741  s->dc_val[2] = s->dc_val[1] + c_size;
742  for (i = 0; i < yc_size; i++)
743  s->dc_val_base[i] = 1024;
744  }
745 
746  /* which mb is an intra block */
747  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
748  memset(s->mbintra_table, 1, mb_array_size);
749 
750  /* init macroblock skip table */
751  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
752  // Note the + 1 is for a quicker MPEG-4 slice_end detection
753 
754  return ff_mpeg_er_init(s);
755 fail:
756  return AVERROR(ENOMEM);
757 }
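/* Editorial aside, not part of the original file: worked numbers for the
 * macroblock geometry computed at the top of init_context_frame(), assuming a
 * 720x576 frame (the values follow directly from the formulas above):
 *   mb_width  = (720 + 15) / 16 = 45
 *   mb_height = (576 + 15) / 16 = 36   (set earlier, in ff_mpv_common_init)
 *   mb_stride = 45 + 1          = 46   (one spare column simplifies edge checks)
 *   b8_stride = 45 * 2 + 1      = 91
 *   mb_num    = 45 * 36         = 1620
 */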
758 
763 av_cold int ff_mpv_common_init(MpegEncContext *s)
764 {
765  int i;
766  int nb_slices = (HAVE_THREADS &&
768  s->avctx->thread_count : 1;
769 
770  if (s->encoding && s->avctx->slices)
771  nb_slices = s->avctx->slices;
772 
774  s->mb_height = (s->height + 31) / 32 * 2;
775  else
776  s->mb_height = (s->height + 15) / 16;
777 
778  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
780  "decoding to AV_PIX_FMT_NONE is not supported.\n");
781  return -1;
782  }
783 
784  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
785  int max_slices;
786  if (s->mb_height)
787  max_slices = FFMIN(MAX_THREADS, s->mb_height);
788  else
789  max_slices = MAX_THREADS;
790  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
791  " reducing to %d\n", nb_slices, max_slices);
792  nb_slices = max_slices;
793  }
794 
795  if ((s->width || s->height) &&
796  av_image_check_size(s->width, s->height, 0, s->avctx))
797  return -1;
798 
799  dct_init(s);
800 
801  /* set chroma shifts */
803  &s->chroma_x_shift,
804  &s->chroma_y_shift);
805 
806  /* convert fourcc to upper case */
808 
810  MAX_PICTURE_COUNT * sizeof(Picture), fail);
811  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
812  s->picture[i].f = av_frame_alloc();
813  if (!s->picture[i].f)
814  goto fail;
815  }
816  memset(&s->next_picture, 0, sizeof(s->next_picture));
817  memset(&s->last_picture, 0, sizeof(s->last_picture));
818  memset(&s->current_picture, 0, sizeof(s->current_picture));
819  memset(&s->new_picture, 0, sizeof(s->new_picture));
821  if (!s->next_picture.f)
822  goto fail;
824  if (!s->last_picture.f)
825  goto fail;
827  if (!s->current_picture.f)
828  goto fail;
830  if (!s->new_picture.f)
831  goto fail;
832 
833  if (s->width && s->height) {
834  if (init_context_frame(s))
835  goto fail;
836 
837  s->parse_context.state = -1;
838  }
839 
840  s->context_initialized = 1;
841  s->thread_context[0] = s;
842 
843  if (s->width && s->height) {
844  if (nb_slices > 1) {
845  for (i = 1; i < nb_slices; i++) {
846  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
847  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
848  }
849 
850  for (i = 0; i < nb_slices; i++) {
851  if (init_duplicate_context(s->thread_context[i]) < 0)
852  goto fail;
853  s->thread_context[i]->start_mb_y =
854  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
855  s->thread_context[i]->end_mb_y =
856  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
857  }
858  } else {
859  if (init_duplicate_context(s) < 0)
860  goto fail;
861  s->start_mb_y = 0;
862  s->end_mb_y = s->mb_height;
863  }
864  s->slice_context_count = nb_slices;
865  }
866 
867  return 0;
868  fail:
870  return -1;
871 }
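/* Editorial aside, not part of the original file: the slice-thread row split
 * used above gives thread i the rows from (mb_height*i + n/2)/n up to
 * (mb_height*(i+1) + n/2)/n, i.e. boundaries rounded to the nearest MB row.
 * Hedged sketch with a hypothetical helper name and worked numbers: */
static void demo_slice_bounds(int mb_height, int nb_slices, int i,
                              int *start, int *end)
{
    *start = (mb_height *  i      + nb_slices / 2) / nb_slices;
    *end   = (mb_height * (i + 1) + nb_slices / 2) / nb_slices;
    /* e.g. mb_height=36, nb_slices=3 -> rows [0,12), [12,24), [24,36) */
}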
872 
878 static void free_context_frame(MpegEncContext *s)
879 {
880  int i, j, k;
881 
882  av_freep(&s->mb_type);
889  s->p_mv_table = NULL;
890  s->b_forw_mv_table = NULL;
891  s->b_back_mv_table = NULL;
894  s->b_direct_mv_table = NULL;
895  for (i = 0; i < 2; i++) {
896  for (j = 0; j < 2; j++) {
897  for (k = 0; k < 2; k++) {
898  av_freep(&s->b_field_mv_table_base[i][j][k]);
899  s->b_field_mv_table[i][j][k] = NULL;
900  }
901  av_freep(&s->b_field_select_table[i][j]);
902  av_freep(&s->p_field_mv_table_base[i][j]);
903  s->p_field_mv_table[i][j] = NULL;
904  }
906  }
907 
908  av_freep(&s->dc_val_base);
910  av_freep(&s->mbintra_table);
911  av_freep(&s->cbp_table);
913 
914  av_freep(&s->mbskip_table);
915 
918  av_freep(&s->mb_index2xy);
919  av_freep(&s->lambda_table);
920  av_freep(&s->cplx_tab);
921  av_freep(&s->bits_tab);
922 
923  s->linesize = s->uvlinesize = 0;
924 }
925 
926 int ff_mpv_common_frame_size_change(MpegEncContext *s)
927 {
928  int i, err = 0;
929 
930  if (s->slice_context_count > 1) {
931  for (i = 0; i < s->slice_context_count; i++) {
933  }
934  for (i = 1; i < s->slice_context_count; i++) {
935  av_freep(&s->thread_context[i]);
936  }
937  } else
939 
941 
942  if (s->picture)
943  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
944  s->picture[i].needs_realloc = 1;
945  }
946 
947  s->last_picture_ptr =
948  s->next_picture_ptr =
950 
951  // init
953  s->mb_height = (s->height + 31) / 32 * 2;
954  else
955  s->mb_height = (s->height + 15) / 16;
956 
957  if ((s->width || s->height) &&
958  (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
959  goto fail;
960 
961  if ((err = init_context_frame(s)))
962  goto fail;
963 
964  s->thread_context[0] = s;
965 
966  if (s->width && s->height) {
967  int nb_slices = s->slice_context_count;
968  if (nb_slices > 1) {
969  for (i = 1; i < nb_slices; i++) {
970  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
971  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
972  }
973 
974  for (i = 0; i < nb_slices; i++) {
975  if ((err = init_duplicate_context(s->thread_context[i])) < 0)
976  goto fail;
977  s->thread_context[i]->start_mb_y =
978  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
979  s->thread_context[i]->end_mb_y =
980  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
981  }
982  } else {
983  if (init_duplicate_context(s) < 0)
984  goto fail;
985  s->start_mb_y = 0;
986  s->end_mb_y = s->mb_height;
987  }
988  s->slice_context_count = nb_slices;
989  }
990 
991  return 0;
992  fail:
994  return err;
995 }
996 
997 /* init common structure for both encoder and decoder */
998 void ff_mpv_common_end(MpegEncContext *s)
999 {
1000  int i;
1001 
1002  if (s->slice_context_count > 1) {
1003  for (i = 0; i < s->slice_context_count; i++) {
1005  }
1006  for (i = 1; i < s->slice_context_count; i++) {
1007  av_freep(&s->thread_context[i]);
1008  }
1009  s->slice_context_count = 1;
1010  } else free_duplicate_context(s);
1011 
1013  s->parse_context.buffer_size = 0;
1014 
1017 
1018  if (s->picture) {
1019  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1021  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1022  av_frame_free(&s->picture[i].f);
1023  }
1024  }
1025  av_freep(&s->picture);
1038 
1039  free_context_frame(s);
1040 
1041  s->context_initialized = 0;
1042  s->last_picture_ptr =
1043  s->next_picture_ptr =
1045  s->linesize = s->uvlinesize = 0;
1046 }
1047 
1052 int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1053 {
1054  int i, ret;
1055  Picture *pic;
1056  s->mb_skipped = 0;
1057 
1058  /* mark & release old frames */
1059  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1061  s->last_picture_ptr->f->buf[0]) {
1063  }
1064 
1065  /* release forgotten pictures */
1066  /* if (MPEG-124 / H.263) */
1067  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1068  if (&s->picture[i] != s->last_picture_ptr &&
1069  &s->picture[i] != s->next_picture_ptr &&
1070  s->picture[i].reference && !s->picture[i].needs_realloc) {
1071  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1072  }
1073  }
1074 
1076 
1077  /* release non reference frames */
1078  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1079  if (!s->picture[i].reference)
1080  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1081  }
1082 
1083  if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1084  // we already have an unused image
1085  // (maybe it was set before reading the header)
1086  pic = s->current_picture_ptr;
1087  } else {
1088  i = ff_find_unused_picture(s->avctx, s->picture, 0);
1089  if (i < 0) {
1090  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1091  return i;
1092  }
1093  pic = &s->picture[i];
1094  }
1095 
1096  pic->reference = 0;
1097  if (!s->droppable) {
1098  if (s->pict_type != AV_PICTURE_TYPE_B)
1099  pic->reference = 3;
1100  }
1101 
1103 
1104  if (alloc_picture(s, pic, 0) < 0)
1105  return -1;
1106 
1107  s->current_picture_ptr = pic;
1108  // FIXME use only the vars from current_pic
1110  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1112  if (s->picture_structure != PICT_FRAME)
1115  }
1119 
1121  // if (s->avctx->flags && AV_CODEC_FLAG_QSCALE)
1122  // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1124 
1125  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1126  s->current_picture_ptr)) < 0)
1127  return ret;
1128 
1129  if (s->pict_type != AV_PICTURE_TYPE_B) {
1131  if (!s->droppable)
1133  }
1134  ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1136  s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1137  s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1139  s->pict_type, s->droppable);
1140 
1141  if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1142  (s->pict_type != AV_PICTURE_TYPE_I ||
1143  s->picture_structure != PICT_FRAME)) {
1144  int h_chroma_shift, v_chroma_shift;
1146  &h_chroma_shift, &v_chroma_shift);
1147  if (s->pict_type != AV_PICTURE_TYPE_I)
1148  av_log(avctx, AV_LOG_ERROR,
1149  "warning: first frame is no keyframe\n");
1150  else if (s->picture_structure != PICT_FRAME)
1151  av_log(avctx, AV_LOG_INFO,
1152  "allocate dummy last picture for field based first keyframe\n");
1153 
1154  /* Allocate a dummy frame */
1155  i = ff_find_unused_picture(s->avctx, s->picture, 0);
1156  if (i < 0) {
1157  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1158  return i;
1159  }
1160  s->last_picture_ptr = &s->picture[i];
1161 
1162  s->last_picture_ptr->reference = 3;
1164 
1165  if (alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1166  s->last_picture_ptr = NULL;
1167  return -1;
1168  }
1169 
1170  memset(s->last_picture_ptr->f->data[0], 0,
1171  avctx->height * s->last_picture_ptr->f->linesize[0]);
1172  memset(s->last_picture_ptr->f->data[1], 0x80,
1173  (avctx->height >> v_chroma_shift) *
1174  s->last_picture_ptr->f->linesize[1]);
1175  memset(s->last_picture_ptr->f->data[2], 0x80,
1176  (avctx->height >> v_chroma_shift) *
1177  s->last_picture_ptr->f->linesize[2]);
1178 
1179  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1180  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1181  }
1182  if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1183  s->pict_type == AV_PICTURE_TYPE_B) {
1184  /* Allocate a dummy frame */
1185  i = ff_find_unused_picture(s->avctx, s->picture, 0);
1186  if (i < 0) {
1187  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1188  return i;
1189  }
1190  s->next_picture_ptr = &s->picture[i];
1191 
1192  s->next_picture_ptr->reference = 3;
1194 
1195  if (alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1196  s->next_picture_ptr = NULL;
1197  return -1;
1198  }
1199  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1200  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1201  }
1202 
1203  if (s->last_picture_ptr) {
1205  if (s->last_picture_ptr->f->buf[0] &&
1206  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1207  s->last_picture_ptr)) < 0)
1208  return ret;
1209  }
1210  if (s->next_picture_ptr) {
1212  if (s->next_picture_ptr->f->buf[0] &&
1213  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1214  s->next_picture_ptr)) < 0)
1215  return ret;
1216  }
1217 
1218  if (s->pict_type != AV_PICTURE_TYPE_I &&
1219  !(s->last_picture_ptr && s->last_picture_ptr->f->buf[0])) {
1220  av_log(s, AV_LOG_ERROR,
1221  "Non-reference picture received and no reference available\n");
1222  return AVERROR_INVALIDDATA;
1223  }
1224 
1225  if (s->picture_structure!= PICT_FRAME) {
1226  int i;
1227  for (i = 0; i < 4; i++) {
1229  s->current_picture.f->data[i] +=
1230  s->current_picture.f->linesize[i];
1231  }
1232  s->current_picture.f->linesize[i] *= 2;
1233  s->last_picture.f->linesize[i] *= 2;
1234  s->next_picture.f->linesize[i] *= 2;
1235  }
1236  }
1237 
1238  /* set dequantizer, we can't do it during init as
1239  * it might change for MPEG-4 and we can't do it in the header
1240  * decode as init is not called for MPEG-4 there yet */
1241  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1244  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1247  } else {
1250  }
1251 
1252 #if FF_API_XVMC
1254  if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1255  return ff_xvmc_field_start(s, avctx);
1257 #endif /* FF_API_XVMC */
1258 
1259  return 0;
1260 }
1261 
1262 /* called after a frame has been decoded. */
1263 void ff_mpv_frame_end(MpegEncContext *s)
1264 {
1265 #if FF_API_XVMC
1267  /* redraw edges for the frame if decoding didn't complete */
1268  // just to make sure that all data is rendered.
1269  if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1270  ff_xvmc_field_end(s);
1271  } else
1273 #endif /* FF_API_XVMC */
1274 
1275  emms_c();
1276 
1277  if (s->current_picture.reference)
1279 }
1280 
1284 void ff_print_debug_info(MpegEncContext *s, Picture *p)
1285 {
1286  AVFrame *pict;
1287  if (s->avctx->hwaccel || !p || !p->mb_type)
1288  return;
1289  pict = p->f;
1290 
1292  int x,y;
1293 
1294  av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1295  switch (pict->pict_type) {
1296  case AV_PICTURE_TYPE_I:
1297  av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1298  break;
1299  case AV_PICTURE_TYPE_P:
1300  av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1301  break;
1302  case AV_PICTURE_TYPE_B:
1303  av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1304  break;
1305  case AV_PICTURE_TYPE_S:
1306  av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1307  break;
1308  case AV_PICTURE_TYPE_SI:
1309  av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1310  break;
1311  case AV_PICTURE_TYPE_SP:
1312  av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1313  break;
1314  }
1315  for (y = 0; y < s->mb_height; y++) {
1316  for (x = 0; x < s->mb_width; x++) {
1317  if (s->avctx->debug & FF_DEBUG_SKIP) {
1318  int count = s->mbskip_table[x + y * s->mb_stride];
1319  if (count > 9)
1320  count = 9;
1321  av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1322  }
1323  if (s->avctx->debug & FF_DEBUG_QP) {
1324  av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1325  p->qscale_table[x + y * s->mb_stride]);
1326  }
1327  if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1328  int mb_type = p->mb_type[x + y * s->mb_stride];
1329  // Type & MV direction
1330  if (IS_PCM(mb_type))
1331  av_log(s->avctx, AV_LOG_DEBUG, "P");
1332  else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1333  av_log(s->avctx, AV_LOG_DEBUG, "A");
1334  else if (IS_INTRA4x4(mb_type))
1335  av_log(s->avctx, AV_LOG_DEBUG, "i");
1336  else if (IS_INTRA16x16(mb_type))
1337  av_log(s->avctx, AV_LOG_DEBUG, "I");
1338  else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1339  av_log(s->avctx, AV_LOG_DEBUG, "d");
1340  else if (IS_DIRECT(mb_type))
1341  av_log(s->avctx, AV_LOG_DEBUG, "D");
1342  else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1343  av_log(s->avctx, AV_LOG_DEBUG, "g");
1344  else if (IS_GMC(mb_type))
1345  av_log(s->avctx, AV_LOG_DEBUG, "G");
1346  else if (IS_SKIP(mb_type))
1347  av_log(s->avctx, AV_LOG_DEBUG, "S");
1348  else if (!USES_LIST(mb_type, 1))
1349  av_log(s->avctx, AV_LOG_DEBUG, ">");
1350  else if (!USES_LIST(mb_type, 0))
1351  av_log(s->avctx, AV_LOG_DEBUG, "<");
1352  else {
1353  assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1354  av_log(s->avctx, AV_LOG_DEBUG, "X");
1355  }
1356 
1357  // segmentation
1358  if (IS_8X8(mb_type))
1359  av_log(s->avctx, AV_LOG_DEBUG, "+");
1360  else if (IS_16X8(mb_type))
1361  av_log(s->avctx, AV_LOG_DEBUG, "-");
1362  else if (IS_8X16(mb_type))
1363  av_log(s->avctx, AV_LOG_DEBUG, "|");
1364  else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1365  av_log(s->avctx, AV_LOG_DEBUG, " ");
1366  else
1367  av_log(s->avctx, AV_LOG_DEBUG, "?");
1368 
1369 
1370  if (IS_INTERLACED(mb_type))
1371  av_log(s->avctx, AV_LOG_DEBUG, "=");
1372  else
1373  av_log(s->avctx, AV_LOG_DEBUG, " ");
1374  }
1375  }
1376  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1377  }
1378  }
1379 }
1380 
1384 static int lowest_referenced_row(MpegEncContext *s, int dir)
1385 {
1386  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1387  int my, off, i, mvs;
1388 
1389  if (s->picture_structure != PICT_FRAME || s->mcsel)
1390  goto unhandled;
1391 
1392  switch (s->mv_type) {
1393  case MV_TYPE_16X16:
1394  mvs = 1;
1395  break;
1396  case MV_TYPE_16X8:
1397  mvs = 2;
1398  break;
1399  case MV_TYPE_8X8:
1400  mvs = 4;
1401  break;
1402  default:
1403  goto unhandled;
1404  }
1405 
1406  for (i = 0; i < mvs; i++) {
1407  my = s->mv[dir][i][1]<<qpel_shift;
1408  my_max = FFMAX(my_max, my);
1409  my_min = FFMIN(my_min, my);
1410  }
1411 
1412  off = (FFMAX(-my_min, my_max) + 63) >> 6;
1413 
1414  return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1415 unhandled:
1416  return s->mb_height-1;
1417 }
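/* Editorial aside, not part of the original file: lowest_referenced_row() first
 * normalizes the vertical MV to quarter-pel units (half-pel codecs are shifted
 * left by one), so ">> 6" converts the largest vertical displacement into whole
 * 16-pixel macroblock rows, rounded up by the "+ 63". Worked number: a +70
 * half-pel MV -> 140 quarter-pel -> (140 + 63) >> 6 = 3 extra MB rows. */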
1418 
1419 /* put block[] to dest[] */
1420 static inline void put_dct(MpegEncContext *s,
1421  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1422 {
1423  s->dct_unquantize_intra(s, block, i, qscale);
1424  s->idsp.idct_put(dest, line_size, block);
1425 }
1426 
1427 /* add block[] to dest[] */
1428 static inline void add_dct(MpegEncContext *s,
1429  int16_t *block, int i, uint8_t *dest, int line_size)
1430 {
1431  if (s->block_last_index[i] >= 0) {
1432  s->idsp.idct_add(dest, line_size, block);
1433  }
1434 }
1435 
1436 static inline void add_dequant_dct(MpegEncContext *s,
1437  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1438 {
1439  if (s->block_last_index[i] >= 0) {
1440  s->dct_unquantize_inter(s, block, i, qscale);
1441 
1442  s->idsp.idct_add(dest, line_size, block);
1443  }
1444 }
1445 
1449 void ff_clean_intra_table_entries(MpegEncContext *s)
1450 {
1451  int wrap = s->b8_stride;
1452  int xy = s->block_index[0];
1453 
1454  s->dc_val[0][xy ] =
1455  s->dc_val[0][xy + 1 ] =
1456  s->dc_val[0][xy + wrap] =
1457  s->dc_val[0][xy + 1 + wrap] = 1024;
1458  /* ac pred */
1459  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1460  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1461  if (s->msmpeg4_version>=3) {
1462  s->coded_block[xy ] =
1463  s->coded_block[xy + 1 ] =
1464  s->coded_block[xy + wrap] =
1465  s->coded_block[xy + 1 + wrap] = 0;
1466  }
1467  /* chroma */
1468  wrap = s->mb_stride;
1469  xy = s->mb_x + s->mb_y * wrap;
1470  s->dc_val[1][xy] =
1471  s->dc_val[2][xy] = 1024;
1472  /* ac pred */
1473  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1474  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1475 
1476  s->mbintra_table[xy]= 0;
1477 }
1478 
1479 /* generic function called after a macroblock has been parsed by the
1480  decoder or after it has been encoded by the encoder.
1481 
1482  Important variables used:
1483  s->mb_intra : true if intra macroblock
1484  s->mv_dir : motion vector direction
1485  s->mv_type : motion vector type
1486  s->mv : motion vector
1487  s->interlaced_dct : true if interlaced dct used (mpeg2)
1488  */
1489 static av_always_inline
1490 void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
1491  int is_mpeg12)
1492 {
1493  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1494 
1495 #if FF_API_XVMC
1497  if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1498  ff_xvmc_decode_mb(s);//xvmc uses pblocks
1499  return;
1500  }
1502 #endif /* FF_API_XVMC */
1503 
1504  if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1505  /* print DCT coefficients */
1506  int i,j;
1507  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1508  for(i=0; i<6; i++){
1509  for(j=0; j<64; j++){
1510  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1511  block[i][s->idsp.idct_permutation[j]]);
1512  }
1513  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1514  }
1515  }
1516 
1517  s->current_picture.qscale_table[mb_xy] = s->qscale;
1518 
1519  /* update DC predictors for P macroblocks */
1520  if (!s->mb_intra) {
1521  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1522  if(s->mbintra_table[mb_xy])
1524  } else {
1525  s->last_dc[0] =
1526  s->last_dc[1] =
1527  s->last_dc[2] = 128 << s->intra_dc_precision;
1528  }
1529  }
1530  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1531  s->mbintra_table[mb_xy]=1;
1532 
1533  if ((s->avctx->flags & AV_CODEC_FLAG_PSNR) ||
1534  !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
1535  s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
1536  uint8_t *dest_y, *dest_cb, *dest_cr;
1537  int dct_linesize, dct_offset;
1538  op_pixels_func (*op_pix)[4];
1539  qpel_mc_func (*op_qpix)[16];
1540  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
1541  const int uvlinesize = s->current_picture.f->linesize[1];
1542  const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
1543  const int block_size = 8;
1544 
1545  /* avoid copy if macroblock skipped in last frame too */
1546  /* skip only during decoding as we might trash the buffers during encoding a bit */
1547  if(!s->encoding){
1548  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1549 
1550  if (s->mb_skipped) {
1551  s->mb_skipped= 0;
1552  assert(s->pict_type!=AV_PICTURE_TYPE_I);
1553  *mbskip_ptr = 1;
1554  } else if(!s->current_picture.reference) {
1555  *mbskip_ptr = 1;
1556  } else{
1557  *mbskip_ptr = 0; /* not skipped */
1558  }
1559  }
1560 
1561  dct_linesize = linesize << s->interlaced_dct;
1562  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
1563 
1564  if(readable){
1565  dest_y= s->dest[0];
1566  dest_cb= s->dest[1];
1567  dest_cr= s->dest[2];
1568  }else{
1569  dest_y = s->sc.b_scratchpad;
1570  dest_cb= s->sc.b_scratchpad+16*linesize;
1571  dest_cr= s->sc.b_scratchpad+32*linesize;
1572  }
1573 
1574  if (!s->mb_intra) {
1575  /* motion handling */
1576  /* decoding or more than one mb_type (MC was already done otherwise) */
1577  if(!s->encoding){
1578 
1580  if (s->mv_dir & MV_DIR_FORWARD) {
1582  lowest_referenced_row(s, 0),
1583  0);
1584  }
1585  if (s->mv_dir & MV_DIR_BACKWARD) {
1587  lowest_referenced_row(s, 1),
1588  0);
1589  }
1590  }
1591 
1592  op_qpix= s->me.qpel_put;
1593  if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
1594  op_pix = s->hdsp.put_pixels_tab;
1595  }else{
1596  op_pix = s->hdsp.put_no_rnd_pixels_tab;
1597  }
1598  if (s->mv_dir & MV_DIR_FORWARD) {
1599  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
1600  op_pix = s->hdsp.avg_pixels_tab;
1601  op_qpix= s->me.qpel_avg;
1602  }
1603  if (s->mv_dir & MV_DIR_BACKWARD) {
1604  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
1605  }
1606  }
1607 
1608  /* skip dequant / idct if we are really late ;) */
1609  if(s->avctx->skip_idct){
1610  if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
1611  ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
1612  || s->avctx->skip_idct >= AVDISCARD_ALL)
1613  goto skip_idct;
1614  }
1615 
1616  /* add dct residue */
1617  if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
1618  || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
1619  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1620  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1621  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1622  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1623 
1624  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1625  if (s->chroma_y_shift){
1626  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1627  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1628  }else{
1629  dct_linesize >>= 1;
1630  dct_offset >>=1;
1631  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1632  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1633  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1634  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1635  }
1636  }
1637  } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
1638  add_dct(s, block[0], 0, dest_y , dct_linesize);
1639  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
1640  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
1641  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
1642 
1643  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1644  if(s->chroma_y_shift){//Chroma420
1645  add_dct(s, block[4], 4, dest_cb, uvlinesize);
1646  add_dct(s, block[5], 5, dest_cr, uvlinesize);
1647  }else{
1648  //chroma422
1649  dct_linesize = uvlinesize << s->interlaced_dct;
1650  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
1651 
1652  add_dct(s, block[4], 4, dest_cb, dct_linesize);
1653  add_dct(s, block[5], 5, dest_cr, dct_linesize);
1654  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
1655  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
1656  if(!s->chroma_x_shift){//Chroma444
1657  add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
1658  add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
1659  add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
1660  add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
1661  }
1662  }
1663  }//fi gray
1664  }
1665  else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
1666  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
1667  }
1668  } else {
1669  /* dct only in intra block */
1671  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1672  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1673  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1674  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1675 
1676  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1677  if(s->chroma_y_shift){
1678  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1679  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1680  }else{
1681  dct_offset >>=1;
1682  dct_linesize >>=1;
1683  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1684  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1685  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1686  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1687  }
1688  }
1689  }else{
1690  s->idsp.idct_put(dest_y, dct_linesize, block[0]);
1691  s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
1692  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
1693  s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
1694 
1695  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1696  if(s->chroma_y_shift){
1697  s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
1698  s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
1699  }else{
1700 
1701  dct_linesize = uvlinesize << s->interlaced_dct;
1702  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
1703 
1704  s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
1705  s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
1706  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
1707  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
1708  if(!s->chroma_x_shift){//Chroma444
1709  s->idsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
1710  s->idsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
1711  s->idsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
1712  s->idsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
1713  }
1714  }
1715  }//gray
1716  }
1717  }
1718 skip_idct:
1719  if(!readable){
1720  s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
1721  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
1722  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
1723  }
1724  }
1725 }
1726 
1727 void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
1728 {
1729 #if !CONFIG_SMALL
1730  if(s->out_format == FMT_MPEG1) {
1731  mpv_decode_mb_internal(s, block, 1);
1732  } else
1733 #endif
1734  mpv_decode_mb_internal(s, block, 0);
1735 }
1736 
1737 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
1738 {
1739  ff_draw_horiz_band(s->avctx, s->current_picture.f,
1740  s->last_picture.f, y, h, s->picture_structure,
1741  s->first_field, s->low_delay);
1742 }
1743 
1744 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
1745  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
1746  const int uvlinesize = s->current_picture.f->linesize[1];
1747  const int mb_size= 4;
1748 
1749  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
1750  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
1751  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
1752  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
1753  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
1754  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
1755  //block_index is not used by mpeg2, so it is not affected by chroma_format
1756 
1757  s->dest[0] = s->current_picture.f->data[0] + (s->mb_x - 1) * (1 << mb_size);
1758  s->dest[1] = s->current_picture.f->data[1] + (s->mb_x - 1) * (1 << (mb_size - s->chroma_x_shift));
1759  s->dest[2] = s->current_picture.f->data[2] + (s->mb_x - 1) * (1 << (mb_size - s->chroma_x_shift));
1760 
1761  if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
1762  {
1763  if(s->picture_structure==PICT_FRAME){
1764  s->dest[0] += s->mb_y * linesize << mb_size;
1765  s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
1766  s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
1767  }else{
1768  s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
1769  s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
1770  s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
1771  assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
1772  }
1773  }
1774 }
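/* Editorial aside, not part of the original file: in ff_init_block_index() the
 * block_index[0..3] values are the four 8x8 luma block positions of the current
 * macroblock inside the b8_stride-wide per-block arrays, block_index[4]/[5] are
 * the chroma positions in the mb_stride-wide arrays, and dest[] ends up at the
 * macroblock's top-left pixel (1 << mb_size = 16 luma pixels per MB in x).
 * Worked number: mb_x=2, mb_y=1, b8_stride=91 -> block_index[0] = 91*2 - 2 + 4 = 184. */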
1775 
1776 void ff_mpeg_flush(AVCodecContext *avctx){
1777  int i;
1778  MpegEncContext *s = avctx->priv_data;
1779 
1780  if (!s || !s->picture)
1781  return;
1782 
1783  for (i = 0; i < MAX_PICTURE_COUNT; i++)
1784  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1786 
1790 
1791  s->mb_x= s->mb_y= 0;
1792 
1793  s->parse_context.state= -1;
1795  s->parse_context.overread= 0;
1797  s->parse_context.index= 0;
1798  s->parse_context.last_index= 0;
1799  s->bitstream_buffer_size=0;
1800  s->pp_time=0;
1801 }
1802 
1806 void ff_set_qscale(MpegEncContext * s, int qscale)
1807 {
1808  if (qscale < 1)
1809  qscale = 1;
1810  else if (qscale > 31)
1811  qscale = 31;
1812 
1813  s->qscale = qscale;
1814  s->chroma_qscale= s->chroma_qscale_table[qscale];
1815 
1816  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
1817  s->c_dc_scale= s->c_dc_scale_table[ qscale ];
1818 }
1819 
1820 void ff_mpv_report_decode_progress(MpegEncContext *s)
1821 {
1822  if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
1823  ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
1824 }
int last_time_base
Definition: mpegvideo.h:373
int bitstream_buffer_size
Definition: mpegvideo.h:401
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:47
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:797
int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
IDCTDSPContext idsp
Definition: mpegvideo.h:221
static int init_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:326
int16_t(* b_bidir_back_mv_table_base)[2]
Definition: mpegvideo.h:235
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:62
av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
Definition: mpegvideo_arm.c:43
discard all frames except keyframes
Definition: avcodec.h:688
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:1744
int picture_number
Definition: mpegvideo.h:122
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:54
#define CONFIG_MPEG_XVMC_DECODER
Definition: config.h:619
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
Definition: wmv2.c:83
av_cold void ff_mpv_common_init_neon(MpegEncContext *s)
Definition: mpegvideo.c:126
ScanTable intra_v_scantable
Definition: mpegvideo.h:88
av_cold void ff_mpegvideodsp_init(MpegVideoDSPContext *c)
Definition: mpegvideodsp.c:110
S(GMC)-VOP MPEG-4.
Definition: avutil.h:263
#define CONFIG_WMV2_ENCODER
Definition: config.h:1095
This structure describes decoded (raw) audio or video data.
Definition: frame.h:140
int16_t(* p_mv_table)[2]
MV table (1MV per MB) P-frame encoding.
Definition: mpegvideo.h:239
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:148
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
Definition: mpegvideo.h:183
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegpicture.h:36
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1595
#define ARCH_PPC
Definition: config.h:24
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:494
#define IS_GMC(a)
Definition: mpegutils.h:87
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:130
uint8_t * coded_block_base
Definition: mpegvideo.h:186
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:308
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:149
int16_t(*[3] ac_val)[16]
used for for MPEG-4 AC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:189
MJPEG encoder.
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:127
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
Definition: mpegvideo.h:421
int needs_realloc
Picture needs to be reallocated (eg due to a frame size change)
Definition: mpegpicture.h:82
#define HAVE_INTRINSICS_NEON
Definition: config.h:212
uint8_t * bitstream_buffer
Definition: mpegvideo.h:400
enum AVCodecID codec_id
Definition: mpegvideo.h:107
av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (%s)\, len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic ? ac->func_descr_generic :ac->func_descr)
int field_picture
whether or not the picture was encoded in separate fields
Definition: mpegpicture.h:76
void ff_print_debug_info(MpegEncContext *s, Picture *p)
Print debugging info for the given picture.
Definition: mpegvideo.c:1284
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1621
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced P-frame encoding.
Definition: mpegvideo.h:245
int16_t(* p_mv_table_base)[2]
Definition: mpegvideo.h:231
uint8_t raster_end[64]
Definition: idctdsp.h:32
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo.c:1384
uint32_t * score_map
map to store the scores
Definition: motion_est.h:54
mpegvideo header.
void(* idct_add)(uint8_t *dest, int line_size, int16_t *block)
block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:77
discard all
Definition: avcodec.h:689
uint8_t permutated[64]
Definition: idctdsp.h:31
static void free_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:378
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2776
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:329
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:128
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:30
int frame_start_found
Definition: parser.h:34
int qscale
QP.
Definition: mpegvideo.h:199
int h263_aic
Advanced INTRA Coding (AIC)
Definition: mpegvideo.h:82
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode B-frame encoding.
Definition: mpegvideo.h:241
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, &#39;draw_horiz_band&#39; is called by the libavcodec decoder to draw a horizontal band...
Definition: avcodec.h:1654
enum AVPictureType last_picture
Definition: movenc.c:68
int chroma_x_shift
Definition: mpegvideo.h:459
int encoding
true if we are encoding (vs decoding)
Definition: mpegvideo.h:109
int block_wrap[6]
Definition: mpegvideo.h:288
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:84
Macro definitions for various function/variable attributes.
int16_t(* b_back_mv_table_base)[2]
Definition: mpegvideo.h:233
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
Definition: mpegvideo.c:398
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:1449
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:202
Switching Intra.
Definition: avutil.h:264
#define MAX_THREADS
Definition: mpegvideo.h:59
static int16_t block[64]
Definition: dct.c:97
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2696
#define USES_LIST(a, list)
Definition: mpegutils.h:101
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo.c:1737
int context_reinit
Definition: mpegvideo.h:526
const uint8_t ff_mpeg1_dc_scale_table[128]
Definition: mpegvideodata.c:27
int16_t * dc_val_base
Definition: mpegvideo.h:181
ScratchpadContext sc
Definition: mpegvideo.h:197
uint8_t
#define ME_MAP_SIZE
Definition: motion_est.h:33
#define av_cold
Definition: attributes.h:66
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:68
enum OutputFormat out_format
output format
Definition: mpegvideo.h:99
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo.c:926
int noise_reduction
Definition: mpegvideo.h:544
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
uint8_t * pred_dir_table
used to store pred_dir for partitioned decoding
Definition: mpegvideo.h:195
Multithreading support functions.
qpel_mc_func(* qpel_put)[16]
Definition: motion_est.h:86
void ff_free_picture_tables(Picture *pic)
Definition: mpegpicture.c:412
#define emms_c()
Definition: internal.h:48
int no_rounding
apply no rounding to motion compensation (MPEG-4, msmpeg4, ...) for B-frames rounding mode is always ...
Definition: mpegvideo.h:278
int interlaced_dct
Definition: mpegvideo.h:464
void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:1727
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:175
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:398
int intra_dc_precision
Definition: mpegvideo.h:446
quarterpel DSP functions
void ff_mpv_common_init_ppc(MpegEncContext *s)
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
Definition: mpegvideo.h:242
float * cplx_tab
Definition: mpegvideo.h:522
#define FF_DEBUG_MB_TYPE
Definition: avcodec.h:2630
uint16_t pp_time
time distance between the last 2 p,s,i frames
Definition: mpegvideo.h:377
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
Definition: mpegvideo.c:318
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:263
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:300
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:124
int codec_tag
internal codec_tag upper case converted from avctx codec_tag
Definition: mpegvideo.h:115
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:763
high precision timer, useful to profile code
int16_t(*[2][2] p_field_mv_table_base)[2]
Definition: mpegvideo.h:237
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale-dependent variables.
Definition: mpegvideo.c:1806
int intra_only
if true, only intra pictures are generated
Definition: mpegvideo.h:97
ThreadFrame tf
Definition: mpegpicture.h:47
#define src
Definition: vp8dsp.c:254
int16_t * dc_val[3]
used for MPEG-4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:182
int h263_plus
H.263+ headers.
Definition: mpegvideo.h:104
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:151
unsigned int buffer_size
Definition: parser.h:32
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:124
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:180
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo.c:1428
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:190
#define ARCH_X86
Definition: config.h:33
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
int chroma_y_shift
Definition: mpegvideo.h:460
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:390
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
Definition: mpegpicture.h:37
#define AVERROR(e)
Definition: error.h:43
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:32
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:80
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:1793
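A small sketch of how the returned shift values are typically used (assuming AV_PIX_FMT_YUV420P as the input; example_chroma_shift is a hypothetical name):

#include <libavutil/pixdesc.h>
#include <libavutil/pixfmt.h>

static void example_chroma_shift(void)
{
    int h_shift, v_shift;
    av_pix_fmt_get_chroma_sub_sample(AV_PIX_FMT_YUV420P, &h_shift, &v_shift);
    /* for YUV420P both shifts are 1, i.e. each chroma plane is
       (luma_width >> 1) x (luma_height >> 1), rounded up */
}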
ERContext er
Definition: mpegvideo.h:528
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2825
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideo.h:211
int reference
Definition: mpegpicture.h:84
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:145
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:143
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1503
static enum AVDiscard skip_idct
Definition: avplay.c:253
#define wrap(func)
Definition: neontest.h:62
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:1420
simple assert() macros that are a bit more flexible than ISO C assert().
int overread_index
the index into ParseContext.buffer of the overread bytes
Definition: parser.h:36
#define PICT_TOP_FIELD
Definition: mpegutils.h:37
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:52
#define IS_SKIP(a)
Definition: mpegutils.h:83
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:386
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:285
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:391
uint8_t *[2][2] b_field_select_table
Definition: mpegvideo.h:248
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:998
#define FFMAX(a, b)
Definition: common.h:64
av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
Definition: mpegvideo.c:447
#define fail()
Definition: checkasm.h:80
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo.c:1776
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:121
int * lambda_table
Definition: mpegvideo.h:203
uint8_t * error_status_table
const uint8_t ff_alternate_horizontal_scan[64]
Definition: mpegvideodata.c:82
int ff_mpeg_er_init(MpegEncContext *s)
Definition: mpeg_er.c:96
common internal API header
void(* idct_put)(uint8_t *dest, int line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:70
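The final "clip to unsigned 8 bit" stage named above amounts to the following (illustrative sketch only; store_idct_row is a hypothetical helper and the IDCT itself is omitted):

#include <stdint.h>
#include <libavutil/common.h>

static void store_idct_row(uint8_t *dest, const int16_t *row, int n)
{
    for (int i = 0; i < n; i++)
        dest[i] = av_clip_uint8(row[i]);   /* saturate to 0..255 */
}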
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:37
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed...
Definition: imgutils.c:223
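A hedged usage sketch (example_check_size is a hypothetical wrapper; the log context may be NULL):

#include <libavutil/imgutils.h>

static int example_check_size(unsigned int w, unsigned int h, void *log_ctx)
{
    int ret = av_image_check_size(w, h, 0, log_ctx);
    if (ret < 0)
        return ret;   /* dimensions too large or otherwise unaddressable */
    return 0;
}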
int progressive_frame
Definition: mpegvideo.h:462
#define IS_16X8(a)
Definition: mpegutils.h:89
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:201
#define UPDATE_PICTURE(pic)
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:788
int top_field_first
Definition: mpegvideo.h:448
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:2817
uint8_t * er_temp_buffer
int overread
the number of bytes which were irreversibly read from the next frame
Definition: parser.h:35
#define FFMIN(a, b)
Definition: common.h:66
int last_index
Definition: parser.h:31
#define IS_DIRECT(a)
Definition: mpegutils.h:86
int next_p_frame_damaged
set if the next P-frame is damaged, to avoid showing trashed B-frames
Definition: mpegvideo.h:348
#define ARCH_ARM
Definition: config.h:14
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:176
Picture new_picture
copy of the source picture structure for encoding.
Definition: mpegvideo.h:169
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:484
int width
picture width / height.
Definition: avcodec.h:1580
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for B-frame encoding...
Definition: mpegvideo.h:191
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:179
Picture.
Definition: mpegpicture.h:45
int alternate_scan
Definition: mpegvideo.h:453
unsigned int allocated_bitstream_buffer_size
Definition: mpegvideo.h:402
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
int16_t(* ac_val_base)[16]
Definition: mpegvideo.h:188
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:767
#define FF_DEBUG_SKIP
Definition: avcodec.h:2639
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:2818
int16_t(*[2][2][2] b_field_mv_table_base)[2]
Definition: mpegvideo.h:238
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:498
int16_t(* b_forw_mv_table_base)[2]
Definition: mpegvideo.h:232
#define AV_RL32
Definition: intreadwrite.h:146
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:496
int16_t(*[12] pblocks)[64]
Definition: mpegvideo.h:474
#define CONFIG_GRAY
Definition: config.h:399
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:81
MotionEstContext me
Definition: mpegvideo.h:276
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:94
int mb_decision
macroblock decision mode
Definition: avcodec.h:1953
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
Definition: mpegvideo.h:193
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:490
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo.c:456
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:198
void ff_mpv_decode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for decoding.
Definition: mpegvideo.c:615
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2806
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:287
#define IS_INTRA16x16(a)
Definition: mpegutils.h:78
if(ac->has_optimized_func)
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
Definition: mpegvideo.h:291
int first_field
is 1 for the first field of a field picture, 0 otherwise
Definition: mpegvideo.h:465
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:260
NULL
Definition: eval.c:55
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:257
int16_t(* b_bidir_forw_mv_table_base)[2]
Definition: mpegvideo.h:234
int coded_picture_number
picture number in bitstream order
Definition: frame.h:230
#define AV_LOG_INFO
Standard information.
Definition: log.h:135
uint16_t inter_matrix[64]
Definition: mpegvideo.h:296
#define IS_INTERLACED(a)
Definition: mpegutils.h:85
uint8_t * buffer
Definition: parser.h:29
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:150
Libavcodec external API header.
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:211
#define ff_dlog(ctx,...)
Definition: internal.h:60
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:129
BlockDSPContext bdsp
Definition: mpegvideo.h:218
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:58
enum AVDiscard skip_idct
Definition: avcodec.h:2983
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:158
int debug
debug
Definition: avcodec.h:2626
main external API structure.
Definition: avcodec.h:1409
ScanTable intra_scantable
Definition: mpegvideo.h:86
uint8_t * coded_block
used for coded block pattern prediction (msmpeg4v3, wmv1)
Definition: mpegvideo.h:187
int height
picture size; must be a multiple of 16
Definition: mpegvideo.h:95
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:1441
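The packing rule quoted above can be written out explicitly; this is a sketch and fourcc_le is a hypothetical helper (libavutil's MKTAG macro performs the same packing):

#include <stdint.h>

static uint32_t fourcc_le(char a, char b, char c, char d)
{
    return  (uint32_t)(uint8_t)a        |
           ((uint32_t)(uint8_t)b <<  8) |
           ((uint32_t)(uint8_t)c << 16) |
           ((uint32_t)(uint8_t)d << 24);
}
/* fourcc_le('A','B','C','D') == 0x44434241 */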
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:113
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:488
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
static av_always_inline void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64], int is_mpeg12)
Definition: mpegvideo.c:1490
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:262
uint32_t state
contains the last few bytes in MSB order
Definition: parser.h:33
Picture * picture
main picture buffer
Definition: mpegvideo.h:131
#define AVERROR_BUG
Bug detected, please report the issue.
Definition: error.h:60
int progressive_sequence
Definition: mpegvideo.h:439
int coded_height
Definition: avcodec.h:1595
Switching Predicted.
Definition: avutil.h:265
#define IS_16X16(a)
Definition: mpegutils.h:88
ScanTable intra_h_scantable
Definition: mpegvideo.h:87
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:82
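A worked example of the two averaging formulas quoted for put_pixels_tab above and put_no_rnd_pixels_tab here (scalar sketch only; the real DSP routines operate on whole blocks):

#include <stdint.h>

static uint8_t avg_rnd(uint8_t a, uint8_t b)    { return (uint8_t)((a + b + 1) >> 1); }
static uint8_t avg_no_rnd(uint8_t a, uint8_t b) { return (uint8_t)((a + b) >> 1); }
/* avg_rnd(10, 13) == 12 while avg_no_rnd(10, 13) == 11: the no-rounding
   variant biases halfpel averages downward, matching no_rounding mode. */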
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced B-frame encoding.
Definition: mpegvideo.h:246
uint8_t * cbp_table
used to store cbp, ac_pred for partitioned decoding
Definition: mpegvideo.h:194
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
Definition: mpegpicture.c:56
unsigned int avpriv_toupper4(unsigned int x)
Definition: utils.c:2647
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:2638
struct AVFrame * f
Definition: mpegpicture.h:46
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:1956
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:492
#define IS_8X16(a)
Definition: mpegutils.h:90
int context_initialized
Definition: mpegvideo.h:119
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:115
ptrdiff_t uvlinesize
line size for chroma, in bytes, may be different from width
Definition: mpegvideo.h:130
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo.c:1052
int f_code
forward MV resolution
Definition: mpegvideo.h:229
#define COPY(a)
#define MV_DIR_FORWARD
Definition: mpegvideo.h:256
int max_b_frames
max number of B-frames for encoding
Definition: mpegvideo.h:110
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:206
int h263_pred
use MPEG-4/H.263 ac/dc predictions
Definition: mpegvideo.h:100
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
Definition: mpegvideo.h:243
static int init_context_frame(MpegEncContext *s)
Initialize and allocate MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:623
#define IS_PCM(a)
Definition: mpegutils.h:79
uint8_t *[2] p_field_select_table
Definition: mpegvideo.h:247
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode B-frame encoding.
Definition: mpegvideo.h:244
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:146
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
Definition: mpegvideo.h:184
uint8_t level
Definition: svq3.c:204
qpel_mc_func(* qpel_avg)[16]
Definition: motion_est.h:87
int mv[2][4][2]
motion vectors for a macroblock; first coordinate: 0 = forward, 1 = backward; second coordinate: depend...
Definition: mpegvideo.h:270
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode B-frame encoding.
Definition: mpegvideo.h:240
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:126
static void dct_unquantize_h263_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:207
MpegEncContext.
Definition: mpegvideo.h:76
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:178
int8_t * qscale_table
Definition: mpegpicture.h:50
struct AVCodecContext * avctx
Definition: mpegvideo.h:93
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
Definition: mpegpicture.c:269
discard all non reference
Definition: avcodec.h:686
MpegVideoDSPContext mdsp
Definition: mpegvideo.h:223
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:394
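A minimal reuse sketch (grow_scratch is a hypothetical helper; the pointer and size would normally live in a context struct, as they do in MpegEncContext):

#include <stdint.h>
#include <libavutil/mem.h>

static void grow_scratch(uint8_t **buf, unsigned int *buf_size, size_t needed)
{
    av_fast_malloc(buf, buf_size, needed);
    if (!*buf)
        return;   /* allocation failed */
    /* on success *buf has room for at least 'needed' bytes; the old
       contents are not preserved when a reallocation happened */
}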
int(* dct_error_sum)[64]
Definition: mpegvideo.h:321
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:77
common internal API header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:125
const uint8_t ff_default_chroma_qscale_table[32]
Definition: mpegvideodata.c:21
uint8_t * dest[3]
Definition: mpegvideo.h:289
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
Definition: internal.h:120
static av_cold int dct_init(MpegEncContext *s)
Definition: mpegvideo.c:271
int last_pict_type
Definition: mpegvideo.h:207
static void dct_unquantize_h263_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:244
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:157
#define FF_DEBUG_QP
Definition: avcodec.h:2631
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:177
Bi-dir predicted.
Definition: avutil.h:262
int index
Definition: parser.h:30
uint8_t * b_scratchpad
scratchpad used for writing into write only buffers
Definition: mpegpicture.h:39
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (H.263)
Definition: mpegvideo.h:185
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:93
uint32_t * map
map to avoid duplicate evaluations
Definition: motion_est.h:53
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:425
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:638
int16_t(* blocks)[12][64]
Definition: mpegvideo.h:477
#define IS_INTRA(x, y)
int slices
Number of slices.
Definition: avcodec.h:2143
void * priv_data
Definition: avcodec.h:1451
#define PICT_FRAME
Definition: mpegutils.h:39
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:763
#define IS_INTRA4x4(a)
Definition: mpegutils.h:77
int picture_structure
Definition: mpegvideo.h:443
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:28
VideoDSPContext vdsp
Definition: mpegvideo.h:227
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:78
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:147
#define IS_8X8(a)
Definition: mpegutils.h:91
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:268
void ff_xvmc_decode_mb(MpegEncContext *s)
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:1263
uint8_t * obmc_scratchpad
Definition: mpegpicture.h:38
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:476
static uint8_t tmp[8]
Definition: des.c:38
ParseContext parse_context
Definition: mpegvideo.h:350
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:1436
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:163
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:196
#define IS_ACPRED(a)
Definition: mpegutils.h:96
#define CONFIG_WMV2_DECODER
Definition: config.h:712
#define HAVE_THREADS
Definition: config.h:321
int chroma_qscale
chroma QP
Definition: mpegvideo.h:200
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
Definition: mpegvideo.c:592
static void free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution.
Definition: mpegvideo.c:878
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:294
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:56
int workaround_bugs
work around encoder bugs which cannot be detected automatically
Definition: mpegvideo.h:114
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
Definition: mpegvideo.h:85
#define av_always_inline
Definition: attributes.h:40
uint8_t * temp
Definition: motion_est.h:51
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:261
void ff_xvmc_field_end(MpegEncContext *s)
int16_t(* b_direct_mv_table_base)[2]
Definition: mpegvideo.h:236
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:230
float * bits_tab
Definition: mpegvideo.h:522
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:486
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo.c:1820
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:129
Predicted.
Definition: avutil.h:261
HpelDSPContext hdsp
Definition: mpegvideo.h:220