Libav
mpegvideo_motion.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000,2001 Fabrice Bellard
3  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
6  *
7  * This file is part of Libav.
8  *
9  * Libav is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * Libav is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with Libav; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include <string.h>
25 
26 #include "libavutil/internal.h"
27 #include "avcodec.h"
28 #include "h261.h"
29 #include "mpegutils.h"
30 #include "mpegvideo.h"
31 #include "mjpegenc.h"
32 #include "msmpeg4.h"
33 #include "qpeldsp.h"
34 #include "wmv2.h"
35 #include <limits.h>
36 
37 static void gmc1_motion(MpegEncContext *s,
38  uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
39  uint8_t **ref_picture)
40 {
41  uint8_t *ptr;
42  int src_x, src_y, motion_x, motion_y;
43  ptrdiff_t offset, linesize, uvlinesize;
44  int emu = 0;
45 
46  motion_x = s->sprite_offset[0][0];
47  motion_y = s->sprite_offset[0][1];
48  src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy + 1));
49  src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy + 1));
50  motion_x <<= (3 - s->sprite_warping_accuracy);
51  motion_y <<= (3 - s->sprite_warping_accuracy);
52  src_x = av_clip(src_x, -16, s->width);
53  if (src_x == s->width)
54  motion_x = 0;
55  src_y = av_clip(src_y, -16, s->height);
56  if (src_y == s->height)
57  motion_y = 0;
58 
59  linesize = s->linesize;
60  uvlinesize = s->uvlinesize;
61 
62  ptr = ref_picture[0] + src_y * linesize + src_x;
63 
64  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
65  (unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
67  linesize, linesize,
68  17, 17,
69  src_x, src_y,
70  s->h_edge_pos, s->v_edge_pos);
71  ptr = s->sc.edge_emu_buffer;
72  }
73 
74  if ((motion_x | motion_y) & 7) {
75  s->mdsp.gmc1(dest_y, ptr, linesize, 16,
76  motion_x & 15, motion_y & 15, 128 - s->no_rounding);
77  s->mdsp.gmc1(dest_y + 8, ptr + 8, linesize, 16,
78  motion_x & 15, motion_y & 15, 128 - s->no_rounding);
79  } else {
80  int dxy;
81 
82  dxy = ((motion_x >> 3) & 1) | ((motion_y >> 2) & 2);
83  if (s->no_rounding) {
84  s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
85  } else {
86  s->hdsp.put_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
87  }
88  }
89 
91  return;
92 
93  motion_x = s->sprite_offset[1][0];
94  motion_y = s->sprite_offset[1][1];
95  src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy + 1));
96  src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy + 1));
97  motion_x <<= (3 - s->sprite_warping_accuracy);
98  motion_y <<= (3 - s->sprite_warping_accuracy);
99  src_x = av_clip(src_x, -8, s->width >> 1);
100  if (src_x == s->width >> 1)
101  motion_x = 0;
102  src_y = av_clip(src_y, -8, s->height >> 1);
103  if (src_y == s->height >> 1)
104  motion_y = 0;
105 
106  offset = (src_y * uvlinesize) + src_x;
107  ptr = ref_picture[1] + offset;
108  if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
109  (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
111  uvlinesize, uvlinesize,
112  9, 9,
113  src_x, src_y,
114  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
115  ptr = s->sc.edge_emu_buffer;
116  emu = 1;
117  }
118  s->mdsp.gmc1(dest_cb, ptr, uvlinesize, 8,
119  motion_x & 15, motion_y & 15, 128 - s->no_rounding);
120 
121  ptr = ref_picture[2] + offset;
122  if (emu) {
124  uvlinesize, uvlinesize,
125  9, 9,
126  src_x, src_y,
127  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
128  ptr = s->sc.edge_emu_buffer;
129  }
130  s->mdsp.gmc1(dest_cr, ptr, uvlinesize, 8,
131  motion_x & 15, motion_y & 15, 128 - s->no_rounding);
132 }
133 
134 static void gmc_motion(MpegEncContext *s,
135  uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
136  uint8_t **ref_picture)
137 {
138  uint8_t *ptr;
139  int linesize, uvlinesize;
140  const int a = s->sprite_warping_accuracy;
141  int ox, oy;
142 
143  linesize = s->linesize;
144  uvlinesize = s->uvlinesize;
145 
146  ptr = ref_picture[0];
147 
148  ox = s->sprite_offset[0][0] + s->sprite_delta[0][0] * s->mb_x * 16 +
149  s->sprite_delta[0][1] * s->mb_y * 16;
150  oy = s->sprite_offset[0][1] + s->sprite_delta[1][0] * s->mb_x * 16 +
151  s->sprite_delta[1][1] * s->mb_y * 16;
152 
153  s->mdsp.gmc(dest_y, ptr, linesize, 16,
154  ox, oy,
155  s->sprite_delta[0][0], s->sprite_delta[0][1],
156  s->sprite_delta[1][0], s->sprite_delta[1][1],
157  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
158  s->h_edge_pos, s->v_edge_pos);
159  s->mdsp.gmc(dest_y + 8, ptr, linesize, 16,
160  ox + s->sprite_delta[0][0] * 8,
161  oy + s->sprite_delta[1][0] * 8,
162  s->sprite_delta[0][0], s->sprite_delta[0][1],
163  s->sprite_delta[1][0], s->sprite_delta[1][1],
164  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
165  s->h_edge_pos, s->v_edge_pos);
166 
168  return;
169 
170  ox = s->sprite_offset[1][0] + s->sprite_delta[0][0] * s->mb_x * 8 +
171  s->sprite_delta[0][1] * s->mb_y * 8;
172  oy = s->sprite_offset[1][1] + s->sprite_delta[1][0] * s->mb_x * 8 +
173  s->sprite_delta[1][1] * s->mb_y * 8;
174 
175  ptr = ref_picture[1];
176  s->mdsp.gmc(dest_cb, ptr, uvlinesize, 8,
177  ox, oy,
178  s->sprite_delta[0][0], s->sprite_delta[0][1],
179  s->sprite_delta[1][0], s->sprite_delta[1][1],
180  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
181  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
182 
183  ptr = ref_picture[2];
184  s->mdsp.gmc(dest_cr, ptr, uvlinesize, 8,
185  ox, oy,
186  s->sprite_delta[0][0], s->sprite_delta[0][1],
187  s->sprite_delta[1][0], s->sprite_delta[1][1],
188  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
189  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
190 }
191 
192 static inline int hpel_motion(MpegEncContext *s,
193  uint8_t *dest, uint8_t *src,
194  int src_x, int src_y,
195  op_pixels_func *pix_op,
196  int motion_x, int motion_y)
197 {
198  int dxy = 0;
199  int emu = 0;
200 
201  src_x += motion_x >> 1;
202  src_y += motion_y >> 1;
203 
204  /* WARNING: do no forget half pels */
205  src_x = av_clip(src_x, -16, s->width); // FIXME unneeded for emu?
206  if (src_x != s->width)
207  dxy |= motion_x & 1;
208  src_y = av_clip(src_y, -16, s->height);
209  if (src_y != s->height)
210  dxy |= (motion_y & 1) << 1;
211  src += src_y * s->linesize + src_x;
212 
213  if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 1) - 8, 0) ||
214  (unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y & 1) - 8, 0)) {
216  s->linesize, s->linesize,
217  9, 9, src_x, src_y,
218  s->h_edge_pos, s->v_edge_pos);
219  src = s->sc.edge_emu_buffer;
220  emu = 1;
221  }
222  pix_op[dxy](dest, src, s->linesize, 8);
223  return emu;
224 }
225 
226 static av_always_inline
228  int src_x, int src_y,
229  int uvsrc_x, int uvsrc_y,
230  int field_based,
231  uint8_t **ptr_y,
232  uint8_t **ptr_cb,
233  uint8_t **ptr_cr)
234 {
235  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, *ptr_y,
236  s->linesize, s->linesize,
237  17, 17 + field_based,
238  src_x, src_y * (1 << field_based),
239  s->h_edge_pos, s->v_edge_pos);
240  *ptr_y = s->sc.edge_emu_buffer;
241  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
242  uint8_t *uvbuf = s->sc.edge_emu_buffer + 18 * s->linesize;
243  s->vdsp.emulated_edge_mc(uvbuf, *ptr_cb,
244  s->uvlinesize, s->uvlinesize,
245  9, 9 + field_based,
246  uvsrc_x, uvsrc_y * (1 << field_based),
247  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
248  s->vdsp.emulated_edge_mc(uvbuf + 16, *ptr_cr,
249  s->uvlinesize, s->uvlinesize,
250  9, 9 + field_based,
251  uvsrc_x, uvsrc_y * (1 << field_based),
252  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
253  *ptr_cb = uvbuf;
254  *ptr_cr = uvbuf + 16;
255  }
256 }
257 
258 static av_always_inline
260  uint8_t *dest_y,
261  uint8_t *dest_cb,
262  uint8_t *dest_cr,
263  int field_based,
264  int bottom_field,
265  int field_select,
266  uint8_t **ref_picture,
267  op_pixels_func (*pix_op)[4],
268  int motion_x,
269  int motion_y,
270  int h,
271  int is_mpeg12,
272  int mb_y)
273 {
274  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
275  int dxy, uvdxy, mx, my, src_x, src_y,
276  uvsrc_x, uvsrc_y, v_edge_pos;
277  ptrdiff_t uvlinesize, linesize;
278 
279  v_edge_pos = s->v_edge_pos >> field_based;
280  linesize = s->current_picture.f->linesize[0] << field_based;
281  uvlinesize = s->current_picture.f->linesize[1] << field_based;
282 
283  dxy = ((motion_y & 1) << 1) | (motion_x & 1);
284  src_x = s->mb_x * 16 + (motion_x >> 1);
285  src_y = (mb_y << (4 - field_based)) + (motion_y >> 1);
286 
287  if (!is_mpeg12 && s->out_format == FMT_H263) {
288  if ((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based) {
289  mx = (motion_x >> 1) | (motion_x & 1);
290  my = motion_y >> 1;
291  uvdxy = ((my & 1) << 1) | (mx & 1);
292  uvsrc_x = s->mb_x * 8 + (mx >> 1);
293  uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
294  } else {
295  uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
296  uvsrc_x = src_x >> 1;
297  uvsrc_y = src_y >> 1;
298  }
299  // Even chroma mv's are full pel in H261
300  } else if (!is_mpeg12 && s->out_format == FMT_H261) {
301  mx = motion_x / 4;
302  my = motion_y / 4;
303  uvdxy = 0;
304  uvsrc_x = s->mb_x * 8 + mx;
305  uvsrc_y = mb_y * 8 + my;
306  } else {
307  if (s->chroma_y_shift) {
308  mx = motion_x / 2;
309  my = motion_y / 2;
310  uvdxy = ((my & 1) << 1) | (mx & 1);
311  uvsrc_x = s->mb_x * 8 + (mx >> 1);
312  uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
313  } else {
314  if (s->chroma_x_shift) {
315  // Chroma422
316  mx = motion_x / 2;
317  uvdxy = ((motion_y & 1) << 1) | (mx & 1);
318  uvsrc_x = s->mb_x * 8 + (mx >> 1);
319  uvsrc_y = src_y;
320  } else {
321  // Chroma444
322  uvdxy = dxy;
323  uvsrc_x = src_x;
324  uvsrc_y = src_y;
325  }
326  }
327  }
328 
329  ptr_y = ref_picture[0] + src_y * linesize + src_x;
330  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
331  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
332 
333  if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 1) - 16, 0) ||
334  (unsigned)src_y > FFMAX(v_edge_pos - (motion_y & 1) - h, 0)) {
335  if (is_mpeg12 ||
339  "MPEG motion vector out of boundary (%d %d)\n", src_x,
340  src_y);
341  return;
342  }
343  emulated_edge_mc(s, src_x, src_y, uvsrc_x, uvsrc_y, field_based,
344  &ptr_y, &ptr_cb, &ptr_cr);
345  }
346 
347  /* FIXME use this for field pix too instead of the obnoxious hack which
348  * changes picture.data */
349  if (bottom_field) {
350  dest_y += s->linesize;
351  dest_cb += s->uvlinesize;
352  dest_cr += s->uvlinesize;
353  }
354 
355  if (field_select) {
356  ptr_y += s->linesize;
357  ptr_cb += s->uvlinesize;
358  ptr_cr += s->uvlinesize;
359  }
360 
361  pix_op[0][dxy](dest_y, ptr_y, linesize, h);
362 
363  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
364  pix_op[s->chroma_x_shift][uvdxy]
365  (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
366  pix_op[s->chroma_x_shift][uvdxy]
367  (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
368  }
369  if (!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
370  s->out_format == FMT_H261) {
372  }
373 }
374 /* apply one mpeg motion vector to the three components */
376  uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
377  int field_select, uint8_t **ref_picture,
378  op_pixels_func (*pix_op)[4],
379  int motion_x, int motion_y, int h, int mb_y)
380 {
381 #if !CONFIG_SMALL
382  if (s->out_format == FMT_MPEG1)
383  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
384  field_select, ref_picture, pix_op,
385  motion_x, motion_y, h, 1, mb_y);
386  else
387 #endif
388  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
389  field_select, ref_picture, pix_op,
390  motion_x, motion_y, h, 0, mb_y);
391 }
392 
393 static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
394  uint8_t *dest_cb, uint8_t *dest_cr,
395  int bottom_field, int field_select,
396  uint8_t **ref_picture,
397  op_pixels_func (*pix_op)[4],
398  int motion_x, int motion_y, int h, int mb_y)
399 {
400 #if !CONFIG_SMALL
401  if(s->out_format == FMT_MPEG1)
402  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
403  bottom_field, field_select, ref_picture, pix_op,
404  motion_x, motion_y, h, 1, mb_y);
405  else
406 #endif
407  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
408  bottom_field, field_select, ref_picture, pix_op,
409  motion_x, motion_y, h, 0, mb_y);
410 }
411 
// FIXME: SIMDify, avg variant, 16x16 version
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
{
    /* Blend the five overlapping 8x8 predictions (src[0..4] = mid, top,
     * left, right, bottom) with fixed per-pixel weights.  Every weight
     * quintuple sums to 8, so each output sample is the rounded weighted
     * average (sum + 4) >> 3 — exactly what the original OBMC_FILTER
     * macro cascade computed, here expressed as a lookup table. */
    static const uint8_t obmc_weight[8][8][5] = {
        /* { top, left, mid, right, bottom } for each pixel */
        { {2,2,4,0,0}, {2,1,5,0,0}, {2,1,5,0,0}, {2,1,5,0,0},
          {2,0,5,1,0}, {2,0,5,1,0}, {2,0,5,1,0}, {2,0,4,2,0} },
        { {1,2,5,0,0}, {1,2,5,0,0}, {2,1,5,0,0}, {2,1,5,0,0},
          {2,0,5,1,0}, {2,0,5,1,0}, {1,0,5,2,0}, {1,0,5,2,0} },
        { {1,2,5,0,0}, {1,2,5,0,0}, {1,1,6,0,0}, {1,1,6,0,0},
          {1,0,6,1,0}, {1,0,6,1,0}, {1,0,5,2,0}, {1,0,5,2,0} },
        { {1,2,5,0,0}, {1,2,5,0,0}, {1,1,6,0,0}, {1,1,6,0,0},
          {1,0,6,1,0}, {1,0,6,1,0}, {1,0,5,2,0}, {1,0,5,2,0} },
        { {0,2,5,0,1}, {0,2,5,0,1}, {0,1,6,0,1}, {0,1,6,0,1},
          {0,0,6,1,1}, {0,0,6,1,1}, {0,0,5,2,1}, {0,0,5,2,1} },
        { {0,2,5,0,1}, {0,2,5,0,1}, {0,1,6,0,1}, {0,1,6,0,1},
          {0,0,6,1,1}, {0,0,6,1,1}, {0,0,5,2,1}, {0,0,5,2,1} },
        { {0,2,5,0,1}, {0,2,5,0,1}, {0,1,5,0,2}, {0,1,5,0,2},
          {0,0,5,1,2}, {0,0,5,1,2}, {0,0,5,2,1}, {0,0,5,2,1} },
        { {0,2,4,0,2}, {0,1,5,0,2}, {0,1,5,0,2}, {0,1,5,0,2},
          {0,0,5,1,2}, {0,0,5,1,2}, {0,0,5,1,2}, {0,0,4,2,2} },
    };
    int x, y;

    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            const uint8_t *w  = obmc_weight[y][x];
            const int     idx = y * stride + x;

            dst[idx] = (w[0] * src[1][idx] +   /* top    */
                        w[1] * src[2][idx] +   /* left   */
                        w[2] * src[0][idx] +   /* mid    */
                        w[3] * src[3][idx] +   /* right  */
                        w[4] * src[4][idx] + 4) >> 3;
        }
    }
}
464 
465 /* obmc for 1 8x8 luma block */
466 static inline void obmc_motion(MpegEncContext *s,
467  uint8_t *dest, uint8_t *src,
468  int src_x, int src_y,
469  op_pixels_func *pix_op,
470  int16_t mv[5][2] /* mid top left right bottom */)
471 #define MID 0
472 {
473  int i;
474  uint8_t *ptr[5];
475 
476  assert(s->quarter_sample == 0);
477 
478  for (i = 0; i < 5; i++) {
479  if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
480  ptr[i] = ptr[MID];
481  } else {
482  ptr[i] = s->sc.obmc_scratchpad + 8 * (i & 1) +
483  s->linesize * 8 * (i >> 1);
484  hpel_motion(s, ptr[i], src, src_x, src_y, pix_op,
485  mv[i][0], mv[i][1]);
486  }
487  }
488 
489  put_obmc(dest, ptr, s->linesize);
490 }
491 
/*
 * Quarter-pel motion compensation of one macroblock: luma through the
 * qpel interpolators, chroma through half-pel ops with a derived MV.
 * Handles frame- and field-based prediction.
 */
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y,
                               uint8_t *dest_cb,
                               uint8_t *dest_cr,
                               int field_based, int bottom_field,
                               int field_select, uint8_t **ref_picture,
                               op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t linesize, uvlinesize;

    /* luma sub-pel phase: 4x4 grid of quarter-pel positions */
    dxy = ((motion_y & 3) << 2) | (motion_x & 3);

    src_x = s->mb_x * 16 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize = s->linesize << field_based;
    uvlinesize = s->uvlinesize << field_based;

    /* derive the chroma MV; the FF_BUG_* branches replicate the rounding
     * of known-buggy encoders so their streams decode bit-exactly */
    if (field_based) {
        mx = motion_x / 2;
        my = motion_y >> 1;
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA2) {
        static const int rtab[8] = { 0, 0, 1, 1, 0, 0, 0, 1 };
        mx = (motion_x >> 1) + rtab[motion_x & 7];
        my = (motion_y >> 1) + rtab[motion_y & 7];
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA) {
        mx = (motion_x >> 1) | (motion_x & 1);
        my = (motion_y >> 1) | (motion_y & 1);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
    }
    /* reduce to half-pel accuracy, keeping the fractional bit */
    mx = (mx >> 1) | (mx & 1);
    my = (my >> 1) | (my & 1);

    uvdxy = (mx & 1) | ((my & 1) << 1);
    mx >>= 1;
    my >>= 1;

    uvsrc_x = s->mb_x * 8 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;

    ptr_y = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 3) - 16, 0) ||
        (unsigned)src_y > FFMAX(v_edge_pos - (motion_y & 3) - h, 0)) {
        /* source area crosses a frame border: use padded scratch copies */
        emulated_edge_mc(s, src_x, src_y, uvsrc_x, uvsrc_y, field_based,
                         &ptr_y, &ptr_cb, &ptr_cr);
    }

    if (!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else {
        /* field prediction: offset destination/source to the wanted field */
        if (bottom_field) {
            dest_y += s->linesize;
            dest_cb += s->uvlinesize;
            dest_cr += s->uvlinesize;
        }

        if (field_select) {
            ptr_y += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        // damn interlaced mode
        // FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y, ptr_y, linesize);
        qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize);
    }
    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}
573 
578  uint8_t *dest_cb, uint8_t *dest_cr,
579  uint8_t **ref_picture,
580  op_pixels_func *pix_op,
581  int mx, int my)
582 {
583  uint8_t *ptr;
584  int src_x, src_y, dxy, emu = 0;
585  ptrdiff_t offset;
586 
587  /* In case of 8X8, we construct a single chroma motion vector
588  * with a special rounding */
589  mx = ff_h263_round_chroma(mx);
590  my = ff_h263_round_chroma(my);
591 
592  dxy = ((my & 1) << 1) | (mx & 1);
593  mx >>= 1;
594  my >>= 1;
595 
596  src_x = s->mb_x * 8 + mx;
597  src_y = s->mb_y * 8 + my;
598  src_x = av_clip(src_x, -8, (s->width >> 1));
599  if (src_x == (s->width >> 1))
600  dxy &= ~1;
601  src_y = av_clip(src_y, -8, (s->height >> 1));
602  if (src_y == (s->height >> 1))
603  dxy &= ~2;
604 
605  offset = src_y * s->uvlinesize + src_x;
606  ptr = ref_picture[1] + offset;
607  if ((unsigned)src_x > FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 8, 0) ||
608  (unsigned)src_y > FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 8, 0)) {
610  s->uvlinesize, s->uvlinesize,
611  9, 9, src_x, src_y,
612  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
613  ptr = s->sc.edge_emu_buffer;
614  emu = 1;
615  }
616  pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);
617 
618  ptr = ref_picture[2] + offset;
619  if (emu) {
621  s->uvlinesize, s->uvlinesize,
622  9, 9, src_x, src_y,
623  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
624  ptr = s->sc.edge_emu_buffer;
625  }
626  pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
627 }
628 
629 static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir)
630 {
631  /* fetch pixels for estimated mv 4 macroblocks ahead
632  * optimized for 64byte cache lines */
633  const int shift = s->quarter_sample ? 2 : 1;
634  const int mx = (s->mv[dir][0][0] >> shift) + 16 * s->mb_x + 8;
635  const int my = (s->mv[dir][0][1] >> shift) + 16 * s->mb_y;
636  int off = mx + (my + (s->mb_x & 3) * 4) * s->linesize + 64;
637 
638  s->vdsp.prefetch(pix[0] + off, s->linesize, 4);
639  off = (mx >> 1) + ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize + 64;
640  s->vdsp.prefetch(pix[1] + off, pix[2] - pix[1], 2);
641 }
642 
/*
 * Overlapped block motion compensation of one macroblock (H.263-style).
 * Gathers the 8x8 motion vectors of this MB and its decoded neighbours
 * into mv_cache[row][col] (rows/cols 1..2 are this MB's four blocks,
 * row 0 / col 0 / col 3 the above/left/right neighbours), predicts each
 * 8x8 luma block blended with its neighbours' vectors, then does chroma
 * with a single averaged vector.
 */
static inline void apply_obmc(MpegEncContext *s,
                              uint8_t *dest_y,
                              uint8_t *dest_cb,
                              uint8_t *dest_cr,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4])
{
    LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
    Picture *cur_frame = &s->current_picture;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    const int xy = mb_x + mb_y * s->mb_stride;
    const int mot_stride = s->b8_stride;
    const int mot_xy = mb_x * 2 + mb_y * 2 * mot_stride;
    int mx, my, i;

    assert(!s->mb_skipped);

    /* this MB's own four 8x8 vectors */
    AV_COPY32(mv_cache[1][1], cur_frame->motion_val[0][mot_xy]);
    AV_COPY32(mv_cache[1][2], cur_frame->motion_val[0][mot_xy + 1]);

    AV_COPY32(mv_cache[2][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[2][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    /* row 3 reuses this MB's bottom row (the MB below is presumably
     * not decoded yet at this point — NOTE(review): matches upstream) */
    AV_COPY32(mv_cache[3][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[3][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    /* above neighbour; fall back to own vectors at the top edge or intra */
    if (mb_y == 0 || IS_INTRA(cur_frame->mb_type[xy - s->mb_stride])) {
        AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
        AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
    } else {
        AV_COPY32(mv_cache[0][1],
                  cur_frame->motion_val[0][mot_xy - mot_stride]);
        AV_COPY32(mv_cache[0][2],
                  cur_frame->motion_val[0][mot_xy - mot_stride + 1]);
    }

    /* left neighbour */
    if (mb_x == 0 || IS_INTRA(cur_frame->mb_type[xy - 1])) {
        AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
        AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
    } else {
        AV_COPY32(mv_cache[1][0], cur_frame->motion_val[0][mot_xy - 1]);
        AV_COPY32(mv_cache[2][0],
                  cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);
    }

    /* right neighbour */
    if (mb_x + 1 >= s->mb_width || IS_INTRA(cur_frame->mb_type[xy + 1])) {
        AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
        AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
    } else {
        AV_COPY32(mv_cache[1][3], cur_frame->motion_val[0][mot_xy + 2]);
        AV_COPY32(mv_cache[2][3],
                  cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
    }

    mx = 0;
    my = 0;
    for (i = 0; i < 4; i++) {
        const int x = (i & 1) + 1;
        const int y = (i >> 1) + 1;
        /* mv[] order expected by obmc_motion: mid, top, left, right, bottom */
        int16_t mv[5][2] = {
            { mv_cache[y][x][0], mv_cache[y][x][1] },
            { mv_cache[y - 1][x][0], mv_cache[y - 1][x][1] },
            { mv_cache[y][x - 1][0], mv_cache[y][x - 1][1] },
            { mv_cache[y][x + 1][0], mv_cache[y][x + 1][1] },
            { mv_cache[y + 1][x][0], mv_cache[y + 1][x][1] }
        };
        // FIXME cleanup
        obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                    ref_picture[0],
                    mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >> 1) * 8,
                    pix_op[1],
                    mv);

        /* accumulate the four mid vectors for the chroma prediction */
        mx += mv[0][0];
        my += mv[0][1];
    }
    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1],
                          mx, my);
}
729 
730 static inline void apply_8x8(MpegEncContext *s,
731  uint8_t *dest_y,
732  uint8_t *dest_cb,
733  uint8_t *dest_cr,
734  int dir,
735  uint8_t **ref_picture,
736  qpel_mc_func (*qpix_op)[16],
737  op_pixels_func (*pix_op)[4])
738 {
739  int dxy, mx, my, src_x, src_y;
740  int i;
741  int mb_x = s->mb_x;
742  int mb_y = s->mb_y;
743  uint8_t *ptr, *dest;
744 
745  mx = 0;
746  my = 0;
747  if (s->quarter_sample) {
748  for (i = 0; i < 4; i++) {
749  int motion_x = s->mv[dir][i][0];
750  int motion_y = s->mv[dir][i][1];
751 
752  dxy = ((motion_y & 3) << 2) | (motion_x & 3);
753  src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
754  src_y = mb_y * 16 + (motion_y >> 2) + (i >> 1) * 8;
755 
756  /* WARNING: do no forget half pels */
757  src_x = av_clip(src_x, -16, s->width);
758  if (src_x == s->width)
759  dxy &= ~3;
760  src_y = av_clip(src_y, -16, s->height);
761  if (src_y == s->height)
762  dxy &= ~12;
763 
764  ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
765  if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 3) - 8, 0) ||
766  (unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y & 3) - 8, 0)) {
768  s->linesize, s->linesize,
769  9, 9,
770  src_x, src_y,
771  s->h_edge_pos,
772  s->v_edge_pos);
773  ptr = s->sc.edge_emu_buffer;
774  }
775  dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
776  qpix_op[1][dxy](dest, ptr, s->linesize);
777 
778  mx += s->mv[dir][i][0] / 2;
779  my += s->mv[dir][i][1] / 2;
780  }
781  } else {
782  for (i = 0; i < 4; i++) {
783  hpel_motion(s,
784  dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
785  ref_picture[0],
786  mb_x * 16 + (i & 1) * 8,
787  mb_y * 16 + (i >> 1) * 8,
788  pix_op[1],
789  s->mv[dir][i][0],
790  s->mv[dir][i][1]);
791 
792  mx += s->mv[dir][i][0];
793  my += s->mv[dir][i][1];
794  }
795  }
796 
797  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
798  chroma_4mv_motion(s, dest_cb, dest_cr,
799  ref_picture, pix_op[1], mx, my);
800 }
801 
815  uint8_t *dest_y,
816  uint8_t *dest_cb,
817  uint8_t *dest_cr,
818  int dir,
819  uint8_t **ref_picture,
820  op_pixels_func (*pix_op)[4],
821  qpel_mc_func (*qpix_op)[16],
822  int is_mpeg12)
823 {
824  int i;
825  int mb_y = s->mb_y;
826 
827  prefetch_motion(s, ref_picture, dir);
828 
829  if (!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B) {
830  apply_obmc(s, dest_y, dest_cb, dest_cr, ref_picture, pix_op);
831  return;
832  }
833 
834  switch (s->mv_type) {
835  case MV_TYPE_16X16:
836  if (s->mcsel) {
837  if (s->real_sprite_warping_points == 1) {
838  gmc1_motion(s, dest_y, dest_cb, dest_cr,
839  ref_picture);
840  } else {
841  gmc_motion(s, dest_y, dest_cb, dest_cr,
842  ref_picture);
843  }
844  } else if (!is_mpeg12 && s->quarter_sample) {
845  qpel_motion(s, dest_y, dest_cb, dest_cr,
846  0, 0, 0,
847  ref_picture, pix_op, qpix_op,
848  s->mv[dir][0][0], s->mv[dir][0][1], 16);
849  } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
850  s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
851  ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
852  ref_picture, pix_op,
853  s->mv[dir][0][0], s->mv[dir][0][1], 16);
854  } else {
855  mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
856  ref_picture, pix_op,
857  s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y);
858  }
859  break;
860  case MV_TYPE_8X8:
861  if (!is_mpeg12)
862  apply_8x8(s, dest_y, dest_cb, dest_cr,
863  dir, ref_picture, qpix_op, pix_op);
864  break;
865  case MV_TYPE_FIELD:
866  if (s->picture_structure == PICT_FRAME) {
867  if (!is_mpeg12 && s->quarter_sample) {
868  for (i = 0; i < 2; i++)
869  qpel_motion(s, dest_y, dest_cb, dest_cr,
870  1, i, s->field_select[dir][i],
871  ref_picture, pix_op, qpix_op,
872  s->mv[dir][i][0], s->mv[dir][i][1], 8);
873  } else {
874  /* top field */
875  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
876  0, s->field_select[dir][0],
877  ref_picture, pix_op,
878  s->mv[dir][0][0], s->mv[dir][0][1], 8, mb_y);
879  /* bottom field */
880  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
881  1, s->field_select[dir][1],
882  ref_picture, pix_op,
883  s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
884  }
885  } else {
886  if (s->picture_structure != s->field_select[dir][0] + 1 &&
887  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
888  ref_picture = s->current_picture_ptr->f->data;
889  }
890 
891  mpeg_motion(s, dest_y, dest_cb, dest_cr,
892  s->field_select[dir][0],
893  ref_picture, pix_op,
894  s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y >> 1);
895  }
896  break;
897  case MV_TYPE_16X8:
898  for (i = 0; i < 2; i++) {
899  uint8_t **ref2picture;
900 
901  if (s->picture_structure == s->field_select[dir][i] + 1
902  || s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
903  ref2picture = ref_picture;
904  } else {
905  ref2picture = s->current_picture_ptr->f->data;
906  }
907 
908  mpeg_motion(s, dest_y, dest_cb, dest_cr,
909  s->field_select[dir][i],
910  ref2picture, pix_op,
911  s->mv[dir][i][0], s->mv[dir][i][1] + 16 * i,
912  8, mb_y >> 1);
913 
914  dest_y += 16 * s->linesize;
915  dest_cb += (16 >> s->chroma_y_shift) * s->uvlinesize;
916  dest_cr += (16 >> s->chroma_y_shift) * s->uvlinesize;
917  }
918  break;
919  case MV_TYPE_DMV:
920  if (s->picture_structure == PICT_FRAME) {
921  for (i = 0; i < 2; i++) {
922  int j;
923  for (j = 0; j < 2; j++)
924  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
925  j, j ^ i, ref_picture, pix_op,
926  s->mv[dir][2 * i + j][0],
927  s->mv[dir][2 * i + j][1], 8, mb_y);
928  pix_op = s->hdsp.avg_pixels_tab;
929  }
930  } else {
931  for (i = 0; i < 2; i++) {
932  mpeg_motion(s, dest_y, dest_cb, dest_cr,
933  s->picture_structure != i + 1,
934  ref_picture, pix_op,
935  s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
936  16, mb_y >> 1);
937 
938  // after put we make avg of the same block
939  pix_op = s->hdsp.avg_pixels_tab;
940 
941  /* opposite parity is always in the same frame if this is
942  * second field */
943  if (!s->first_field) {
944  ref_picture = s->current_picture_ptr->f->data;
945  }
946  }
947  }
948  break;
949  default: assert(0);
950  }
951 }
952 
954  uint8_t *dest_y, uint8_t *dest_cb,
955  uint8_t *dest_cr, int dir,
956  uint8_t **ref_picture,
957  op_pixels_func (*pix_op)[4],
958  qpel_mc_func (*qpix_op)[16])
959 {
960 #if !CONFIG_SMALL
961  if (s->out_format == FMT_MPEG1)
962  mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
963  ref_picture, pix_op, qpix_op, 1);
964  else
965 #endif
966  mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
967  ref_picture, pix_op, qpix_op, 0);
968 }
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
#define CONFIG_WMV2_ENCODER
Definition: config.h:1095
void(* gmc1)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x16, int y16, int rounder)
translational global motion compensation.
Definition: mpegvideodsp.h:32
static void gmc_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture)
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:263
int sprite_warping_accuracy
Definition: mpegvideo.h:388
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegpicture.h:36
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
MJPEG encoder.
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:127
enum AVCodecID codec_id
Definition: mpegvideo.h:107
av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (%s)\, len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic ? ac->func_descr_generic :ac->func_descr)
#define FF_BUG_HPEL_CHROMA
Definition: avcodec.h:2588
int obmc
overlapped block motion compensation
Definition: mpegvideo.h:354
int real_sprite_warping_points
Definition: mpegvideo.h:381
mpegvideo header.
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:65
int stride
Definition: mace.c:144
#define AV_COPY32(d, s)
Definition: intreadwrite.h:517
int chroma_x_shift
Definition: mpegvideo.h:459
int field_select[2][2]
Definition: mpegvideo.h:271
ScratchpadContext sc
Definition: mpegvideo.h:197
uint8_t
static void chroma_4mv_motion(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, op_pixels_func *pix_op, int mx, int my)
H.263 chroma 4mv motion compensation.
enum OutputFormat out_format
output format
Definition: mpegvideo.h:99
void(* gmc)(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy, int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
global motion compensation.
Definition: mpegvideodsp.h:37
int no_rounding
apply no rounding to motion compensation (MPEG-4, msmpeg4, ...) for B-frames rounding mode is always ...
Definition: mpegvideo.h:278
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:175
quarterpel DSP functions
#define FF_BUG_QPEL_CHROMA2
Definition: avcodec.h:2585
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:763
int sprite_offset[2][2]
sprite offset[isChroma][isMVY]
Definition: mpegvideo.h:382
#define LOCAL_ALIGNED_8(t, v,...)
Definition: internal.h:105
#define MID
#define src
Definition: vp8dsp.c:254
static void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir)
#define CONFIG_H261_DECODER
Definition: config.h:584
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:190
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
int chroma_y_shift
Definition: mpegvideo.h:460
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:145
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1503
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:386
#define FFMAX(a, b)
Definition: common.h:64
common internal API header
int sprite_delta[2][2]
sprite_delta [isY][isMVY]
Definition: mpegvideo.h:383
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:53
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:179
Picture.
Definition: mpegpicture.h:45
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t buf_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:52
#define CONFIG_GRAY
Definition: config.h:399
static void gmc1_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture)
#define FF_BUG_QPEL_CHROMA
Definition: avcodec.h:2583
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:198
static av_always_inline void mpeg_motion_internal(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], int motion_x, int motion_y, int h, int is_mpeg12, int mb_y)
#define OBMC_FILTER4(x, t, l, m, r, b)
int first_field
is 1 for the first field of a field picture 0 otherwise
Definition: mpegvideo.h:465
static const int8_t mv[256][2]
Definition: 4xm.c:75
static av_always_inline void mpv_motion_internal(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16], int is_mpeg12)
motion compensation of a single macroblock
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:260
Libavcodec external API header.
static av_always_inline void emulated_edge_mc(MpegEncContext *s, int src_x, int src_y, int uvsrc_x, int uvsrc_y, int field_based, uint8_t **ptr_y, uint8_t **ptr_cb, uint8_t **ptr_cr)
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:129
static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int bottom_field, int field_select, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], int motion_x, int motion_y, int h, int mb_y)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:158
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:95
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:262
H.261 codec.
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:82
struct AVFrame * f
Definition: mpegpicture.h:46
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:130
static int ff_h263_round_chroma(int x)
Definition: motion_est.h:96
static void mpeg_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_select, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], int motion_x, int motion_y, int h, int mb_y)
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:206
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:146
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:270
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:126
MpegEncContext.
Definition: mpegvideo.h:76
struct AVCodecContext * avctx
Definition: mpegvideo.h:93
static void qpel_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16], int motion_x, int motion_y, int h)
MpegVideoDSPContext mdsp
Definition: mpegvideo.h:223
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:125
void ff_mspel_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], int motion_x, int motion_y, int h)
Definition: wmv2.c:100
uint8_t * dest[3]
Definition: mpegvideo.h:289
#define CONFIG_H261_ENCODER
Definition: config.h:1059
Bi-dir predicted.
Definition: avutil.h:262
#define IS_INTRA(x, y)
#define PICT_FRAME
Definition: mpegutils.h:39
int picture_structure
Definition: mpegvideo.h:443
VideoDSPContext vdsp
Definition: mpegvideo.h:227
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:264
static void apply_obmc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, op_pixels_func(*pix_op)[4])
uint8_t * obmc_scratchpad
Definition: mpegpicture.h:38
#define CONFIG_WMV2_DECODER
Definition: config.h:712
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:56
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
Definition: mpegvideo.h:114
#define av_always_inline
Definition: attributes.h:40
void ff_h261_loop_filter(MpegEncContext *s)
Definition: h261.c:63
static int hpel_motion(MpegEncContext *s, uint8_t *dest, uint8_t *src, int src_x, int src_y, op_pixels_func *pix_op, int motion_x, int motion_y)
static void obmc_motion(MpegEncContext *s, uint8_t *dest, uint8_t *src, int src_x, int src_y, op_pixels_func *pix_op, int16_t mv[5][2])
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:261
static void apply_8x8(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, qpel_mc_func(*qpix_op)[16], op_pixels_func(*pix_op)[4])
static void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
#define OBMC_FILTER(x, t, l, m, r, b)
HpelDSPContext hdsp
Definition: mpegvideo.h:220