129 int8_t intra4x4_pred_mode_cache[5 * 8];
130 int8_t (*intra4x4_pred_mode);
144 int block_offset[2 * (16 * 3)];
147 #define FULLPEL_MODE  1
148 #define HALFPEL_MODE  2
149 #define THIRDPEL_MODE 3
150 #define PREDICT_MODE  4
162 0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
163 2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
164 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
165 0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
169 0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
170 3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
171 1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
172 3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
178 { 0, 2 }, { 1, 1 }, { 2, 0 },
179 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
180 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
181 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
182 { 2, 4 }, { 3, 3 }, { 4, 2 },
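The 25 entries of svq3_pred_0 read like (x, y) coordinates, and the rows shown here are consistent with a zigzag (anti-diagonal) traversal of a 5x5 grid. A minimal standalone sketch that reproduces the visible ordering, offered only as an aid to reading the table, not as a statement about how it was generated:

#include <stdio.h>

/* Walk a 5x5 grid along anti-diagonals, alternating direction per diagonal.
 * The pairs printed for the middle diagonals match the svq3_pred_0 rows above. */
int main(void)
{
    for (int d = 0; d <= 8; d++) {
        for (int k = 0; k <= d; k++) {
            int x = (d & 1) ? d - k : k;   /* reverse direction on odd diagonals */
            int y = d - x;
            if (x < 5 && y < 5)
                printf("{ %d, %d }, ", x, y);
        }
        printf("\n");
    }
    return 0;
}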
188 { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
189 { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
190 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
191 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
192 { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
193 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
194 { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
195 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
196 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
197 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
198 { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
199 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
202 static const struct {
206 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
207 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
208 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
209 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
213 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
214 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
215 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
216 61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
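As a quick sanity check on the table above, the coefficients appear to roughly double every six steps, i.e. one scaling octave per 6 QP values. A small sketch that prints the ratios, assuming the table is indexed directly by qp in the range 0..31:

#include <stdio.h>

static const unsigned svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
     9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
    24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
    61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
};

/* Print coeff[q + 6] / coeff[q]; the ratios cluster around 2.0. */
int main(void)
{
    for (int q = 0; q + 6 < 32; q++)
        printf("q=%2d  ratio=%.3f\n", q,
               (double)svq3_dequant_coeff[q + 6] / svq3_dequant_coeff[q]);
    return 0;
}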
227 for (i = 0; i < 4; i++) {
228 const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
229 const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
230 const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
231 const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
233 temp[4 * i + 0] = z0 + z3;
234 temp[4 * i + 1] = z1 + z2;
235 temp[4 * i + 2] = z1 - z2;
236 temp[4 * i + 3] = z0 - z3;
239 for (i = 0; i < 4; i++) {
240 const int offset = x_offset[i];
241 const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
242 const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
243 const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
244 const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
246 output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
247 output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
248 output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
249 output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
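Both passes above apply the same 1-D kernel: the even pair is scaled by 13, the odd pair is mixed with 7 and 17, first across rows into temp[] and then down columns with the final dequantize-and-round. A minimal sketch of that shared butterfly, factored out for clarity (the helper name is illustrative, not from the file):

#include <stdio.h>

/* One pass of the 4-point SVQ3 transform used in svq3_luma_dc_dequant_idct_c. */
static void svq3_butterfly4(const int in[4], int out[4])
{
    const int z0 = 13 * (in[0] + in[2]);
    const int z1 = 13 * (in[0] - in[2]);
    const int z2 =  7 * in[1] - 17 * in[3];
    const int z3 = 17 * in[1] +  7 * in[3];

    out[0] = z0 + z3;
    out[1] = z1 + z2;
    out[2] = z1 - z2;
    out[3] = z0 - z3;
}

int main(void)
{
    int in[4] = { 100, 0, 0, 0 }, out[4];
    svq3_butterfly4(in, out);   /* a DC-only input comes out scaled by 13 everywhere */
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);
    return 0;
}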
261 dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
262 : qmul * (block[0] >> 3) / 2);
266 for (i = 0; i < 4; i++) {
267 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
268 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
269 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
270 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
272 block[0 + 4 * i] = z0 + z3;
273 block[1 + 4 * i] = z1 + z2;
274 block[2 + 4 * i] = z1 - z2;
275 block[3 + 4 * i] = z0 - z3;
278 for (i = 0; i < 4; i++) {
279 const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
280 const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
281 const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
282 const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
283 const int rr = (dc + 0x80000);
285 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
286 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
287 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
288 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
291 memset(block, 0, 16 * sizeof(int16_t));
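The dc shortcut above folds in a factor of 13 * 13 because a DC-only block picks up one factor of 13 from each of the two passes before the (value * qmul + 0x80000) >> 20 step, where 0x80000 is half of 2^20, i.e. round-to-nearest with 20 fractional bits. A small sketch, assuming a DC-only block, that pushes it through the same two passes and confirms the 13 * 13 scaling:

#include <stdio.h>

int main(void)
{
    int block[16] = { 7 };   /* dc = 7, all AC coefficients zero */
    int i;

    for (i = 0; i < 4; i++) {   /* row pass, as in svq3_add_idct_c */
        const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
        const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
        const int z2 =  7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
        const int z3 = 17 * block[1 + 4 * i] +  7 * block[3 + 4 * i];

        block[0 + 4 * i] = z0 + z3;
        block[1 + 4 * i] = z1 + z2;
        block[2 + 4 * i] = z1 - z2;
        block[3 + 4 * i] = z0 - z3;
    }
    for (i = 0; i < 4; i++) {   /* column pass, first output of each column */
        const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
        const int z3 = 17 * block[i + 4 * 1] +  7 * block[i + 4 * 3];
        printf("column %d: %d (13 * 13 * 7 = %d)\n", i, z0 + z3, 13 * 13 * 7);
    }
    return 0;
}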
295 int index, const int type)
297 static const uint8_t *const scan_patterns[4] = {
303 const int intra = 3 * type >> 2;
304 const uint8_t *const scan = scan_patterns[type];
306 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
308 int sign = (vlc & 1) ? 0 : -1;
315 } else if (vlc < 4) {
320 level = (vlc + 9 >> 2) - run;
329 ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
333 ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
337 if ((index += run) >= limit)
340 block[scan[index]] = (level ^ sign) - sign;
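Since sign was set a few lines up to either 0 or -1 from the VLC parity, (level ^ sign) - sign is the standard branch-free conditional negation: it returns level unchanged for sign == 0 and -level for sign == -1. A tiny demonstration:

#include <stdio.h>

/* sign == 0  -> (level ^ 0) - 0    == level
 * sign == -1 -> (level ^ -1) + 1   == ~level + 1 == -level */
int main(void)
{
    for (int level = 1; level <= 3; level++)
        for (int sign = 0; sign >= -1; sign--)
            printf("level=%d sign=%2d -> %3d\n",
                   level, sign, (level ^ sign) - sign);
    return 0;
}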
353 int i, int list, int part_width)
355 const int topright_ref = s->ref_cache[list][i - 8 + part_width];
358 *C = s->mv_cache[list][i - 8 + part_width];
374 int part_width, int list,
375 int ref, int *const mx, int *const my)
377 const int index8 = scan8[n];
378 const int top_ref = s->ref_cache[list][index8 - 8];
379 const int left_ref = s->ref_cache[list][index8 - 1];
380 const int16_t *const A = s->mv_cache[list][index8 - 1];
381 const int16_t *const B = s->mv_cache[list][index8 - 8];
383 int diagonal_ref, match_count;
394 match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
395 if (match_count > 1) {
398 } else if (match_count == 1) {
399 if (left_ref == ref) {
402 } else if (top_ref == ref) {
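The match_count logic above appears to follow the usual H.264-style spatial predictor: when exactly one of the left, top, or diagonal neighbours uses the target reference index, its motion vector is taken directly; otherwise the prediction falls back to the component-wise median of A, B, and C. A minimal median-of-three sketch, a stand-in for FFmpeg's mid_pred shown purely for illustration:

#include <stdio.h>

/* Component-wise median of three, the fallback predictor when zero or
 * more than one neighbour shares the target reference. */
static int median3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }   /* ensure a <= b */
    if (b > c)  b = c;                        /* b = min(b, c) */
    return a > b ? a : b;                     /* max(a, min(b, c)) */
}

int main(void)
{
    printf("%d\n", median3(3, -5, 1));   /* prints 1 */
    printf("%d\n", median3(7,  7, 2));   /* prints 7 */
    return 0;
}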
424 int mx, int my, int dxy,
425 int thirdpel, int dir, int avg)
430 int blocksize = 2 - (width >> 3);
437 if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
440 mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
441 my = av_clip(my, -16, s->v_edge_pos - height + 15);
446 src = pic->f->data[0] + mx + my * linesize;
451 width + 1, height + 1,
465 mx = mx + (mx < (int) x) >> 1;
466 my = my + (my < (int) y) >> 1;
468 height = height >> 1;
471 for (i = 1; i < 3; i++) {
472 dest = s->cur_pic->f->data[i] + (x >> 1) + (y >> 1) * uvlinesize;
473 src = pic->f->data[i] + mx + my * uvlinesize;
477 uvlinesize, uvlinesize,
478 width + 1, height + 1,
500 int i, j, k, mx, my, dx, dy, x, y;
501 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
502 const int part_height = 16 >> ((unsigned)(size + 1) / 3);
503 const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
504 const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
505 const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
507 for (i = 0; i < 16; i += part_height)
508 for (j = 0; j < 16; j += part_width) {
509 const int b_xy = (4 * s->mb_x + (j >> 2)) +
512 x = 16 * s->mb_x + j;
513 y = 16 * s->mb_y + i;
514 k = (j >> 2 & 1) + (i >> 1 & 2) +
515 (j >> 1 & 4) + (i & 8);
537 mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
538 my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
556 mx = (mx + 1 >> 1) + dx;
557 my = (my + 1 >> 1) + dy;
558 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
559 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
560 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
563 fx, fy, dxy, 1, dir, avg);
567 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
568 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
569 dxy = (mx & 1) + 2 * (my & 1);
572 mx >> 1, my >> 1, dxy, 0, dir, avg);
576 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
577 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
580 mx, my, 0, 0, dir, avg);
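The (unsigned)(v + 0x3000) / 3 - 0x1000 pattern above (and its / 6 variant with a 0x6000 bias) is floor division for possibly negative sixth-pel values: the bias is a multiple of the divisor chosen large enough to make the operand non-negative, the unsigned division then rounds toward minus infinity, and bias / divisor (0x1000) is subtracted back out. A small sketch comparing it against a reference floor division, valid for v >= -0x3000 (the helper name is illustrative):

#include <stdio.h>
#include <math.h>

static int div3_floor_biased(int v)
{
    return (int)((unsigned)(v + 0x3000) / 3) - 0x1000;   /* 0x3000 == 3 * 0x1000 */
}

int main(void)
{
    for (int v = -10; v <= 10; v++) {
        int ref = (int)floor(v / 3.0);
        printf("v=%3d  biased=%3d  floor=%3d%s\n", v, div3_floor_biased(v), ref,
               div3_floor_biased(v) == ref ? "" : "  MISMATCH");
    }
    return 0;
}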
589 if (part_height == 8 && i < 8) {
592 if (part_width == 8 && j < 8)
595 if (part_width == 8 && j < 8)
597 if (part_width == 4 || part_height == 4)
603 part_width >> 2, part_height >> 2, s->b_stride,
611 int mb_type, const int *block_offset,
616 for (i = 0; i < 16; i++)
618 uint8_t *const ptr = dest_y + block_offset[i];
632 const int *block_offset,
640 for (i = 0; i < 16; i++) {
641 uint8_t *const ptr = dest_y + block_offset[i];
648 assert(s->mb_y || linesize <= block_offset[i]);
649 if (!topright_avail) {
650 tr = ptr[3 - linesize] * 0x01010101u;
653 topright = ptr + 4 - linesize;
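Multiplying an 8-bit sample by 0x01010101u replicates it into all four bytes of a 32-bit word, which is how the missing top-right samples are synthesized from the last available top sample when topright_avail is false. A quick check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t sample = 0xAB;
    uint32_t tr = sample * 0x01010101u;   /* replicate the byte into all four lanes */
    printf("0x%08X\n", (unsigned)tr);     /* prints 0xABABABAB */
    return 0;
}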
671 const int mb_x = s->mb_x;
672 const int mb_y = s->mb_y;
673 const int mb_xy = s->mb_xy;
675 uint8_t *dest_y, *dest_cb, *dest_cr;
676 int linesize, uvlinesize;
679 const int block_h = 16 >> 1;
684 dest_y = s->cur_pic->f->data[0] + (mb_x + mb_y * linesize) * 16;
685 dest_cb = s->cur_pic->f->data[1] + mb_x * 8 + mb_y * uvlinesize * block_h;
686 dest_cr = s->cur_pic->f->data[2] + mb_x * 8 + mb_y * uvlinesize * block_h;
689 s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * uvlinesize + 64, dest_cr - dest_cb, 2);
701 uint8_t *dest[2] = { dest_cb, dest_cr };
706 for (j = 1; j < 3; j++) {
707 for (i = j * 16; i < j * 16 + 4; i++)
709 uint8_t *const ptr = dest[j - 1] + block_offset[i];
719 int i, j, k, m, dir, mode;
723 const int mb_xy = s->mb_xy;
750 } else if (mb_type < 8) {
768 for (m = 0; m < 2; m++) {
770 for (i = 0; i < 4; i++)
774 for (i = 0; i < 4; i++)
780 4 * 2 * sizeof(int16_t));
816 for (i = 0; i < 4; i++)
818 0, 4 * 2 * sizeof(int16_t));
824 for (i = 0; i < 4; i++)
826 0, 4 * 2 * sizeof(int16_t));
831 } else if (mb_type == 8 || mb_type == 33) {
839 for (i = 0; i < 4; i++)
855 for (i = 0; i < 16; i += 2) {
860 "luma prediction:%"PRIu32
"\n", vlc);
868 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
870 if (left[1] == -1 || left[2] == -1) {
876 for (i = 0; i < 4; i++)
881 i4x4[4] = i4x4_cache[7 + 8 * 3];
882 i4x4[5] = i4x4_cache[7 + 8 * 2];
883 i4x4[6] = i4x4_cache[7 + 8 * 1];
893 for (i = 0; i < 4; i++)
903 dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
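The expression dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1 is a table-free remap of a 2-bit prediction-mode index, most likely translating the bitstream's mode numbering into the ordering of the H.264 predictor tables used internally (that interpretation is an assumption). Enumerating it shows the permutation it applies:

#include <stdio.h>

/* Prints the remap 0 -> 1, 1 -> 2, 2 -> 0, 3 -> 3. */
int main(void)
{
    for (int dir = 0; dir < 4; dir++)
        printf("%d -> %d\n", dir, (dir >> 1) ^ 3 * (dir & 1) ^ 1);
    return 0;
}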
916 for (i = 0; i < 4; i++)
918 0, 4 * 2 * sizeof(int16_t));
920 for (i = 0; i < 4; i++)
922 0, 4 * 2 * sizeof(int16_t));
956 "error while decoding intra luma dc\n");
965 for (i = 0; i < 4; i++)
966 if ((cbp & (1 << i))) {
967 for (j = 0; j < 4; j++) {
968 k = index ? (1 * (j & 1) + 2 * (i & 1) +
969 2 * (j & 2) + 4 * (i & 2))
975 "error while decoding block\n");
982 for (i = 1; i < 3; ++i)
985 "error while decoding chroma dc block\n");
990 for (i = 1; i < 3; i++) {
991 for (j = 0; j < 4; j++) {
997 "error while decoding chroma ac block\n");
1019 const int mb_xy = s->mb_xy;
1025 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
1030 int slice_bits, slice_bytes, slice_length;
1031 int length = header >> 5 & 3;
1034 slice_bits = slice_length * 8;
1035 slice_bytes = slice_length + length - 1;
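From the visible lines, the first slice-header byte carries a type code in its low bits (checked via header & 0x9F, with bits 5-6 required non-zero) and header >> 5 & 3 as the size code for an explicit slice length, from which slice_bits and slice_bytes follow. A small helper sketch that just unpacks those fields; the function name is hypothetical, and slice_length, which the decoder reads from the bitstream separately, is passed in as a plain parameter:

#include <stdio.h>

static void unpack_slice_header(unsigned header, int slice_length)
{
    int ok     = ((header & 0x9F) == 1 || (header & 0x9F) == 2) &&
                 (header & 0x60) != 0;    /* accepted header values */
    int length = header >> 5 & 3;         /* presumably bytes used for the explicit length field */
    int slice_bits  = slice_length * 8;
    int slice_bytes = slice_length + length - 1;

    printf("header=0x%02X ok=%d length=%d slice_bits=%d slice_bytes=%d\n",
           header, ok, length, slice_bits, slice_bytes);
}

int main(void)
{
    unpack_slice_header(0x21, 100);   /* hypothetical example values */
    return 0;
}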
1069 if ((header & 0x9F) == 2) {
1095 -1, 4 * sizeof(int8_t));
1097 -1, 8 * sizeof(int8_t) * s->mb_x);
1113 const int max_qp = 51;
1115 for (q = 0; q < max_qp + 1; q++) {
1118 for (x = 0; x < 16; x++)
1128 unsigned char *extradata;
1129 unsigned char *extradata_end;
1131 int marker_found = 0;
1165 extradata = (unsigned char *)avctx->extradata;
1169 if (!memcmp(extradata, "SEQH", 4)) {
1180 int frame_size_code;
1182 size = AV_RB32(&extradata[4]);
1183 if (size > extradata_end - extradata - 8)
1188 frame_size_code = get_bits(&gb, 3);
1189 switch (frame_size_code) {
1251 unsigned long buf_len = watermark_width *
1252 watermark_height * 4;
1256 if (watermark_height > 0 &&
1257 (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
1262 watermark_width, watermark_height);
1264 "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1265 u1, u2, u3, u4, offset);
1266 if (uncompress(buf, &buf_len, extradata + 8 + offset,
1267 size - offset) != Z_OK) {
1269 "could not uncompress watermark logo\n");
1280 "this svq3 file contains watermark which need zlib support compiled in\n");
1304 for (x = 0; x < s->mb_width; x++) {
1318 for (i = 0; i < 2; i++) {
1332 const int b4_stride = s->mb_width * 4 + 1;
1333 const int b4_array_size = b4_stride * s->mb_height * 4;
1344 for (i = 0; i < 2; i++) {
1380 int buf_size = avpkt->size;
1384 if (buf_size == 0) {
1417 for (i = 0; i < 16; i++) {
1421 for (i = 0; i < 16; i++) {
1456 "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1492 for (m = 0; m < 2; m++) {
1494 for (i = 0; i < 4; i++) {
1496 for (j = -1; j < 4; j++)
1526 "error while decoding MB %d %d\n", s->
mb_x, s->
mb_y);