39 { 1, 3, 1, 3, 1, 3, 1, 3, },
40 { 2, 0, 2, 0, 2, 0, 2, 0, },
44 { 6, 2, 6, 2, 6, 2, 6, 2, },
45 { 0, 4, 0, 4, 0, 4, 0, 4, },
49 { 8, 4, 11, 7, 8, 4, 11, 7, },
50 { 2, 14, 1, 13, 2, 14, 1, 13, },
51 { 10, 6, 9, 5, 10, 6, 9, 5, },
52 { 0, 12, 3, 15, 0, 12, 3, 15, },
56 { 17, 9, 23, 15, 16, 8, 22, 14, },
57 { 5, 29, 3, 27, 4, 28, 2, 26, },
58 { 21, 13, 19, 11, 20, 12, 18, 10, },
59 { 0, 24, 6, 30, 1, 25, 7, 31, },
60 { 16, 8, 22, 14, 17, 9, 23, 15, },
61 { 4, 28, 2, 26, 5, 29, 3, 27, },
62 { 20, 12, 18, 10, 21, 13, 19, 11, },
63 { 1, 25, 7, 31, 0, 24, 6, 30, },
67 { 0, 55, 14, 68, 3, 58, 17, 72, },
68 { 37, 18, 50, 32, 40, 22, 54, 35, },
69 { 9, 64, 5, 59, 13, 67, 8, 63, },
70 { 46, 27, 41, 23, 49, 31, 44, 26, },
71 { 2, 57, 16, 71, 1, 56, 15, 70, },
72 { 39, 21, 52, 34, 38, 19, 51, 33, },
73 { 11, 66, 7, 62, 10, 65, 6, 60, },
74 { 48, 30, 43, 25, 47, 29, 42, 24, },
79 {117, 62, 158, 103, 113, 58, 155, 100, },
80 { 34, 199, 21, 186, 31, 196, 17, 182, },
81 {144, 89, 131, 76, 141, 86, 127, 72, },
82 { 0, 165, 41, 206, 10, 175, 52, 217, },
83 {110, 55, 151, 96, 120, 65, 162, 107, },
84 { 28, 193, 14, 179, 38, 203, 24, 189, },
85 {138, 83, 124, 69, 148, 93, 134, 79, },
86 { 7, 172, 48, 213, 3, 168, 45, 210, },
91 { 0, 143, 18, 200, 2, 156, 25, 215, },
92 { 78, 28, 125, 64, 89, 36, 138, 74, },
93 { 10, 180, 3, 161, 16, 195, 8, 175, },
94 {109, 51, 93, 38, 121, 60, 105, 47, },
95 { 1, 152, 23, 210, 0, 147, 20, 205, },
96 { 85, 33, 134, 71, 81, 30, 130, 67, },
97 { 14, 190, 6, 171, 12, 185, 5, 166, },
98 {117, 57, 101, 44, 113, 54, 97, 41, },
103 { 0, 124, 8, 193, 0, 140, 12, 213, },
104 { 55, 14, 104, 42, 66, 19, 119, 52, },
105 { 3, 168, 1, 145, 6, 187, 3, 162, },
106 { 86, 31, 70, 21, 99, 39, 82, 28, },
107 { 0, 134, 11, 206, 0, 129, 9, 200, },
108 { 62, 17, 114, 48, 58, 16, 109, 45, },
109 { 5, 181, 2, 157, 4, 175, 1, 151, },
110 { 95, 36, 78, 26, 90, 34, 74, 24, },
115 { 0, 107, 3, 187, 0, 125, 6, 212, },
116 { 39, 7, 86, 28, 49, 11, 102, 36, },
117 { 1, 158, 0, 131, 3, 180, 1, 151, },
118 { 68, 19, 52, 12, 81, 25, 64, 17, },
119 { 0, 119, 5, 203, 0, 113, 4, 195, },
120 { 45, 9, 96, 33, 42, 8, 91, 30, },
121 { 2, 172, 1, 144, 2, 165, 0, 137, },
122 { 77, 23, 60, 15, 72, 21, 56, 14, },
126 #define output_pixel(pos, val, bias, signedness) \ 128 AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \ 130 AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \ 135 int big_endian,
int output_bits)
138 int shift = 19 - output_bits;
140 for (i = 0; i <
dstW; i++) {
141 int val = src[i] + (1 << (shift - 1));
149 int big_endian,
int output_bits)
152 int shift = 15 + 16 - output_bits;
154 for (i = 0; i <
dstW; i++) {
155 int val = 1 << (30-output_bits);
164 for (j = 0; j < filterSize; j++)
165 val += src[j][i] * filter[j];
173 #define output_pixel(pos, val) \ 175 AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \ 177 AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \ 182 int big_endian,
int output_bits)
185 int shift = 15 - output_bits;
187 for (i = 0; i <
dstW; i++) {
188 int val = src[i] + (1 << (shift - 1));
195 const int16_t **
src, uint16_t *dest,
int dstW,
196 int big_endian,
int output_bits)
199 int shift = 11 + 16 - output_bits;
201 for (i = 0; i <
dstW; i++) {
202 int val = 1 << (26-output_bits);
205 for (j = 0; j < filterSize; j++)
206 val += src[j][i] * filter[j];
214 #define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \ 215 static void yuv2plane1_ ## bits ## BE_LE ## _c(const int16_t *src, \ 216 uint8_t *dest, int dstW, \ 217 const uint8_t *dither, int offset)\ 219 yuv2plane1_ ## template_size ## _c_template((const typeX_t *) src, \ 220 (uint16_t *) dest, dstW, is_be, bits); \ 222 static void yuv2planeX_ ## bits ## BE_LE ## _c(const int16_t *filter, int filterSize, \ 223 const int16_t **src, uint8_t *dest, int dstW, \ 224 const uint8_t *dither, int offset)\ 226 yuv2planeX_## template_size ## _c_template(filter, \ 227 filterSize, (const typeX_t **) src, \ 228 (uint16_t *) dest, dstW, is_be, bits); \ 237 static void yuv2planeX_8_c(
const int16_t *
filter,
int filterSize,
242 for (i=0; i<
dstW; i++) {
243 int val = dither[(i + offset) & 7] << 12;
245 for (j=0; j<filterSize; j++)
246 val += src[j][i] * filter[j];
248 dest[i]= av_clip_uint8(val>>19);
256 for (i=0; i<
dstW; i++) {
257 int val = (src[i] + dither[(i + offset) & 7]) >> 7;
258 dest[i]= av_clip_uint8(val);
263 const int16_t **chrUSrc,
const int16_t **chrVSrc,
272 int u = chrDither[i & 7] << 12;
273 int v = chrDither[(i + 3) & 7] << 12;
275 for (j=0; j<chrFilterSize; j++) {
276 u += chrUSrc[j][i] * chrFilter[j];
277 v += chrVSrc[j][i] * chrFilter[j];
280 dest[2*i]= av_clip_uint8(u>>19);
281 dest[2*i+1]= av_clip_uint8(v>>19);
285 int u = chrDither[i & 7] << 12;
286 int v = chrDither[(i + 3) & 7] << 12;
288 for (j=0; j<chrFilterSize; j++) {
289 u += chrUSrc[j][i] * chrFilter[j];
290 v += chrVSrc[j][i] * chrFilter[j];
293 dest[2*i]= av_clip_uint8(v>>19);
294 dest[2*i+1]= av_clip_uint8(u>>19);
298 #define accumulate_bit(acc, val) \ 300 acc |= (val) >= (128 + 110) 301 #define output_pixel(pos, acc) \ 302 if (target == AV_PIX_FMT_MONOBLACK) { \ 310 const int16_t **lumSrc,
int lumFilterSize,
311 const int16_t *chrFilter,
const int16_t **chrUSrc,
312 const int16_t **chrVSrc,
int chrFilterSize,
320 for (i = 0; i <
dstW; i += 2) {
325 for (j = 0; j < lumFilterSize; j++) {
326 Y1 += lumSrc[j][i] * lumFilter[j];
327 Y2 += lumSrc[j][i+1] * lumFilter[j];
331 if ((Y1 | Y2) & 0x100) {
332 Y1 = av_clip_uint8(Y1);
333 Y2 = av_clip_uint8(Y2);
349 const int16_t *ubuf[2],
const int16_t *vbuf[2],
351 int yalpha,
int uvalpha,
int y,
354 const int16_t *buf0 = buf[0], *buf1 = buf[1];
356 int yalpha1 = 4096 - yalpha;
359 for (i = 0; i <
dstW; i += 8) {
362 Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
364 Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
366 Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19;
368 Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19;
370 Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19;
372 Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19;
374 Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19;
376 Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19;
385 const int16_t *ubuf[2],
const int16_t *vbuf[2],
392 for (i = 0; i <
dstW; i += 8) {
409 #undef accumulate_bit 411 #define YUV2PACKEDWRAPPER(name, base, ext, fmt) \ 412 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \ 413 const int16_t **lumSrc, int lumFilterSize, \ 414 const int16_t *chrFilter, const int16_t **chrUSrc, \ 415 const int16_t **chrVSrc, int chrFilterSize, \ 416 const int16_t **alpSrc, uint8_t *dest, int dstW, \ 419 name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \ 420 chrFilter, chrUSrc, chrVSrc, chrFilterSize, \ 421 alpSrc, dest, dstW, y, fmt); \ 424 static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \ 425 const int16_t *ubuf[2], const int16_t *vbuf[2], \ 426 const int16_t *abuf[2], uint8_t *dest, int dstW, \ 427 int yalpha, int uvalpha, int y) \ 429 name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \ 430 dest, dstW, yalpha, uvalpha, y, fmt); \ 433 static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \ 434 const int16_t *ubuf[2], const int16_t *vbuf[2], \ 435 const int16_t *abuf0, uint8_t *dest, int dstW, \ 436 int uvalpha, int y) \ 438 name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \ 439 abuf0, dest, dstW, uvalpha, \ 446 #define output_pixels(pos, Y1, U, Y2, V) \ 447 if (target == AV_PIX_FMT_YUYV422) { \ 448 dest[pos + 0] = Y1; \ 450 dest[pos + 2] = Y2; \ 452 } else if (target == AV_PIX_FMT_YVYU422) { \ 453 dest[pos + 0] = Y1; \ 455 dest[pos + 2] = Y2; \ 459 dest[pos + 1] = Y1; \ 461 dest[pos + 3] = Y2; \ 466 const int16_t **lumSrc,
int lumFilterSize,
467 const int16_t *chrFilter,
const int16_t **chrUSrc,
468 const int16_t **chrVSrc,
int chrFilterSize,
474 for (i = 0; i < ((dstW + 1) >> 1); i++) {
481 for (j = 0; j < lumFilterSize; j++) {
482 Y1 += lumSrc[j][i * 2] * lumFilter[j];
483 Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
485 for (j = 0; j < chrFilterSize; j++) {
486 U += chrUSrc[j][i] * chrFilter[j];
487 V += chrVSrc[j][i] * chrFilter[j];
493 if ((Y1 | Y2 | U | V) & 0x100) {
494 Y1 = av_clip_uint8(Y1);
495 Y2 = av_clip_uint8(Y2);
496 U = av_clip_uint8(U);
497 V = av_clip_uint8(V);
505 const int16_t *ubuf[2],
const int16_t *vbuf[2],
507 int yalpha,
int uvalpha,
int y,
510 const int16_t *buf0 = buf[0], *buf1 = buf[1],
511 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
512 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
513 int yalpha1 = 4096 - yalpha;
514 int uvalpha1 = 4096 - uvalpha;
517 for (i = 0; i < ((dstW + 1) >> 1); i++) {
518 int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
519 int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
520 int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
521 int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
523 Y1 = av_clip_uint8(Y1);
524 Y2 = av_clip_uint8(Y2);
525 U = av_clip_uint8(U);
526 V = av_clip_uint8(V);
534 const int16_t *ubuf[2],
const int16_t *vbuf[2],
538 const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
541 if (uvalpha < 2048) {
542 for (i = 0; i < ((dstW + 1) >> 1); i++) {
543 int Y1 = buf0[i * 2] >> 7;
544 int Y2 = buf0[i * 2 + 1] >> 7;
545 int U = ubuf0[i] >> 7;
546 int V = vbuf0[i] >> 7;
548 Y1 = av_clip_uint8(Y1);
549 Y2 = av_clip_uint8(Y2);
550 U = av_clip_uint8(U);
551 V = av_clip_uint8(V);
556 const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
557 for (i = 0; i < ((dstW + 1) >> 1); i++) {
558 int Y1 = buf0[i * 2] >> 7;
559 int Y2 = buf0[i * 2 + 1] >> 7;
560 int U = (ubuf0[i] + ubuf1[i]) >> 8;
561 int V = (vbuf0[i] + vbuf1[i]) >> 8;
563 Y1 = av_clip_uint8(Y1);
564 Y2 = av_clip_uint8(Y2);
565 U = av_clip_uint8(U);
566 V = av_clip_uint8(V);
579 #define R_B ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE) ? R : B) 580 #define B_R ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE) ? B : R) 581 #define output_pixel(pos, val) \ 582 if (isBE(target)) { \ 590 const int32_t **lumSrc,
int lumFilterSize,
591 const int16_t *chrFilter,
const int32_t **chrUSrc,
592 const int32_t **chrVSrc,
int chrFilterSize,
598 for (i = 0; i < ((dstW + 1) >> 1); i++) {
600 int Y1 = -0x40000000;
601 int Y2 = -0x40000000;
606 for (j = 0; j < lumFilterSize; j++) {
607 Y1 += lumSrc[j][i * 2] * lumFilter[j];
608 Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
610 for (j = 0; j < chrFilterSize; j++) {
611 U += chrUSrc[j][i] * chrFilter[j];
612 V += chrVSrc[j][i] * chrFilter[j];
638 output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
641 output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
651 int yalpha,
int uvalpha,
int y,
654 const int32_t *buf0 = buf[0], *buf1 = buf[1],
655 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
656 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
657 int yalpha1 = 4096 - yalpha;
658 int uvalpha1 = 4096 - uvalpha;
661 for (i = 0; i < ((dstW + 1) >> 1); i++) {
662 int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14;
663 int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14;
664 int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha + (-128 << 23)) >> 14;
665 int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha + (-128 << 23)) >> 14;
680 output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
683 output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
695 const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
698 if (uvalpha < 2048) {
699 for (i = 0; i < ((dstW + 1) >> 1); i++) {
700 int Y1 = (buf0[i * 2] ) >> 2;
701 int Y2 = (buf0[i * 2 + 1]) >> 2;
702 int U = (ubuf0[i] + (-128 << 11)) >> 2;
703 int V = (vbuf0[i] + (-128 << 11)) >> 2;
718 output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
721 output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
726 const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
727 for (i = 0; i < ((dstW + 1) >> 1); i++) {
728 int Y1 = (buf0[i * 2] ) >> 2;
729 int Y2 = (buf0[i * 2 + 1]) >> 2;
730 int U = (ubuf0[i] + ubuf1[i] + (-128 << 12)) >> 3;
731 int V = (vbuf0[i] + vbuf1[i] + (-128 << 12)) >> 3;
746 output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
749 output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
760 #define YUV2PACKED16WRAPPER(name, base, ext, fmt) \ 761 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \ 762 const int16_t **_lumSrc, int lumFilterSize, \ 763 const int16_t *chrFilter, const int16_t **_chrUSrc, \ 764 const int16_t **_chrVSrc, int chrFilterSize, \ 765 const int16_t **_alpSrc, uint8_t *_dest, int dstW, \ 768 const int32_t **lumSrc = (const int32_t **) _lumSrc, \ 769 **chrUSrc = (const int32_t **) _chrUSrc, \ 770 **chrVSrc = (const int32_t **) _chrVSrc, \ 771 **alpSrc = (const int32_t **) _alpSrc; \ 772 uint16_t *dest = (uint16_t *) _dest; \ 773 name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \ 774 chrFilter, chrUSrc, chrVSrc, chrFilterSize, \ 775 alpSrc, dest, dstW, y, fmt); \ 778 static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \ 779 const int16_t *_ubuf[2], const int16_t *_vbuf[2], \ 780 const int16_t *_abuf[2], uint8_t *_dest, int dstW, \ 781 int yalpha, int uvalpha, int y) \ 783 const int32_t **buf = (const int32_t **) _buf, \ 784 **ubuf = (const int32_t **) _ubuf, \ 785 **vbuf = (const int32_t **) _vbuf, \ 786 **abuf = (const int32_t **) _abuf; \ 787 uint16_t *dest = (uint16_t *) _dest; \ 788 name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \ 789 dest, dstW, yalpha, uvalpha, y, fmt); \ 792 static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \ 793 const int16_t *_ubuf[2], const int16_t *_vbuf[2], \ 794 const int16_t *_abuf0, uint8_t *_dest, int dstW, \ 795 int uvalpha, int y) \ 797 const int32_t *buf0 = (const int32_t *) _buf0, \ 798 **ubuf = (const int32_t **) _ubuf, \ 799 **vbuf = (const int32_t **) _vbuf, \ 800 *abuf0 = (const int32_t *) _abuf0; \ 801 uint16_t *dest = (uint16_t *) _dest; \ 802 name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \ 803 dstW, uvalpha, y, fmt); \ 821 unsigned A1,
unsigned A2,
822 const void *_r,
const void *_g,
const void *_b,
int y,
827 uint32_t *dest = (uint32_t *) _dest;
828 const uint32_t *
r = (
const uint32_t *) _r;
829 const uint32_t *
g = (
const uint32_t *) _g;
830 const uint32_t *
b = (
const uint32_t *) _b;
835 dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0);
836 dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0);
841 dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh);
842 dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh);
844 dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
845 dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
854 #define r_b ((target == AV_PIX_FMT_RGB24) ? r : b) 855 #define b_r ((target == AV_PIX_FMT_RGB24) ? b : r) 856 dest[i * 6 + 0] =
r_b[Y1];
857 dest[i * 6 + 1] = g[Y1];
858 dest[i * 6 + 2] =
b_r[Y1];
859 dest[i * 6 + 3] =
r_b[Y2];
860 dest[i * 6 + 4] = g[Y2];
861 dest[i * 6 + 5] =
b_r[Y2];
867 uint16_t *dest = (uint16_t *) _dest;
868 const uint16_t *
r = (
const uint16_t *) _r;
869 const uint16_t *
g = (
const uint16_t *) _g;
870 const uint16_t *
b = (
const uint16_t *) _b;
871 int dr1, dg1, db1, dr2, dg2, db2;
896 dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
897 dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
903 int dr1, dg1, db1, dr2, dg2, db2;
908 dr1 = dg1 = d32[(i * 2 + 0) & 7];
909 db1 = d64[(i * 2 + 0) & 7];
910 dr2 = dg2 = d32[(i * 2 + 1) & 7];
911 db2 = d64[(i * 2 + 1) & 7];
915 dr1 = db1 = d128[(i * 2 + 0) & 7];
916 dg1 = d64[(i * 2 + 0) & 7];
917 dr2 = db2 = d128[(i * 2 + 1) & 7];
918 dg2 = d64[(i * 2 + 1) & 7];
922 dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] +
923 ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4);
925 dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
926 dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
933 const int16_t **lumSrc,
int lumFilterSize,
934 const int16_t *chrFilter,
const int16_t **chrUSrc,
935 const int16_t **chrVSrc,
int chrFilterSize,
941 for (i = 0; i < ((dstW + 1) >> 1); i++) {
947 const void *
r, *
g, *
b;
949 for (j = 0; j < lumFilterSize; j++) {
950 Y1 += lumSrc[j][i * 2] * lumFilter[j];
951 Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
953 for (j = 0; j < chrFilterSize; j++) {
954 U += chrUSrc[j][i] * chrFilter[j];
955 V += chrVSrc[j][i] * chrFilter[j];
961 if ((Y1 | Y2 | U | V) & 0x100) {
962 Y1 = av_clip_uint8(Y1);
963 Y2 = av_clip_uint8(Y2);
964 U = av_clip_uint8(U);
965 V = av_clip_uint8(V);
970 for (j = 0; j < lumFilterSize; j++) {
971 A1 += alpSrc[j][i * 2 ] * lumFilter[j];
972 A2 += alpSrc[j][i * 2 + 1] * lumFilter[j];
976 if ((A1 | A2) & 0x100) {
977 A1 = av_clip_uint8(A1);
978 A2 = av_clip_uint8(A2);
987 yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
988 r, g, b, y, target, hasAlpha);
994 const int16_t *ubuf[2],
const int16_t *vbuf[2],
996 int yalpha,
int uvalpha,
int y,
999 const int16_t *buf0 = buf[0], *buf1 = buf[1],
1000 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1001 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1002 *abuf0 = hasAlpha ? abuf[0] :
NULL,
1003 *abuf1 = hasAlpha ? abuf[1] :
NULL;
1004 int yalpha1 = 4096 - yalpha;
1005 int uvalpha1 = 4096 - uvalpha;
1008 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1009 int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
1010 int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
1011 int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
1012 int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
1014 const void *
r, *
g, *
b;
1016 Y1 = av_clip_uint8(Y1);
1017 Y2 = av_clip_uint8(Y2);
1018 U = av_clip_uint8(U);
1019 V = av_clip_uint8(V);
1026 A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 19;
1027 A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19;
1028 A1 = av_clip_uint8(A1);
1029 A2 = av_clip_uint8(A2);
1032 yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1033 r, g, b, y, target, hasAlpha);
1039 const int16_t *ubuf[2],
const int16_t *vbuf[2],
1044 const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1047 if (uvalpha < 2048) {
1048 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1049 int Y1 = buf0[i * 2] >> 7;
1050 int Y2 = buf0[i * 2 + 1] >> 7;
1051 int U = ubuf0[i] >> 7;
1052 int V = vbuf0[i] >> 7;
1054 const void *
r, *
g, *
b;
1056 Y1 = av_clip_uint8(Y1);
1057 Y2 = av_clip_uint8(Y2);
1058 U = av_clip_uint8(U);
1059 V = av_clip_uint8(V);
1066 A1 = abuf0[i * 2 ] >> 7;
1067 A2 = abuf0[i * 2 + 1] >> 7;
1068 A1 = av_clip_uint8(A1);
1069 A2 = av_clip_uint8(A2);
1072 yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1073 r, g, b, y, target, hasAlpha);
1076 const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1077 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1078 int Y1 = buf0[i * 2] >> 7;
1079 int Y2 = buf0[i * 2 + 1] >> 7;
1080 int U = (ubuf0[i] + ubuf1[i]) >> 8;
1081 int V = (vbuf0[i] + vbuf1[i]) >> 8;
1083 const void *
r, *
g, *
b;
1085 Y1 = av_clip_uint8(Y1);
1086 Y2 = av_clip_uint8(Y2);
1087 U = av_clip_uint8(U);
1088 V = av_clip_uint8(V);
1095 A1 = abuf0[i * 2 ] >> 7;
1096 A2 = abuf0[i * 2 + 1] >> 7;
1097 A1 = av_clip_uint8(A1);
1098 A2 = av_clip_uint8(A2);
1101 yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1102 r, g, b, y, target, hasAlpha);
1107 #define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \ 1108 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \ 1109 const int16_t **lumSrc, int lumFilterSize, \ 1110 const int16_t *chrFilter, const int16_t **chrUSrc, \ 1111 const int16_t **chrVSrc, int chrFilterSize, \ 1112 const int16_t **alpSrc, uint8_t *dest, int dstW, \ 1115 name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \ 1116 chrFilter, chrUSrc, chrVSrc, chrFilterSize, \ 1117 alpSrc, dest, dstW, y, fmt, hasAlpha); \ 1119 #define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \ 1120 YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \ 1121 static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \ 1122 const int16_t *ubuf[2], const int16_t *vbuf[2], \ 1123 const int16_t *abuf[2], uint8_t *dest, int dstW, \ 1124 int yalpha, int uvalpha, int y) \ 1126 name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \ 1127 dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \ 1130 static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \ 1131 const int16_t *ubuf[2], const int16_t *vbuf[2], \ 1132 const int16_t *abuf0, uint8_t *dest, int dstW, \ 1133 int uvalpha, int y) \ 1135 name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \ 1136 dstW, uvalpha, y, fmt, hasAlpha); \ 1143 #if CONFIG_SWSCALE_ALPHA 1161 const int16_t **lumSrc,
int lumFilterSize,
1162 const int16_t *chrFilter, const int16_t **chrUSrc,
1163 const int16_t **chrVSrc,
int chrFilterSize,
1164 const int16_t **alpSrc,
uint8_t *dest,
1170 for (i = 0; i <
dstW; i++) {
1177 for (j = 0; j < lumFilterSize; j++) {
1178 Y += lumSrc[j][i] * lumFilter[j];
1180 for (j = 0; j < chrFilterSize; j++) {
1181 U += chrUSrc[j][i] * chrFilter[j];
1182 V += chrVSrc[j][i] * chrFilter[j];
1189 for (j = 0; j < lumFilterSize; j++) {
1190 A += alpSrc[j][i] * lumFilter[j];
1194 A = av_clip_uint8(A);
1196 Y -= c->yuv2rgb_y_offset;
1197 Y *= c->yuv2rgb_y_coeff;
1199 R = Y + V*c->yuv2rgb_v2r_coeff;
1200 G = Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;
1201 B = Y + U*c->yuv2rgb_u2b_coeff;
1202 if ((R | G | B) & 0xC0000000) {
1203 R = av_clip_uintp2(R, 30);
1204 G = av_clip_uintp2(G, 30);
1205 B = av_clip_uintp2(B, 30);
1210 dest[0] = hasAlpha ?
A : 255;
1224 dest[3] = hasAlpha ?
A : 255;
1227 dest[0] = hasAlpha ?
A : 255;
1242 dest[3] = hasAlpha ?
A : 255;
1255 #if CONFIG_SWSCALE_ALPHA 1271 const int16_t **lumSrc,
int lumFilterSize,
1272 const int16_t *chrFilter,
const int16_t **chrUSrc,
1273 const int16_t **chrVSrc,
int chrFilterSize,
1274 const int16_t **alpSrc,
uint8_t **dest,
1280 uint16_t **dest16 = (uint16_t**)dest;
1283 for (i = 0; i <
dstW; i++) {
1286 int U = (1 << 9) - (128 << 19);
1287 int V = (1 << 9) - (128 << 19);
1290 for (j = 0; j < lumFilterSize; j++)
1291 Y += lumSrc[j][i] * lumFilter[j];
1293 for (j = 0; j < chrFilterSize; j++) {
1294 U += chrUSrc[j][i] * chrFilter[j];
1295 V += chrVSrc[j][i] * chrFilter[j];
1305 for (j = 0; j < lumFilterSize; j++)
1306 A += alpSrc[j][i] * lumFilter[j];
1311 A = av_clip_uint8(A);
1321 if ((R | G | B) & 0xC0000000) {
1322 R = av_clip_uintp2(R, 30);
1323 G = av_clip_uintp2(G, 30);
1324 B = av_clip_uintp2(B, 30);
1328 dest16[0][i] = G >> SH;
1329 dest16[1][i] = B >> SH;
1330 dest16[2][i] = R >> SH;
1334 dest[0][i] = G >> 22;
1335 dest[1][i] = B >> 22;
1336 dest[2][i] = R >> 22;
1342 for (i = 0; i <
dstW; i++) {
1365 *yuv2planeX =
isBE(dstFormat) ? yuv2planeX_16BE_c : yuv2planeX_16LE_c;
1366 *yuv2plane1 =
isBE(dstFormat) ? yuv2plane1_16BE_c : yuv2plane1_16LE_c;
1369 *yuv2planeX =
isBE(dstFormat) ? yuv2planeX_9BE_c : yuv2planeX_9LE_c;
1370 *yuv2plane1 =
isBE(dstFormat) ? yuv2plane1_9BE_c : yuv2plane1_9LE_c;
1372 *yuv2planeX =
isBE(dstFormat) ? yuv2planeX_10BE_c : yuv2planeX_10LE_c;
1373 *yuv2plane1 =
isBE(dstFormat) ? yuv2plane1_10BE_c : yuv2plane1_10LE_c;
1377 *yuv2planeX = yuv2planeX_8_c;
1383 switch (dstFormat) {
1386 *yuv2packedX = yuv2rgba32_full_X_c;
1388 #if CONFIG_SWSCALE_ALPHA 1390 *yuv2packedX = yuv2rgba32_full_X_c;
1394 *yuv2packedX = yuv2rgbx32_full_X_c;
1400 *yuv2packedX = yuv2argb32_full_X_c;
1402 #if CONFIG_SWSCALE_ALPHA 1404 *yuv2packedX = yuv2argb32_full_X_c;
1408 *yuv2packedX = yuv2xrgb32_full_X_c;
1414 *yuv2packedX = yuv2bgra32_full_X_c;
1416 #if CONFIG_SWSCALE_ALPHA 1418 *yuv2packedX = yuv2bgra32_full_X_c;
1422 *yuv2packedX = yuv2bgrx32_full_X_c;
1428 *yuv2packedX = yuv2abgr32_full_X_c;
1430 #if CONFIG_SWSCALE_ALPHA 1432 *yuv2packedX = yuv2abgr32_full_X_c;
1436 *yuv2packedX = yuv2xbgr32_full_X_c;
1441 *yuv2packedX = yuv2rgb24_full_X_c;
1444 *yuv2packedX = yuv2bgr24_full_X_c;
1458 switch (dstFormat) {
1460 *yuv2packed1 = yuv2rgb48le_1_c;
1461 *yuv2packed2 = yuv2rgb48le_2_c;
1462 *yuv2packedX = yuv2rgb48le_X_c;
1465 *yuv2packed1 = yuv2rgb48be_1_c;
1466 *yuv2packed2 = yuv2rgb48be_2_c;
1467 *yuv2packedX = yuv2rgb48be_X_c;
1470 *yuv2packed1 = yuv2bgr48le_1_c;
1471 *yuv2packed2 = yuv2bgr48le_2_c;
1472 *yuv2packedX = yuv2bgr48le_X_c;
1475 *yuv2packed1 = yuv2bgr48be_1_c;
1476 *yuv2packed2 = yuv2bgr48be_2_c;
1477 *yuv2packedX = yuv2bgr48be_X_c;
1482 *yuv2packed1 = yuv2rgb32_1_c;
1483 *yuv2packed2 = yuv2rgb32_2_c;
1484 *yuv2packedX = yuv2rgb32_X_c;
1486 #if CONFIG_SWSCALE_ALPHA 1488 *yuv2packed1 = yuv2rgba32_1_c;
1489 *yuv2packed2 = yuv2rgba32_2_c;
1490 *yuv2packedX = yuv2rgba32_X_c;
1494 *yuv2packed1 = yuv2rgbx32_1_c;
1495 *yuv2packed2 = yuv2rgbx32_2_c;
1496 *yuv2packedX = yuv2rgbx32_X_c;
1503 *yuv2packed1 = yuv2rgb32_1_1_c;
1504 *yuv2packed2 = yuv2rgb32_1_2_c;
1505 *yuv2packedX = yuv2rgb32_1_X_c;
1507 #if CONFIG_SWSCALE_ALPHA 1509 *yuv2packed1 = yuv2rgba32_1_1_c;
1510 *yuv2packed2 = yuv2rgba32_1_2_c;
1511 *yuv2packedX = yuv2rgba32_1_X_c;
1515 *yuv2packed1 = yuv2rgbx32_1_1_c;
1516 *yuv2packed2 = yuv2rgbx32_1_2_c;
1517 *yuv2packedX = yuv2rgbx32_1_X_c;
1522 *yuv2packed1 = yuv2rgb24_1_c;
1523 *yuv2packed2 = yuv2rgb24_2_c;
1524 *yuv2packedX = yuv2rgb24_X_c;
1527 *yuv2packed1 = yuv2bgr24_1_c;
1528 *yuv2packed2 = yuv2bgr24_2_c;
1529 *yuv2packedX = yuv2bgr24_X_c;
1535 *yuv2packed1 = yuv2rgb16_1_c;
1536 *yuv2packed2 = yuv2rgb16_2_c;
1537 *yuv2packedX = yuv2rgb16_X_c;
1543 *yuv2packed1 = yuv2rgb15_1_c;
1544 *yuv2packed2 = yuv2rgb15_2_c;
1545 *yuv2packedX = yuv2rgb15_X_c;
1551 *yuv2packed1 = yuv2rgb12_1_c;
1552 *yuv2packed2 = yuv2rgb12_2_c;
1553 *yuv2packedX = yuv2rgb12_X_c;
1557 *yuv2packed1 = yuv2rgb8_1_c;
1558 *yuv2packed2 = yuv2rgb8_2_c;
1559 *yuv2packedX = yuv2rgb8_X_c;
1563 *yuv2packed1 = yuv2rgb4_1_c;
1564 *yuv2packed2 = yuv2rgb4_2_c;
1565 *yuv2packedX = yuv2rgb4_X_c;
1569 *yuv2packed1 = yuv2rgb4b_1_c;
1570 *yuv2packed2 = yuv2rgb4b_2_c;
1571 *yuv2packedX = yuv2rgb4b_X_c;
1575 switch (dstFormat) {
1577 *yuv2packed1 = yuv2monowhite_1_c;
1578 *yuv2packed2 = yuv2monowhite_2_c;
1579 *yuv2packedX = yuv2monowhite_X_c;
1582 *yuv2packed1 = yuv2monoblack_1_c;
1583 *yuv2packed2 = yuv2monoblack_2_c;
1584 *yuv2packedX = yuv2monoblack_X_c;
1587 *yuv2packed1 = yuv2yuyv422_1_c;
1588 *yuv2packed2 = yuv2yuyv422_2_c;
1589 *yuv2packedX = yuv2yuyv422_X_c;
1592 *yuv2packed1 = yuv2yvyu422_1_c;
1593 *yuv2packed2 = yuv2yvyu422_2_c;
1594 *yuv2packedX = yuv2yvyu422_X_c;
1597 *yuv2packed1 = yuv2uyvy422_1_c;
1598 *yuv2packed2 = yuv2uyvy422_2_c;
1599 *yuv2packedX = yuv2uyvy422_X_c;
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
int16_t ** alpPixBuf
Ring buffer for scaled horizontal alpha plane lines to be fed to the vertical scaler.
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
packed RGB 8:8:8, 24bpp, RGBRGB...
packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in ...
static void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
static av_always_inline void yuv2rgb48_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0 ...
#define DECLARE_ALIGNED(n, t, v)
static av_always_inline int is16BPS(enum AVPixelFormat pix_fmt)
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0 ...
#define AV_PIX_FMT_RGB444
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t)
static av_always_inline void yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in ...
Macro definitions for various function/variable attributes.
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
static av_always_inline void yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
static void yuv2gbrp_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha)
packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0 ...
#define SWS_FULL_CHR_H_INT
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
static const uint8_t dither_2x2_8[2][8]
planar GBR 4:4:4 48bpp, big-endian
void(* yuv2planar1_fn)(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output without any additional vertical scaling (...
external api for the swscale stuff
enum AVPixelFormat dstFormat
Destination pixel format.
yuv2packedX_fn yuv2packedX
planar GBR 4:4:4 27bpp, big-endian
#define AV_PIX_FMT_BGR32_1
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
static const uint8_t dither_2x2_4[2][8]
yuv2packed1_fn yuv2packed1
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
const uint8_t ff_dither_8x8_32[8][8]
#define output_pixel(pos, val, bias, signedness)
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
int chrDstW
Width of destination chroma planes.
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as lit...
#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha)
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
static av_always_inline void yuv2rgb48_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target)
static av_always_inline void yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW, int big_endian, int output_bits)
static av_always_inline void yuv2422_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target)
static void filter(MpegAudioContext *s, int ch, const short *samples, int incr)
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
static av_always_inline void yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target)
as above, but U and V bytes are swapped
static av_always_inline void yuv2planeX_16_c_template(const int16_t *filter, int filterSize, const int32_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits)
static av_always_inline void yuv2rgb_write(uint8_t *_dest, int i, unsigned Y1, unsigned Y2, unsigned A1, unsigned A2, const void *_r, const void *_g, const void *_b, int y, enum AVPixelFormat target, int hasAlpha)
packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
#define YUV2PACKED16WRAPPER(name, base, ext, fmt)
static av_always_inline int is9_OR_10BPS(enum AVPixelFormat pix_fmt)
yuv2planar1_fn yuv2plane1
yuv2interleavedX_fn yuv2nv12cX
packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
packed RGB 8:8:8, 24bpp, BGRBGR...
static av_always_inline void yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha)
#define YUV2PACKEDWRAPPER(name, base, ext, fmt)
const uint8_t ff_dither_4x4_16[4][8]
int dstW
Width of destination luma/alpha planes.
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
#define AV_PIX_FMT_BGR555
static av_always_inline int isBE(enum AVPixelFormat pix_fmt)
static av_always_inline void yuv2plane1_10_c_template(const int16_t *src, uint16_t *dest, int dstW, int big_endian, int output_bits)
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
static const uint16_t dither[8][8]
static av_always_inline void yuv2planeX_10_c_template(const int16_t *filter, int filterSize, const int16_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits)
void(* yuv2packedX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing multi-point vertical scaling between input pixels.
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
yuv2planarX_fn yuv2planeX
planar GBR 4:4:4 30bpp, big-endian
static av_always_inline void yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target)
void(* yuv2packed1_fn)(struct SwsContext *c, const int16_t *lumSrc, const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc, uint8_t *dest, int dstW, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output without any additional vertical scaling (or point-scaling).
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
static av_always_inline void yuv2rgb48_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **chrUSrc, const int32_t **chrVSrc, int chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target)
static av_always_inline void yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target)
const uint8_t ff_dither_8x8_220[8][8]
void(* yuv2planarX_fn)(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output with multi-point vertical scaling between input pixels.
static av_always_inline void yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1 ...
#define AV_PIX_FMT_BGR565
const uint8_t * chrDither8
static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0 ...
static av_always_inline void yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1 ...
const uint8_t ff_dither_8x8_73[8][8]
#define output_pixels(pos, Y1, U, Y2, V)
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb...
yuv2packed2_fn yuv2packed2
#define CONFIG_SWSCALE_ALPHA
Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb...
static void yuv2nv12cX_c(SwsContext *c, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int chrDstW)
planar GBRA 4:4:4:4 32bpp
planar GBR 4:4:4 27bpp, little-endian
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
#define AV_PIX_FMT_BGR444
void(* yuv2packed2_fn)(struct SwsContext *c, const int16_t *lumSrc[2], const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing bilinear scaling between two input lines.
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
#define AV_PIX_FMT_RGB555
av_cold void ff_sws_init_output_funcs(SwsContext *c, yuv2planar1_fn *yuv2plane1, yuv2planarX_fn *yuv2planeX, yuv2interleavedX_fn *yuv2nv12cX, yuv2packed1_fn *yuv2packed1, yuv2packed2_fn *yuv2packed2, yuv2packedX_fn *yuv2packedX, yuv2anyX_fn *yuv2anyX)
#define AV_PIX_FMT_RGB32_1
void(* yuv2interleavedX_fn)(struct SwsContext *c, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int dstW)
Write one line of horizontally scaled chroma to interleaved output with multi-point vertical scaling between input pixels.
void(* yuv2anyX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to YUV/RGB output by doing multi-point vertical scaling between input pixels.
#define AV_PIX_FMT_RGB565
packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1 ...
planar GBR 4:4:4 48bpp, little-endian
packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1 ...
static av_always_inline void yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha)
int depth
Number of bits in the component.
int flags
Flags passed by the user to select scaler algorithm, optimizations, subsampling, etc...
AVPixelFormat
Pixel format.
#define accumulate_bit(acc, val)
planar GBR 4:4:4 30bpp, little-endian