Libav
output.c
1 /*
2  * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of Libav.
5  *
6  * Libav is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * Libav is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with Libav; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <math.h>
22 #include <stdint.h>
23 #include <stdio.h>
24 #include <string.h>
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/avutil.h"
28 #include "libavutil/bswap.h"
29 #include "libavutil/cpu.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/pixdesc.h"
33 #include "config.h"
34 #include "rgb2rgb.h"
35 #include "swscale.h"
36 #include "swscale_internal.h"
37 
38 DECLARE_ALIGNED(8, static const uint8_t, dither_2x2_4)[2][8]={
39 { 1, 3, 1, 3, 1, 3, 1, 3, },
40 { 2, 0, 2, 0, 2, 0, 2, 0, },
41 };
42 
43 DECLARE_ALIGNED(8, static const uint8_t, dither_2x2_8)[2][8]={
44 { 6, 2, 6, 2, 6, 2, 6, 2, },
45 { 0, 4, 0, 4, 0, 4, 0, 4, },
46 };
47 
48 DECLARE_ALIGNED(8, const uint8_t, ff_dither_4x4_16)[4][8] = {
49 { 8, 4, 11, 7, 8, 4, 11, 7, },
50 { 2, 14, 1, 13, 2, 14, 1, 13, },
51 { 10, 6, 9, 5, 10, 6, 9, 5, },
52 { 0, 12, 3, 15, 0, 12, 3, 15, },
53 };
54 
55 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_32)[8][8] = {
56 { 17, 9, 23, 15, 16, 8, 22, 14, },
57 { 5, 29, 3, 27, 4, 28, 2, 26, },
58 { 21, 13, 19, 11, 20, 12, 18, 10, },
59 { 0, 24, 6, 30, 1, 25, 7, 31, },
60 { 16, 8, 22, 14, 17, 9, 23, 15, },
61 { 4, 28, 2, 26, 5, 29, 3, 27, },
62 { 20, 12, 18, 10, 21, 13, 19, 11, },
63 { 1, 25, 7, 31, 0, 24, 6, 30, },
64 };
65 
66 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_73)[8][8] = {
67 { 0, 55, 14, 68, 3, 58, 17, 72, },
68 { 37, 18, 50, 32, 40, 22, 54, 35, },
69 { 9, 64, 5, 59, 13, 67, 8, 63, },
70 { 46, 27, 41, 23, 49, 31, 44, 26, },
71 { 2, 57, 16, 71, 1, 56, 15, 70, },
72 { 39, 21, 52, 34, 38, 19, 51, 33, },
73 { 11, 66, 7, 62, 10, 65, 6, 60, },
74 { 48, 30, 43, 25, 47, 29, 42, 24, },
75 };
76 
77 #if 1
78 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[8][8] = {
79 {117, 62, 158, 103, 113, 58, 155, 100, },
80 { 34, 199, 21, 186, 31, 196, 17, 182, },
81 {144, 89, 131, 76, 141, 86, 127, 72, },
82 { 0, 165, 41, 206, 10, 175, 52, 217, },
83 {110, 55, 151, 96, 120, 65, 162, 107, },
84 { 28, 193, 14, 179, 38, 203, 24, 189, },
85 {138, 83, 124, 69, 148, 93, 134, 79, },
86 { 7, 172, 48, 213, 3, 168, 45, 210, },
87 };
88 #elif 1
89 // tries to correct a gamma of 1.5
90 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[8][8] = {
91 { 0, 143, 18, 200, 2, 156, 25, 215, },
92 { 78, 28, 125, 64, 89, 36, 138, 74, },
93 { 10, 180, 3, 161, 16, 195, 8, 175, },
94 {109, 51, 93, 38, 121, 60, 105, 47, },
95 { 1, 152, 23, 210, 0, 147, 20, 205, },
96 { 85, 33, 134, 71, 81, 30, 130, 67, },
97 { 14, 190, 6, 171, 12, 185, 5, 166, },
98 {117, 57, 101, 44, 113, 54, 97, 41, },
99 };
100 #elif 1
101 // tries to correct a gamma of 2.0
102 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[8][8] = {
103 { 0, 124, 8, 193, 0, 140, 12, 213, },
104 { 55, 14, 104, 42, 66, 19, 119, 52, },
105 { 3, 168, 1, 145, 6, 187, 3, 162, },
106 { 86, 31, 70, 21, 99, 39, 82, 28, },
107 { 0, 134, 11, 206, 0, 129, 9, 200, },
108 { 62, 17, 114, 48, 58, 16, 109, 45, },
109 { 5, 181, 2, 157, 4, 175, 1, 151, },
110 { 95, 36, 78, 26, 90, 34, 74, 24, },
111 };
112 #else
113 // tries to correct a gamma of 2.5
114 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[8][8] = {
115 { 0, 107, 3, 187, 0, 125, 6, 212, },
116 { 39, 7, 86, 28, 49, 11, 102, 36, },
117 { 1, 158, 0, 131, 3, 180, 1, 151, },
118 { 68, 19, 52, 12, 81, 25, 64, 17, },
119 { 0, 119, 5, 203, 0, 113, 4, 195, },
120 { 45, 9, 96, 33, 42, 8, 91, 30, },
121 { 2, 172, 1, 144, 2, 165, 0, 137, },
122 { 77, 23, 60, 15, 72, 21, 56, 14, },
123 };
124 #endif
125 
126 #define output_pixel(pos, val, bias, signedness) \
127  if (big_endian) { \
128  AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
129  } else { \
130  AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
131  }
132 
133 static av_always_inline void
134 yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW,
135  int big_endian, int output_bits)
136 {
137  int i;
138  int shift = 19 - output_bits;
139 
140  for (i = 0; i < dstW; i++) {
141  int val = src[i] + (1 << (shift - 1));
142  output_pixel(&dest[i], val, 0, uint);
143  }
144 }
145 
146 static av_always_inline void
147 yuv2planeX_16_c_template(const int16_t *filter, int filterSize,
148  const int32_t **src, uint16_t *dest, int dstW,
149  int big_endian, int output_bits)
150 {
151  int i;
152  int shift = 15 + 16 - output_bits;
153 
154  for (i = 0; i < dstW; i++) {
155  int val = 1 << (30-output_bits);
156  int j;
157 
158  /* range of val is [0,0x7FFFFFFF], so 31 bits, but with lanczos/spline
159  * filters (or anything with negative coeffs), the range can be slightly
160  * wider in both directions. To account for this overflow, we subtract
161  * a constant so it always fits in the signed range (assuming a
162  * reasonable filterSize), and re-add that at the end. */
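 /* For example, with output_bits == 16: shift == 15, so the 0x40000000
 * subtracted below comes back as 0x40000000 >> 15 == 0x8000 after the
 * final shift -- exactly the bias that output_pixel() adds back on top of
 * av_clip_int16(), restoring the unsigned 16-bit result. */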
163  val -= 0x40000000;
164  for (j = 0; j < filterSize; j++)
165  val += src[j][i] * filter[j];
166 
167  output_pixel(&dest[i], val, 0x8000, int);
168  }
169 }
170 
171 #undef output_pixel
172 
173 #define output_pixel(pos, val) \
174  if (big_endian) { \
175  AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
176  } else { \
177  AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
178  }
179 
180 static av_always_inline void
181 yuv2plane1_10_c_template(const int16_t *src, uint16_t *dest, int dstW,
182  int big_endian, int output_bits)
183 {
184  int i;
185  int shift = 15 - output_bits;
186 
187  for (i = 0; i < dstW; i++) {
188  int val = src[i] + (1 << (shift - 1));
189  output_pixel(&dest[i], val);
190  }
191 }
192 
193 static av_always_inline void
194 yuv2planeX_10_c_template(const int16_t *filter, int filterSize,
195  const int16_t **src, uint16_t *dest, int dstW,
196  int big_endian, int output_bits)
197 {
198  int i;
199  int shift = 11 + 16 - output_bits;
200 
201  for (i = 0; i < dstW; i++) {
202  int val = 1 << (26-output_bits);
203  int j;
204 
205  for (j = 0; j < filterSize; j++)
206  val += src[j][i] * filter[j];
207 
208  output_pixel(&dest[i], val);
209  }
210 }
211 
212 #undef output_pixel
213 
214 #define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
215 static void yuv2plane1_ ## bits ## BE_LE ## _c(const int16_t *src, \
216  uint8_t *dest, int dstW, \
217  const uint8_t *dither, int offset)\
218 { \
219  yuv2plane1_ ## template_size ## _c_template((const typeX_t *) src, \
220  (uint16_t *) dest, dstW, is_be, bits); \
221 }\
222 static void yuv2planeX_ ## bits ## BE_LE ## _c(const int16_t *filter, int filterSize, \
223  const int16_t **src, uint8_t *dest, int dstW, \
224  const uint8_t *dither, int offset)\
225 { \
226  yuv2planeX_## template_size ## _c_template(filter, \
227  filterSize, (const typeX_t **) src, \
228  (uint16_t *) dest, dstW, is_be, bits); \
229 }
230 yuv2NBPS( 9, BE, 1, 10, int16_t)
231 yuv2NBPS( 9, LE, 0, 10, int16_t)
232 yuv2NBPS(10, BE, 1, 10, int16_t)
233 yuv2NBPS(10, LE, 0, 10, int16_t)
234 yuv2NBPS(16, BE, 1, 16, int32_t)
235 yuv2NBPS(16, LE, 0, 16, int32_t)
236 
237 static void yuv2planeX_8_c(const int16_t *filter, int filterSize,
238  const int16_t **src, uint8_t *dest, int dstW,
239  const uint8_t *dither, int offset)
240 {
241  int i;
242  for (i=0; i<dstW; i++) {
243  int val = dither[(i + offset) & 7] << 12;
244  int j;
245  for (j=0; j<filterSize; j++)
246  val += src[j][i] * filter[j];
247 
248  dest[i]= av_clip_uint8(val>>19);
249  }
250 }
251 
252 static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW,
253  const uint8_t *dither, int offset)
254 {
255  int i;
256  for (i=0; i<dstW; i++) {
257  int val = (src[i] + dither[(i + offset) & 7]) >> 7;
258  dest[i]= av_clip_uint8(val);
259  }
260 }
261 
262 static void yuv2nv12cX_c(SwsContext *c, const int16_t *chrFilter, int chrFilterSize,
263  const int16_t **chrUSrc, const int16_t **chrVSrc,
264  uint8_t *dest, int chrDstW)
265 {
266  enum AVPixelFormat dstFormat = c->dstFormat;
267  const uint8_t *chrDither = c->chrDither8;
268  int i;
269 
270  if (dstFormat == AV_PIX_FMT_NV12)
271  for (i=0; i<chrDstW; i++) {
272  int u = chrDither[i & 7] << 12;
273  int v = chrDither[(i + 3) & 7] << 12;
274  int j;
275  for (j=0; j<chrFilterSize; j++) {
276  u += chrUSrc[j][i] * chrFilter[j];
277  v += chrVSrc[j][i] * chrFilter[j];
278  }
279 
280  dest[2*i]= av_clip_uint8(u>>19);
281  dest[2*i+1]= av_clip_uint8(v>>19);
282  }
283  else
284  for (i=0; i<chrDstW; i++) {
285  int u = chrDither[i & 7] << 12;
286  int v = chrDither[(i + 3) & 7] << 12;
287  int j;
288  for (j=0; j<chrFilterSize; j++) {
289  u += chrUSrc[j][i] * chrFilter[j];
290  v += chrVSrc[j][i] * chrFilter[j];
291  }
292 
293  dest[2*i]= av_clip_uint8(v>>19);
294  dest[2*i+1]= av_clip_uint8(u>>19);
295  }
296 }
297 
298 #define accumulate_bit(acc, val) \
299  acc <<= 1; \
300  acc |= (val) >= (128 + 110)
301 #define output_pixel(pos, acc) \
302  if (target == AV_PIX_FMT_MONOBLACK) { \
303  pos = acc; \
304  } else { \
305  pos = ~acc; \
306  }
307 
308 static av_always_inline void
309 yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter,
310  const int16_t **lumSrc, int lumFilterSize,
311  const int16_t *chrFilter, const int16_t **chrUSrc,
312  const int16_t **chrVSrc, int chrFilterSize,
313  const int16_t **alpSrc, uint8_t *dest, int dstW,
314  int y, enum AVPixelFormat target)
315 {
316  const uint8_t * const d128 = ff_dither_8x8_220[y&7];
317  int i;
318  unsigned acc = 0;
319 
320  for (i = 0; i < dstW; i += 2) {
321  int j;
322  int Y1 = 1 << 18;
323  int Y2 = 1 << 18;
324 
325  for (j = 0; j < lumFilterSize; j++) {
326  Y1 += lumSrc[j][i] * lumFilter[j];
327  Y2 += lumSrc[j][i+1] * lumFilter[j];
328  }
329  Y1 >>= 19;
330  Y2 >>= 19;
331  if ((Y1 | Y2) & 0x100) {
332  Y1 = av_clip_uint8(Y1);
333  Y2 = av_clip_uint8(Y2);
334  }
335  accumulate_bit(acc, Y1 + d128[(i + 0) & 7]);
336  accumulate_bit(acc, Y2 + d128[(i + 1) & 7]);
337  if ((i & 7) == 6) {
338  output_pixel(*dest++, acc);
339  }
340  }
341 
342  if (i & 6) {
343  output_pixel(*dest, acc);
344  }
345 }
346 
347 static av_always_inline void
348 yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2],
349  const int16_t *ubuf[2], const int16_t *vbuf[2],
350  const int16_t *abuf[2], uint8_t *dest, int dstW,
351  int yalpha, int uvalpha, int y,
352  enum AVPixelFormat target)
353 {
354  const int16_t *buf0 = buf[0], *buf1 = buf[1];
355  const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
356  int yalpha1 = 4096 - yalpha;
357  int i;
358 
359  for (i = 0; i < dstW; i += 8) {
360  int Y, acc = 0;
361 
362  Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
363  accumulate_bit(acc, Y + d128[0]);
364  Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
365  accumulate_bit(acc, Y + d128[1]);
366  Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19;
367  accumulate_bit(acc, Y + d128[2]);
368  Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19;
369  accumulate_bit(acc, Y + d128[3]);
370  Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19;
371  accumulate_bit(acc, Y + d128[4]);
372  Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19;
373  accumulate_bit(acc, Y + d128[5]);
374  Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19;
375  accumulate_bit(acc, Y + d128[6]);
376  Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19;
377  accumulate_bit(acc, Y + d128[7]);
378 
379  output_pixel(*dest++, acc);
380  }
381 }
382 
383 static av_always_inline void
384 yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0,
385  const int16_t *ubuf[2], const int16_t *vbuf[2],
386  const int16_t *abuf0, uint8_t *dest, int dstW,
387  int uvalpha, int y, enum AVPixelFormat target)
388 {
389  const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
390  int i;
391 
392  for (i = 0; i < dstW; i += 8) {
393  int acc = 0;
394 
395  accumulate_bit(acc, (buf0[i + 0] >> 7) + d128[0]);
396  accumulate_bit(acc, (buf0[i + 1] >> 7) + d128[1]);
397  accumulate_bit(acc, (buf0[i + 2] >> 7) + d128[2]);
398  accumulate_bit(acc, (buf0[i + 3] >> 7) + d128[3]);
399  accumulate_bit(acc, (buf0[i + 4] >> 7) + d128[4]);
400  accumulate_bit(acc, (buf0[i + 5] >> 7) + d128[5]);
401  accumulate_bit(acc, (buf0[i + 6] >> 7) + d128[6]);
402  accumulate_bit(acc, (buf0[i + 7] >> 7) + d128[7]);
403 
404  output_pixel(*dest++, acc);
405  }
406 }
407 
408 #undef output_pixel
409 #undef accumulate_bit
410 
411 #define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
412 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
413  const int16_t **lumSrc, int lumFilterSize, \
414  const int16_t *chrFilter, const int16_t **chrUSrc, \
415  const int16_t **chrVSrc, int chrFilterSize, \
416  const int16_t **alpSrc, uint8_t *dest, int dstW, \
417  int y) \
418 { \
419  name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
420  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
421  alpSrc, dest, dstW, y, fmt); \
422 } \
423  \
424 static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
425  const int16_t *ubuf[2], const int16_t *vbuf[2], \
426  const int16_t *abuf[2], uint8_t *dest, int dstW, \
427  int yalpha, int uvalpha, int y) \
428 { \
429  name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
430  dest, dstW, yalpha, uvalpha, y, fmt); \
431 } \
432  \
433 static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
434  const int16_t *ubuf[2], const int16_t *vbuf[2], \
435  const int16_t *abuf0, uint8_t *dest, int dstW, \
436  int uvalpha, int y) \
437 { \
438  name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \
439  abuf0, dest, dstW, uvalpha, \
440  y, fmt); \
441 }
442 
443 YUV2PACKEDWRAPPER(yuv2mono,, white, AV_PIX_FMT_MONOWHITE)
444 YUV2PACKEDWRAPPER(yuv2mono,, black, AV_PIX_FMT_MONOBLACK)
445 
446 #define output_pixels(pos, Y1, U, Y2, V) \
447  if (target == AV_PIX_FMT_YUYV422) { \
448  dest[pos + 0] = Y1; \
449  dest[pos + 1] = U; \
450  dest[pos + 2] = Y2; \
451  dest[pos + 3] = V; \
452  } else if (target == AV_PIX_FMT_YVYU422) { \
453  dest[pos + 0] = Y1; \
454  dest[pos + 1] = V; \
455  dest[pos + 2] = Y2; \
456  dest[pos + 3] = U; \
457  } else { /* AV_PIX_FMT_UYVY422 */ \
458  dest[pos + 0] = U; \
459  dest[pos + 1] = Y1; \
460  dest[pos + 2] = V; \
461  dest[pos + 3] = Y2; \
462  }
463 
464 static av_always_inline void
465 yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter,
466  const int16_t **lumSrc, int lumFilterSize,
467  const int16_t *chrFilter, const int16_t **chrUSrc,
468  const int16_t **chrVSrc, int chrFilterSize,
469  const int16_t **alpSrc, uint8_t *dest, int dstW,
470  int y, enum AVPixelFormat target)
471 {
472  int i;
473 
474  for (i = 0; i < ((dstW + 1) >> 1); i++) {
475  int j;
476  int Y1 = 1 << 18;
477  int Y2 = 1 << 18;
478  int U = 1 << 18;
479  int V = 1 << 18;
480 
481  for (j = 0; j < lumFilterSize; j++) {
482  Y1 += lumSrc[j][i * 2] * lumFilter[j];
483  Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
484  }
485  for (j = 0; j < chrFilterSize; j++) {
486  U += chrUSrc[j][i] * chrFilter[j];
487  V += chrVSrc[j][i] * chrFilter[j];
488  }
489  Y1 >>= 19;
490  Y2 >>= 19;
491  U >>= 19;
492  V >>= 19;
493  if ((Y1 | Y2 | U | V) & 0x100) {
494  Y1 = av_clip_uint8(Y1);
495  Y2 = av_clip_uint8(Y2);
496  U = av_clip_uint8(U);
497  V = av_clip_uint8(V);
498  }
499  output_pixels(4*i, Y1, U, Y2, V);
500  }
501 }
502 
503 static av_always_inline void
504 yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2],
505  const int16_t *ubuf[2], const int16_t *vbuf[2],
506  const int16_t *abuf[2], uint8_t *dest, int dstW,
507  int yalpha, int uvalpha, int y,
508  enum AVPixelFormat target)
509 {
510  const int16_t *buf0 = buf[0], *buf1 = buf[1],
511  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
512  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
513  int yalpha1 = 4096 - yalpha;
514  int uvalpha1 = 4096 - uvalpha;
515  int i;
516 
517  for (i = 0; i < ((dstW + 1) >> 1); i++) {
518  int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
519  int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
520  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
521  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
522 
523  Y1 = av_clip_uint8(Y1);
524  Y2 = av_clip_uint8(Y2);
525  U = av_clip_uint8(U);
526  V = av_clip_uint8(V);
527 
528  output_pixels(i * 4, Y1, U, Y2, V);
529  }
530 }
531 
532 static av_always_inline void
533 yuv2422_1_c_template(SwsContext *c, const int16_t *buf0,
534  const int16_t *ubuf[2], const int16_t *vbuf[2],
535  const int16_t *abuf0, uint8_t *dest, int dstW,
536  int uvalpha, int y, enum AVPixelFormat target)
537 {
538  const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
539  int i;
540 
541  if (uvalpha < 2048) {
542  for (i = 0; i < ((dstW + 1) >> 1); i++) {
543  int Y1 = buf0[i * 2] >> 7;
544  int Y2 = buf0[i * 2 + 1] >> 7;
545  int U = ubuf0[i] >> 7;
546  int V = vbuf0[i] >> 7;
547 
548  Y1 = av_clip_uint8(Y1);
549  Y2 = av_clip_uint8(Y2);
550  U = av_clip_uint8(U);
551  V = av_clip_uint8(V);
552 
553  output_pixels(i * 4, Y1, U, Y2, V);
554  }
555  } else {
556  const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
557  for (i = 0; i < ((dstW + 1) >> 1); i++) {
558  int Y1 = buf0[i * 2] >> 7;
559  int Y2 = buf0[i * 2 + 1] >> 7;
560  int U = (ubuf0[i] + ubuf1[i]) >> 8;
561  int V = (vbuf0[i] + vbuf1[i]) >> 8;
562 
563  Y1 = av_clip_uint8(Y1);
564  Y2 = av_clip_uint8(Y2);
565  U = av_clip_uint8(U);
566  V = av_clip_uint8(V);
567 
568  output_pixels(i * 4, Y1, U, Y2, V);
569  }
570  }
571 }
572 
573 #undef output_pixels
574 
575 YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, AV_PIX_FMT_YUYV422)
576 YUV2PACKEDWRAPPER(yuv2, 422, yvyu422, AV_PIX_FMT_YVYU422)
577 YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, AV_PIX_FMT_UYVY422)
578 
579 #define R_B ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE) ? R : B)
580 #define B_R ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE) ? B : R)
581 #define output_pixel(pos, val) \
582  if (isBE(target)) { \
583  AV_WB16(pos, val); \
584  } else { \
585  AV_WL16(pos, val); \
586  }
587 
588 static av_always_inline void
589 yuv2rgb48_X_c_template(SwsContext *c, const int16_t *lumFilter,
590  const int32_t **lumSrc, int lumFilterSize,
591  const int16_t *chrFilter, const int32_t **chrUSrc,
592  const int32_t **chrVSrc, int chrFilterSize,
593  const int32_t **alpSrc, uint16_t *dest, int dstW,
594  int y, enum AVPixelFormat target)
595 {
596  int i;
597 
598  for (i = 0; i < ((dstW + 1) >> 1); i++) {
599  int j;
600  int Y1 = -0x40000000;
601  int Y2 = -0x40000000;
602  int U = -128 << 23; // 19
603  int V = -128 << 23;
604  int R, G, B;
605 
606  for (j = 0; j < lumFilterSize; j++) {
607  Y1 += lumSrc[j][i * 2] * lumFilter[j];
608  Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
609  }
610  for (j = 0; j < chrFilterSize; j++) {
611  U += chrUSrc[j][i] * chrFilter[j];
612  V += chrVSrc[j][i] * chrFilter[j];
613  }
614 
615  // 8 bits: 12+15=27; 16 bits: 12+19=31
616  Y1 >>= 14; // 10
617  Y1 += 0x10000;
618  Y2 >>= 14;
619  Y2 += 0x10000;
620  U >>= 14;
621  V >>= 14;
622 
623  // 8 bits: 27 -> 17 bits, 16 bits: 31 - 14 = 17 bits
624  Y1 -= c->yuv2rgb_y_offset;
625  Y2 -= c->yuv2rgb_y_offset;
626  Y1 *= c->yuv2rgb_y_coeff;
627  Y2 *= c->yuv2rgb_y_coeff;
628  Y1 += 1 << 13; // 21
629  Y2 += 1 << 13;
630  // 8 bits: 17 + 13 bits = 30 bits, 16 bits: 17 + 13 bits = 30 bits
631 
632  R = V * c->yuv2rgb_v2r_coeff;
633  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
634  B = U * c->yuv2rgb_u2b_coeff;
635 
636  // 8 bits: 30 - 22 = 8 bits, 16 bits: 30 bits - 14 = 16 bits
637  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
638  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
639  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
640  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
641  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
642  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
643  dest += 6;
644  }
645 }
646 
647 static av_always_inline void
648 yuv2rgb48_2_c_template(SwsContext *c, const int32_t *buf[2],
649  const int32_t *ubuf[2], const int32_t *vbuf[2],
650  const int32_t *abuf[2], uint16_t *dest, int dstW,
651  int yalpha, int uvalpha, int y,
652  enum AVPixelFormat target)
653 {
654  const int32_t *buf0 = buf[0], *buf1 = buf[1],
655  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
656  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
657  int yalpha1 = 4096 - yalpha;
658  int uvalpha1 = 4096 - uvalpha;
659  int i;
660 
661  for (i = 0; i < ((dstW + 1) >> 1); i++) {
662  int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14;
663  int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14;
664  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha + (-128 << 23)) >> 14;
665  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha + (-128 << 23)) >> 14;
666  int R, G, B;
667 
668  Y1 -= c->yuv2rgb_y_offset;
669  Y2 -= c->yuv2rgb_y_offset;
670  Y1 *= c->yuv2rgb_y_coeff;
671  Y2 *= c->yuv2rgb_y_coeff;
672  Y1 += 1 << 13;
673  Y2 += 1 << 13;
674 
675  R = V * c->yuv2rgb_v2r_coeff;
676  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
677  B = U * c->yuv2rgb_u2b_coeff;
678 
679  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
680  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
681  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
682  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
683  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
684  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
685  dest += 6;
686  }
687 }
688 
689 static av_always_inline void
690 yuv2rgb48_1_c_template(SwsContext *c, const int32_t *buf0,
691  const int32_t *ubuf[2], const int32_t *vbuf[2],
692  const int32_t *abuf0, uint16_t *dest, int dstW,
693  int uvalpha, int y, enum AVPixelFormat target)
694 {
695  const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
696  int i;
697 
698  if (uvalpha < 2048) {
699  for (i = 0; i < ((dstW + 1) >> 1); i++) {
700  int Y1 = (buf0[i * 2] ) >> 2;
701  int Y2 = (buf0[i * 2 + 1]) >> 2;
702  int U = (ubuf0[i] + (-128 << 11)) >> 2;
703  int V = (vbuf0[i] + (-128 << 11)) >> 2;
704  int R, G, B;
705 
706  Y1 -= c->yuv2rgb_y_offset;
707  Y2 -= c->yuv2rgb_y_offset;
708  Y1 *= c->yuv2rgb_y_coeff;
709  Y2 *= c->yuv2rgb_y_coeff;
710  Y1 += 1 << 13;
711  Y2 += 1 << 13;
712 
713  R = V * c->yuv2rgb_v2r_coeff;
714  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
715  B = U * c->yuv2rgb_u2b_coeff;
716 
717  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
718  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
719  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
720  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
721  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
722  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
723  dest += 6;
724  }
725  } else {
726  const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
727  for (i = 0; i < ((dstW + 1) >> 1); i++) {
728  int Y1 = (buf0[i * 2] ) >> 2;
729  int Y2 = (buf0[i * 2 + 1]) >> 2;
730  int U = (ubuf0[i] + ubuf1[i] + (-128 << 12)) >> 3;
731  int V = (vbuf0[i] + vbuf1[i] + (-128 << 12)) >> 3;
732  int R, G, B;
733 
734  Y1 -= c->yuv2rgb_y_offset;
735  Y2 -= c->yuv2rgb_y_offset;
736  Y1 *= c->yuv2rgb_y_coeff;
737  Y2 *= c->yuv2rgb_y_coeff;
738  Y1 += 1 << 13;
739  Y2 += 1 << 13;
740 
741  R = V * c->yuv2rgb_v2r_coeff;
742  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
743  B = U * c->yuv2rgb_u2b_coeff;
744 
745  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
746  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
747  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
748  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
749  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
750  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
751  dest += 6;
752  }
753  }
754 }
755 
756 #undef output_pixel
757 #undef r_b
758 #undef b_r
759 
760 #define YUV2PACKED16WRAPPER(name, base, ext, fmt) \
761 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
762  const int16_t **_lumSrc, int lumFilterSize, \
763  const int16_t *chrFilter, const int16_t **_chrUSrc, \
764  const int16_t **_chrVSrc, int chrFilterSize, \
765  const int16_t **_alpSrc, uint8_t *_dest, int dstW, \
766  int y) \
767 { \
768  const int32_t **lumSrc = (const int32_t **) _lumSrc, \
769  **chrUSrc = (const int32_t **) _chrUSrc, \
770  **chrVSrc = (const int32_t **) _chrVSrc, \
771  **alpSrc = (const int32_t **) _alpSrc; \
772  uint16_t *dest = (uint16_t *) _dest; \
773  name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
774  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
775  alpSrc, dest, dstW, y, fmt); \
776 } \
777  \
778 static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \
779  const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
780  const int16_t *_abuf[2], uint8_t *_dest, int dstW, \
781  int yalpha, int uvalpha, int y) \
782 { \
783  const int32_t **buf = (const int32_t **) _buf, \
784  **ubuf = (const int32_t **) _ubuf, \
785  **vbuf = (const int32_t **) _vbuf, \
786  **abuf = (const int32_t **) _abuf; \
787  uint16_t *dest = (uint16_t *) _dest; \
788  name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
789  dest, dstW, yalpha, uvalpha, y, fmt); \
790 } \
791  \
792 static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \
793  const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
794  const int16_t *_abuf0, uint8_t *_dest, int dstW, \
795  int uvalpha, int y) \
796 { \
797  const int32_t *buf0 = (const int32_t *) _buf0, \
798  **ubuf = (const int32_t **) _ubuf, \
799  **vbuf = (const int32_t **) _vbuf, \
800  *abuf0 = (const int32_t *) _abuf0; \
801  uint16_t *dest = (uint16_t *) _dest; \
802  name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
803  dstW, uvalpha, y, fmt); \
804 }
805 
806 YUV2PACKED16WRAPPER(yuv2, rgb48, rgb48be, AV_PIX_FMT_RGB48BE)
807 YUV2PACKED16WRAPPER(yuv2, rgb48, rgb48le, AV_PIX_FMT_RGB48LE)
808 YUV2PACKED16WRAPPER(yuv2, rgb48, bgr48be, AV_PIX_FMT_BGR48BE)
809 YUV2PACKED16WRAPPER(yuv2, rgb48, bgr48le, AV_PIX_FMT_BGR48LE)
810 
811 /*
812  * Write out 2 RGB pixels in the target pixel format. This function takes a
813  * R/G/B LUT as generated by ff_yuv2rgb_c_init_tables(), which takes care of
814  * things like endianness conversion and shifting. The caller takes care of
815  * setting the correct offset in these tables from the chroma (U/V) values.
816  * This function then uses the luminance (Y1/Y2) values to write out the
817  * correct RGB values into the destination buffer.
818  */
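 /* For reference: the callers below (yuv2rgb_X_c_template() and friends) pass
 * r = c->table_rV[V], g = c->table_gU[U] + c->table_gV[V] and b = c->table_bU[U],
 * so this function only indexes those tables with Y1/Y2 (plus a small per-pixel
 * dither offset for the 16 bpp and smaller packed formats). */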
819 static av_always_inline void
820 yuv2rgb_write(uint8_t *_dest, int i, unsigned Y1, unsigned Y2,
821  unsigned A1, unsigned A2,
822  const void *_r, const void *_g, const void *_b, int y,
823  enum AVPixelFormat target, int hasAlpha)
824 {
825  if (target == AV_PIX_FMT_ARGB || target == AV_PIX_FMT_RGBA ||
826  target == AV_PIX_FMT_ABGR || target == AV_PIX_FMT_BGRA) {
827  uint32_t *dest = (uint32_t *) _dest;
828  const uint32_t *r = (const uint32_t *) _r;
829  const uint32_t *g = (const uint32_t *) _g;
830  const uint32_t *b = (const uint32_t *) _b;
831 
832 #if CONFIG_SMALL
833  int sh = hasAlpha ? ((target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24) : 0;
834 
835  dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0);
836  dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0);
837 #else
838  if (hasAlpha) {
839  int sh = (target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24;
840 
841  dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh);
842  dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh);
843  } else {
844  dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
845  dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
846  }
847 #endif
848  } else if (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) {
849  uint8_t *dest = (uint8_t *) _dest;
850  const uint8_t *r = (const uint8_t *) _r;
851  const uint8_t *g = (const uint8_t *) _g;
852  const uint8_t *b = (const uint8_t *) _b;
853 
854 #define r_b ((target == AV_PIX_FMT_RGB24) ? r : b)
855 #define b_r ((target == AV_PIX_FMT_RGB24) ? b : r)
856  dest[i * 6 + 0] = r_b[Y1];
857  dest[i * 6 + 1] = g[Y1];
858  dest[i * 6 + 2] = b_r[Y1];
859  dest[i * 6 + 3] = r_b[Y2];
860  dest[i * 6 + 4] = g[Y2];
861  dest[i * 6 + 5] = b_r[Y2];
862 #undef r_b
863 #undef b_r
864  } else if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565 ||
865  target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555 ||
866  target == AV_PIX_FMT_RGB444 || target == AV_PIX_FMT_BGR444) {
867  uint16_t *dest = (uint16_t *) _dest;
868  const uint16_t *r = (const uint16_t *) _r;
869  const uint16_t *g = (const uint16_t *) _g;
870  const uint16_t *b = (const uint16_t *) _b;
871  int dr1, dg1, db1, dr2, dg2, db2;
872 
873  if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565) {
874  dr1 = dither_2x2_8[ y & 1 ][0];
875  dg1 = dither_2x2_4[ y & 1 ][0];
876  db1 = dither_2x2_8[(y & 1) ^ 1][0];
877  dr2 = dither_2x2_8[ y & 1 ][1];
878  dg2 = dither_2x2_4[ y & 1 ][1];
879  db2 = dither_2x2_8[(y & 1) ^ 1][1];
880  } else if (target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555) {
881  dr1 = dither_2x2_8[ y & 1 ][0];
882  dg1 = dither_2x2_8[ y & 1 ][1];
883  db1 = dither_2x2_8[(y & 1) ^ 1][0];
884  dr2 = dither_2x2_8[ y & 1 ][1];
885  dg2 = dither_2x2_8[ y & 1 ][0];
886  db2 = dither_2x2_8[(y & 1) ^ 1][1];
887  } else {
888  dr1 = ff_dither_4x4_16[ y & 3 ][0];
889  dg1 = ff_dither_4x4_16[ y & 3 ][1];
890  db1 = ff_dither_4x4_16[(y & 3) ^ 3][0];
891  dr2 = ff_dither_4x4_16[ y & 3 ][1];
892  dg2 = ff_dither_4x4_16[ y & 3 ][0];
893  db2 = ff_dither_4x4_16[(y & 3) ^ 3][1];
894  }
895 
896  dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
897  dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
898  } else /* 8/4 bits */ {
899  uint8_t *dest = (uint8_t *) _dest;
900  const uint8_t *r = (const uint8_t *) _r;
901  const uint8_t *g = (const uint8_t *) _g;
902  const uint8_t *b = (const uint8_t *) _b;
903  int dr1, dg1, db1, dr2, dg2, db2;
904 
905  if (target == AV_PIX_FMT_RGB8 || target == AV_PIX_FMT_BGR8) {
906  const uint8_t * const d64 = ff_dither_8x8_73[y & 7];
907  const uint8_t * const d32 = ff_dither_8x8_32[y & 7];
908  dr1 = dg1 = d32[(i * 2 + 0) & 7];
909  db1 = d64[(i * 2 + 0) & 7];
910  dr2 = dg2 = d32[(i * 2 + 1) & 7];
911  db2 = d64[(i * 2 + 1) & 7];
912  } else {
913  const uint8_t * const d64 = ff_dither_8x8_73 [y & 7];
914  const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
915  dr1 = db1 = d128[(i * 2 + 0) & 7];
916  dg1 = d64[(i * 2 + 0) & 7];
917  dr2 = db2 = d128[(i * 2 + 1) & 7];
918  dg2 = d64[(i * 2 + 1) & 7];
919  }
920 
921  if (target == AV_PIX_FMT_RGB4 || target == AV_PIX_FMT_BGR4) {
922  dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] +
923  ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4);
924  } else {
925  dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
926  dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
927  }
928  }
929 }
930 
931 static av_always_inline void
932 yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter,
933  const int16_t **lumSrc, int lumFilterSize,
934  const int16_t *chrFilter, const int16_t **chrUSrc,
935  const int16_t **chrVSrc, int chrFilterSize,
936  const int16_t **alpSrc, uint8_t *dest, int dstW,
937  int y, enum AVPixelFormat target, int hasAlpha)
938 {
939  int i;
940 
941  for (i = 0; i < ((dstW + 1) >> 1); i++) {
942  int j, A1, A2;
943  int Y1 = 1 << 18;
944  int Y2 = 1 << 18;
945  int U = 1 << 18;
946  int V = 1 << 18;
947  const void *r, *g, *b;
948 
949  for (j = 0; j < lumFilterSize; j++) {
950  Y1 += lumSrc[j][i * 2] * lumFilter[j];
951  Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
952  }
953  for (j = 0; j < chrFilterSize; j++) {
954  U += chrUSrc[j][i] * chrFilter[j];
955  V += chrVSrc[j][i] * chrFilter[j];
956  }
957  Y1 >>= 19;
958  Y2 >>= 19;
959  U >>= 19;
960  V >>= 19;
961  if ((Y1 | Y2 | U | V) & 0x100) {
962  Y1 = av_clip_uint8(Y1);
963  Y2 = av_clip_uint8(Y2);
964  U = av_clip_uint8(U);
965  V = av_clip_uint8(V);
966  }
967  if (hasAlpha) {
968  A1 = 1 << 18;
969  A2 = 1 << 18;
970  for (j = 0; j < lumFilterSize; j++) {
971  A1 += alpSrc[j][i * 2 ] * lumFilter[j];
972  A2 += alpSrc[j][i * 2 + 1] * lumFilter[j];
973  }
974  A1 >>= 19;
975  A2 >>= 19;
976  if ((A1 | A2) & 0x100) {
977  A1 = av_clip_uint8(A1);
978  A2 = av_clip_uint8(A2);
979  }
980  }
981 
982  /* FIXME fix tables so that clipping is not needed and then use _NOCLIP*/
983  r = c->table_rV[V];
984  g = (c->table_gU[U] + c->table_gV[V]);
985  b = c->table_bU[U];
986 
987  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
988  r, g, b, y, target, hasAlpha);
989  }
990 }
991 
992 static av_always_inline void
993 yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2],
994  const int16_t *ubuf[2], const int16_t *vbuf[2],
995  const int16_t *abuf[2], uint8_t *dest, int dstW,
996  int yalpha, int uvalpha, int y,
997  enum AVPixelFormat target, int hasAlpha)
998 {
999  const int16_t *buf0 = buf[0], *buf1 = buf[1],
1000  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1001  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1002  *abuf0 = hasAlpha ? abuf[0] : NULL,
1003  *abuf1 = hasAlpha ? abuf[1] : NULL;
1004  int yalpha1 = 4096 - yalpha;
1005  int uvalpha1 = 4096 - uvalpha;
1006  int i;
1007 
1008  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1009  int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
1010  int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
1011  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
1012  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
1013  int A1, A2;
1014  const void *r, *g, *b;
1015 
1016  Y1 = av_clip_uint8(Y1);
1017  Y2 = av_clip_uint8(Y2);
1018  U = av_clip_uint8(U);
1019  V = av_clip_uint8(V);
1020 
1021  r = c->table_rV[V];
1022  g = (c->table_gU[U] + c->table_gV[V]);
1023  b = c->table_bU[U];
1024 
1025  if (hasAlpha) {
1026  A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 19;
1027  A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19;
1028  A1 = av_clip_uint8(A1);
1029  A2 = av_clip_uint8(A2);
1030  }
1031 
1032  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1033  r, g, b, y, target, hasAlpha);
1034  }
1035 }
1036 
1037 static av_always_inline void
1038 yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0,
1039  const int16_t *ubuf[2], const int16_t *vbuf[2],
1040  const int16_t *abuf0, uint8_t *dest, int dstW,
1041  int uvalpha, int y, enum AVPixelFormat target,
1042  int hasAlpha)
1043 {
1044  const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1045  int i;
1046 
1047  if (uvalpha < 2048) {
1048  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1049  int Y1 = buf0[i * 2] >> 7;
1050  int Y2 = buf0[i * 2 + 1] >> 7;
1051  int U = ubuf0[i] >> 7;
1052  int V = vbuf0[i] >> 7;
1053  int A1, A2;
1054  const void *r, *g, *b;
1055 
1056  Y1 = av_clip_uint8(Y1);
1057  Y2 = av_clip_uint8(Y2);
1058  U = av_clip_uint8(U);
1059  V = av_clip_uint8(V);
1060 
1061  r = c->table_rV[V];
1062  g = (c->table_gU[U] + c->table_gV[V]);
1063  b = c->table_bU[U];
1064 
1065  if (hasAlpha) {
1066  A1 = abuf0[i * 2 ] >> 7;
1067  A2 = abuf0[i * 2 + 1] >> 7;
1068  A1 = av_clip_uint8(A1);
1069  A2 = av_clip_uint8(A2);
1070  }
1071 
1072  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1073  r, g, b, y, target, hasAlpha);
1074  }
1075  } else {
1076  const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1077  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1078  int Y1 = buf0[i * 2] >> 7;
1079  int Y2 = buf0[i * 2 + 1] >> 7;
1080  int U = (ubuf0[i] + ubuf1[i]) >> 8;
1081  int V = (vbuf0[i] + vbuf1[i]) >> 8;
1082  int A1, A2;
1083  const void *r, *g, *b;
1084 
1085  Y1 = av_clip_uint8(Y1);
1086  Y2 = av_clip_uint8(Y2);
1087  U = av_clip_uint8(U);
1088  V = av_clip_uint8(V);
1089 
1090  r = c->table_rV[V];
1091  g = (c->table_gU[U] + c->table_gV[V]);
1092  b = c->table_bU[U];
1093 
1094  if (hasAlpha) {
1095  A1 = abuf0[i * 2 ] >> 7;
1096  A2 = abuf0[i * 2 + 1] >> 7;
1097  A1 = av_clip_uint8(A1);
1098  A2 = av_clip_uint8(A2);
1099  }
1100 
1101  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1102  r, g, b, y, target, hasAlpha);
1103  }
1104  }
1105 }
1106 
1107 #define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
1108 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
1109  const int16_t **lumSrc, int lumFilterSize, \
1110  const int16_t *chrFilter, const int16_t **chrUSrc, \
1111  const int16_t **chrVSrc, int chrFilterSize, \
1112  const int16_t **alpSrc, uint8_t *dest, int dstW, \
1113  int y) \
1114 { \
1115  name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
1116  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
1117  alpSrc, dest, dstW, y, fmt, hasAlpha); \
1118 }
1119 #define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
1120 YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
1121 static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
1122  const int16_t *ubuf[2], const int16_t *vbuf[2], \
1123  const int16_t *abuf[2], uint8_t *dest, int dstW, \
1124  int yalpha, int uvalpha, int y) \
1125 { \
1126  name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
1127  dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \
1128 } \
1129  \
1130 static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
1131  const int16_t *ubuf[2], const int16_t *vbuf[2], \
1132  const int16_t *abuf0, uint8_t *dest, int dstW, \
1133  int uvalpha, int y) \
1134 { \
1135  name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
1136  dstW, uvalpha, y, fmt, hasAlpha); \
1137 }
1138 
1139 #if CONFIG_SMALL
1140 YUV2RGBWRAPPER(yuv2rgb,, 32_1, AV_PIX_FMT_RGB32_1, CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
1141 YUV2RGBWRAPPER(yuv2rgb,, 32, AV_PIX_FMT_RGB32, CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
1142 #else
1143 #if CONFIG_SWSCALE_ALPHA
1144 YUV2RGBWRAPPER(yuv2rgb,, a32_1, AV_PIX_FMT_RGB32_1, 1)
1145 YUV2RGBWRAPPER(yuv2rgb,, a32, AV_PIX_FMT_RGB32, 1)
1146 #endif
1147 YUV2RGBWRAPPER(yuv2rgb,, x32_1, AV_PIX_FMT_RGB32_1, 0)
1148 YUV2RGBWRAPPER(yuv2rgb,, x32, AV_PIX_FMT_RGB32, 0)
1149 #endif
1150 YUV2RGBWRAPPER(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24, 0)
1151 YUV2RGBWRAPPER(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24, 0)
1152 YUV2RGBWRAPPER(yuv2rgb,, 16, AV_PIX_FMT_RGB565, 0)
1153 YUV2RGBWRAPPER(yuv2rgb,, 15, AV_PIX_FMT_RGB555, 0)
1154 YUV2RGBWRAPPER(yuv2rgb,, 12, AV_PIX_FMT_RGB444, 0)
1155 YUV2RGBWRAPPER(yuv2rgb,, 8, AV_PIX_FMT_RGB8, 0)
1156 YUV2RGBWRAPPER(yuv2rgb,, 4, AV_PIX_FMT_RGB4, 0)
1157 YUV2RGBWRAPPER(yuv2rgb,, 4b, AV_PIX_FMT_RGB4_BYTE, 0)
1158 
1159 static av_always_inline void
1160 yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
1161  const int16_t **lumSrc, int lumFilterSize,
1162  const int16_t *chrFilter, const int16_t **chrUSrc,
1163  const int16_t **chrVSrc, int chrFilterSize,
1164  const int16_t **alpSrc, uint8_t *dest,
1165  int dstW, int y, enum AVPixelFormat target, int hasAlpha)
1166 {
1167  int i;
1168  int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
1169 
1170  for (i = 0; i < dstW; i++) {
1171  int j;
1172  int Y = 0;
1173  int U = -128 << 19;
1174  int V = -128 << 19;
1175  int R, G, B, A;
1176 
1177  for (j = 0; j < lumFilterSize; j++) {
1178  Y += lumSrc[j][i] * lumFilter[j];
1179  }
1180  for (j = 0; j < chrFilterSize; j++) {
1181  U += chrUSrc[j][i] * chrFilter[j];
1182  V += chrVSrc[j][i] * chrFilter[j];
1183  }
1184  Y >>= 10;
1185  U >>= 10;
1186  V >>= 10;
1187  if (hasAlpha) {
1188  A = 1 << 21;
1189  for (j = 0; j < lumFilterSize; j++) {
1190  A += alpSrc[j][i] * lumFilter[j];
1191  }
1192  A >>= 19;
1193  if (A & 0x100)
1194  A = av_clip_uint8(A);
1195  }
1196  Y -= c->yuv2rgb_y_offset;
1197  Y *= c->yuv2rgb_y_coeff;
1198  Y += 1 << 21;
1199  R = Y + V*c->yuv2rgb_v2r_coeff;
1200  G = Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;
1201  B = Y + U*c->yuv2rgb_u2b_coeff;
1202  if ((R | G | B) & 0xC0000000) {
1203  R = av_clip_uintp2(R, 30);
1204  G = av_clip_uintp2(G, 30);
1205  B = av_clip_uintp2(B, 30);
1206  }
1207 
1208  switch(target) {
1209  case AV_PIX_FMT_ARGB:
1210  dest[0] = hasAlpha ? A : 255;
1211  dest[1] = R >> 22;
1212  dest[2] = G >> 22;
1213  dest[3] = B >> 22;
1214  break;
1215  case AV_PIX_FMT_RGB24:
1216  dest[0] = R >> 22;
1217  dest[1] = G >> 22;
1218  dest[2] = B >> 22;
1219  break;
1220  case AV_PIX_FMT_RGBA:
1221  dest[0] = R >> 22;
1222  dest[1] = G >> 22;
1223  dest[2] = B >> 22;
1224  dest[3] = hasAlpha ? A : 255;
1225  break;
1226  case AV_PIX_FMT_ABGR:
1227  dest[0] = hasAlpha ? A : 255;
1228  dest[1] = B >> 22;
1229  dest[2] = G >> 22;
1230  dest[3] = R >> 22;
1231  dest += 4;
1232  break;
1233  case AV_PIX_FMT_BGR24:
1234  dest[0] = B >> 22;
1235  dest[1] = G >> 22;
1236  dest[2] = R >> 22;
1237  break;
1238  case AV_PIX_FMT_BGRA:
1239  dest[0] = B >> 22;
1240  dest[1] = G >> 22;
1241  dest[2] = R >> 22;
1242  dest[3] = hasAlpha ? A : 255;
1243  break;
1244  }
1245  dest += step;
1246  }
1247 }
1248 
1249 #if CONFIG_SMALL
1250 YUV2RGBWRAPPERX(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
1251 YUV2RGBWRAPPERX(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
1252 YUV2RGBWRAPPERX(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
1253 YUV2RGBWRAPPERX(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
1254 #else
1255 #if CONFIG_SWSCALE_ALPHA
1256 YUV2RGBWRAPPERX(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, 1)
1257 YUV2RGBWRAPPERX(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, 1)
1258 YUV2RGBWRAPPERX(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, 1)
1259 YUV2RGBWRAPPERX(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, 1)
1260 #endif
1261 YUV2RGBWRAPPERX(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
1262 YUV2RGBWRAPPERX(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
1263 YUV2RGBWRAPPERX(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
1264 YUV2RGBWRAPPERX(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
1265 #endif
1266 YUV2RGBWRAPPERX(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
1267 YUV2RGBWRAPPERX(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
1268 
1269 static void
1270 yuv2gbrp_full_X_c(SwsContext *c, const int16_t *lumFilter,
1271  const int16_t **lumSrc, int lumFilterSize,
1272  const int16_t *chrFilter, const int16_t **chrUSrc,
1273  const int16_t **chrVSrc, int chrFilterSize,
1274  const int16_t **alpSrc, uint8_t **dest,
1275  int dstW, int y)
1276 {
1277  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);
1278  int i;
1279  int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrc;
1280  uint16_t **dest16 = (uint16_t**)dest;
1281  int SH = 22 + 8 - desc->comp[0].depth;
1282 
1283  for (i = 0; i < dstW; i++) {
1284  int j;
1285  int Y = 1 << 9;
1286  int U = (1 << 9) - (128 << 19);
1287  int V = (1 << 9) - (128 << 19);
1288  int R, G, B, A;
1289 
1290  for (j = 0; j < lumFilterSize; j++)
1291  Y += lumSrc[j][i] * lumFilter[j];
1292 
1293  for (j = 0; j < chrFilterSize; j++) {
1294  U += chrUSrc[j][i] * chrFilter[j];
1295  V += chrVSrc[j][i] * chrFilter[j];
1296  }
1297 
1298  Y >>= 10;
1299  U >>= 10;
1300  V >>= 10;
1301 
1302  if (hasAlpha) {
1303  A = 1 << 18;
1304 
1305  for (j = 0; j < lumFilterSize; j++)
1306  A += alpSrc[j][i] * lumFilter[j];
1307 
1308  A >>= 19;
1309 
1310  if (A & 0x100)
1311  A = av_clip_uint8(A);
1312  }
1313 
1314  Y -= c->yuv2rgb_y_offset;
1315  Y *= c->yuv2rgb_y_coeff;
1316  Y += 1 << 21;
1317  R = Y + V * c->yuv2rgb_v2r_coeff;
1318  G = Y + V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1319  B = Y + U * c->yuv2rgb_u2b_coeff;
1320 
1321  if ((R | G | B) & 0xC0000000) {
1322  R = av_clip_uintp2(R, 30);
1323  G = av_clip_uintp2(G, 30);
1324  B = av_clip_uintp2(B, 30);
1325  }
1326 
1327  if (SH != 22) {
1328  dest16[0][i] = G >> SH;
1329  dest16[1][i] = B >> SH;
1330  dest16[2][i] = R >> SH;
1331  if (hasAlpha)
1332  dest16[3][i] = A;
1333  } else {
1334  dest[0][i] = G >> 22;
1335  dest[1][i] = B >> 22;
1336  dest[2][i] = R >> 22;
1337  if (hasAlpha)
1338  dest[3][i] = A;
1339  }
1340  }
1341  if (SH != 22 && (!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
1342  for (i = 0; i < dstW; i++) {
1343  dest16[0][i] = av_bswap16(dest16[0][i]);
1344  dest16[1][i] = av_bswap16(dest16[1][i]);
1345  dest16[2][i] = av_bswap16(dest16[2][i]);
1346  if (hasAlpha)
1347  dest16[3][i] = av_bswap16(dest16[3][i]);
1348  }
1349  }
1350 }
1351 
1352 av_cold void ff_sws_init_output_funcs(SwsContext *c,
1353  yuv2planar1_fn *yuv2plane1,
1354  yuv2planarX_fn *yuv2planeX,
1355  yuv2interleavedX_fn *yuv2nv12cX,
1356  yuv2packed1_fn *yuv2packed1,
1357  yuv2packed2_fn *yuv2packed2,
1358  yuv2packedX_fn *yuv2packedX,
1359  yuv2anyX_fn *yuv2anyX)
1360 {
1361  enum AVPixelFormat dstFormat = c->dstFormat;
1362  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
1363 
1364  if (is16BPS(dstFormat)) {
1365  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_c : yuv2planeX_16LE_c;
1366  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_c : yuv2plane1_16LE_c;
1367  } else if (is9_OR_10BPS(dstFormat)) {
1368  if (desc->comp[0].depth == 9) {
1369  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_c : yuv2planeX_9LE_c;
1370  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_c : yuv2plane1_9LE_c;
1371  } else {
1372  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_c : yuv2planeX_10LE_c;
1373  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_c : yuv2plane1_10LE_c;
1374  }
1375  } else {
1376  *yuv2plane1 = yuv2plane1_8_c;
1377  *yuv2planeX = yuv2planeX_8_c;
1378  if (dstFormat == AV_PIX_FMT_NV12 || dstFormat == AV_PIX_FMT_NV21)
1379  *yuv2nv12cX = yuv2nv12cX_c;
1380  }
1381 
1382  if(c->flags & SWS_FULL_CHR_H_INT) {
1383  switch (dstFormat) {
1384  case AV_PIX_FMT_RGBA:
1385 #if CONFIG_SMALL
1386  *yuv2packedX = yuv2rgba32_full_X_c;
1387 #else
1388 #if CONFIG_SWSCALE_ALPHA
1389  if (c->alpPixBuf) {
1390  *yuv2packedX = yuv2rgba32_full_X_c;
1391  } else
1392 #endif /* CONFIG_SWSCALE_ALPHA */
1393  {
1394  *yuv2packedX = yuv2rgbx32_full_X_c;
1395  }
1396 #endif /* !CONFIG_SMALL */
1397  break;
1398  case AV_PIX_FMT_ARGB:
1399 #if CONFIG_SMALL
1400  *yuv2packedX = yuv2argb32_full_X_c;
1401 #else
1402 #if CONFIG_SWSCALE_ALPHA
1403  if (c->alpPixBuf) {
1404  *yuv2packedX = yuv2argb32_full_X_c;
1405  } else
1406 #endif /* CONFIG_SWSCALE_ALPHA */
1407  {
1408  *yuv2packedX = yuv2xrgb32_full_X_c;
1409  }
1410 #endif /* !CONFIG_SMALL */
1411  break;
1412  case AV_PIX_FMT_BGRA:
1413 #if CONFIG_SMALL
1414  *yuv2packedX = yuv2bgra32_full_X_c;
1415 #else
1416 #if CONFIG_SWSCALE_ALPHA
1417  if (c->alpPixBuf) {
1418  *yuv2packedX = yuv2bgra32_full_X_c;
1419  } else
1420 #endif /* CONFIG_SWSCALE_ALPHA */
1421  {
1422  *yuv2packedX = yuv2bgrx32_full_X_c;
1423  }
1424 #endif /* !CONFIG_SMALL */
1425  break;
1426  case AV_PIX_FMT_ABGR:
1427 #if CONFIG_SMALL
1428  *yuv2packedX = yuv2abgr32_full_X_c;
1429 #else
1430 #if CONFIG_SWSCALE_ALPHA
1431  if (c->alpPixBuf) {
1432  *yuv2packedX = yuv2abgr32_full_X_c;
1433  } else
1434 #endif /* CONFIG_SWSCALE_ALPHA */
1435  {
1436  *yuv2packedX = yuv2xbgr32_full_X_c;
1437  }
1438 #endif /* !CONFIG_SMALL */
1439  break;
1440  case AV_PIX_FMT_RGB24:
1441  *yuv2packedX = yuv2rgb24_full_X_c;
1442  break;
1443  case AV_PIX_FMT_BGR24:
1444  *yuv2packedX = yuv2bgr24_full_X_c;
1445  break;
1446  case AV_PIX_FMT_GBRP:
1447  case AV_PIX_FMT_GBRP9BE:
1448  case AV_PIX_FMT_GBRP9LE:
1449  case AV_PIX_FMT_GBRP10BE:
1450  case AV_PIX_FMT_GBRP10LE:
1451  case AV_PIX_FMT_GBRP16BE:
1452  case AV_PIX_FMT_GBRP16LE:
1453  case AV_PIX_FMT_GBRAP:
1454  *yuv2anyX = yuv2gbrp_full_X_c;
1455  break;
1456  }
1457  } else {
1458  switch (dstFormat) {
1459  case AV_PIX_FMT_RGB48LE:
1460  *yuv2packed1 = yuv2rgb48le_1_c;
1461  *yuv2packed2 = yuv2rgb48le_2_c;
1462  *yuv2packedX = yuv2rgb48le_X_c;
1463  break;
1464  case AV_PIX_FMT_RGB48BE:
1465  *yuv2packed1 = yuv2rgb48be_1_c;
1466  *yuv2packed2 = yuv2rgb48be_2_c;
1467  *yuv2packedX = yuv2rgb48be_X_c;
1468  break;
1469  case AV_PIX_FMT_BGR48LE:
1470  *yuv2packed1 = yuv2bgr48le_1_c;
1471  *yuv2packed2 = yuv2bgr48le_2_c;
1472  *yuv2packedX = yuv2bgr48le_X_c;
1473  break;
1474  case AV_PIX_FMT_BGR48BE:
1475  *yuv2packed1 = yuv2bgr48be_1_c;
1476  *yuv2packed2 = yuv2bgr48be_2_c;
1477  *yuv2packedX = yuv2bgr48be_X_c;
1478  break;
1479  case AV_PIX_FMT_RGB32:
1480  case AV_PIX_FMT_BGR32:
1481 #if CONFIG_SMALL
1482  *yuv2packed1 = yuv2rgb32_1_c;
1483  *yuv2packed2 = yuv2rgb32_2_c;
1484  *yuv2packedX = yuv2rgb32_X_c;
1485 #else
1486 #if CONFIG_SWSCALE_ALPHA
1487  if (c->alpPixBuf) {
1488  *yuv2packed1 = yuv2rgba32_1_c;
1489  *yuv2packed2 = yuv2rgba32_2_c;
1490  *yuv2packedX = yuv2rgba32_X_c;
1491  } else
1492 #endif /* CONFIG_SWSCALE_ALPHA */
1493  {
1494  *yuv2packed1 = yuv2rgbx32_1_c;
1495  *yuv2packed2 = yuv2rgbx32_2_c;
1496  *yuv2packedX = yuv2rgbx32_X_c;
1497  }
1498 #endif /* !CONFIG_SMALL */
1499  break;
1500  case AV_PIX_FMT_RGB32_1:
1501  case AV_PIX_FMT_BGR32_1:
1502 #if CONFIG_SMALL
1503  *yuv2packed1 = yuv2rgb32_1_1_c;
1504  *yuv2packed2 = yuv2rgb32_1_2_c;
1505  *yuv2packedX = yuv2rgb32_1_X_c;
1506 #else
1507 #if CONFIG_SWSCALE_ALPHA
1508  if (c->alpPixBuf) {
1509  *yuv2packed1 = yuv2rgba32_1_1_c;
1510  *yuv2packed2 = yuv2rgba32_1_2_c;
1511  *yuv2packedX = yuv2rgba32_1_X_c;
1512  } else
1513 #endif /* CONFIG_SWSCALE_ALPHA */
1514  {
1515  *yuv2packed1 = yuv2rgbx32_1_1_c;
1516  *yuv2packed2 = yuv2rgbx32_1_2_c;
1517  *yuv2packedX = yuv2rgbx32_1_X_c;
1518  }
1519 #endif /* !CONFIG_SMALL */
1520  break;
1521  case AV_PIX_FMT_RGB24:
1522  *yuv2packed1 = yuv2rgb24_1_c;
1523  *yuv2packed2 = yuv2rgb24_2_c;
1524  *yuv2packedX = yuv2rgb24_X_c;
1525  break;
1526  case AV_PIX_FMT_BGR24:
1527  *yuv2packed1 = yuv2bgr24_1_c;
1528  *yuv2packed2 = yuv2bgr24_2_c;
1529  *yuv2packedX = yuv2bgr24_X_c;
1530  break;
1531  case AV_PIX_FMT_RGB565LE:
1532  case AV_PIX_FMT_RGB565BE:
1533  case AV_PIX_FMT_BGR565LE:
1534  case AV_PIX_FMT_BGR565BE:
1535  *yuv2packed1 = yuv2rgb16_1_c;
1536  *yuv2packed2 = yuv2rgb16_2_c;
1537  *yuv2packedX = yuv2rgb16_X_c;
1538  break;
1539  case AV_PIX_FMT_RGB555LE:
1540  case AV_PIX_FMT_RGB555BE:
1541  case AV_PIX_FMT_BGR555LE:
1542  case AV_PIX_FMT_BGR555BE:
1543  *yuv2packed1 = yuv2rgb15_1_c;
1544  *yuv2packed2 = yuv2rgb15_2_c;
1545  *yuv2packedX = yuv2rgb15_X_c;
1546  break;
1547  case AV_PIX_FMT_RGB444LE:
1548  case AV_PIX_FMT_RGB444BE:
1549  case AV_PIX_FMT_BGR444LE:
1550  case AV_PIX_FMT_BGR444BE:
1551  *yuv2packed1 = yuv2rgb12_1_c;
1552  *yuv2packed2 = yuv2rgb12_2_c;
1553  *yuv2packedX = yuv2rgb12_X_c;
1554  break;
1555  case AV_PIX_FMT_RGB8:
1556  case AV_PIX_FMT_BGR8:
1557  *yuv2packed1 = yuv2rgb8_1_c;
1558  *yuv2packed2 = yuv2rgb8_2_c;
1559  *yuv2packedX = yuv2rgb8_X_c;
1560  break;
1561  case AV_PIX_FMT_RGB4:
1562  case AV_PIX_FMT_BGR4:
1563  *yuv2packed1 = yuv2rgb4_1_c;
1564  *yuv2packed2 = yuv2rgb4_2_c;
1565  *yuv2packedX = yuv2rgb4_X_c;
1566  break;
1567  case AV_PIX_FMT_RGB4_BYTE:
1568  case AV_PIX_FMT_BGR4_BYTE:
1569  *yuv2packed1 = yuv2rgb4b_1_c;
1570  *yuv2packed2 = yuv2rgb4b_2_c;
1571  *yuv2packedX = yuv2rgb4b_X_c;
1572  break;
1573  }
1574  }
1575  switch (dstFormat) {
1576  case AV_PIX_FMT_MONOWHITE:
1577  *yuv2packed1 = yuv2monowhite_1_c;
1578  *yuv2packed2 = yuv2monowhite_2_c;
1579  *yuv2packedX = yuv2monowhite_X_c;
1580  break;
1581  case AV_PIX_FMT_MONOBLACK:
1582  *yuv2packed1 = yuv2monoblack_1_c;
1583  *yuv2packed2 = yuv2monoblack_2_c;
1584  *yuv2packedX = yuv2monoblack_X_c;
1585  break;
1586  case AV_PIX_FMT_YUYV422:
1587  *yuv2packed1 = yuv2yuyv422_1_c;
1588  *yuv2packed2 = yuv2yuyv422_2_c;
1589  *yuv2packedX = yuv2yuyv422_X_c;
1590  break;
1591  case AV_PIX_FMT_YVYU422:
1592  *yuv2packed1 = yuv2yvyu422_1_c;
1593  *yuv2packed2 = yuv2yvyu422_2_c;
1594  *yuv2packedX = yuv2yvyu422_X_c;
1595  break;
1596  case AV_PIX_FMT_UYVY422:
1597  *yuv2packed1 = yuv2uyvy422_1_c;
1598  *yuv2packed2 = yuv2uyvy422_2_c;
1599  *yuv2packedX = yuv2uyvy422_X_c;
1600  break;
1601  }
1602 }
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:78
#define G
Definition: huffyuv.h:50
int16_t ** alpPixBuf
Ring buffer for scaled horizontal alpha plane lines to be fed to the vertical scaler.
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:1768
Definition: vf_drawbox.c:37
#define A1
Definition: binkdsp.c:31
#define b_r
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:61
#define B_R
Definition: output.c:580
packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in ...
Definition: pixfmt.h:81
const char * desc
Definition: nvenc.c:101
#define R
Definition: huffyuv.h:51
int acc
Definition: yuv2rgb.c:482
static void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
Definition: g2meet.c:276
static av_always_inline void yuv2rgb48_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:648
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:162
packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0 ...
Definition: pixfmt.h:112
#define av_bswap16
Definition: bswap.h:31
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:58
static av_always_inline int is16BPS(enum AVPixelFormat pix_fmt)
external API header
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
Definition: pixfmt.h:115
packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0 ...
Definition: pixfmt.h:140
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:252
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t)
Definition: output.c:214
static av_always_inline void yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:348
packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in ...
Definition: pixfmt.h:84
Macro definitions for various function/variable attributes.
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
Definition: pixfmt.h:110
packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
Definition: pixfmt.h:82
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:119
uint8_t
#define av_cold
Definition: attributes.h:66
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:160
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:107
Definition: vf_drawbox.c:37
static av_always_inline void yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1038
static void yuv2gbrp_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Definition: output.c:1270
#define b
Definition: input.c:52
#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha)
Definition: output.c:1107
uint8_t * table_bU[256]
packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0 ...
Definition: pixfmt.h:139
#define SWS_FULL_CHR_H_INT
Definition: swscale.h:78
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
Definition: pixfmt.h:109
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:91
static const uint8_t dither_2x2_8[2][8]
Definition: output.c:43
planar GBR 4:4:4 48bpp, big-endian
Definition: pixfmt.h:167
void(* yuv2planar1_fn)(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output without any additional vertical scaling (...
external api for the swscale stuff
enum AVPixelFormat dstFormat -- Destination pixel format.
#define B (huffyuv.h:49)
yuv2packedX_fn yuv2packedX
#define r (input.c:51)
#define src (vp8dsp.c:254)
planar GBR 4:4:4 27bpp, big-endian (pixfmt.h:163)
yuv2anyX_fn yuv2anyX
#define A2 (binkdsp.c:32)
#define AV_PIX_FMT_BGR32_1 (pixfmt.h:245)
#define r_b
const uint8_t * d64 (yuv2rgb.c:450)
packed BGRA 8:8:8:8, 32bpp, BGRABGRA... (pixfmt.h:92)
static const uint8_t dither_2x2_4[2][8] (output.c:38)
g (yuv2rgb.c:546)
yuv2packed1_fn yuv2packed1
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) (pixfmt.h:86)
const uint8_t ff_dither_8x8_32[8][8] (output.c:55)
#define output_pixel(pos, val, bias, signedness) (output.c:581)
packed ARGB 8:8:8:8, 32bpp, ARGBARGB... (pixfmt.h:89)
int chrDstW -- Width of destination chroma planes.
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian (pixfmt.h:148)
#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) (output.c:1119)
packed RGBA 8:8:8:8, 32bpp, RGBARGBA... (pixfmt.h:90)
static av_always_inline void yuv2rgb48_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target) (output.c:690)
static av_always_inline void yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW, int big_endian, int output_bits) (output.c:134)
static av_always_inline void yuv2422_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target) (output.c:533)
static void filter(MpegAudioContext *s, int ch, const short *samples, int incr) (mpegaudioenc.c:307)
uint64_t flags -- Combination of AV_PIX_FMT_FLAG_... (pixdesc.h:105)
static av_always_inline void yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target) (output.c:465)
as above, but U and V bytes are swapped (pixfmt.h:87)
static av_always_inline void yuv2planeX_16_c_template(const int16_t *filter, int filterSize, const int32_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits) (output.c:147)
#define V (options_table.h:34)
static av_always_inline void yuv2rgb_write(uint8_t *_dest, int i, unsigned Y1, unsigned Y2, unsigned A1, unsigned A2, const void *_r, const void *_g, const void *_b, int y, enum AVPixelFormat target, int hasAlpha) (output.c:820)
packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb) (pixfmt.h:85)
#define YUV2PACKED16WRAPPER(name, base, ext, fmt) (output.c:760)
static av_always_inline int is9_OR_10BPS(enum AVPixelFormat pix_fmt)
yuv2planar1_fn yuv2plane1
yuv2interleavedX_fn yuv2nv12cX
int32_t
packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb (pixfmt.h:201)
int table_gV[256]
packed RGB 8:8:8, 24bpp, BGRBGR... (pixfmt.h:62)
static av_always_inline void yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha) (output.c:1160)
#define YUV2PACKEDWRAPPER(name, base, ext, fmt) (output.c:411)
const uint8_t ff_dither_4x4_16[4][8] (output.c:48)
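The dither tables listed here (dither_2x2_4, dither_2x2_8, ff_dither_4x4_16, ff_dither_8x8_32, ff_dither_8x8_73, ff_dither_8x8_220) are ordered-dither matrices: the entry for a pixel's position is added to the sample before it is truncated to the destination bit depth, which trades banding for a fixed spatial pattern. A hedged sketch of the idea, quantizing one 8-bit channel to 4 bits with a hypothetical Bayer matrix (the real tables and scaling in output.c are tuned per destination format):

#include <stdint.h>

/* Hypothetical 4x4 Bayer matrix whose entries span one output
 * quantization step (16 input levels per output level for 8 -> 4 bits). */
static const uint8_t bayer4x4[4][4] = {
    {  0,  8,  2, 10 },
    { 12,  4, 14,  6 },
    {  3, 11,  1,  9 },
    { 15,  7, 13,  5 },
};

/* Quantize an 8-bit sample at (x, y) down to 4 bits with ordered dither:
 * adding the position-dependent threshold before the shift spreads the
 * rounding error over a 4x4 neighbourhood. */
static uint8_t quant8to4_dither(uint8_t v, int x, int y)
{
    int t = v + bayer4x4[y & 3][x & 3];
    if (t > 255)
        t = 255;
    return (uint8_t)(t >> 4);
}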
int dstW -- Width of destination luma/alpha planes.
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian (pixfmt.h:147)
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian (pixfmt.h:114)
#define AV_PIX_FMT_BGR555 (pixfmt.h:256)
#define AV_PIX_FMT_BGR32 (pixfmt.h:244)
static av_always_inline int isBE(enum AVPixelFormat pix_fmt)
static av_always_inline void yuv2plane1_10_c_template(const int16_t *src, uint16_t *dest, int dstW, int big_endian, int output_bits) (output.c:181)
NULL (eval.c:55)
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) (pixfmt.h:80)
static const uint16_t dither[8][8] (vf_gradfun.c:46)
static av_always_inline void yuv2planeX_10_c_template(const int16_t *filter, int filterSize, const int16_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits) (output.c:194)
void (*yuv2packedX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y) -- Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing multi-point vertical scaling between input pixels.
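A condensed sketch of the multi-point vertical filtering a yuv2packedX_fn performs, here writing packed YUYV 4:2:2 (Y0 U Y1 V per pixel pair). The 1 << 18 rounding bias and >> 19 shift follow the 8-bit packed templates in this file; the function and helper names are illustrative, not the library's API.

#include <stdint.h>

static uint8_t clip_u8_yuyv(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* Vertically filter several intermediate lines (one filter tap per line)
 * and emit one packed YUYV422 output line. lumSrc/chrUSrc/chrVSrc hold
 * pointers to the source lines; lumFilter/chrFilter hold the tap weights. */
static void yuv2yuyv422_X_sketch(const int16_t *lumFilter, const int16_t **lumSrc,
                                 int lumFilterSize,
                                 const int16_t *chrFilter, const int16_t **chrUSrc,
                                 const int16_t **chrVSrc, int chrFilterSize,
                                 uint8_t *dest, int dstW)
{
    for (int i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = 1 << 18, Y2 = 1 << 18, U = 1 << 18, V = 1 << 18;

        for (int j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2]     * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
        }
        for (int j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }
        dest[4 * i + 0] = clip_u8_yuyv(Y1 >> 19);
        dest[4 * i + 1] = clip_u8_yuyv(U  >> 19);
        dest[4 * i + 2] = clip_u8_yuyv(Y2 >> 19);
        dest[4 * i + 3] = clip_u8_yuyv(V  >> 19);
    }
}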
#define R_B (output.c:579)
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes... (pixdesc.h:80)
yuv2planarX_fn yuv2planeX
planar GBR 4:4:4 30bpp, big-endian (pixfmt.h:165)
static av_always_inline void yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target) (output.c:384)
#define AV_PIX_FMT_RGB32 (pixfmt.h:242)
void (*yuv2packed1_fn)(struct SwsContext *c, const int16_t *lumSrc, const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc, uint8_t *dest, int dstW, int uvalpha, int y) -- Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output without any additional vertical scaling (or point-scaling).
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr (pixfmt.h:60)
static av_always_inline void yuv2rgb48_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **chrUSrc, const int32_t **chrVSrc, int chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target) (output.c:589)
uint8_t * table_gU[256]
static av_always_inline void yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target) (output.c:309)
const uint8_t ff_dither_8x8_220[8][8] (output.c:78)
void (*yuv2planarX_fn)(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset) -- Write one line of horizontally scaled data to planar output with multi-point vertical scaling between input pixels.
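For comparison with the unscaled sketch above, a yuv2planarX_fn accumulates filterSize intermediate lines weighted by the vertical filter taps before writing one 8-bit output line. The << 12 dither scaling and >> 19 shift mirror the 8-bit path in this file (yuv2planeX_8_c); other bit depths use different constants, and the names below are illustrative.

#include <stdint.h>

/* One 8-bit output line produced by vertically filtering filterSize
 * intermediate lines; the dither value is pre-scaled into the same
 * fixed-point domain as the accumulated sum. */
static void yuv2planeX_8_sketch(const int16_t *filter, int filterSize,
                                const int16_t **src, uint8_t *dest, int dstW,
                                const uint8_t *dither, int offset)
{
    for (int i = 0; i < dstW; i++) {
        int val = dither[(i + offset) & 7] << 12;

        for (int j = 0; j < filterSize; j++)
            val += src[j][i] * filter[j];

        val >>= 19;
        dest[i] = val < 0 ? 0 : val > 255 ? 255 : (uint8_t)val;
    }
}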
byte swapping routines
static av_always_inline void yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha) (output.c:993)
static int step (avplay.c:247)
packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1 (pixfmt.h:117)
#define u(width,...)
#define AV_PIX_FMT_BGR565 (pixfmt.h:255)
const uint8_t * chrDither8
static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset) (output.c:252)
packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0 (pixfmt.h:111)
static av_always_inline void yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target) (output.c:504)
packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1 (pixfmt.h:142)
const uint8_t ff_dither_8x8_73[8][8] (output.c:66)
#define output_pixels(pos, Y1, U, Y2, V) (output.c:446)
Y, 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb (pixfmt.h:69)
yuv2packed2_fn yuv2packed2
#define CONFIG_SWSCALE_ALPHA (config.h:408)
Y, 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb (pixfmt.h:68)
static void yuv2nv12cX_c(SwsContext *c, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int chrDstW) (output.c:262)
planar GBRA 4:4:4:4 32bpp (pixfmt.h:208)
planar GBR 4:4:4 27bpp, little-endian (pixfmt.h:164)
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian (pixfmt.h:106)
#define AV_PIX_FMT_BGR444 (pixfmt.h:257)
void (*yuv2packed2_fn)(struct SwsContext *c, const int16_t *lumSrc[2], const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y) -- Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing bilinear scaling between two input lines.
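The "2" (bilinear) variants blend two adjacent intermediate lines before packing: yalpha and uvalpha are 12-bit weights (0..4096) giving the second line's contribution, so the weighted sum of two 16-bit samples stays in the same fixed-point domain and is shifted down by 19, as in yuv2422_2_c_template. A hedged, self-contained sketch for a YUYV422 writer (names are illustrative):

#include <stdint.h>

static uint8_t clip8_blend(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* Blend two intermediate lines with 12-bit weights and emit packed YUYV. */
static void yuv2yuyv422_2_sketch(const int16_t *buf0, const int16_t *buf1,
                                 const int16_t *ubuf0, const int16_t *ubuf1,
                                 const int16_t *vbuf0, const int16_t *vbuf1,
                                 uint8_t *dest, int dstW,
                                 int yalpha, int uvalpha)
{
    int yalpha1  = 4096 - yalpha;   /* weight of the first luma line   */
    int uvalpha1 = 4096 - uvalpha;  /* weight of the first chroma line */

    for (int i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = (buf0[i * 2]     * yalpha1  + buf1[i * 2]     * yalpha)  >> 19;
        int Y2 = (buf0[i * 2 + 1] * yalpha1  + buf1[i * 2 + 1] * yalpha)  >> 19;
        int U  = (ubuf0[i]        * uvalpha1 + ubuf1[i]        * uvalpha) >> 19;
        int V  = (vbuf0[i]        * uvalpha1 + vbuf1[i]        * uvalpha) >> 19;

        dest[4 * i + 0] = clip8_blend(Y1);
        dest[4 * i + 1] = clip8_blend(U);
        dest[4 * i + 2] = clip8_blend(Y2);
        dest[4 * i + 3] = clip8_blend(V);
    }
}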
const uint8_t * d128 (yuv2rgb.c:481)
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb) (pixfmt.h:83)
#define AV_PIX_FMT_RGB555 (pixfmt.h:251)
av_cold void ff_sws_init_output_funcs(SwsContext *c, yuv2planar1_fn *yuv2plane1, yuv2planarX_fn *yuv2planeX, yuv2interleavedX_fn *yuv2nv12cX, yuv2packed1_fn *yuv2packed1, yuv2packed2_fn *yuv2packed2, yuv2packedX_fn *yuv2packedX, yuv2anyX_fn *yuv2anyX) (output.c:1352)
#define AV_PIX_FMT_RGB32_1 (pixfmt.h:243)
void (*yuv2interleavedX_fn)(struct SwsContext *c, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int dstW) -- Write one line of horizontally scaled chroma to interleaved output with multi-point vertical scaling between input pixels.
void (*yuv2anyX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y) -- Write one line of horizontally scaled Y/U/V/A to YUV/RGB output by doing multi-point vertical scaling between input pixels.
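The interleaved-chroma case (yuv2nv12cX_c above, selected for NV12/NV21) uses the same fixed-point scheme as the planar path but stores U and V bytes alternately in one plane. A hedged sketch of the NV12 ordering; chrDither stands in for the context's chrDither8 pointer, the +3 phase offset for V follows the 8-bit path here, and NV21 simply swaps the two stores.

#include <stdint.h>

static uint8_t clip_u8_nv12(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* Vertically filter U and V and store them interleaved (U,V,U,V,...),
 * as required by NV12's single combined chroma plane. */
static void yuv2nv12_chroma_sketch(const int16_t *chrFilter, int chrFilterSize,
                                   const int16_t **chrUSrc, const int16_t **chrVSrc,
                                   const uint8_t *chrDither, uint8_t *dest,
                                   int chrDstW)
{
    for (int i = 0; i < chrDstW; i++) {
        int u = chrDither[i & 7]       << 12;
        int v = chrDither[(i + 3) & 7] << 12;

        for (int j = 0; j < chrFilterSize; j++) {
            u += chrUSrc[j][i] * chrFilter[j];
            v += chrVSrc[j][i] * chrFilter[j];
        }
        dest[2 * i]     = clip_u8_nv12(u >> 19);
        dest[2 * i + 1] = clip_u8_nv12(v >> 19);
    }
}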
#define AV_PIX_FMT_RGB565 (pixfmt.h:250)
packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1 (pixfmt.h:116)
#define av_always_inline (attributes.h:40)
planar GBR 4:4:4 48bpp, little-endian (pixfmt.h:168)
const uint8_t * d32 (yuv2rgb.c:449)
packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1 (pixfmt.h:141)
#define HAVE_BIGENDIAN (config.h:173)
static av_always_inline void yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha) (output.c:932)
int depth -- Number of bits in the component. (pixdesc.h:57)
int flags -- Flags passed by the user to select scaler algorithm, optimizations, subsampling, etc...
AVPixelFormat -- Pixel format. (pixfmt.h:57)
uint8_t * table_rV[256]
#define accumulate_bit(acc, val) (output.c:298)
planar GBR 4:4:4 30bpp, little-endian (pixfmt.h:166)
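For the 1bpp monowhite/monoblack targets, accumulate_bit shifts one thresholded sample at a time into a byte accumulator that is flushed every 8 pixels. A hedged sketch of the idea; the threshold is illustrative, and the real code in output.c dithers the luma before comparing and inverts the sense for monowhite.

#include <stdint.h>

/* Pack 8 luma samples into one byte, msb first, as the 1bpp monochrome
 * formats expect; assumes dstW is a multiple of 8 for brevity. */
static void pack_mono_sketch(const uint8_t *luma, uint8_t *dest, int dstW)
{
    for (int i = 0; i < dstW; i += 8) {
        uint8_t acc = 0;
        for (int j = 0; j < 8; j++) {
            acc <<= 1;
            acc |= luma[i + j] >= 128;   /* illustrative threshold; 1 = white for monoblack */
        }
        dest[i >> 3] = acc;
    }
}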