#undef PROFILE_THE_BEAST

typedef unsigned char ubyte;
typedef signed char   sbyte;
static const vector unsigned char
    perm_rgb_0 = { 0x00, 0x01, 0x10, 0x02, 0x03, 0x11, 0x04, 0x05,
                   0x12, 0x06, 0x07, 0x13, 0x08, 0x09, 0x14, 0x0a },
    perm_rgb_1 = { 0x0b, 0x15, 0x0c, 0x0d, 0x16, 0x0e, 0x0f, 0x17,
                   0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
    perm_rgb_2 = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
                   0x00, 0x01, 0x18, 0x02, 0x03, 0x19, 0x04, 0x05 },
    perm_rgb_3 = { 0x1a, 0x06, 0x07, 0x1b, 0x08, 0x09, 0x1c, 0x0a,
                   0x0b, 0x1d, 0x0c, 0x0d, 0x1e, 0x0e, 0x0f, 0x1f };
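/*
 * The four permutation tables above are consumed by vec_merge3() below:
 * together with vec_mergeh()/vec_mergel() they interleave three 16-byte
 * channel vectors into 48 bytes of packed 3-bytes-per-pixel output.
 * The helper below is an illustrative scalar model of that net effect only
 * (it is not part of libswscale's API).
 */
static inline void merge3_scalar_sketch(const unsigned char c0[16],
                                        const unsigned char c1[16],
                                        const unsigned char c2[16],
                                        unsigned char out[48])
{
    /* out = c0[0] c1[0] c2[0]  c0[1] c1[1] c2[1] ... c0[15] c1[15] c2[15] */
    for (int i = 0; i < 16; i++) {
        out[3 * i + 0] = c0[i];
        out[3 * i + 1] = c1[i];
        out[3 * i + 2] = c2[i];
    }
}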
/* Interleave three channel vectors into three packed 3-byte-per-pixel
 * vectors; the per-pixel byte order is x0, x1, x2 (third, second, first
 * macro argument). */
#define vec_merge3(x2, x1, x0, y0, y1, y2) \
    do { \
        __typeof__(x0) o0, o2, o3; \
        o0 = vec_mergeh(x0, x1); \
        y0 = vec_perm(o0, x2, perm_rgb_0); \
        o2 = vec_perm(o0, x2, perm_rgb_1); \
        o3 = vec_mergel(x0, x1); \
        y1 = vec_perm(o3, o2, perm_rgb_2); \
        y2 = vec_perm(o3, o2, perm_rgb_3); \
    } while (0)

/* Store 16 packed pixels, per-pixel byte order x2, x1, x0. */
#define vec_mstbgr24(x0, x1, x2, ptr) \
    do { \
        __typeof__(x0) _0, _1, _2; \
        vec_merge3(x0, x1, x2, _0, _1, _2); \
        vec_st(_0, 0, ptr++); \
        vec_st(_1, 0, ptr++); \
        vec_st(_2, 0, ptr++); \
    } while (0)

/* Store 16 packed pixels, per-pixel byte order x0, x1, x2. */
#define vec_mstrgb24(x0, x1, x2, ptr) \
    do { \
        __typeof__(x0) _0, _1, _2; \
        vec_merge3(x2, x1, x0, _0, _1, _2); \
        vec_st(_0, 0, ptr++); \
        vec_st(_1, 0, ptr++); \
        vec_st(_2, 0, ptr++); \
    } while (0)

/* Interleave four channel vectors into 16 packed 4-byte pixels. */
#define vec_mstrgb32(T, x0, x1, x2, x3, ptr) \
    do { \
        T _0, _1, _2, _3; \
        _0 = vec_mergeh(x0, x1); \
        _1 = vec_mergeh(x2, x3); \
        _2 = (T) vec_mergeh((vector unsigned short) _0, \
                            (vector unsigned short) _1); \
        _3 = (T) vec_mergel((vector unsigned short) _0, \
                            (vector unsigned short) _1); \
        vec_st(_2, 0 * 16, (T *) ptr); \
        vec_st(_3, 1 * 16, (T *) ptr); \
        _0 = vec_mergel(x0, x1); \
        _1 = vec_mergel(x2, x3); \
        _2 = (T) vec_mergeh((vector unsigned short) _0, \
                            (vector unsigned short) _1); \
        _3 = (T) vec_mergel((vector unsigned short) _0, \
                            (vector unsigned short) _1); \
        vec_st(_2, 2 * 16, (T *) ptr); \
        vec_st(_3, 3 * 16, (T *) ptr); \
        ptr += 4; \
    } while (0)

/* Unpack the high/low eight unsigned bytes of x into zero-extended
 * signed shorts. */
#define vec_unh(x) \
    (vector signed short) \
        vec_perm(x, (__typeof__(x)) { 0 }, \
                 ((vector unsigned char) { \
                     0x10, 0x00, 0x10, 0x01, 0x10, 0x02, 0x10, 0x03, \
                     0x10, 0x04, 0x10, 0x05, 0x10, 0x06, 0x10, 0x07 }))

#define vec_unl(x) \
    (vector signed short) \
        vec_perm(x, (__typeof__(x)) { 0 }, \
                 ((vector unsigned char) { \
                     0x10, 0x08, 0x10, 0x09, 0x10, 0x0A, 0x10, 0x0B, \
                     0x10, 0x0C, 0x10, 0x0D, 0x10, 0x0E, 0x10, 0x0F }))

#define vec_clip_s16(x) \
    vec_max(vec_min(x, ((vector signed short) { \
                        235, 235, 235, 235, 235, 235, 235, 235 })), \
            ((vector signed short) { 16, 16, 16, 16, 16, 16, 16, 16 }))

#define vec_packclp(x, y) \
    (vector unsigned char) \
        vec_packs((vector unsigned short) \
                      vec_max(x, ((vector signed short) { 0 })), \
                  (vector unsigned short) \
                      vec_max(y, ((vector signed short) { 0 })))

static inline void cvtyuvtoRGB(SwsContext *c, vector signed short Y,
                               vector signed short U, vector signed short V,
                               vector signed short *R, vector signed short *G,
                               vector signed short *B)
{
    vector signed short vx, ux, uvx;

    Y = vec_mradds(Y, c->CY, c->OY);
    U = vec_sub(U, (vector signed short)
                       vec_splat((vector signed short) { 128 }, 0));
    V = vec_sub(V, (vector signed short)
                       vec_splat((vector signed short) { 128 }, 0));

    /* ux = (CBU * (u << CSHIFT) + 0x4000) >> 15 */
    ux = vec_sl(U, c->CSHIFT);
    *B = vec_mradds(ux, c->CBU, Y);

    /* vx = (CRV * (v << CSHIFT) + 0x4000) >> 15 */
    vx = vec_sl(V, c->CSHIFT);
    *R = vec_mradds(vx, c->CRV, Y);

    /* uvx = ((CGU * u) + (CGV * v)) >> 15 */
    uvx = vec_mradds(U, c->CGU, Y);
    *G  = vec_mradds(V, c->CGV, uvx);
}
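/*
 * Scalar model of one lane of cvtyuvtoRGB() above (illustration only; these
 * helpers are not part of the file's API).  vec_mradds(a, b, c) computes
 * saturate(((a * b + 0x4000) >> 15) + c) per 16-bit lane, i.e. a rounded
 * Q0.15 multiply followed by a saturating add.  The U/V inputs to the B and
 * R terms are pre-shifted left by CSHIFT (2), so CBU/CRV are stored at a
 * quarter of their nominal value.
 */
static inline short mradds_scalar(short a, short b, short c)
{
    int v = ((a * b + 0x4000) >> 15) + c;
    return v > 32767 ? 32767 : v < -32768 ? -32768 : v;
}

static inline void cvtyuvtoRGB_scalar(short cy, short oy, short crv, short cbu,
                                      short cgu, short cgv,
                                      short y, short u, short v,
                                      short *r, short *g, short *b)
{
    y  = mradds_scalar(y, cy, oy);
    u -= 128;
    v -= 128;
    *b = mradds_scalar(u << 2, cbu, y);
    *r = mradds_scalar(v << 2, crv, y);
    *g = mradds_scalar(v, cgv, mradds_scalar(u, cgu, y));
}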
#define DEFCSP420_CVT(name, out_pixels) \
static int altivec_ ## name(SwsContext *c, const unsigned char **in, \
                            int *instrides, int srcSliceY, int srcSliceH, \
                            unsigned char **oplanes, int *outstrides) \
{ \
    int w = c->srcW; \
    int h = srcSliceH; \
    int i, j; \
    int instrides_scl[3]; \
    vector unsigned char y0, y1; \
\
    vector signed char u, v; \
\
    vector signed short Y0, Y1, Y2, Y3; \
    vector signed short U, V; \
    vector signed short vx, ux, uvx; \
    vector signed short vx0, ux0, uvx0; \
    vector signed short vx1, ux1, uvx1; \
    vector signed short R0, G0, B0; \
    vector signed short R1, G1, B1; \
    vector unsigned char R, G, B; \
\
    const vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP; \
    vector unsigned char align_perm; \
\
    vector signed short lCY       = c->CY; \
    vector signed short lOY       = c->OY; \
    vector signed short lCRV      = c->CRV; \
    vector signed short lCBU      = c->CBU; \
    vector signed short lCGU      = c->CGU; \
    vector signed short lCGV      = c->CGV; \
    vector unsigned short lCSHIFT = c->CSHIFT; \
\
    const ubyte *y1i = in[0]; \
    const ubyte *y2i = in[0] + instrides[0]; \
    const ubyte *ui  = in[1]; \
    const ubyte *vi  = in[2]; \
\
    vector unsigned char *oute = \
        (vector unsigned char *) \
            (oplanes[0] + srcSliceY * outstrides[0]); \
    vector unsigned char *outo = \
        (vector unsigned char *) \
            (oplanes[0] + srcSliceY * outstrides[0] + outstrides[0]); \
\
    /* each inner loop pass advances y{1,2}i by w */ \
    instrides_scl[0] = instrides[0] * 2 - w; \
    /* each inner loop pass advances ui by w / 2 */ \
    instrides_scl[1] = instrides[1] - w / 2; \
    /* each inner loop pass advances vi by w / 2 */ \
    instrides_scl[2] = instrides[2] - w / 2; \
\
    for (i = 0; i < h / 2; i++) { \
        vec_dstst(outo, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 0); \
        vec_dstst(oute, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 1); \
\
        for (j = 0; j < w / 16; j++) { \
            y1ivP = (const vector unsigned char *) y1i; \
            y2ivP = (const vector unsigned char *) y2i; \
            uivP  = (const vector unsigned char *) ui; \
            vivP  = (const vector unsigned char *) vi; \
\
            align_perm = vec_lvsl(0, y1i); \
            y0 = (vector unsigned char) \
                     vec_perm(y1ivP[0], y1ivP[1], align_perm); \
\
            align_perm = vec_lvsl(0, y2i); \
            y1 = (vector unsigned char) \
                     vec_perm(y2ivP[0], y2ivP[1], align_perm); \
\
            align_perm = vec_lvsl(0, ui); \
            u = (vector signed char) \
                    vec_perm(uivP[0], uivP[1], align_perm); \
\
            align_perm = vec_lvsl(0, vi); \
            v = (vector signed char) \
                    vec_perm(vivP[0], vivP[1], align_perm); \
\
            u = (vector signed char) \
                    vec_sub(u, \
                            (vector signed char) \
                                vec_splat((vector signed char) { 128 }, 0)); \
            v = (vector signed char) \
                    vec_sub(v, \
                            (vector signed char) \
                                vec_splat((vector signed char) { 128 }, 0)); \
\
            U = vec_unpackh(u); \
            V = vec_unpackh(v); \
\
            Y0 = vec_unh(y0); \
            Y1 = vec_unl(y0); \
            Y2 = vec_unh(y1); \
            Y3 = vec_unl(y1); \
\
            Y0 = vec_mradds(Y0, lCY, lOY); \
            Y1 = vec_mradds(Y1, lCY, lOY); \
            Y2 = vec_mradds(Y2, lCY, lOY); \
            Y3 = vec_mradds(Y3, lCY, lOY); \
\
            /* ux = (CBU * (u << CSHIFT) + 0x4000) >> 15 */ \
            ux  = vec_sl(U, lCSHIFT); \
            ux  = vec_mradds(ux, lCBU, (vector signed short) { 0 }); \
            ux0 = vec_mergeh(ux, ux); \
            ux1 = vec_mergel(ux, ux); \
\
            /* vx = (CRV * (v << CSHIFT) + 0x4000) >> 15 */ \
            vx  = vec_sl(V, lCSHIFT); \
            vx  = vec_mradds(vx, lCRV, (vector signed short) { 0 }); \
            vx0 = vec_mergeh(vx, vx); \
            vx1 = vec_mergel(vx, vx); \
\
            /* uvx = ((CGU * u) + (CGV * v)) >> 15 */ \
            uvx  = vec_mradds(U, lCGU, (vector signed short) { 0 }); \
            uvx  = vec_mradds(V, lCGV, uvx); \
            uvx0 = vec_mergeh(uvx, uvx); \
            uvx1 = vec_mergel(uvx, uvx); \
\
            R0 = vec_add(Y0, vx0); \
            G0 = vec_add(Y0, uvx0); \
            B0 = vec_add(Y0, ux0); \
            R1 = vec_add(Y1, vx1); \
            G1 = vec_add(Y1, uvx1); \
            B1 = vec_add(Y1, ux1); \
\
            R = vec_packclp(R0, R1); \
            G = vec_packclp(G0, G1); \
            B = vec_packclp(B0, B1); \
\
            out_pixels(R, G, B, oute); \
\
            R0 = vec_add(Y2, vx0); \
            G0 = vec_add(Y2, uvx0); \
            B0 = vec_add(Y2, ux0); \
            R1 = vec_add(Y3, vx1); \
            G1 = vec_add(Y3, uvx1); \
            B1 = vec_add(Y3, ux1); \
            R  = vec_packclp(R0, R1); \
            G  = vec_packclp(G0, G1); \
            B  = vec_packclp(B0, B1); \
\
            out_pixels(R, G, B, outo); \
\
            y1i += 16; \
            y2i += 16; \
            ui  += 8; \
            vi  += 8; \
        } \
\
        outo += (outstrides[0]) >> 4; \
        oute += (outstrides[0]) >> 4; \
\
        ui  += instrides_scl[1]; \
        vi  += instrides_scl[2]; \
        y1i += instrides_scl[0]; \
        y2i += instrides_scl[0]; \
    } \
\
    return srcSliceH; \
}

#define out_abgr(a, b, c, ptr) \
    vec_mstrgb32(__typeof__(a), ((__typeof__(a)) { 255 }), c, b, a, ptr)
#define out_bgra(a, b, c, ptr) \
    vec_mstrgb32(__typeof__(a), c, b, a, ((__typeof__(a)) { 255 }), ptr)
#define out_rgba(a, b, c, ptr) \
    vec_mstrgb32(__typeof__(a), a, b, c, ((__typeof__(a)) { 255 }), ptr)
#define out_argb(a, b, c, ptr) \
    vec_mstrgb32(__typeof__(a), ((__typeof__(a)) { 255 }), a, b, c, ptr)
#define out_rgb24(a, b, c, ptr) vec_mstrgb24(a, b, c, ptr)
#define out_bgr24(a, b, c, ptr) vec_mstbgr24(a, b, c, ptr)

DEFCSP420_CVT(yuv2_abgr, out_abgr)
DEFCSP420_CVT(yuv2_bgra, out_bgra)
DEFCSP420_CVT(yuv2_rgba, out_rgba)
DEFCSP420_CVT(yuv2_argb, out_argb)
DEFCSP420_CVT(yuv2_rgb24, out_rgb24)
DEFCSP420_CVT(yuv2_bgr24, out_bgr24)
static const vector unsigned char
    demux_u = { 0x10, 0x00, 0x10, 0x00,
                0x10, 0x04, 0x10, 0x04,
                0x10, 0x08, 0x10, 0x08,
                0x10, 0x0c, 0x10, 0x0c },
    demux_v = { 0x10, 0x02, 0x10, 0x02,
                0x10, 0x06, 0x10, 0x06,
                0x10, 0x0A, 0x10, 0x0A,
                0x10, 0x0E, 0x10, 0x0E },
    demux_y = { 0x10, 0x01, 0x10, 0x03,
                0x10, 0x05, 0x10, 0x07,
                0x10, 0x09, 0x10, 0x0B,
                0x10, 0x0D, 0x10, 0x0F };
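/*
 * Scalar model of what the demux_* permutes above do (illustration only;
 * this helper is not part of the file's API).  UYVY stores 16 bytes as
 * U0 Y0 V0 Y1  U1 Y2 V1 Y3 ..., and permute index 0x10 selects a byte from
 * the all-zero second operand, so each result lane is a zero-extended 16-bit
 * sample: Y0..Y7, and each U/V duplicated for the two Y samples it covers.
 */
static inline void uyvy_demux_scalar_sketch(const unsigned char uyvy[16],
                                            short U[8], short V[8], short Y[8])
{
    for (int i = 0; i < 4; i++) {        /* four UYVY groups of 4 bytes */
        U[2 * i]     = U[2 * i + 1] = uyvy[4 * i + 0];
        V[2 * i]     = V[2 * i + 1] = uyvy[4 * i + 2];
        Y[2 * i]     = uyvy[4 * i + 1];
        Y[2 * i + 1] = uyvy[4 * i + 3];
    }
}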
static int altivec_uyvy_rgb32(SwsContext *c, const unsigned char **in,
                              int *instrides, int srcSliceY, int srcSliceH,
                              unsigned char **oplanes, int *outstrides)
{
    int w = c->srcW;
    int h = srcSliceH;
    int i, j;
    vector unsigned char uyvy;
    vector signed short Y, U, V;
    vector signed short R0, G0, B0, R1, G1, B1;
    vector unsigned char R, G, B;
    vector unsigned char *out;
    const ubyte *img;

    img = in[0];
    out = (vector unsigned char *) (oplanes[0] + srcSliceY * outstrides[0]);

    for (i = 0; i < h; i++)
        for (j = 0; j < w / 16; j++) {
            uyvy = vec_ld(0, img);

            U = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_u);
            V = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_v);
            Y = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_y);

            cvtyuvtoRGB(c, Y, U, V, &R0, &G0, &B0);

            uyvy = vec_ld(16, img);

            U = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_u);
            V = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_v);
            Y = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_y);

            cvtyuvtoRGB(c, Y, U, V, &R1, &G1, &B1);

            R = vec_packclp(R0, R1);
            G = vec_packclp(G0, G1);
            B = vec_packclp(B0, B1);

            out_rgba(R, G, B, out);

            img += 32;
        }

    return srcSliceH;
}
/* Pick the AltiVec converter matching the source/destination formats, or
 * return NULL so that libswscale falls back to its generic paths. */
av_cold SwsFunc ff_yuv2rgb_init_ppc(SwsContext *c)
{
    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
        return NULL;

    /* The vector code assumes a width that is a multiple of 16. */
    if ((c->srcW & 0xf) != 0)
        return NULL;

    switch (c->srcFormat) {
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_NV12:
    case AV_PIX_FMT_NV21:
        if ((c->srcH & 0x1) != 0)
            return NULL;

        switch (c->dstFormat) {
        case AV_PIX_FMT_RGB24:
            return altivec_yuv2_rgb24;
        case AV_PIX_FMT_BGR24:
            return altivec_yuv2_bgr24;
        case AV_PIX_FMT_ARGB:
            return altivec_yuv2_argb;
        case AV_PIX_FMT_ABGR:
            return altivec_yuv2_abgr;
        case AV_PIX_FMT_RGBA:
            return altivec_yuv2_rgba;
        case AV_PIX_FMT_BGRA:
            return altivec_yuv2_bgra;
        default:
            return NULL;
        }
        break;

    case AV_PIX_FMT_UYVY422:
        switch (c->dstFormat) {
        case AV_PIX_FMT_RGB32:
            return altivec_uyvy_rgb32;
        default:
            return NULL;
        }
        break;
    }
    return NULL;
}
av_cold void ff_yuv2rgb_init_tables_ppc(SwsContext *c,
                                        const int inv_table[4],
                                        int brightness,
                                        int contrast,
                                        int saturation)
{
    union {
        DECLARE_ALIGNED(16, signed short, tmp)[8];
        vector signed short vec;
    } buf;

    buf.tmp[0] = ((0xffffLL) * contrast >> 8) >> 9;                              // cy
    buf.tmp[1] = -256 * brightness;                                              // oy
    buf.tmp[2] = (inv_table[0] >> 3) * (contrast >> 16) * (saturation >> 16);    // crv
    buf.tmp[3] = (inv_table[1] >> 3) * (contrast >> 16) * (saturation >> 16);    // cbu
    buf.tmp[4] = -((inv_table[2] >> 1) * (contrast >> 16) * (saturation >> 16)); // cgu
    buf.tmp[5] = -((inv_table[3] >> 1) * (contrast >> 16) * (saturation >> 16)); // cgv

    c->CSHIFT = (vector unsigned short) vec_splat_u16(2);
    c->CY     = vec_splat((vector signed short) buf.vec, 0);
    c->OY     = vec_splat((vector signed short) buf.vec, 1);
    c->CRV    = vec_splat((vector signed short) buf.vec, 2);
    c->CBU    = vec_splat((vector signed short) buf.vec, 3);
    c->CGU    = vec_splat((vector signed short) buf.vec, 4);
    c->CGV    = vec_splat((vector signed short) buf.vec, 5);
}
static av_always_inline void
yuv2packedX_altivec(SwsContext *c,
                    const int16_t *lumFilter,
                    const int16_t **lumSrc,
                    int lumFilterSize,
                    const int16_t *chrFilter,
                    const int16_t **chrUSrc,
                    const int16_t **chrVSrc,
                    int chrFilterSize,
                    const int16_t **alpSrc,
                    uint8_t *dest,
                    int dstW, int dstY,
                    enum AVPixelFormat target)
{
    int i, j;
    vector signed short X, X0, X1, Y0, U0, V0, Y1, U1, V1, U, V;
    vector signed short R0, G0, B0, R1, G1, B1;

    vector unsigned char R, G, B;
    vector unsigned char *out, *nout;

    vector signed short RND   = vec_splat_s16(1 << 3);
    vector unsigned short SCL = vec_splat_u16(4);
    DECLARE_ALIGNED(16, unsigned int, scratch)[16];

    vector signed short *YCoeffs, *CCoeffs;

    YCoeffs = c->vYCoeffsBank + dstY * lumFilterSize;
    CCoeffs = c->vCCoeffsBank + dstY * chrFilterSize;

    out = (vector unsigned char *) dest;

    for (i = 0; i < dstW; i += 16) {
        Y0 = RND;
        Y1 = RND;
        /* extract 16 coeffs from lumSrc */
        for (j = 0; j < lumFilterSize; j++) {
            X0 = vec_ld(0, &lumSrc[j][i]);
            X1 = vec_ld(16, &lumSrc[j][i]);
            Y0 = vec_mradds(X0, YCoeffs[j], Y0);
            Y1 = vec_mradds(X1, YCoeffs[j], Y1);
        }

        U = RND;
        V = RND;
        /* extract 8 coeffs from U, V */
        for (j = 0; j < chrFilterSize; j++) {
            X = vec_ld(0, &chrUSrc[j][i / 2]);
            U = vec_mradds(X, CCoeffs[j], U);
            X = vec_ld(0, &chrVSrc[j][i / 2]);
            V = vec_mradds(X, CCoeffs[j], V);
        }

        /* scale and clip signals */
        Y0 = vec_sra(Y0, SCL);
        Y1 = vec_sra(Y1, SCL);
        U  = vec_sra(U, SCL);
        V  = vec_sra(V, SCL);

        Y0 = vec_clip_s16(Y0);
        Y1 = vec_clip_s16(Y1);
        U  = vec_clip_s16(U);
        V  = vec_clip_s16(V);

        /* duplicate each chroma sample so it lines up with two luma samples */
        U0 = vec_mergeh(U, U);
        V0 = vec_mergeh(V, V);

        U1 = vec_mergel(U, U);
        V1 = vec_mergel(V, V);

        cvtyuvtoRGB(c, Y0, U0, V0, &R0, &G0, &B0);
        cvtyuvtoRGB(c, Y1, U1, V1, &R1, &G1, &B1);

        R = vec_packclp(R0, R1);
        G = vec_packclp(G0, G1);
        B = vec_packclp(B0, B1);

        switch (target) {
        case AV_PIX_FMT_ABGR:
            out_abgr(R, G, B, out);
            break;
        case AV_PIX_FMT_BGRA:
            out_bgra(R, G, B, out);
            break;
        case AV_PIX_FMT_RGBA:
            out_rgba(R, G, B, out);
            break;
        case AV_PIX_FMT_ARGB:
            out_argb(R, G, B, out);
            break;
        case AV_PIX_FMT_RGB24:
            out_rgb24(R, G, B, out);
            break;
        case AV_PIX_FMT_BGR24:
            out_bgr24(R, G, B, out);
            break;
        default:
        {
            /* If this is reached, the caller should have used a C path. */
            static int printed_error_message;
            if (!printed_error_message) {
                av_log(c, AV_LOG_ERROR,
                       "altivec_yuv2packedX doesn't support %s output\n",
                       sws_format_name(c->dstFormat));
                printed_error_message = 1;
            }
            return;
        }
        }
    }

    if (i < dstW) {
        /* Convert the remaining pixels into a scratch buffer, then copy only
         * the valid bytes to dest. */
        i -= 16;

        Y0 = RND;
        Y1 = RND;
        /* extract 16 coeffs from lumSrc */
        for (j = 0; j < lumFilterSize; j++) {
            X0 = vec_ld(0, &lumSrc[j][i]);
            X1 = vec_ld(16, &lumSrc[j][i]);
            Y0 = vec_mradds(X0, YCoeffs[j], Y0);
            Y1 = vec_mradds(X1, YCoeffs[j], Y1);
        }

        U = RND;
        V = RND;
        /* extract 8 coeffs from U, V */
        for (j = 0; j < chrFilterSize; j++) {
            X = vec_ld(0, &chrUSrc[j][i / 2]);
            U = vec_mradds(X, CCoeffs[j], U);
            X = vec_ld(0, &chrVSrc[j][i / 2]);
            V = vec_mradds(X, CCoeffs[j], V);
        }

        /* scale and clip signals */
        Y0 = vec_sra(Y0, SCL);
        Y1 = vec_sra(Y1, SCL);
        U  = vec_sra(U, SCL);
        V  = vec_sra(V, SCL);

        Y0 = vec_clip_s16(Y0);
        Y1 = vec_clip_s16(Y1);
        U  = vec_clip_s16(U);
        V  = vec_clip_s16(V);

        U0 = vec_mergeh(U, U);
        V0 = vec_mergeh(V, V);

        U1 = vec_mergel(U, U);
        V1 = vec_mergel(V, V);

        cvtyuvtoRGB(c, Y0, U0, V0, &R0, &G0, &B0);
        cvtyuvtoRGB(c, Y1, U1, V1, &R1, &G1, &B1);

        R = vec_packclp(R0, R1);
        G = vec_packclp(G0, G1);
        B = vec_packclp(B0, B1);

        nout = (vector unsigned char *) scratch;
        switch (target) {
        case AV_PIX_FMT_ABGR:
            out_abgr(R, G, B, nout);
            break;
        case AV_PIX_FMT_BGRA:
            out_bgra(R, G, B, nout);
            break;
        case AV_PIX_FMT_RGBA:
            out_rgba(R, G, B, nout);
            break;
        case AV_PIX_FMT_ARGB:
            out_argb(R, G, B, nout);
            break;
        case AV_PIX_FMT_RGB24:
            out_rgb24(R, G, B, nout);
            break;
        case AV_PIX_FMT_BGR24:
            out_bgr24(R, G, B, nout);
            break;
        default:
            av_log(c, AV_LOG_ERROR,
                   "altivec_yuv2packedX doesn't support %s output\n",
                   sws_format_name(c->dstFormat));
            return;
        }

        memcpy(&((uint32_t *) dest)[i], scratch, (dstW - i) / 4);
    }
}
#define YUV2PACKEDX_WRAPPER(suffix, pixfmt) \
void ff_yuv2 ## suffix ## _X_altivec(SwsContext *c, \
                                     const int16_t *lumFilter, \
                                     const int16_t **lumSrc, \
                                     int lumFilterSize, \
                                     const int16_t *chrFilter, \
                                     const int16_t **chrUSrc, \
                                     const int16_t **chrVSrc, \
                                     int chrFilterSize, \
                                     const int16_t **alpSrc, \
                                     uint8_t *dest, int dstW, int dstY) \
{ \
    yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize, \
                        chrFilter, chrUSrc, chrVSrc, \
                        chrFilterSize, alpSrc, \
                        dest, dstW, dstY, pixfmt); \
}
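/*
 * Illustration only; the actual instantiations of this macro are not part of
 * this excerpt.  A single instantiation like the one below (kept disabled
 * here) would define ff_yuv2rgba_X_altivec(), with the target pixel format
 * fixed at compile time so the switch in yuv2packedX_altivec() folds away:
 */
#if 0
YUV2PACKEDX_WRAPPER(rgba, AV_PIX_FMT_RGBA)
#endif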