#define FREEZE_INTERVAL 128

        int frontier  = 1 << avctx->trellis;
        int max_paths = frontier * FREEZE_INTERVAL;
        FF_ALLOC_OR_GOTO(avctx, s->paths,
                         max_paths * sizeof(*s->paths), error);
        FF_ALLOC_OR_GOTO(avctx, s->node_buf,
                         2 * frontier * sizeof(*s->node_buf), error);
        FF_ALLOC_OR_GOTO(avctx, s->nodep_buf,
                         2 * frontier * sizeof(*s->nodep_buf), error);
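/* A minimal sketch (not verbatim source) of the cleanup these allocations
 * imply, assuming the buffers live in ADPCMEncodeContext and are released
 * with av_freep(); the function name below is hypothetical, used only to
 * illustrate what adpcm_encode_close() is expected to do. */
static av_cold int adpcm_encode_close_sketch(AVCodecContext *avctx)
{
    ADPCMEncodeContext *s = avctx->priv_data;
    av_freep(&s->paths);
    av_freep(&s->node_buf);
    av_freep(&s->nodep_buf);
    return 0;
}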
        bytestream_put_le16(&extradata, avctx->frame_size);
        bytestream_put_le16(&extradata, 7);
        for (i = 0; i < 7; i++) {
            bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff1[i] * 4);
            bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff2[i] * 4);
        }
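/* The extradata written above follows the MS ADPCM WAVEFORMATEX layout:
 * samples per block (frame_size), the coefficient count (7), then seven
 * pairs of predictor coefficients. ff_adpcm_AdaptCoeff1/2 are stored
 * pre-divided by 4 to fit in 8 bits, hence the scaling back up by 4. */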
    int diff = step >> 3;

    for (mask = 4; mask;) {
    nibble = (nibble + bias) / c->idelta;
    nibble = av_clip(nibble, -8, 7) & 0x0F;

    predictor += ((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;

    c->sample1 = av_clip_int16(predictor);
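/* Worked example of the quantization above (illustrative numbers, not from
 * the source): with c->idelta = 16 and a prediction residual of +40,
 * bias = idelta / 2 = 8, so nibble = (40 + 8) / 16 = 3; the predictor is
 * then moved by 3 * 16 = 48, leaving a quantization error of 8. */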
    nibble = FFMIN(7, abs(delta) * 4 / c->step) + (delta < 0) * 8;

    c->step = av_clip(c->step, 127, 24567);
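/* Worked example (illustrative values): with c->step = 127 and delta = 200,
 * abs(delta) * 4 / step = 800 / 127 = 6, so the magnitude bits are 6 and the
 * sign bit stays clear; the step is afterwards rescaled via
 * ff_adpcm_yamaha_indexscale[nibble] and clamped to [127, 24567] as shown. */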
static void adpcm_compress_trellis(AVCodecContext *avctx,
                                   const int16_t *samples, uint8_t *dst,
                                   ADPCMChannelStatus *c, int n, int stride)
{
    const int frontier = 1 << avctx->trellis;
    int pathn = 0, froze = -1, i, j, k, generation = 0;

    memset(hash, 0xff, 65536 * sizeof(*hash));

    memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
    nodes[0] = node_buf + frontier;

            nodes[0]->step = 127;

    for (i = 0; i < n; i++) {

        memset(nodes_next, 0, frontier * sizeof(TrellisNode*));
        for (j = 0; j < frontier && nodes[j]; j++) {
            const int range = (j < frontier / 2) ? 1 : 0;
            const int step  = nodes[j]->step;

                const int predictor = ((nodes[j]->sample1 * c->coeff1) +
                                       (nodes[j]->sample2 * c->coeff2)) / 64;
                const int div  = (sample - predictor) / step;
                const int nmin = av_clip(div - range, -8, 6);
                const int nmax = av_clip(div + range, -7, 7);
                for (nidx = nmin; nidx <= nmax; nidx++) {
                    const int nibble = nidx & 0xf;
                    int dec_sample   = predictor + nidx * step;
#define STORE_NODE(NAME, STEP_INDEX)\
                    int d;\
                    uint32_t ssd;\
                    int pos;\
                    TrellisNode *u;\
                    uint8_t *h;\
                    dec_sample = av_clip_int16(dec_sample);\
                    d   = sample - dec_sample;\
                    ssd = nodes[j]->ssd + d*d;\
                    if (ssd < nodes[j]->ssd)\
                        goto next_##NAME;\
                    h = &hash[(uint16_t) dec_sample];\
                    if (*h == generation)\
                        goto next_##NAME;\
                    if (heap_pos < frontier) {\
                        pos = heap_pos++;\
                    } else {\
                        pos = (frontier >> 1) +\
                              (heap_pos & ((frontier >> 1) - 1));\
                        if (ssd > nodes_next[pos]->ssd)\
                            goto next_##NAME;\
                        heap_pos++;\
                    }\
                    *h = generation;\
                    u  = nodes_next[pos];\
                    if (!u) {\
                        assert(pathn < FREEZE_INTERVAL << avctx->trellis);\
                        u = t++;\
                        nodes_next[pos] = u;\
                        u->path = pathn++;\
                    }\
                    u->ssd  = ssd;\
                    u->step = STEP_INDEX;\
                    u->sample2 = nodes[j]->sample1;\
                    u->sample1 = dec_sample;\
                    paths[u->path].nibble = nibble;\
                    paths[u->path].prev   = nodes[j]->path;\
                    while (pos > 0) {\
                        int parent = (pos - 1) >> 1;\
                        if (nodes_next[parent]->ssd <= ssd)\
                            break;\
                        FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
                        pos = parent;\
                    }\
                    next_##NAME:;

#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
                const int predictor = nodes[j]->sample1;\
                const int div  = (sample - predictor) * 4 / STEP_TABLE;\
                int nmin = av_clip(div - range, -7, 6);\
                int nmax = av_clip(div + range, -6, 7);\
                if (nmin <= 0)\
                    nmin--;\
                if (nmax < 0)\
                    nmax--;\
                for (nidx = nmin; nidx <= nmax; nidx++) {\
                    const int nibble = nidx < 0 ? 7 - nidx : nidx;\
                    int dec_sample = predictor +\
                                     (STEP_TABLE *\
                                      ff_adpcm_yamaha_difflookup[nibble]) / 8;\
                    STORE_NODE(NAME, STEP_INDEX);\
                }

        if (generation == 255) {
            memset(hash, 0xff, 65536 * sizeof(*hash));
            generation = 0;
        }
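/* Illustrative example of the candidate window searched above (numbers are
 * made up, not from the source): with step = 16, predictor = 100 and
 * sample = 180, div = (180 - 100) / 16 = 5; with range = 1 the loop tries
 * nidx = 4..6, and STORE_NODE keeps at most `frontier` surviving candidates,
 * ordered by accumulated squared error (ssd), in a small binary heap. */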
        if (nodes[0]->ssd > (1 << 28)) {
            for (j = 1; j < frontier && nodes[j]; j++)
                nodes[j]->ssd -= nodes[0]->ssd;
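/* Rebasing every node's ssd against the current best presumably keeps the
 * 32-bit accumulators from overflowing on long runs; only the relative
 * ordering of the candidates matters for the search. */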
            p = &paths[nodes[0]->path];
            for (k = i; k > froze; k--) {
                dst[k] = p->nibble;
                p      = &paths[p->prev];
            }

            memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*));

    p = &paths[nodes[0]->path];
    for (i = n - 1; i > froze; i--) {
        dst[i] = p->nibble;
        p      = &paths[p->prev];
    }
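/* Both loops above walk the winning path backwards through paths[].prev,
 * emitting one 4-bit code per input sample: the first runs every
 * FREEZE_INTERVAL samples so the path history can be recycled, the second
 * flushes whatever remains at the end of the block. */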
    c->sample1 = nodes[0]->sample1;
    c->sample2 = nodes[0]->sample2;
    c->step    = nodes[0]->step;
    c->idelta  = nodes[0]->step;
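/* Sketch of the node/path bookkeeping the trellis search relies on. Field
 * names are inferred from their uses above; the real definitions sit earlier
 * in adpcmenc.c and may differ in detail. */
typedef struct TrellisPath {
    int nibble; /* 4-bit code chosen at this step          */
    int prev;   /* index of the previous step on this path */
} TrellisPath;

typedef struct TrellisNode {
    uint32_t ssd;     /* accumulated squared error of this candidate */
    int      path;    /* index into paths[] for backtracking         */
    int      sample1; /* most recent reconstructed sample            */
    int      sample2; /* second most recent reconstructed sample     */
    int      step;    /* per-codec step / idelta state               */
} TrellisNode;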
static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                              const AVFrame *frame, int *got_packet_ptr)
{
    int n, i, ch, st, pkt_size, ret;
    const int16_t *samples;

    samples = (const int16_t *)frame->data[0];
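/* For the interleaved S16 codecs, samples points at the packed channel data
 * in frame->data[0]; the planar (S16P) paths later in this function read
 * per-channel pointers from frame->extended_data instead, which is what the
 * samples_p accesses below assume. */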
        for (ch = 0; ch < avctx->channels; ch++) {

            for (ch = 0; ch < avctx->channels; ch++) {
                adpcm_compress_trellis(avctx, &samples_p[ch][1],
                                       buf + ch * blocks * 8, &c->status[ch],
                                       blocks * 8, 1);
            }
            for (i = 0; i < blocks; i++) {
                for (ch = 0; ch < avctx->channels; ch++) {
                    uint8_t *buf1 = buf + ch * blocks * 8 + i * 8;
                    for (j = 0; j < 8; j += 2)
                        *dst++ = buf1[j] | (buf1[j + 1] << 4);
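/* Packing example for the interleave above (illustrative values): two
 * consecutive 4-bit codes 0x3 and 0xA are stored as the byte 0xA3, i.e. the
 * earlier sample occupies the low nibble in the IMA WAV layout. */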
            for (i = 0; i < blocks; i++) {
                for (ch = 0; ch < avctx->channels; ch++) {

                    const int16_t *smp = &samples_p[ch][1 + i * 8];
                    for (j = 0; j < 8; j += 2) {
        for (ch = 0; ch < avctx->channels; ch++) {

                for (i = 0; i < 64; i++)

                for (i = 0; i < 64; i += 2) {
        for (i = 0; i < avctx->channels; i++) {

                adpcm_compress_trellis(avctx, samples + avctx->channels + 1,
                                       buf + n, &c->status[1], n,
                                       avctx->channels);
            for (i = 0; i < n; i++) {

                    put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1],
                                                               samples[2 * i + 1]));
        for (i = 0; i < avctx->channels; i++) {

        for (i = 0; i < avctx->channels; i++) {

        for (i = 0; i < avctx->channels; i++)

        for (i = 0; i < avctx->channels; i++)

                for (i = 0; i < n; i += 2)
                    *dst++ = (buf[i] << 4) | buf[i + 1];

                for (i = 0; i < n; i++)
                    *dst++ = (buf[i] << 4) | buf[n + i];

            for (i = 7 * avctx->channels; i < avctx->block_align; i++) {
                for (i = 0; i < n; i += 2)
                    *dst++ = buf[i] | (buf[i + 1] << 4);

                for (i = 0; i < n; i++)
                    *dst++ = buf[i] | (buf[n + i] << 4);

        for (n *= avctx->channels; n > 0; n--) {
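/* Note the two nibble orders visible above: the MS packer earlier writes the
 * first sample of a pair into the high nibble ((buf[i] << 4) | buf[i + 1]),
 * while the Yamaha packer here puts it into the low nibble
 * (buf[i] | (buf[i + 1] << 4)), matching what the respective decoders expect. */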
    avpkt->size     = pkt_size;
    *got_packet_ptr = 1;
    return 0;

#define ADPCM_ENCODER(id_, name_, sample_fmts_, long_name_) \
AVCodec ff_ ## name_ ## _encoder = {                        \
    .name           = #name_,                               \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_),     \
    .type           = AVMEDIA_TYPE_AUDIO,                   \
    .id             = id_,                                  \
    .priv_data_size = sizeof(ADPCMEncodeContext),           \
    .init           = adpcm_encode_init,                    \
    .encode2        = adpcm_encode_frame,                   \
    .close          = adpcm_encode_close,                   \
    .sample_fmts    = sample_fmts_,                         \
}
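/* Hedged illustration of how the macro above is used: the registrations at
 * the end of adpcmenc.c instantiate it once per variant, roughly along the
 * lines below (the exact list and long names may differ in the actual file):
 *
 *   ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_QT,  adpcm_ima_qt,  sample_fmts_p, "ADPCM IMA QuickTime");
 *   ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, sample_fmts_p, "ADPCM IMA WAV");
 *   ADPCM_ENCODER(AV_CODEC_ID_ADPCM_MS,      adpcm_ms,      sample_fmts,   "ADPCM Microsoft");
 *   ADPCM_ENCODER(AV_CODEC_ID_ADPCM_SWF,     adpcm_swf,     sample_fmts,   "ADPCM Shockwave Flash");
 *   ADPCM_ENCODER(AV_CODEC_ID_ADPCM_YAMAHA,  adpcm_yamaha,  sample_fmts,   "ADPCM Yamaha");
 *
 * each expanding to an AVCodec named ff_<name>_encoder that binds
 * adpcm_encode_init / adpcm_encode_frame / adpcm_encode_close. */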