Diffstat (limited to 'libavcodec/apedec.c')
-rw-r--r--  libavcodec/apedec.c  104
1 file changed, 61 insertions(+), 43 deletions(-)
diff --git a/libavcodec/apedec.c b/libavcodec/apedec.c
index 35c9a23dd6..1cd7d1e66d 100644
--- a/libavcodec/apedec.c
+++ b/libavcodec/apedec.c
@@ -25,13 +25,13 @@
#include "bytestream.h"
#include "libavutil/audioconvert.h"
#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
/**
* @file
* Monkey's Audio lossless audio decoder
*/
-#define BLOCKS_PER_LOOP 4608
#define MAX_CHANNELS 2
#define MAX_BYTESPERSAMPLE 3
@@ -126,6 +126,7 @@ typedef struct APEPredictor {
/** Decoder context */
typedef struct APEContext {
+ AVClass *class; ///< class for AVOptions
AVCodecContext *avctx;
AVFrame frame;
DSPContext dsp;
@@ -142,8 +143,10 @@ typedef struct APEContext {
int frameflags; ///< frame flags
APEPredictor predictor; ///< predictor used for final reconstruction
- int32_t decoded0[BLOCKS_PER_LOOP]; ///< decoded data for the first channel
- int32_t decoded1[BLOCKS_PER_LOOP]; ///< decoded data for the second channel
+ int32_t *decoded_buffer;
+ int decoded_size;
+ int32_t *decoded[MAX_CHANNELS]; ///< decoded data for each channel
+ int blocks_per_loop; ///< maximum number of samples to decode for each call
int16_t* filterbuf[APE_FILTER_LEVELS]; ///< filter memory
@@ -170,8 +173,9 @@ static av_cold int ape_decode_close(AVCodecContext *avctx)
for (i = 0; i < APE_FILTER_LEVELS; i++)
av_freep(&s->filterbuf[i]);
+ av_freep(&s->decoded_buffer);
av_freep(&s->data);
- s->data_size = 0;
+ s->decoded_size = s->data_size = 0;
return 0;
}
@@ -469,19 +473,13 @@ static inline int ape_decode_value(APEContext *ctx, APERice *rice)
static void entropy_decode(APEContext *ctx, int blockstodecode, int stereo)
{
- int32_t *decoded0 = ctx->decoded0;
- int32_t *decoded1 = ctx->decoded1;
+ int32_t *decoded0 = ctx->decoded[0];
+ int32_t *decoded1 = ctx->decoded[1];
- if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
- /* We are pure silence, just memset the output buffer. */
- memset(decoded0, 0, blockstodecode * sizeof(int32_t));
- memset(decoded1, 0, blockstodecode * sizeof(int32_t));
- } else {
- while (blockstodecode--) {
- *decoded0++ = ape_decode_value(ctx, &ctx->riceY);
- if (stereo)
- *decoded1++ = ape_decode_value(ctx, &ctx->riceX);
- }
+ while (blockstodecode--) {
+ *decoded0++ = ape_decode_value(ctx, &ctx->riceY);
+ if (stereo)
+ *decoded1++ = ape_decode_value(ctx, &ctx->riceX);
}
}
@@ -525,7 +523,7 @@ static void init_predictor_decoder(APEContext *ctx)
APEPredictor *p = &ctx->predictor;
/* Zero the history buffers */
- memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(int32_t));
+ memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(*p->historybuffer));
p->buf = p->historybuffer;
/* Initialize and zero the coefficients */
@@ -593,8 +591,8 @@ static av_always_inline int predictor_update_filter(APEPredictor *p,
static void predictor_decode_stereo(APEContext *ctx, int count)
{
APEPredictor *p = &ctx->predictor;
- int32_t *decoded0 = ctx->decoded0;
- int32_t *decoded1 = ctx->decoded1;
+ int32_t *decoded0 = ctx->decoded[0];
+ int32_t *decoded1 = ctx->decoded[1];
while (count--) {
/* Predictor Y */
@@ -610,7 +608,8 @@ static void predictor_decode_stereo(APEContext *ctx, int count)
/* Have we filled the history buffer? */
if (p->buf == p->historybuffer + HISTORY_SIZE) {
- memmove(p->historybuffer, p->buf, PREDICTOR_SIZE * sizeof(int32_t));
+ memmove(p->historybuffer, p->buf,
+ PREDICTOR_SIZE * sizeof(*p->historybuffer));
p->buf = p->historybuffer;
}
}
@@ -619,7 +618,7 @@ static void predictor_decode_stereo(APEContext *ctx, int count)
static void predictor_decode_mono(APEContext *ctx, int count)
{
APEPredictor *p = &ctx->predictor;
- int32_t *decoded0 = ctx->decoded0;
+ int32_t *decoded0 = ctx->decoded[0];
int32_t predictionA, currentA, A, sign;
currentA = p->lastA[0];
@@ -650,7 +649,8 @@ static void predictor_decode_mono(APEContext *ctx, int count)
/* Have we filled the history buffer? */
if (p->buf == p->historybuffer + HISTORY_SIZE) {
- memmove(p->historybuffer, p->buf, PREDICTOR_SIZE * sizeof(int32_t));
+ memmove(p->historybuffer, p->buf,
+ PREDICTOR_SIZE * sizeof(*p->historybuffer));
p->buf = p->historybuffer;
}
@@ -668,8 +668,8 @@ static void do_init_filter(APEFilter *f, int16_t *buf, int order)
f->delay = f->historybuffer + order * 2;
f->adaptcoeffs = f->historybuffer + order;
- memset(f->historybuffer, 0, (order * 2) * sizeof(int16_t));
- memset(f->coeffs, 0, order * sizeof(int16_t));
+ memset(f->historybuffer, 0, (order * 2) * sizeof(*f->historybuffer));
+ memset(f->coeffs, 0, order * sizeof(*f->coeffs));
f->avg = 0;
}
@@ -725,7 +725,7 @@ static void do_apply_filter(APEContext *ctx, int version, APEFilter *f,
/* Have we filled the history buffer? */
if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) {
memmove(f->historybuffer, f->delay - (order * 2),
- (order * 2) * sizeof(int16_t));
+ (order * 2) * sizeof(*f->historybuffer));
f->delay = f->historybuffer + order * 2;
f->adaptcoeffs = f->historybuffer + order;
}
@@ -773,33 +773,29 @@ static int init_frame_decoder(APEContext *ctx)
static void ape_unpack_mono(APEContext *ctx, int count)
{
- int32_t *decoded0 = ctx->decoded0;
- int32_t *decoded1 = ctx->decoded1;
-
if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
- entropy_decode(ctx, count, 0);
/* We are pure silence, so we're done. */
av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence mono\n");
return;
}
entropy_decode(ctx, count, 0);
- ape_apply_filters(ctx, decoded0, NULL, count);
+ ape_apply_filters(ctx, ctx->decoded[0], NULL, count);
/* Now apply the predictor decoding */
predictor_decode_mono(ctx, count);
/* Pseudo-stereo - just copy left channel to right channel */
if (ctx->channels == 2) {
- memcpy(decoded1, decoded0, count * sizeof(*decoded1));
+ memcpy(ctx->decoded[1], ctx->decoded[0], count * sizeof(*ctx->decoded[1]));
}
}
static void ape_unpack_stereo(APEContext *ctx, int count)
{
int32_t left, right;
- int32_t *decoded0 = ctx->decoded0;
- int32_t *decoded1 = ctx->decoded1;
+ int32_t *decoded0 = ctx->decoded[0];
+ int32_t *decoded1 = ctx->decoded[1];
if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
/* We are pure silence, so we're done. */
@@ -883,9 +879,6 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
}
s->samples = nblocks;
- memset(s->decoded0, 0, sizeof(s->decoded0));
- memset(s->decoded1, 0, sizeof(s->decoded1));
-
/* Initialize the frame decoder */
if (init_frame_decoder(s) < 0) {
av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n");
@@ -900,7 +893,16 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
return avpkt->size;
}
- blockstodecode = FFMIN(BLOCKS_PER_LOOP, s->samples);
+ blockstodecode = FFMIN(s->blocks_per_loop, s->samples);
+
+ /* reallocate decoded sample buffer if needed */
+ av_fast_malloc(&s->decoded_buffer, &s->decoded_size,
+ 2 * FFALIGN(blockstodecode, 8) * sizeof(*s->decoded_buffer));
+ if (!s->decoded_buffer)
+ return AVERROR(ENOMEM);
+ memset(s->decoded_buffer, 0, s->decoded_size);
+ s->decoded[0] = s->decoded_buffer;
+ s->decoded[1] = s->decoded_buffer + FFALIGN(blockstodecode, 8);
/* get output buffer */
s->frame.nb_samples = blockstodecode;
@@ -927,25 +929,25 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
case 8:
sample8 = (uint8_t *)s->frame.data[0];
for (i = 0; i < blockstodecode; i++) {
- *sample8++ = (s->decoded0[i] + 0x80) & 0xff;
+ *sample8++ = (s->decoded[0][i] + 0x80) & 0xff;
if (s->channels == 2)
- *sample8++ = (s->decoded1[i] + 0x80) & 0xff;
+ *sample8++ = (s->decoded[1][i] + 0x80) & 0xff;
}
break;
case 16:
sample16 = (int16_t *)s->frame.data[0];
for (i = 0; i < blockstodecode; i++) {
- *sample16++ = s->decoded0[i];
+ *sample16++ = s->decoded[0][i];
if (s->channels == 2)
- *sample16++ = s->decoded1[i];
+ *sample16++ = s->decoded[1][i];
}
break;
case 24:
sample24 = (int32_t *)s->frame.data[0];
for (i = 0; i < blockstodecode; i++) {
- *sample24++ = s->decoded0[i] << 8;
+ *sample24++ = s->decoded[0][i] << 8;
if (s->channels == 2)
- *sample24++ = s->decoded1[i] << 8;
+ *sample24++ = s->decoded[1][i] << 8;
}
break;
}
@@ -964,6 +966,21 @@ static void ape_flush(AVCodecContext *avctx)
s->samples= 0;
}
+#define OFFSET(x) offsetof(APEContext, x)
+#define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM)
+static const AVOption options[] = {
+ { "max_samples", "maximum number of samples decoded per call", OFFSET(blocks_per_loop), AV_OPT_TYPE_INT, { 4608 }, 1, INT_MAX, PAR, "max_samples" },
+ { "all", "no maximum. decode all samples for each packet at once", 0, AV_OPT_TYPE_CONST, { INT_MAX }, INT_MIN, INT_MAX, PAR, "max_samples" },
+ { NULL},
+};
+
+static const AVClass ape_decoder_class = {
+ .class_name = "APE decoder",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
AVCodec ff_ape_decoder = {
.name = "ape",
.type = AVMEDIA_TYPE_AUDIO,
@@ -975,4 +992,5 @@ AVCodec ff_ape_decoder = {
.capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1,
.flush = ape_flush,
.long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
+ .priv_class = &ape_decoder_class,
};
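
Not part of the commit itself, but for context: a minimal sketch of how the new max_samples private option added above could be set from API code. The option name, its "all" constant, and the decoder name "ape" are taken from the patch; the helper function, variable names, and error handling are illustrative assumptions.

```c
#include "libavcodec/avcodec.h"
#include "libavutil/dict.h"

/* Illustrative helper (not part of the patch): open the APE decoder with a
 * per-call sample limit via the "max_samples" private option. */
static int open_ape_decoder(AVCodecContext **out)
{
    AVCodec *codec = avcodec_find_decoder_by_name("ape");
    AVCodecContext *avctx;
    AVDictionary *opts = NULL;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;

    avctx = avcodec_alloc_context3(codec);
    if (!avctx)
        return AVERROR(ENOMEM);

    /* Decode at most 1024 samples per call; the string "all" would instead
     * select the INT_MAX constant from the option table above. */
    av_dict_set(&opts, "max_samples", "1024", 0);

    ret = avcodec_open2(avctx, codec, &opts);
    av_dict_free(&opts);
    if (ret < 0) {
        av_freep(&avctx);
        return ret;
    }

    *out = avctx;
    return 0;
}
```

Since the option is flagged with AV_OPT_FLAG_DECODING_PARAM, it should also be reachable from the command line, e.g. something along the lines of `ffmpeg -max_samples all -i input.ape ...`.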