-rw-r--r--  doc/APIchanges                  |   7
-rw-r--r--  libavcodec/8svx.c               |  38
-rw-r--r--  libavcodec/aac.h                |   1
-rw-r--r--  libavcodec/aacdec.c             |  49
-rw-r--r--  libavcodec/ac3dec.c             |  32
-rw-r--r--  libavcodec/ac3dec.h             |   1
-rw-r--r--  libavcodec/adpcm.c              |  42
-rw-r--r--  libavcodec/adx.h                |   1
-rw-r--r--  libavcodec/adxdec.c             |  41
-rw-r--r--  libavcodec/alac.c               |  45
-rw-r--r--  libavcodec/alsdec.c             |  43
-rw-r--r--  libavcodec/amrnbdec.c           |  25
-rw-r--r--  libavcodec/amrwbdec.c           |  27
-rw-r--r--  libavcodec/apedec.c             |  37
-rw-r--r--  libavcodec/atrac1.c             |  26
-rw-r--r--  libavcodec/atrac3.c             |  34
-rw-r--r--  libavcodec/avcodec.h            | 145
-rw-r--r--  libavcodec/binkaudio.c          |  34
-rw-r--r--  libavcodec/cook.c               |  43
-rw-r--r--  libavcodec/dca.c                |  32
-rw-r--r--  libavcodec/dpcm.c               |  28
-rw-r--r--  libavcodec/dsicinav.c           |  30
-rw-r--r--  libavcodec/flacdec.c            |  37
-rw-r--r--  libavcodec/g722.h               |   2
-rw-r--r--  libavcodec/g722dec.c            |  25
-rw-r--r--  libavcodec/g726.c               |  29
-rw-r--r--  libavcodec/gsmdec.c             |  32
-rw-r--r--  libavcodec/gsmdec_data.h        |   2
-rw-r--r--  libavcodec/imc.c                |  29
-rw-r--r--  libavcodec/internal.h           |   9
-rw-r--r--  libavcodec/libgsm.c             |  58
-rw-r--r--  libavcodec/libopencore-amr.c    |  52
-rw-r--r--  libavcodec/libspeexdec.c        |  36
-rw-r--r--  libavcodec/mace.c               |  33
-rw-r--r--  libavcodec/mlpdec.c             |  39
-rw-r--r--  libavcodec/mpc.h                |   1
-rw-r--r--  libavcodec/mpc7.c               |  29
-rw-r--r--  libavcodec/mpc8.c               |  27
-rw-r--r--  libavcodec/mpegaudiodec.c       |  86
-rw-r--r--  libavcodec/mpegaudiodec_float.c |  17
-rw-r--r--  libavcodec/nellymoserdec.c      |  37
-rw-r--r--  libavcodec/pcm.c                |  42
-rw-r--r--  libavcodec/qcelpdec.c           |  26
-rw-r--r--  libavcodec/qdm2.c               |  32
-rw-r--r--  libavcodec/ra144.h              |   1
-rw-r--r--  libavcodec/ra144dec.c           |  31
-rw-r--r--  libavcodec/ra288.c              |  27
-rw-r--r--  libavcodec/s302m.c              |  46
-rw-r--r--  libavcodec/shorten.c            |  45
-rw-r--r--  libavcodec/sipr.c               |  34
-rw-r--r--  libavcodec/smacker.c            |  41
-rw-r--r--  libavcodec/truespeech.c         |  29
-rw-r--r--  libavcodec/tta.c                |  28
-rw-r--r--  libavcodec/twinvq.c             |  36
-rw-r--r--  libavcodec/utils.c              | 230
-rw-r--r--  libavcodec/version.h            |   5
-rw-r--r--  libavcodec/vmdav.c              |  35
-rw-r--r--  libavcodec/vorbisdec.c          |  33
-rw-r--r--  libavcodec/wavpack.c            | 116
-rw-r--r--  libavcodec/wma.h                |   1
-rw-r--r--  libavcodec/wmadec.c             |  30
-rw-r--r--  libavcodec/wmaprodec.c          |  63
-rw-r--r--  libavcodec/wmavoice.c           |  46
-rw-r--r--  libavcodec/ws-snd1.c            |  38
64 files changed, 1590 insertions, 766 deletions
diff --git a/doc/APIchanges b/doc/APIchanges
index f664376d3a..2c43e75dba 100644
--- a/doc/APIchanges
+++ b/doc/APIchanges
@@ -13,6 +13,13 @@ libavutil: 2011-04-18
API changes, most recent first:
+2011-xx-xx - xxxxxxx - lavc 53.25.0
+ Add nb_samples and extended_data fields to AVFrame.
+ Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE.
+ Deprecate avcodec_decode_audio3() in favor of avcodec_decode_audio4().
+ avcodec_decode_audio4() writes output samples to an AVFrame, which allows
+ audio decoders to use get_buffer().
+
2011-xx-xx - xxxxxxx - lavc 53.24.0
Change AVFrame.data[4]/base[4]/linesize[4]/error[4] to [8] at next major bump.
Change AVPicture.data[4]/linesize[4] to [8] at next major bump.
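
For orientation, here is a rough caller-side sketch of the avcodec_decode_audio4() loop implied by the APIchanges entry above. It is illustrative only and not part of the patch: the AVCodecContext is assumed to be opened already, the AVPacket is assumed to come from a demuxer, and error handling is reduced to the minimum.

/* Illustrative caller-side use of the new audio decode API (not part of
 * this patch); avctx and pkt are assumed to be set up elsewhere. */
AVFrame frame;
int got_frame = 0;

avcodec_get_frame_defaults(&frame);

while (pkt.size > 0) {
    int len = avcodec_decode_audio4(avctx, &frame, &got_frame, &pkt);
    if (len < 0)
        break;                              /* decoding error */
    if (got_frame) {
        /* frame.nb_samples samples per channel are available through
         * frame.data[0] / frame.extended_data[], in avctx->sample_fmt;
         * no AVCODEC_MAX_AUDIO_FRAME_SIZE buffer is needed any more. */
        int size = frame.nb_samples * avctx->channels *
                   av_get_bytes_per_sample(avctx->sample_fmt);
        /* ... pass size bytes of decoded audio on ... */
    }
    pkt.data += len;                        /* a packet may hold several frames */
    pkt.size -= len;
}
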
diff --git a/libavcodec/8svx.c b/libavcodec/8svx.c
index 3e3eae6c87..4f11b8bec4 100644
--- a/libavcodec/8svx.c
+++ b/libavcodec/8svx.c
@@ -32,6 +32,7 @@
/** decoder context */
typedef struct EightSvxContext {
+ AVFrame frame;
uint8_t fib_acc[2];
const int8_t *table;
@@ -83,13 +84,13 @@ static void raw_decode(uint8_t *dst, const int8_t *src, int src_size,
}
/** decode a frame */
-static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
- AVPacket *avpkt)
+static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
EightSvxContext *esc = avctx->priv_data;
int buf_size;
- uint8_t *out_data = data;
- int out_data_size;
+ uint8_t *out_data;
+ int ret;
int is_compr = (avctx->codec_id != CODEC_ID_PCM_S8_PLANAR);
/* for the first packet, copy data to buffer */
@@ -134,15 +135,18 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *data_si
/* decode next piece of data from the buffer */
buf_size = FFMIN(MAX_FRAME_SIZE, esc->data_size - esc->data_idx);
if (buf_size <= 0) {
- *data_size = 0;
+ *got_frame_ptr = 0;
return avpkt->size;
}
- out_data_size = buf_size * (is_compr + 1) * avctx->channels;
- if (*data_size < out_data_size) {
- av_log(avctx, AV_LOG_ERROR, "Provided buffer with size %d is too small.\n",
- *data_size);
- return AVERROR(EINVAL);
+
+ /* get output buffer */
+ esc->frame.nb_samples = buf_size * (is_compr + 1);
+ if ((ret = avctx->get_buffer(avctx, &esc->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ out_data = esc->frame.data[0];
+
if (is_compr) {
delta_decode(out_data, &esc->data[0][esc->data_idx], buf_size,
&esc->fib_acc[0], esc->table, avctx->channels);
@@ -158,7 +162,9 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *data_si
}
}
esc->data_idx += buf_size;
- *data_size = out_data_size;
+
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = esc->frame;
return avpkt->size;
}
@@ -186,6 +192,10 @@ static av_cold int eightsvx_decode_init(AVCodecContext *avctx)
return -1;
}
avctx->sample_fmt = AV_SAMPLE_FMT_U8;
+
+ avcodec_get_frame_defaults(&esc->frame);
+ avctx->coded_frame = &esc->frame;
+
return 0;
}
@@ -207,7 +217,7 @@ AVCodec ff_eightsvx_fib_decoder = {
.init = eightsvx_decode_init,
.close = eightsvx_decode_close,
.decode = eightsvx_decode_frame,
- .capabilities = CODEC_CAP_DELAY,
+ .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("8SVX fibonacci"),
};
@@ -219,7 +229,7 @@ AVCodec ff_eightsvx_exp_decoder = {
.init = eightsvx_decode_init,
.close = eightsvx_decode_close,
.decode = eightsvx_decode_frame,
- .capabilities = CODEC_CAP_DELAY,
+ .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("8SVX exponential"),
};
@@ -231,6 +241,6 @@ AVCodec ff_pcm_s8_planar_decoder = {
.init = eightsvx_decode_init,
.close = eightsvx_decode_close,
.decode = eightsvx_decode_frame,
- .capabilities = CODEC_CAP_DELAY,
+ .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 8-bit planar"),
};
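
The 8SVX conversion above is the template repeated for every decoder in this patch: keep an AVFrame in the private context, publish it as coded_frame at init time, size it in samples and hand it to avctx->get_buffer() in each decode call, return it through *got_frame_ptr and the opaque data pointer, and advertise CODEC_CAP_DR1. A condensed sketch of that pattern follows; MyContext and the trivial 16-bit passthrough "decoding" step are placeholders, not code from any real codec in this commit.

/* Condensed sketch of the decoder-side pattern used throughout this patch.
 * MyContext and the passthrough "decoding" are placeholders. */
typedef struct MyContext {
    AVFrame frame;
    /* ... codec state ... */
} MyContext;

static av_cold int my_decode_init(AVCodecContext *avctx)
{
    MyContext *s = avctx->priv_data;

    avctx->sample_fmt = AV_SAMPLE_FMT_S16;
    avcodec_get_frame_defaults(&s->frame);
    avctx->coded_frame = &s->frame;
    return 0;
}

static int my_decode_frame(AVCodecContext *avctx, void *data,
                           int *got_frame_ptr, AVPacket *avpkt)
{
    MyContext *s = avctx->priv_data;
    /* placeholder: pretend the input is packed 16-bit PCM */
    int nb_samples = avpkt->size / (2 * avctx->channels);
    int ret;

    if (!nb_samples) {
        *got_frame_ptr = 0;
        return avpkt->size;
    }

    /* get output buffer */
    s->frame.nb_samples = nb_samples;
    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    /* a real decoder writes its decoded samples here */
    memcpy(s->frame.data[0], avpkt->data, nb_samples * 2 * avctx->channels);

    *got_frame_ptr   = 1;
    *(AVFrame *)data = s->frame;
    return avpkt->size;
}
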
diff --git a/libavcodec/aac.h b/libavcodec/aac.h
index 0653f810fd..30491fe85a 100644
--- a/libavcodec/aac.h
+++ b/libavcodec/aac.h
@@ -251,6 +251,7 @@ typedef struct {
*/
typedef struct {
AVCodecContext *avctx;
+ AVFrame frame;
MPEG4AudioConfig m4ac;
diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c
index 1015030b9a..672ba1c648 100644
--- a/libavcodec/aacdec.c
+++ b/libavcodec/aacdec.c
@@ -646,6 +646,9 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
cbrt_tableinit();
+ avcodec_get_frame_defaults(&ac->frame);
+ avctx->coded_frame = &ac->frame;
+
return 0;
}
@@ -2113,12 +2116,12 @@ static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
}
static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
- int *data_size, GetBitContext *gb)
+ int *got_frame_ptr, GetBitContext *gb)
{
AACContext *ac = avctx->priv_data;
ChannelElement *che = NULL, *che_prev = NULL;
enum RawDataBlockType elem_type, elem_type_prev = TYPE_END;
- int err, elem_id, data_size_tmp;
+ int err, elem_id;
int samples = 0, multiplier, audio_found = 0;
if (show_bits(gb, 12) == 0xfff) {
@@ -2222,24 +2225,26 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
avctx->frame_size = samples;
}
- data_size_tmp = samples * avctx->channels *
- av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < data_size_tmp) {
- av_log(avctx, AV_LOG_ERROR,
- "Output buffer too small (%d) or trying to output too many samples (%d) for this frame.\n",
- *data_size, data_size_tmp);
- return -1;
- }
- *data_size = data_size_tmp;
-
if (samples) {
+ /* get output buffer */
+ ac->frame.nb_samples = samples;
+ if ((err = avctx->get_buffer(avctx, &ac->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return err;
+ }
+
if (avctx->sample_fmt == AV_SAMPLE_FMT_FLT)
- ac->fmt_conv.float_interleave(data, (const float **)ac->output_data,
+ ac->fmt_conv.float_interleave((float *)ac->frame.data[0],
+ (const float **)ac->output_data,
samples, avctx->channels);
else
- ac->fmt_conv.float_to_int16_interleave(data, (const float **)ac->output_data,
+ ac->fmt_conv.float_to_int16_interleave((int16_t *)ac->frame.data[0],
+ (const float **)ac->output_data,
samples, avctx->channels);
+
+ *(AVFrame *)data = ac->frame;
}
+ *got_frame_ptr = !!samples;
if (ac->output_configured && audio_found)
ac->output_configured = OC_LOCKED;
@@ -2248,7 +2253,7 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
}
static int aac_decode_frame(AVCodecContext *avctx, void *data,
- int *data_size, AVPacket *avpkt)
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
@@ -2259,7 +2264,7 @@ static int aac_decode_frame(AVCodecContext *avctx, void *data,
init_get_bits(&gb, buf, buf_size * 8);
- if ((err = aac_decode_frame_int(avctx, data, data_size, &gb)) < 0)
+ if ((err = aac_decode_frame_int(avctx, data, got_frame_ptr, &gb)) < 0)
return err;
buf_consumed = (get_bits_count(&gb) + 7) >> 3;
@@ -2481,8 +2486,8 @@ static int read_audio_mux_element(struct LATMContext *latmctx,
}
-static int latm_decode_frame(AVCodecContext *avctx, void *out, int *out_size,
- AVPacket *avpkt)
+static int latm_decode_frame(AVCodecContext *avctx, void *out,
+ int *got_frame_ptr, AVPacket *avpkt)
{
struct LATMContext *latmctx = avctx->priv_data;
int muxlength, err;
@@ -2504,7 +2509,7 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out, int *out_size,
if (!latmctx->initialized) {
if (!avctx->extradata) {
- *out_size = 0;
+ *got_frame_ptr = 0;
return avpkt->size;
} else {
if ((err = decode_audio_specific_config(
@@ -2522,7 +2527,7 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out, int *out_size,
return AVERROR_INVALIDDATA;
}
- if ((err = aac_decode_frame_int(avctx, out, out_size, &gb)) < 0)
+ if ((err = aac_decode_frame_int(avctx, out, got_frame_ptr, &gb)) < 0)
return err;
return muxlength;
@@ -2552,7 +2557,7 @@ AVCodec ff_aac_decoder = {
.sample_fmts = (const enum AVSampleFormat[]) {
AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
},
- .capabilities = CODEC_CAP_CHANNEL_CONF,
+ .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
.channel_layouts = aac_channel_layout,
};
@@ -2573,6 +2578,6 @@ AVCodec ff_aac_latm_decoder = {
.sample_fmts = (const enum AVSampleFormat[]) {
AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
},
- .capabilities = CODEC_CAP_CHANNEL_CONF,
+ .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
.channel_layouts = aac_channel_layout,
};
diff --git a/libavcodec/ac3dec.c b/libavcodec/ac3dec.c
index 8e216c039b..7e11cf49ce 100644
--- a/libavcodec/ac3dec.c
+++ b/libavcodec/ac3dec.c
@@ -208,6 +208,9 @@ static av_cold int ac3_decode_init(AVCodecContext *avctx)
}
s->downmixed = 1;
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
@@ -1296,15 +1299,15 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
/**
* Decode a single AC-3 frame.
*/
-static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
- AVPacket *avpkt)
+static int ac3_decode_frame(AVCodecContext * avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
AC3DecodeContext *s = avctx->priv_data;
- float *out_samples_flt = data;
- int16_t *out_samples_s16 = data;
- int blk, ch, err;
+ float *out_samples_flt;
+ int16_t *out_samples_s16;
+ int blk, ch, err, ret;
const uint8_t *channel_map;
const float *output[AC3_MAX_CHANNELS];
@@ -1321,7 +1324,6 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
init_get_bits(&s->gbc, buf, buf_size * 8);
/* parse the syncinfo */
- *data_size = 0;
err = parse_frame_header(s);
if (err) {
@@ -1343,6 +1345,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
/* TODO: add support for substreams and dependent frames */
if(s->frame_type == EAC3_FRAME_TYPE_DEPENDENT || s->substreamid) {
av_log(avctx, AV_LOG_ERROR, "unsupported frame type : skipping frame\n");
+ *got_frame_ptr = 0;
return s->frame_size;
} else {
av_log(avctx, AV_LOG_ERROR, "invalid frame type\n");
@@ -1400,6 +1403,15 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
if (s->bitstream_mode == 0x7 && s->channels > 1)
avctx->audio_service_type = AV_AUDIO_SERVICE_TYPE_KARAOKE;
+ /* get output buffer */
+ s->frame.nb_samples = s->num_blocks * 256;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+ out_samples_flt = (float *)s->frame.data[0];
+ out_samples_s16 = (int16_t *)s->frame.data[0];
+
/* decode the audio blocks */
channel_map = ff_ac3_dec_channel_map[s->output_mode & ~AC3_OUTPUT_LFEON][s->lfe_on];
for (ch = 0; ch < s->out_channels; ch++)
@@ -1419,8 +1431,10 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
out_samples_s16 += 256 * s->out_channels;
}
}
- *data_size = s->num_blocks * 256 * avctx->channels *
- av_get_bytes_per_sample(avctx->sample_fmt);
+
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
+
return FFMIN(buf_size, s->frame_size);
}
@@ -1458,6 +1472,7 @@ AVCodec ff_ac3_decoder = {
.init = ac3_decode_init,
.close = ac3_decode_end,
.decode = ac3_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
.sample_fmts = (const enum AVSampleFormat[]) {
AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
@@ -1480,6 +1495,7 @@ AVCodec ff_eac3_decoder = {
.init = ac3_decode_init,
.close = ac3_decode_end,
.decode = ac3_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"),
.sample_fmts = (const enum AVSampleFormat[]) {
AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
diff --git a/libavcodec/ac3dec.h b/libavcodec/ac3dec.h
index 38262514b6..56c6553477 100644
--- a/libavcodec/ac3dec.h
+++ b/libavcodec/ac3dec.h
@@ -68,6 +68,7 @@
typedef struct {
AVClass *class; ///< class for AVOptions
AVCodecContext *avctx; ///< parent context
+ AVFrame frame; ///< AVFrame for decoded output
GetBitContext gbc; ///< bitstream reader
///@name Bit stream information
diff --git a/libavcodec/adpcm.c b/libavcodec/adpcm.c
index 4a818575cf..3ada328df3 100644
--- a/libavcodec/adpcm.c
+++ b/libavcodec/adpcm.c
@@ -84,6 +84,7 @@ static const int swf_index_tables[4][16] = {
/* end of tables */
typedef struct ADPCMDecodeContext {
+ AVFrame frame;
ADPCMChannelStatus status[6];
} ADPCMDecodeContext;
@@ -124,6 +125,10 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
break;
}
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+
+ avcodec_get_frame_defaults(&c->frame);
+ avctx->coded_frame = &c->frame;
+
return 0;
}
@@ -501,9 +506,8 @@ static int get_nb_samples(AVCodecContext *avctx, const uint8_t *buf,
decode_top_nibble_next = 1; \
}
-static int adpcm_decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
@@ -514,7 +518,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
const uint8_t *src;
int st; /* stereo */
int count1, count2;
- int nb_samples, coded_samples, out_bps, out_size;
+ int nb_samples, coded_samples, ret;
nb_samples = get_nb_samples(avctx, buf, buf_size, &coded_samples);
if (nb_samples <= 0) {
@@ -522,22 +526,22 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
return AVERROR_INVALIDDATA;
}
- out_bps = av_get_bytes_per_sample(avctx->sample_fmt);
- out_size = nb_samples * avctx->channels * out_bps;
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ c->frame.nb_samples = nb_samples;
+ if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ samples = (short *)c->frame.data[0];
+
/* use coded_samples when applicable */
/* it is always <= nb_samples, so the output buffer will be large enough */
if (coded_samples) {
if (coded_samples != nb_samples)
av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
- nb_samples = coded_samples;
- out_size = nb_samples * avctx->channels * out_bps;
+ c->frame.nb_samples = nb_samples = coded_samples;
}
- samples = data;
src = buf;
st = avctx->channels == 2 ? 1 : 0;
@@ -576,7 +580,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
cs->step_index = 88;
}
- samples = (short*)data + channel;
+ samples = (short *)c->frame.data[0] + channel;
for (m = 0; m < 32; m++) {
*samples = adpcm_ima_qt_expand_nibble(cs, src[0] & 0x0F, 3);
@@ -628,7 +632,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
}
for (i = 0; i < avctx->channels; i++) {
- samples = (short*)data + i;
+ samples = (short *)c->frame.data[0] + i;
cs = &c->status[i];
for (n = nb_samples >> 1; n > 0; n--, src++) {
uint8_t v = *src;
@@ -965,7 +969,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
}
}
- out_size = count * 28 * avctx->channels * out_bps;
+ c->frame.nb_samples = count * 28;
src = src_end;
break;
}
@@ -1144,7 +1148,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
prev[0][i] = (int16_t)bytestream_get_be16(&src);
for (ch = 0; ch <= st; ch++) {
- samples = (unsigned short *) data + ch;
+ samples = (short *)c->frame.data[0] + ch;
/* Read in every sample for this channel. */
for (i = 0; i < nb_samples / 14; i++) {
@@ -1177,7 +1181,10 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
default:
return -1;
}
- *data_size = out_size;
+
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = c->frame;
+
return src - buf;
}
@@ -1190,6 +1197,7 @@ AVCodec ff_ ## name_ ## _decoder = { \
.priv_data_size = sizeof(ADPCMDecodeContext), \
.init = adpcm_decode_init, \
.decode = adpcm_decode_frame, \
+ .capabilities = CODEC_CAP_DR1, \
.long_name = NULL_IF_CONFIG_SMALL(long_name_), \
}
diff --git a/libavcodec/adx.h b/libavcodec/adx.h
index da40eec929..92abe5f163 100644
--- a/libavcodec/adx.h
+++ b/libavcodec/adx.h
@@ -40,6 +40,7 @@ typedef struct {
} ADXChannelState;
typedef struct {
+ AVFrame frame;
int channels;
ADXChannelState prev[2];
int header_parsed;
diff --git a/libavcodec/adxdec.c b/libavcodec/adxdec.c
index 4558060781..e9104133fa 100644
--- a/libavcodec/adxdec.c
+++ b/libavcodec/adxdec.c
@@ -50,6 +50,10 @@ static av_cold int adx_decode_init(AVCodecContext *avctx)
c->channels = avctx->channels;
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+
+ avcodec_get_frame_defaults(&c->frame);
+ avctx->coded_frame = &c->frame;
+
return 0;
}
@@ -89,36 +93,42 @@ static int adx_decode(ADXContext *c, int16_t *out, const uint8_t *in, int ch)
return 0;
}
-static int adx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
- AVPacket *avpkt)
+static int adx_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
int buf_size = avpkt->size;
ADXContext *c = avctx->priv_data;
- int16_t *samples = data;
+ int16_t *samples;
const uint8_t *buf = avpkt->data;
- int num_blocks, ch;
+ int num_blocks, ch, ret;
if (c->eof) {
- *data_size = 0;
+ *got_frame_ptr = 0;
return buf_size;
}
- /* 18 bytes of data are expanded into 32*2 bytes of audio,
- so guard against buffer overflows */
+ /* calculate number of blocks in the packet */
num_blocks = buf_size / (BLOCK_SIZE * c->channels);
- if (num_blocks > *data_size / (BLOCK_SAMPLES * c->channels)) {
- buf_size = (*data_size / (BLOCK_SAMPLES * c->channels)) * BLOCK_SIZE;
- num_blocks = buf_size / (BLOCK_SIZE * c->channels);
- }
- if (!buf_size || buf_size % (BLOCK_SIZE * avctx->channels)) {
+
+ /* if the packet is not an even multiple of BLOCK_SIZE, check for an EOF
+ packet */
+ if (!num_blocks || buf_size % (BLOCK_SIZE * avctx->channels)) {
if (buf_size >= 4 && (AV_RB16(buf) & 0x8000)) {
c->eof = 1;
- *data_size = 0;
+ *got_frame_ptr = 0;
return avpkt->size;
}
return AVERROR_INVALIDDATA;
}
+ /* get output buffer */
+ c->frame.nb_samples = num_blocks * BLOCK_SAMPLES;
+ if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+ samples = (int16_t *)c->frame.data[0];
+
while (num_blocks--) {
for (ch = 0; ch < c->channels; ch++) {
if (adx_decode(c, samples + ch, buf, ch)) {
@@ -132,7 +142,9 @@ static int adx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
samples += BLOCK_SAMPLES * c->channels;
}
- *data_size = (uint8_t*)samples - (uint8_t*)data;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = c->frame;
+
return buf - avpkt->data;
}
@@ -143,5 +155,6 @@ AVCodec ff_adpcm_adx_decoder = {
.priv_data_size = sizeof(ADXContext),
.init = adx_decode_init,
.decode = adx_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("SEGA CRI ADX ADPCM"),
};
diff --git a/libavcodec/alac.c b/libavcodec/alac.c
index 1056e6c8f4..47234ecf13 100644
--- a/libavcodec/alac.c
+++ b/libavcodec/alac.c
@@ -62,10 +62,10 @@
typedef struct {
AVCodecContext *avctx;
+ AVFrame frame;
GetBitContext gb;
int numchannels;
- int bytespersample;
/* buffers */
int32_t *predicterror_buffer[MAX_CHANNELS];
@@ -351,9 +351,8 @@ static void interleave_stereo_24(int32_t *buffer[MAX_CHANNELS],
}
}
-static int alac_decode_frame(AVCodecContext *avctx,
- void *outbuffer, int *outputsize,
- AVPacket *avpkt)
+static int alac_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *inbuffer = avpkt->data;
int input_buffer_size = avpkt->size;
@@ -366,7 +365,7 @@ static int alac_decode_frame(AVCodecContext *avctx,
int isnotcompressed;
uint8_t interlacing_shift;
uint8_t interlacing_leftweight;
- int i, ch;
+ int i, ch, ret;
init_get_bits(&alac->gb, inbuffer, input_buffer_size * 8);
@@ -401,14 +400,17 @@ static int alac_decode_frame(AVCodecContext *avctx,
} else
outputsamples = alac->setinfo_max_samples_per_frame;
- alac->bytespersample = channels * av_get_bytes_per_sample(avctx->sample_fmt);
-
- if(outputsamples > *outputsize / alac->bytespersample){
- av_log(avctx, AV_LOG_ERROR, "sample buffer too small\n");
- return -1;
+ /* get output buffer */
+ if (outputsamples > INT32_MAX) {
+ av_log(avctx, AV_LOG_ERROR, "unsupported block size: %u\n", outputsamples);
+ return AVERROR_INVALIDDATA;
+ }
+ alac->frame.nb_samples = outputsamples;
+ if ((ret = avctx->get_buffer(avctx, &alac->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
- *outputsize = outputsamples * alac->bytespersample;
readsamplesize = alac->setinfo_sample_size - alac->extra_bits + channels - 1;
if (readsamplesize > MIN_CACHE_BITS) {
av_log(avctx, AV_LOG_ERROR, "readsamplesize too big (%d)\n", readsamplesize);
@@ -501,21 +503,23 @@ static int alac_decode_frame(AVCodecContext *avctx,
switch(alac->setinfo_sample_size) {
case 16:
if (channels == 2) {
- interleave_stereo_16(alac->outputsamples_buffer, outbuffer,
- outputsamples);
+ interleave_stereo_16(alac->outputsamples_buffer,
+ (int16_t *)alac->frame.data[0], outputsamples);
} else {
+ int16_t *outbuffer = (int16_t *)alac->frame.data[0];
for (i = 0; i < outputsamples; i++) {
- ((int16_t*)outbuffer)[i] = alac->outputsamples_buffer[0][i];
+ outbuffer[i] = alac->outputsamples_buffer[0][i];
}
}
break;
case 24:
if (channels == 2) {
- interleave_stereo_24(alac->outputsamples_buffer, outbuffer,
- outputsamples);
+ interleave_stereo_24(alac->outputsamples_buffer,
+ (int32_t *)alac->frame.data[0], outputsamples);
} else {
+ int32_t *outbuffer = (int32_t *)alac->frame.data[0];
for (i = 0; i < outputsamples; i++)
- ((int32_t *)outbuffer)[i] = alac->outputsamples_buffer[0][i] << 8;
+ outbuffer[i] = alac->outputsamples_buffer[0][i] << 8;
}
break;
}
@@ -523,6 +527,9 @@ static int alac_decode_frame(AVCodecContext *avctx,
if (input_buffer_size * 8 - get_bits_count(&alac->gb) > 8)
av_log(avctx, AV_LOG_ERROR, "Error : %d bits left\n", input_buffer_size * 8 - get_bits_count(&alac->gb));
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = alac->frame;
+
return input_buffer_size;
}
@@ -637,6 +644,9 @@ static av_cold int alac_decode_init(AVCodecContext * avctx)
return ret;
}
+ avcodec_get_frame_defaults(&alac->frame);
+ avctx->coded_frame = &alac->frame;
+
return 0;
}
@@ -648,5 +658,6 @@ AVCodec ff_alac_decoder = {
.init = alac_decode_init,
.close = alac_decode_close,
.decode = alac_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("ALAC (Apple Lossless Audio Codec)"),
};
diff --git a/libavcodec/alsdec.c b/libavcodec/alsdec.c
index e7a0de24b1..71495803a3 100644
--- a/libavcodec/alsdec.c
+++ b/libavcodec/alsdec.c
@@ -191,6 +191,7 @@ typedef struct {
typedef struct {
AVCodecContext *avctx;
+ AVFrame frame;
ALSSpecificConfig sconf;
GetBitContext gb;
DSPContext dsp;
@@ -1415,15 +1416,14 @@ static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
/** Decode an ALS frame.
*/
-static int decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
+static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
AVPacket *avpkt)
{
ALSDecContext *ctx = avctx->priv_data;
ALSSpecificConfig *sconf = &ctx->sconf;
const uint8_t *buffer = avpkt->data;
int buffer_size = avpkt->size;
- int invalid_frame, size;
+ int invalid_frame, ret;
unsigned int c, sample, ra_frame, bytes_read, shift;
init_get_bits(&ctx->gb, buffer, buffer_size * 8);
@@ -1448,21 +1448,17 @@ static int decode_frame(AVCodecContext *avctx,
ctx->frame_id++;
- // check for size of decoded data
- size = ctx->cur_frame_length * avctx->channels *
- av_get_bytes_per_sample(avctx->sample_fmt);
-
- if (size > *data_size) {
- av_log(avctx, AV_LOG_ERROR, "Decoded data exceeds buffer size.\n");
- return -1;
+ /* get output buffer */
+ ctx->frame.nb_samples = ctx->cur_frame_length;
+ if ((ret = avctx->get_buffer(avctx, &ctx->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
- *data_size = size;
-
// transform decoded frame into output format
#define INTERLEAVE_OUTPUT(bps) \
{ \
- int##bps##_t *dest = (int##bps##_t*) data; \
+ int##bps##_t *dest = (int##bps##_t*)ctx->frame.data[0]; \
shift = bps - ctx->avctx->bits_per_raw_sample; \
for (sample = 0; sample < ctx->cur_frame_length; sample++) \
for (c = 0; c < avctx->channels; c++) \
@@ -1480,7 +1476,7 @@ static int decode_frame(AVCodecContext *avctx,
int swap = HAVE_BIGENDIAN != sconf->msb_first;
if (ctx->avctx->bits_per_raw_sample == 24) {
- int32_t *src = data;
+ int32_t *src = (int32_t *)ctx->frame.data[0];
for (sample = 0;
sample < ctx->cur_frame_length * avctx->channels;
@@ -1501,22 +1497,25 @@ static int decode_frame(AVCodecContext *avctx,
if (swap) {
if (ctx->avctx->bits_per_raw_sample <= 16) {
- int16_t *src = (int16_t*) data;
+ int16_t *src = (int16_t*) ctx->frame.data[0];
int16_t *dest = (int16_t*) ctx->crc_buffer;
for (sample = 0;
sample < ctx->cur_frame_length * avctx->channels;
sample++)
*dest++ = av_bswap16(src[sample]);
} else {
- ctx->dsp.bswap_buf((uint32_t*)ctx->crc_buffer, data,
+ ctx->dsp.bswap_buf((uint32_t*)ctx->crc_buffer,
+ (uint32_t *)ctx->frame.data[0],
ctx->cur_frame_length * avctx->channels);
}
crc_source = ctx->crc_buffer;
} else {
- crc_source = data;
+ crc_source = ctx->frame.data[0];
}
- ctx->crc = av_crc(ctx->crc_table, ctx->crc, crc_source, size);
+ ctx->crc = av_crc(ctx->crc_table, ctx->crc, crc_source,
+ ctx->cur_frame_length * avctx->channels *
+ av_get_bytes_per_sample(avctx->sample_fmt));
}
@@ -1527,6 +1526,9 @@ static int decode_frame(AVCodecContext *avctx,
}
}
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = ctx->frame;
+
bytes_read = invalid_frame ? buffer_size :
(get_bits_count(&ctx->gb) + 7) >> 3;
@@ -1724,6 +1726,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
dsputil_init(&ctx->dsp, avctx);
+ avcodec_get_frame_defaults(&ctx->frame);
+ avctx->coded_frame = &ctx->frame;
+
return 0;
}
@@ -1747,7 +1752,7 @@ AVCodec ff_als_decoder = {
.close = decode_end,
.decode = decode_frame,
.flush = flush,
- .capabilities = CODEC_CAP_SUBFRAMES,
+ .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Audio Lossless Coding (ALS)"),
};
diff --git a/libavcodec/amrnbdec.c b/libavcodec/amrnbdec.c
index 501b137780..b594af760a 100644
--- a/libavcodec/amrnbdec.c
+++ b/libavcodec/amrnbdec.c
@@ -95,6 +95,7 @@
#define AMR_AGC_ALPHA 0.9
typedef struct AMRContext {
+ AVFrame avframe; ///< AVFrame for decoded samples
AMRNBFrame frame; ///< decoded AMR parameters (lsf coefficients, codebook indexes, etc)
uint8_t bad_frame_indicator; ///< bad frame ? 1 : 0
enum Mode cur_frame_mode;
@@ -167,6 +168,9 @@ static av_cold int amrnb_decode_init(AVCodecContext *avctx)
for (i = 0; i < 4; i++)
p->prediction_error[i] = MIN_ENERGY;
+ avcodec_get_frame_defaults(&p->avframe);
+ avctx->coded_frame = &p->avframe;
+
return 0;
}
@@ -919,21 +923,29 @@ static void postfilter(AMRContext *p, float *lpc, float *buf_out)
/// @}
-static int amrnb_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
- AVPacket *avpkt)
+static int amrnb_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
AMRContext *p = avctx->priv_data; // pointer to private data
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- float *buf_out = data; // pointer to the output data buffer
- int i, subframe;
+ float *buf_out; // pointer to the output data buffer
+ int i, subframe, ret;
float fixed_gain_factor;
AMRFixed fixed_sparse = {0}; // fixed vector up to anti-sparseness processing
float spare_vector[AMR_SUBFRAME_SIZE]; // extra stack space to hold result from anti-sparseness processing
float synth_fixed_gain; // the fixed gain that synthesis should use
const float *synth_fixed_vector; // pointer to the fixed vector that synthesis should use
+ /* get output buffer */
+ p->avframe.nb_samples = AMR_BLOCK_SIZE;
+ if ((ret = avctx->get_buffer(avctx, &p->avframe)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+ buf_out = (float *)p->avframe.data[0];
+
p->cur_frame_mode = unpack_bitstream(p, buf, buf_size);
if (p->cur_frame_mode == MODE_DTX) {
av_log_missing_feature(avctx, "dtx mode", 1);
@@ -1028,8 +1040,8 @@ static int amrnb_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
ff_weighted_vector_sumf(p->lsf_avg, p->lsf_avg, p->lsf_q[3],
0.84, 0.16, LP_FILTER_ORDER);
- /* report how many samples we got */
- *data_size = AMR_BLOCK_SIZE * sizeof(float);
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = p->avframe;
/* return the amount of bytes consumed if everything was OK */
return frame_sizes_nb[p->cur_frame_mode] + 1; // +7 for rounding and +8 for TOC
@@ -1043,6 +1055,7 @@ AVCodec ff_amrnb_decoder = {
.priv_data_size = sizeof(AMRContext),
.init = amrnb_decode_init,
.decode = amrnb_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Adaptive Multi-Rate NarrowBand"),
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
};
diff --git a/libavcodec/amrwbdec.c b/libavcodec/amrwbdec.c
index d4bb7760ef..d4aa557d07 100644
--- a/libavcodec/amrwbdec.c
+++ b/libavcodec/amrwbdec.c
@@ -41,6 +41,7 @@
#include "amrwbdata.h"
typedef struct {
+ AVFrame avframe; ///< AVFrame for decoded samples
AMRWBFrame frame; ///< AMRWB parameters decoded from bitstream
enum Mode fr_cur_mode; ///< mode index of current frame
uint8_t fr_quality; ///< frame quality index (FQI)
@@ -102,6 +103,9 @@ static av_cold int amrwb_decode_init(AVCodecContext *avctx)
for (i = 0; i < 4; i++)
ctx->prediction_error[i] = MIN_ENERGY;
+ avcodec_get_frame_defaults(&ctx->avframe);
+ avctx->coded_frame = &ctx->avframe;
+
return 0;
}
@@ -1062,15 +1066,15 @@ static void update_sub_state(AMRWBContext *ctx)
LP_ORDER_16k * sizeof(float));
}
-static int amrwb_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
- AVPacket *avpkt)
+static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
AMRWBContext *ctx = avctx->priv_data;
AMRWBFrame *cf = &ctx->frame;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
int expected_fr_size, header_size;
- float *buf_out = data;
+ float *buf_out;
float spare_vector[AMRWB_SFR_SIZE]; // extra stack space to hold result from anti-sparseness processing
float fixed_gain_factor; // fixed gain correction factor (gamma)
float *synth_fixed_vector; // pointer to the fixed vector that synthesis should use
@@ -1080,7 +1084,15 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
float hb_exc[AMRWB_SFR_SIZE_16k]; // excitation for the high frequency band
float hb_samples[AMRWB_SFR_SIZE_16k]; // filtered high-band samples from synthesis
float hb_gain;
- int sub, i;
+ int sub, i, ret;
+
+ /* get output buffer */
+ ctx->avframe.nb_samples = 4 * AMRWB_SFR_SIZE_16k;
+ if ((ret = avctx->get_buffer(avctx, &ctx->avframe)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+ buf_out = (float *)ctx->avframe.data[0];
header_size = decode_mime_header(ctx, buf);
expected_fr_size = ((cf_sizes_wb[ctx->fr_cur_mode] + 7) >> 3) + 1;
@@ -1088,7 +1100,7 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
if (buf_size < expected_fr_size) {
av_log(avctx, AV_LOG_ERROR,
"Frame too small (%d bytes). Truncated file?\n", buf_size);
- *data_size = 0;
+ *got_frame_ptr = 0;
return buf_size;
}
@@ -1219,8 +1231,8 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
memcpy(ctx->isp_sub4_past, ctx->isp[3], LP_ORDER * sizeof(ctx->isp[3][0]));
memcpy(ctx->isf_past_final, ctx->isf_cur, LP_ORDER * sizeof(float));
- /* report how many samples we got */
- *data_size = 4 * AMRWB_SFR_SIZE_16k * sizeof(float);
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = ctx->avframe;
return expected_fr_size;
}
@@ -1232,6 +1244,7 @@ AVCodec ff_amrwb_decoder = {
.priv_data_size = sizeof(AMRWBContext),
.init = amrwb_decode_init,
.decode = amrwb_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Adaptive Multi-Rate WideBand"),
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
};
diff --git a/libavcodec/apedec.c b/libavcodec/apedec.c
index 7702b291c8..2d03c554a6 100644
--- a/libavcodec/apedec.c
+++ b/libavcodec/apedec.c
@@ -129,6 +129,7 @@ typedef struct APEPredictor {
/** Decoder context */
typedef struct APEContext {
AVCodecContext *avctx;
+ AVFrame frame;
DSPContext dsp;
int channels;
int samples; ///< samples left to decode in current frame
@@ -215,6 +216,10 @@ static av_cold int ape_decode_init(AVCodecContext *avctx)
dsputil_init(&s->dsp, avctx);
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
+
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
filter_alloc_fail:
ape_decode_close(avctx);
@@ -805,16 +810,15 @@ static void ape_unpack_stereo(APEContext *ctx, int count)
}
}
-static int ape_decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int ape_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
APEContext *s = avctx->priv_data;
- int16_t *samples = data;
- int i;
- int blockstodecode, out_size;
+ int16_t *samples;
+ int i, ret;
+ int blockstodecode;
int bytes_used = 0;
/* this should never be negative, but bad things will happen if it is, so
@@ -826,7 +830,7 @@ static int ape_decode_frame(AVCodecContext *avctx,
void *tmp_data;
if (!buf_size) {
- *data_size = 0;
+ *got_frame_ptr = 0;
return 0;
}
if (buf_size < 8) {
@@ -874,18 +878,19 @@ static int ape_decode_frame(AVCodecContext *avctx,
}
if (!s->data) {
- *data_size = 0;
+ *got_frame_ptr = 0;
return buf_size;
}
blockstodecode = FFMIN(BLOCKS_PER_LOOP, s->samples);
- out_size = blockstodecode * avctx->channels *
- av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small.\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ s->frame.nb_samples = blockstodecode;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ samples = (int16_t *)s->frame.data[0];
s->error=0;
@@ -909,7 +914,9 @@ static int ape_decode_frame(AVCodecContext *avctx,
s->samples -= blockstodecode;
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
+
return bytes_used;
}
@@ -927,7 +934,7 @@ AVCodec ff_ape_decoder = {
.init = ape_decode_init,
.close = ape_decode_close,
.decode = ape_decode_frame,
- .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY,
+ .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1,
.flush = ape_flush,
.long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
};
diff --git a/libavcodec/atrac1.c b/libavcodec/atrac1.c
index 770b1bf90e..9ead80d5c8 100644
--- a/libavcodec/atrac1.c
+++ b/libavcodec/atrac1.c
@@ -72,6 +72,7 @@ typedef struct {
* The atrac1 context, holds all needed parameters for decoding
*/
typedef struct {
+ AVFrame frame;
AT1SUCtx SUs[AT1_MAX_CHANNELS]; ///< channel sound unit
DECLARE_ALIGNED(32, float, spec)[AT1_SU_SAMPLES]; ///< the mdct spectrum buffer
@@ -273,14 +274,14 @@ static void at1_subband_synthesis(AT1Ctx *q, AT1SUCtx* su, float *pOut)
static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
- int *data_size, AVPacket *avpkt)
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
AT1Ctx *q = avctx->priv_data;
- int ch, ret, out_size;
+ int ch, ret;
GetBitContext gb;
- float* samples = data;
+ float *samples;
if (buf_size < 212 * q->channels) {
@@ -288,12 +289,13 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
return AVERROR_INVALIDDATA;
}
- out_size = q->channels * AT1_SU_SAMPLES *
- av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ q->frame.nb_samples = AT1_SU_SAMPLES;
+ if ((ret = avctx->get_buffer(avctx, &q->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ samples = (float *)q->frame.data[0];
for (ch = 0; ch < q->channels; ch++) {
AT1SUCtx* su = &q->SUs[ch];
@@ -321,7 +323,9 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
AT1_SU_SAMPLES, 2);
}
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = q->frame;
+
return avctx->block_align;
}
@@ -389,6 +393,9 @@ static av_cold int atrac1_decode_init(AVCodecContext *avctx)
q->SUs[1].spectrum[0] = q->SUs[1].spec1;
q->SUs[1].spectrum[1] = q->SUs[1].spec2;
+ avcodec_get_frame_defaults(&q->frame);
+ avctx->coded_frame = &q->frame;
+
return 0;
}
@@ -401,5 +408,6 @@ AVCodec ff_atrac1_decoder = {
.init = atrac1_decode_init,
.close = atrac1_decode_end,
.decode = atrac1_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Atrac 1 (Adaptive TRansform Acoustic Coding)"),
};
diff --git a/libavcodec/atrac3.c b/libavcodec/atrac3.c
index 3a48a5a647..bdd03402da 100644
--- a/libavcodec/atrac3.c
+++ b/libavcodec/atrac3.c
@@ -86,6 +86,7 @@ typedef struct {
} channel_unit;
typedef struct {
+ AVFrame frame;
GetBitContext gb;
//@{
/** stream data */
@@ -823,16 +824,16 @@ static int decodeFrame(ATRAC3Context *q, const uint8_t* databuf,
* @param avctx pointer to the AVCodecContext
*/
-static int atrac3_decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt) {
+static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
+{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
ATRAC3Context *q = avctx->priv_data;
- int result = 0, out_size;
+ int result;
const uint8_t* databuf;
- float *samples_flt = data;
- int16_t *samples_s16 = data;
+ float *samples_flt;
+ int16_t *samples_s16;
if (buf_size < avctx->block_align) {
av_log(avctx, AV_LOG_ERROR,
@@ -840,12 +841,14 @@ static int atrac3_decode_frame(AVCodecContext *avctx,
return AVERROR_INVALIDDATA;
}
- out_size = SAMPLES_PER_FRAME * q->channels *
- av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ q->frame.nb_samples = SAMPLES_PER_FRAME;
+ if ((result = avctx->get_buffer(avctx, &q->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return result;
}
+ samples_flt = (float *)q->frame.data[0];
+ samples_s16 = (int16_t *)q->frame.data[0];
/* Check if we need to descramble and what buffer to pass on. */
if (q->scrambled_stream) {
@@ -875,7 +878,9 @@ static int atrac3_decode_frame(AVCodecContext *avctx,
(const float **)q->outSamples,
SAMPLES_PER_FRAME, q->channels);
}
- *data_size = out_size;
+
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = q->frame;
return avctx->block_align;
}
@@ -1047,6 +1052,9 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx)
}
}
+ avcodec_get_frame_defaults(&q->frame);
+ avctx->coded_frame = &q->frame;
+
return 0;
}
@@ -1060,6 +1068,6 @@ AVCodec ff_atrac3_decoder =
.init = atrac3_decode_init,
.close = atrac3_decode_close,
.decode = atrac3_decode_frame,
- .capabilities = CODEC_CAP_SUBFRAMES,
+ .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Atrac 3 (Adaptive TRansform Acoustic Coding 3)"),
};
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index eeafce4c45..83fb39b99e 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -480,8 +480,10 @@ enum CodecID {
#define CH_LAYOUT_STEREO_DOWNMIX AV_CH_LAYOUT_STEREO_DOWNMIX
#endif
+#if FF_API_OLD_DECODE_AUDIO
/* in bytes */
#define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
+#endif
/**
* Required number of additionally allocated bytes at the end of the input bitstream for decoding.
@@ -933,13 +935,24 @@ typedef struct AVFrame {
#define AV_NUM_DATA_POINTERS 8
#endif
/**
- * pointer to the picture planes.
+ * pointer to the picture/channel planes.
* This might be different from the first allocated byte
- * - encoding:
- * - decoding:
+ * - encoding: Set by user
+ * - decoding: set by AVCodecContext.get_buffer()
*/
uint8_t *data[AV_NUM_DATA_POINTERS];
+
+ /**
+ * Size, in bytes, of the data for each picture/channel plane.
+ *
+ * For audio, only linesize[0] may be set. For planar audio, each channel
+ * plane must be the same size.
+ *
+ * - encoding: Set by user (video only)
+ * - decoding: set by AVCodecContext.get_buffer()
+ */
int linesize[AV_NUM_DATA_POINTERS];
+
/**
* pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer.
* This isn't used by libavcodec unless the default get/release_buffer() is used.
@@ -993,7 +1006,7 @@ typedef struct AVFrame {
* buffer age (1->was last buffer and dint change, 2->..., ...).
* Set to INT_MAX if the buffer has not been used yet.
* - encoding: unused
- * - decoding: MUST be set by get_buffer().
+ * - decoding: MUST be set by get_buffer() for video.
*/
int age;
@@ -1190,6 +1203,33 @@ typedef struct AVFrame {
* - decoding: Set by libavcodec.
*/
void *thread_opaque;
+
+ /**
+ * number of audio samples (per channel) described by this frame
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ int nb_samples;
+
+ /**
+ * pointers to the data planes/channels.
+ *
+ * For video, this should simply point to data[].
+ *
+ * For planar audio, each channel has a separate data pointer, and
+ * linesize[0] contains the size of each channel buffer.
+ * For packed audio, there is just one data pointer, and linesize[0]
+ * contains the total size of the buffer for all channels.
+ *
+ * Note: Both data and extended_data will always be set by get_buffer(),
+ * but for planar audio with more channels than can fit in data,
+ * extended_data must be used by the decoder in order to access all
+ * channels.
+ *
+ * encoding: unused
+ * decoding: set by AVCodecContext.get_buffer()
+ */
+ uint8_t **extended_data;
} AVFrame;
struct AVCodecInternal;
@@ -1545,15 +1585,56 @@ typedef struct AVCodecContext {
/**
* Called at the beginning of each frame to get a buffer for it.
- * If pic.reference is set then the frame will be read later by libavcodec.
- * avcodec_align_dimensions2() should be used to find the required width and
- * height, as they normally need to be rounded up to the next multiple of 16.
+ *
+ * The function will set AVFrame.data[], AVFrame.linesize[].
+ * AVFrame.extended_data[] must also be set, but it should be the same as
+ * AVFrame.data[] except for planar audio with more channels than can fit
+ * in AVFrame.data[]. In that case, AVFrame.data[] shall still contain as
+ * many data pointers as it can hold.
+ *
* if CODEC_CAP_DR1 is not set then get_buffer() must call
* avcodec_default_get_buffer() instead of providing buffers allocated by
* some other means.
+ *
+ * AVFrame.data[] should be 32- or 16-byte-aligned unless the CPU doesn't
+ * need it. avcodec_default_get_buffer() aligns the output buffer properly,
+ * but if get_buffer() is overridden then alignment considerations should
+ * be taken into account.
+ *
+ * @see avcodec_default_get_buffer()
+ *
+ * Video:
+ *
+ * If pic.reference is set then the frame will be read later by libavcodec.
+ * avcodec_align_dimensions2() should be used to find the required width and
+ * height, as they normally need to be rounded up to the next multiple of 16.
+ *
* If frame multithreading is used and thread_safe_callbacks is set,
- * it may be called from a different thread, but not from more than one at once.
- * Does not need to be reentrant.
+ * it may be called from a different thread, but not from more than one at
+ * once. Does not need to be reentrant.
+ *
+ * @see release_buffer(), reget_buffer()
+ * @see avcodec_align_dimensions2()
+ *
+ * Audio:
+ *
+ * Decoders request a buffer of a particular size by setting
+ * AVFrame.nb_samples prior to calling get_buffer(). The decoder may,
+ * however, utilize only part of the buffer by setting AVFrame.nb_samples
+ * to a smaller value in the output frame.
+ *
+ * Decoders cannot use the buffer after returning from
+ * avcodec_decode_audio4(), so they will not call release_buffer(), as it
+ * is assumed to be released immediately upon return.
+ *
+ * As a convenience, av_samples_get_buffer_size() and
+ * av_samples_fill_arrays() in libavutil may be used by custom get_buffer()
+ * functions to find the required data size and to fill data pointers and
+ * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+ * since all planes must be the same size.
+ *
+ * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+ *
* - encoding: unused
* - decoding: Set by libavcodec, user can override.
*/
@@ -3882,7 +3963,12 @@ int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
*/
int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options);
+#if FF_API_OLD_DECODE_AUDIO
/**
+ * Wrapper function which calls avcodec_decode_audio4.
+ *
+ * @deprecated Use avcodec_decode_audio4 instead.
+ *
* Decode the audio frame of size avpkt->size from avpkt->data into samples.
* Some decoders may support multiple frames in a single AVPacket, such
* decoders would then just decode the first frame. In this case,
@@ -3917,6 +4003,8 @@ int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options)
*
* @param avctx the codec context
* @param[out] samples the output buffer, sample type in avctx->sample_fmt
+ * If the sample format is planar, each channel plane will
+ * be the same size, with no padding between channels.
* @param[in,out] frame_size_ptr the output buffer size in bytes
* @param[in] avpkt The input AVPacket containing the input buffer.
* You can create such packet with av_init_packet() and by then setting
@@ -3925,9 +4013,46 @@ int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options)
* @return On error a negative value is returned, otherwise the number of bytes
* used or zero if no frame data was decompressed (used) from the input AVPacket.
*/
-int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
+attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
int *frame_size_ptr,
AVPacket *avpkt);
+#endif
+
+/**
+ * Decode the audio frame of size avpkt->size from avpkt->data into frame.
+ *
+ * Some decoders may support multiple frames in a single AVPacket. Such
+ * decoders would then just decode the first frame. In this case,
+ * avcodec_decode_audio4 has to be called again with an AVPacket containing
+ * the remaining data in order to decode the second frame, etc...
+ * Even if no frames are returned, the packet needs to be fed to the decoder
+ * with remaining data until it is completely consumed or an error occurs.
+ *
+ * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE
+ * larger than the actual read bytes because some optimized bitstream
+ * readers read 32 or 64 bits at once and could read over the end.
+ *
+ * @note You might have to align the input buffer. The alignment requirements
+ * depend on the CPU and the decoder.
+ *
+ * @param avctx the codec context
+ * @param[out] frame The AVFrame in which to store decoded audio samples.
+ * Decoders request a buffer of a particular size by setting
+ * AVFrame.nb_samples prior to calling get_buffer(). The
+ * decoder may, however, only utilize part of the buffer by
+ * setting AVFrame.nb_samples to a smaller value in the
+ * output frame.
+ * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is
+ * non-zero.
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ * At least avpkt->data and avpkt->size should be set. Some
+ * decoders might also require additional fields to be set.
+ * @return A negative error code is returned if an error occurred during
+ * decoding, otherwise the number of bytes consumed from the input
+ * AVPacket is returned.
+ */
+int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame_ptr, AVPacket *avpkt);
/**
* Decode the video frame of size avpkt->size from avpkt->data into picture.
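
Since the get_buffer() documentation added above now covers audio, applications that override it must also handle sample buffers. Below is a rough sketch, under stated assumptions, of what such an override could look like for the packed (interleaved) sample formats used by the decoders converted here: it allocates a single plane, sets linesize[0] to the total size, points extended_data at data, and leaves freeing the buffer to the application, since audio decoders never call release_buffer(). The av_samples_get_buffer_size()/av_samples_fill_arrays() helpers mentioned in the documentation could replace the manual size computation, and a real override may also need whatever bookkeeping avcodec_default_get_buffer() performs, which is not shown.

/* Hypothetical user-supplied get_buffer() for packed audio (illustration
 * only, not part of this patch). */
static int my_audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
{
    /* total size of frame->nb_samples interleaved samples on all channels */
    int size = frame->nb_samples * avctx->channels *
               av_get_bytes_per_sample(avctx->sample_fmt);

    frame->data[0] = av_malloc(size);
    if (!frame->data[0])
        return AVERROR(ENOMEM);

    frame->linesize[0]   = size;        /* one packed plane holds everything */
    frame->extended_data = frame->data; /* all channels fit in data[] */

    /* The decoder will not release this buffer; the application frees
     * frame->data[0] once it has consumed the decoded frame. */
    return 0;
}
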
diff --git a/libavcodec/binkaudio.c b/libavcodec/binkaudio.c
index d917e7a12c..1dceeb74c3 100644
--- a/libavcodec/binkaudio.c
+++ b/libavcodec/binkaudio.c
@@ -45,6 +45,7 @@ static float quant_table[96];
#define BINK_BLOCK_MAX_SIZE (MAX_CHANNELS << 11)
typedef struct {
+ AVFrame frame;
GetBitContext gb;
DSPContext dsp;
FmtConvertContext fmt_conv;
@@ -147,6 +148,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
else
return -1;
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
@@ -293,6 +297,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
ff_rdft_end(&s->trans.rdft);
else if (CONFIG_BINKAUDIO_DCT_DECODER)
ff_dct_end(&s->trans.dct);
+
return 0;
}
@@ -302,20 +307,19 @@ static void get_bits_align32(GetBitContext *s)
if (n) skip_bits(s, n);
}
-static int decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
BinkAudioContext *s = avctx->priv_data;
- int16_t *samples = data;
+ int16_t *samples;
GetBitContext *gb = &s->gb;
- int out_size, consumed = 0;
+ int ret, consumed = 0;
if (!get_bits_left(gb)) {
uint8_t *buf;
/* handle end-of-stream */
if (!avpkt->size) {
- *data_size = 0;
+ *got_frame_ptr = 0;
return 0;
}
if (avpkt->size < 4) {
@@ -334,11 +338,13 @@ static int decode_frame(AVCodecContext *avctx,
skip_bits_long(gb, 32);
}
- out_size = s->block_size * av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ s->frame.nb_samples = s->block_size / avctx->channels;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ samples = (int16_t *)s->frame.data[0];
if (decode_block(s, samples, avctx->codec->id == CODEC_ID_BINKAUDIO_DCT)) {
av_log(avctx, AV_LOG_ERROR, "Incomplete packet\n");
@@ -346,7 +352,9 @@ static int decode_frame(AVCodecContext *avctx,
}
get_bits_align32(gb);
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
+
return consumed;
}
@@ -358,7 +366,7 @@ AVCodec ff_binkaudio_rdft_decoder = {
.init = decode_init,
.close = decode_end,
.decode = decode_frame,
- .capabilities = CODEC_CAP_DELAY,
+ .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Bink Audio (RDFT)")
};
@@ -370,6 +378,6 @@ AVCodec ff_binkaudio_dct_decoder = {
.init = decode_init,
.close = decode_end,
.decode = decode_frame,
- .capabilities = CODEC_CAP_DELAY,
+ .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Bink Audio (DCT)")
};
diff --git a/libavcodec/cook.c b/libavcodec/cook.c
index 8b0a351495..81a1aae9d1 100644
--- a/libavcodec/cook.c
+++ b/libavcodec/cook.c
@@ -122,6 +122,7 @@ typedef struct cook {
void (* saturate_output) (struct cook *q, int chan, float *out);
AVCodecContext* avctx;
+ AVFrame frame;
GetBitContext gb;
/* stream data */
int nb_channels;
@@ -131,6 +132,7 @@ typedef struct cook {
int samples_per_channel;
/* states */
AVLFG random_state;
+ int discarded_packets;
/* transform data */
FFTContext mdct_ctx;
@@ -896,7 +898,8 @@ mlt_compensate_output(COOKContext *q, float *decode_buffer,
float *out, int chan)
{
imlt_gain(q, decode_buffer, gains_ptr, previous_buffer);
- q->saturate_output (q, chan, out);
+ if (out)
+ q->saturate_output(q, chan, out);
}
@@ -953,24 +956,28 @@ static void decode_subpacket(COOKContext *q, COOKSubpacket *p,
* @param avctx pointer to the AVCodecContext
*/
-static int cook_decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt) {
+static int cook_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
+{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
COOKContext *q = avctx->priv_data;
- int i, out_size;
+ float *samples = NULL;
+ int i, ret;
int offset = 0;
int chidx = 0;
if (buf_size < avctx->block_align)
return buf_size;
- out_size = q->nb_channels * q->samples_per_channel *
- av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ if (q->discarded_packets >= 2) {
+ q->frame.nb_samples = q->samples_per_channel;
+ if ((ret = avctx->get_buffer(avctx, &q->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+ samples = (float *)q->frame.data[0];
}
/* estimate subpacket sizes */
@@ -990,15 +997,21 @@ static int cook_decode_frame(AVCodecContext *avctx,
q->subpacket[i].bits_per_subpacket = (q->subpacket[i].size*8)>>q->subpacket[i].bits_per_subpdiv;
q->subpacket[i].ch_idx = chidx;
av_log(avctx,AV_LOG_DEBUG,"subpacket[%i] size %i js %i %i block_align %i\n",i,q->subpacket[i].size,q->subpacket[i].joint_stereo,offset,avctx->block_align);
- decode_subpacket(q, &q->subpacket[i], buf + offset, data);
+ decode_subpacket(q, &q->subpacket[i], buf + offset, samples);
offset += q->subpacket[i].size;
chidx += q->subpacket[i].num_channels;
av_log(avctx,AV_LOG_DEBUG,"subpacket[%i] %i %i\n",i,q->subpacket[i].size * 8,get_bits_count(&q->gb));
}
- *data_size = out_size;
/* Discard the first two frames: no valid audio. */
- if (avctx->frame_number < 2) *data_size = 0;
+ if (q->discarded_packets < 2) {
+ q->discarded_packets++;
+ *got_frame_ptr = 0;
+ return avctx->block_align;
+ }
+
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = q->frame;
return avctx->block_align;
}
@@ -1246,6 +1259,9 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
else
avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
+ avcodec_get_frame_defaults(&q->frame);
+ avctx->coded_frame = &q->frame;
+
#ifdef DEBUG
dump_cook_context(q);
#endif
@@ -1262,5 +1278,6 @@ AVCodec ff_cook_decoder =
.init = cook_decode_init,
.close = cook_decode_close,
.decode = cook_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("COOK"),
};
diff --git a/libavcodec/dca.c b/libavcodec/dca.c
index 21a245585d..e3f87b92eb 100644
--- a/libavcodec/dca.c
+++ b/libavcodec/dca.c
@@ -261,6 +261,7 @@ static av_always_inline int get_bitalloc(GetBitContext *gb, BitAlloc *ba, int id
typedef struct {
AVCodecContext *avctx;
+ AVFrame frame;
/* Frame header */
int frame_type; ///< type of the current frame
int samples_deficit; ///< deficit sample count
@@ -1635,9 +1636,8 @@ static void dca_exss_parse_header(DCAContext *s)
* Main frame decoding function
* FIXME add arguments
*/
-static int dca_decode_frame(AVCodecContext * avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int dca_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
@@ -1645,9 +1645,8 @@ static int dca_decode_frame(AVCodecContext * avctx,
int lfe_samples;
int num_core_channels = 0;
int i, ret;
- float *samples_flt = data;
- int16_t *samples_s16 = data;
- int out_size;
+ float *samples_flt;
+ int16_t *samples_s16;
DCAContext *s = avctx->priv_data;
int channels;
int core_ss_end;
@@ -1839,11 +1838,14 @@ static int dca_decode_frame(AVCodecContext * avctx,
return AVERROR_PATCHWELCOME;
}
- out_size = 256 / 8 * s->sample_blocks * channels *
- av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size)
- return AVERROR(EINVAL);
- *data_size = out_size;
+ /* get output buffer */
+ s->frame.nb_samples = 256 * (s->sample_blocks / 8);
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+ samples_flt = (float *)s->frame.data[0];
+ samples_s16 = (int16_t *)s->frame.data[0];
/* filter to get final output */
for (i = 0; i < (s->sample_blocks / 8); i++) {
@@ -1877,6 +1879,9 @@ static int dca_decode_frame(AVCodecContext * avctx,
s->lfe_data[i] = s->lfe_data[i + lfe_samples];
}
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
+
return buf_size;
}
@@ -1919,6 +1924,9 @@ static av_cold int dca_decode_init(AVCodecContext * avctx)
avctx->channels = avctx->request_channels;
}
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
@@ -1947,7 +1955,7 @@ AVCodec ff_dca_decoder = {
.decode = dca_decode_frame,
.close = dca_decode_end,
.long_name = NULL_IF_CONFIG_SMALL("DCA (DTS Coherent Acoustics)"),
- .capabilities = CODEC_CAP_CHANNEL_CONF,
+ .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
.sample_fmts = (const enum AVSampleFormat[]) {
AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
},
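
On the other side of the new contract, an application no longer sizes an output buffer itself: it passes an AVFrame into the new avcodec_decode_audio4() entry point, checks the got_frame flag, and reads frame.nb_samples back. The sketch below is a caller written for illustration only (consume_packet and the FILE * sink are not part of this commit); it assumes an already opened decoder and an already demuxed packet. Since got_frame can legitimately stay 0 (cook discarding its first two packets, libspeex handling a flush packet), the caller must not treat that as an error; in general the frame's data belongs to the decoder and stays valid only until the next decode call, so copy it out if it is needed longer.

    #include <stdio.h>
    #include "libavcodec/avcodec.h"

    /* Decode one already-demuxed packet and append any produced samples to
     * out.  Returns the number of bytes of pkt consumed, or a negative
     * error code. */
    static int consume_packet(AVCodecContext *avctx, AVPacket *pkt, FILE *out)
    {
        AVFrame frame;
        int got_frame = 0;
        int len;

        avcodec_get_frame_defaults(&frame);
        len = avcodec_decode_audio4(avctx, &frame, &got_frame, pkt);
        if (len < 0)
            return len;

        if (got_frame) {
            /* the decoders in this commit use interleaved sample formats,
             * so every channel lives in frame.data[0] */
            int size = frame.nb_samples * avctx->channels *
                       av_get_bytes_per_sample(avctx->sample_fmt);
            fwrite(frame.data[0], 1, size, out);
        }

        return len;
    }
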
diff --git a/libavcodec/dpcm.c b/libavcodec/dpcm.c
index abb2019306..935f67caca 100644
--- a/libavcodec/dpcm.c
+++ b/libavcodec/dpcm.c
@@ -42,6 +42,7 @@
#include "bytestream.h"
typedef struct DPCMContext {
+ AVFrame frame;
int channels;
int16_t roq_square_array[256];
int sample[2]; ///< previous sample (for SOL_DPCM)
@@ -162,22 +163,25 @@ static av_cold int dpcm_decode_init(AVCodecContext *avctx)
else
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
-static int dpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
- AVPacket *avpkt)
+static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
const uint8_t *buf_end = buf + buf_size;
DPCMContext *s = avctx->priv_data;
- int out = 0;
+ int out = 0, ret;
int predictor[2];
int ch = 0;
int stereo = s->channels - 1;
- int16_t *output_samples = data;
+ int16_t *output_samples;
/* calculate output size */
switch(avctx->codec->id) {
@@ -197,15 +201,18 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
out = buf_size;
break;
}
- out *= av_get_bytes_per_sample(avctx->sample_fmt);
if (out <= 0) {
av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
return AVERROR(EINVAL);
}
- if (*data_size < out) {
- av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
- return AVERROR(EINVAL);
+
+ /* get output buffer */
+ s->frame.nb_samples = out / s->channels;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ output_samples = (int16_t *)s->frame.data[0];
switch(avctx->codec->id) {
@@ -307,7 +314,9 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
break;
}
- *data_size = out;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
+
return buf_size;
}
@@ -319,6 +328,7 @@ AVCodec ff_ ## name_ ## _decoder = { \
.priv_data_size = sizeof(DPCMContext), \
.init = dpcm_decode_init, \
.decode = dpcm_decode_frame, \
+ .capabilities = CODEC_CAP_DR1, \
.long_name = NULL_IF_CONFIG_SMALL(long_name_), \
}
diff --git a/libavcodec/dsicinav.c b/libavcodec/dsicinav.c
index cbf7c4a6f8..37d39f5405 100644
--- a/libavcodec/dsicinav.c
+++ b/libavcodec/dsicinav.c
@@ -44,6 +44,7 @@ typedef struct CinVideoContext {
} CinVideoContext;
typedef struct CinAudioContext {
+ AVFrame frame;
int initial_decode_frame;
int delta;
} CinAudioContext;
@@ -317,25 +318,28 @@ static av_cold int cinaudio_decode_init(AVCodecContext *avctx)
cin->delta = 0;
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+ avcodec_get_frame_defaults(&cin->frame);
+ avctx->coded_frame = &cin->frame;
+
return 0;
}
-static int cinaudio_decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int cinaudio_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
CinAudioContext *cin = avctx->priv_data;
const uint8_t *buf_end = buf + avpkt->size;
- int16_t *samples = data;
- int delta, out_size;
-
- out_size = (avpkt->size - cin->initial_decode_frame) *
- av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+ int16_t *samples;
+ int delta, ret;
+
+ /* get output buffer */
+ cin->frame.nb_samples = avpkt->size - cin->initial_decode_frame;
+ if ((ret = avctx->get_buffer(avctx, &cin->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ samples = (int16_t *)cin->frame.data[0];
delta = cin->delta;
if (cin->initial_decode_frame) {
@@ -351,7 +355,8 @@ static int cinaudio_decode_frame(AVCodecContext *avctx,
}
cin->delta = delta;
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = cin->frame;
return avpkt->size;
}
@@ -376,5 +381,6 @@ AVCodec ff_dsicinaudio_decoder = {
.priv_data_size = sizeof(CinAudioContext),
.init = cinaudio_decode_init,
.decode = cinaudio_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Delphine Software International CIN audio"),
};
diff --git a/libavcodec/flacdec.c b/libavcodec/flacdec.c
index 95cf2bccb4..58eb66def9 100644
--- a/libavcodec/flacdec.c
+++ b/libavcodec/flacdec.c
@@ -49,6 +49,7 @@ typedef struct FLACContext {
FLACSTREAMINFO
AVCodecContext *avctx; ///< parent AVCodecContext
+ AVFrame frame;
GetBitContext gb; ///< GetBitContext initialized to start at the current frame
int blocksize; ///< number of samples in the current frame
@@ -116,6 +117,9 @@ static av_cold int flac_decode_init(AVCodecContext *avctx)
allocate_buffers(s);
s->got_streaminfo = 1;
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
@@ -542,20 +546,18 @@ static int decode_frame(FLACContext *s)
return 0;
}
-static int flac_decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int flac_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
FLACContext *s = avctx->priv_data;
int i, j = 0, bytes_read = 0;
- int16_t *samples_16 = data;
- int32_t *samples_32 = data;
- int alloc_data_size= *data_size;
- int output_size;
+ int16_t *samples_16;
+ int32_t *samples_32;
+ int ret;
- *data_size=0;
+ *got_frame_ptr = 0;
if (s->max_framesize == 0) {
s->max_framesize =
@@ -586,15 +588,14 @@ static int flac_decode_frame(AVCodecContext *avctx,
}
bytes_read = (get_bits_count(&s->gb)+7)/8;
- /* check if allocated data size is large enough for output */
- output_size = s->blocksize * s->channels *
- av_get_bytes_per_sample(avctx->sample_fmt);
- if (output_size > alloc_data_size) {
- av_log(s->avctx, AV_LOG_ERROR, "output data size is larger than "
- "allocated data size\n");
- return -1;
+ /* get output buffer */
+ s->frame.nb_samples = s->blocksize;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
- *data_size = output_size;
+ samples_16 = (int16_t *)s->frame.data[0];
+ samples_32 = (int32_t *)s->frame.data[0];
#define DECORRELATE(left, right)\
assert(s->channels == 2);\
@@ -639,6 +640,9 @@ static int flac_decode_frame(AVCodecContext *avctx,
buf_size - bytes_read, buf_size);
}
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
+
return bytes_read;
}
@@ -662,5 +666,6 @@ AVCodec ff_flac_decoder = {
.init = flac_decode_init,
.close = flac_decode_close,
.decode = flac_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name= NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"),
};
diff --git a/libavcodec/g722.h b/libavcodec/g722.h
index 5edb6c8119..69e7a86e25 100644
--- a/libavcodec/g722.h
+++ b/libavcodec/g722.h
@@ -26,10 +26,12 @@
#define AVCODEC_G722_H
#include <stdint.h>
+#include "avcodec.h"
#define PREV_SAMPLES_BUF_SIZE 1024
typedef struct {
+ AVFrame frame;
int16_t prev_samples[PREV_SAMPLES_BUF_SIZE]; ///< memory of past decoded samples
int prev_samples_pos; ///< the number of values in prev_samples
diff --git a/libavcodec/g722dec.c b/libavcodec/g722dec.c
index 2be47159a4..652a1aa4ae 100644
--- a/libavcodec/g722dec.c
+++ b/libavcodec/g722dec.c
@@ -66,6 +66,9 @@ static av_cold int g722_decode_init(AVCodecContext * avctx)
c->band[1].scale_factor = 2;
c->prev_samples_pos = 22;
+ avcodec_get_frame_defaults(&c->frame);
+ avctx->coded_frame = &c->frame;
+
return 0;
}
@@ -81,20 +84,22 @@ static const int16_t *low_inv_quants[3] = { ff_g722_low_inv_quant6,
ff_g722_low_inv_quant4 };
static int g722_decode_frame(AVCodecContext *avctx, void *data,
- int *data_size, AVPacket *avpkt)
+ int *got_frame_ptr, AVPacket *avpkt)
{
G722Context *c = avctx->priv_data;
- int16_t *out_buf = data;
- int j, out_len;
+ int16_t *out_buf;
+ int j, ret;
const int skip = 8 - avctx->bits_per_coded_sample;
const int16_t *quantizer_table = low_inv_quants[skip];
GetBitContext gb;
- out_len = avpkt->size * 2 * av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_len) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ c->frame.nb_samples = avpkt->size * 2;
+ if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ out_buf = (int16_t *)c->frame.data[0];
init_get_bits(&gb, avpkt->data, avpkt->size * 8);
@@ -128,7 +133,10 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
c->prev_samples_pos = 22;
}
}
- *data_size = out_len;
+
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = c->frame;
+
return avpkt->size;
}
@@ -139,5 +147,6 @@ AVCodec ff_adpcm_g722_decoder = {
.priv_data_size = sizeof(G722Context),
.init = g722_decode_init,
.decode = g722_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
};
diff --git a/libavcodec/g726.c b/libavcodec/g726.c
index 37b0adf3b4..85711f854c 100644
--- a/libavcodec/g726.c
+++ b/libavcodec/g726.c
@@ -75,6 +75,7 @@ typedef struct G726Tables {
typedef struct G726Context {
AVClass *class;
+ AVFrame frame;
G726Tables tbls; /**< static tables needed for computation */
Float11 sr[2]; /**< prev. reconstructed samples */
@@ -427,26 +428,31 @@ static av_cold int g726_decode_init(AVCodecContext *avctx)
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+ avcodec_get_frame_defaults(&c->frame);
+ avctx->coded_frame = &c->frame;
+
return 0;
}
-static int g726_decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int g726_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
G726Context *c = avctx->priv_data;
- int16_t *samples = data;
+ int16_t *samples;
GetBitContext gb;
- int out_samples, out_size;
+ int out_samples, ret;
out_samples = buf_size * 8 / c->code_size;
- out_size = out_samples * av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+
+ /* get output buffer */
+ c->frame.nb_samples = out_samples;
+ if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ samples = (int16_t *)c->frame.data[0];
init_get_bits(&gb, buf, buf_size * 8);
@@ -456,7 +462,9 @@ static int g726_decode_frame(AVCodecContext *avctx,
if (get_bits_left(&gb) > 0)
av_log(avctx, AV_LOG_ERROR, "Frame invalidly split, missing parser?\n");
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = c->frame;
+
return buf_size;
}
@@ -474,6 +482,7 @@ AVCodec ff_adpcm_g726_decoder = {
.init = g726_decode_init,
.decode = g726_decode_frame,
.flush = g726_decode_flush,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("G.726 ADPCM"),
};
#endif
diff --git a/libavcodec/gsmdec.c b/libavcodec/gsmdec.c
index 1091745f4b..97b6fe8492 100644
--- a/libavcodec/gsmdec.c
+++ b/libavcodec/gsmdec.c
@@ -32,6 +32,8 @@
static av_cold int gsm_init(AVCodecContext *avctx)
{
+ GSMContext *s = avctx->priv_data;
+
avctx->channels = 1;
if (!avctx->sample_rate)
avctx->sample_rate = 8000;
@@ -47,30 +49,35 @@ static av_cold int gsm_init(AVCodecContext *avctx)
avctx->block_align = GSM_MS_BLOCK_SIZE;
}
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
static int gsm_decode_frame(AVCodecContext *avctx, void *data,
- int *data_size, AVPacket *avpkt)
+ int *got_frame_ptr, AVPacket *avpkt)
{
+ GSMContext *s = avctx->priv_data;
int res;
GetBitContext gb;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- int16_t *samples = data;
- int frame_bytes = avctx->frame_size *
- av_get_bytes_per_sample(avctx->sample_fmt);
-
- if (*data_size < frame_bytes) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
- }
+ int16_t *samples;
if (buf_size < avctx->block_align) {
av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
return AVERROR_INVALIDDATA;
}
+ /* get output buffer */
+ s->frame.nb_samples = avctx->frame_size;
+ if ((res = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return res;
+ }
+ samples = (int16_t *)s->frame.data[0];
+
switch (avctx->codec_id) {
case CODEC_ID_GSM:
init_get_bits(&gb, buf, buf_size * 8);
@@ -85,7 +92,10 @@ static int gsm_decode_frame(AVCodecContext *avctx, void *data,
if (res < 0)
return res;
}
- *data_size = frame_bytes;
+
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
+
return avctx->block_align;
}
@@ -103,6 +113,7 @@ AVCodec ff_gsm_decoder = {
.init = gsm_init,
.decode = gsm_decode_frame,
.flush = gsm_flush,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("GSM"),
};
@@ -114,5 +125,6 @@ AVCodec ff_gsm_ms_decoder = {
.init = gsm_init,
.decode = gsm_decode_frame,
.flush = gsm_flush,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("GSM Microsoft variant"),
};
diff --git a/libavcodec/gsmdec_data.h b/libavcodec/gsmdec_data.h
index b78daa7335..21789f725b 100644
--- a/libavcodec/gsmdec_data.h
+++ b/libavcodec/gsmdec_data.h
@@ -23,6 +23,7 @@
#define AVCODEC_GSMDEC_DATA
#include <stdint.h>
+#include "avcodec.h"
// input and output sizes in byte
#define GSM_BLOCK_SIZE 33
@@ -30,6 +31,7 @@
#define GSM_FRAME_SIZE 160
typedef struct {
+ AVFrame frame;
// Contains first 120 elements from the previous frame
// (used by long_term_synth according to the "lag"),
// then in the following 160 elements the current
diff --git a/libavcodec/imc.c b/libavcodec/imc.c
index 1f1db6c121..b55eee9b70 100644
--- a/libavcodec/imc.c
+++ b/libavcodec/imc.c
@@ -51,6 +51,8 @@
#define COEFFS 256
typedef struct {
+ AVFrame frame;
+
float old_floor[BANDS];
float flcoeffs1[BANDS];
float flcoeffs2[BANDS];
@@ -168,6 +170,10 @@ static av_cold int imc_decode_init(AVCodecContext * avctx)
dsputil_init(&q->dsp, avctx);
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
avctx->channel_layout = AV_CH_LAYOUT_MONO;
+
+ avcodec_get_frame_defaults(&q->frame);
+ avctx->coded_frame = &q->frame;
+
return 0;
}
@@ -649,9 +655,8 @@ static int imc_get_coeffs (IMCContext* q) {
return 0;
}
-static int imc_decode_frame(AVCodecContext * avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int imc_decode_frame(AVCodecContext * avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
@@ -659,7 +664,7 @@ static int imc_decode_frame(AVCodecContext * avctx,
IMCContext *q = avctx->priv_data;
int stream_format_code;
- int imc_hdr, i, j, out_size, ret;
+ int imc_hdr, i, j, ret;
int flag;
int bits, summer;
int counter, bitscount;
@@ -670,15 +675,16 @@ static int imc_decode_frame(AVCodecContext * avctx,
return AVERROR_INVALIDDATA;
}
- out_size = COEFFS * av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ q->frame.nb_samples = COEFFS;
+ if ((ret = avctx->get_buffer(avctx, &q->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ q->out_samples = (float *)q->frame.data[0];
q->dsp.bswap16_buf(buf16, (const uint16_t*)buf, IMC_BLOCK_SIZE / 2);
- q->out_samples = data;
init_get_bits(&q->gb, (const uint8_t*)buf16, IMC_BLOCK_SIZE * 8);
/* Check the frame header */
@@ -823,7 +829,8 @@ static int imc_decode_frame(AVCodecContext * avctx,
imc_imdct256(q);
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = q->frame;
return IMC_BLOCK_SIZE;
}
@@ -834,6 +841,7 @@ static av_cold int imc_decode_close(AVCodecContext * avctx)
IMCContext *q = avctx->priv_data;
ff_fft_end(&q->fft);
+
return 0;
}
@@ -846,5 +854,6 @@ AVCodec ff_imc_decoder = {
.init = imc_decode_init,
.close = imc_decode_close,
.decode = imc_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("IMC (Intel Music Coder)"),
};
diff --git a/libavcodec/internal.h b/libavcodec/internal.h
index 18e851c48e..fb011c7a3a 100644
--- a/libavcodec/internal.h
+++ b/libavcodec/internal.h
@@ -31,12 +31,15 @@
typedef struct InternalBuffer {
int last_pic_num;
- uint8_t *base[4];
- uint8_t *data[4];
- int linesize[4];
+ uint8_t *base[AV_NUM_DATA_POINTERS];
+ uint8_t *data[AV_NUM_DATA_POINTERS];
+ int linesize[AV_NUM_DATA_POINTERS];
int width;
int height;
enum PixelFormat pix_fmt;
+ uint8_t **extended_data;
+ int audio_data_size;
+ int nb_channels;
} InternalBuffer;
typedef struct AVCodecInternal {
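
internal.h is the only non-decoder change in this part of the diff: the InternalBuffer plane arrays grow to AV_NUM_DATA_POINTERS entries, and the struct records how large its audio allocation is and for how many channels it was made, which is what lets the default get_buffer() hand the same allocation back to a decoder on the next call. The matching utils.c logic is not visible in this hunk, so the check below is only a guess at the kind of test those fields enable; audio_buf_reusable() and its exact conditions are assumptions, not the committed implementation.

    #include "avcodec.h"
    #include "internal.h"

    /* Hypothetical reuse test for a cached audio InternalBuffer; an
     * illustration of what audio_data_size and nb_channels make possible,
     * not the code added by this commit. */
    static int audio_buf_reusable(const InternalBuffer *buf,
                                  const AVCodecContext *avctx,
                                  int needed_size)
    {
        return buf->data[0]                        &&
               buf->audio_data_size >= needed_size &&
               buf->nb_channels     == avctx->channels;
    }
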
diff --git a/libavcodec/libgsm.c b/libavcodec/libgsm.c
index c02594d0d6..22629c657c 100644
--- a/libavcodec/libgsm.c
+++ b/libavcodec/libgsm.c
@@ -124,7 +124,14 @@ AVCodec ff_libgsm_ms_encoder = {
.long_name = NULL_IF_CONFIG_SMALL("libgsm GSM Microsoft variant"),
};
+typedef struct LibGSMDecodeContext {
+ AVFrame frame;
+ struct gsm_state *state;
+} LibGSMDecodeContext;
+
static av_cold int libgsm_decode_init(AVCodecContext *avctx) {
+ LibGSMDecodeContext *s = avctx->priv_data;
+
if (avctx->channels > 1) {
av_log(avctx, AV_LOG_ERROR, "Mono required for GSM, got %d channels\n",
avctx->channels);
@@ -139,7 +146,7 @@ static av_cold int libgsm_decode_init(AVCodecContext *avctx) {
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
- avctx->priv_data = gsm_create();
+ s->state = gsm_create();
switch(avctx->codec_id) {
case CODEC_ID_GSM:
@@ -154,59 +161,72 @@ static av_cold int libgsm_decode_init(AVCodecContext *avctx) {
}
}
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
static av_cold int libgsm_decode_close(AVCodecContext *avctx) {
- gsm_destroy(avctx->priv_data);
- avctx->priv_data = NULL;
+ LibGSMDecodeContext *s = avctx->priv_data;
+
+ gsm_destroy(s->state);
+ s->state = NULL;
return 0;
}
-static int libgsm_decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt) {
+static int libgsm_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
+{
int i, ret;
- struct gsm_state *s = avctx->priv_data;
+ LibGSMDecodeContext *s = avctx->priv_data;
uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- int16_t *samples = data;
- int out_size = avctx->frame_size * av_get_bytes_per_sample(avctx->sample_fmt);
-
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
- }
+ int16_t *samples;
if (buf_size < avctx->block_align) {
av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
return AVERROR_INVALIDDATA;
}
+ /* get output buffer */
+ s->frame.nb_samples = avctx->frame_size;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+ samples = (int16_t *)s->frame.data[0];
+
for (i = 0; i < avctx->frame_size / GSM_FRAME_SIZE; i++) {
- if ((ret = gsm_decode(s, buf, samples)) < 0)
+ if ((ret = gsm_decode(s->state, buf, samples)) < 0)
return -1;
buf += GSM_BLOCK_SIZE;
samples += GSM_FRAME_SIZE;
}
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
+
return avctx->block_align;
}
static void libgsm_flush(AVCodecContext *avctx) {
- gsm_destroy(avctx->priv_data);
- avctx->priv_data = gsm_create();
+ LibGSMDecodeContext *s = avctx->priv_data;
+
+ gsm_destroy(s->state);
+ s->state = gsm_create();
}
AVCodec ff_libgsm_decoder = {
.name = "libgsm",
.type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_GSM,
+ .priv_data_size = sizeof(LibGSMDecodeContext),
.init = libgsm_decode_init,
.close = libgsm_decode_close,
.decode = libgsm_decode_frame,
.flush = libgsm_flush,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("libgsm GSM"),
};
@@ -214,9 +234,11 @@ AVCodec ff_libgsm_ms_decoder = {
.name = "libgsm_ms",
.type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_GSM_MS,
+ .priv_data_size = sizeof(LibGSMDecodeContext),
.init = libgsm_decode_init,
.close = libgsm_decode_close,
.decode = libgsm_decode_frame,
.flush = libgsm_flush,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("libgsm GSM Microsoft variant"),
};
diff --git a/libavcodec/libopencore-amr.c b/libavcodec/libopencore-amr.c
index a705975aa9..ded92179d3 100644
--- a/libavcodec/libopencore-amr.c
+++ b/libavcodec/libopencore-amr.c
@@ -79,6 +79,7 @@ static int get_bitrate_mode(int bitrate, void *log_ctx)
typedef struct AMRContext {
AVClass *av_class;
+ AVFrame frame;
void *dec_state;
void *enc_state;
int enc_bitrate;
@@ -112,6 +113,9 @@ static av_cold int amr_nb_decode_init(AVCodecContext *avctx)
return AVERROR(ENOSYS);
}
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
@@ -120,26 +124,28 @@ static av_cold int amr_nb_decode_close(AVCodecContext *avctx)
AMRContext *s = avctx->priv_data;
Decoder_Interface_exit(s->dec_state);
+
return 0;
}
static int amr_nb_decode_frame(AVCodecContext *avctx, void *data,
- int *data_size, AVPacket *avpkt)
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
AMRContext *s = avctx->priv_data;
static const uint8_t block_size[16] = { 12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0 };
enum Mode dec_mode;
- int packet_size, out_size;
+ int packet_size, ret;
av_dlog(avctx, "amr_decode_frame buf=%p buf_size=%d frame_count=%d!!\n",
buf, buf_size, avctx->frame_number);
- out_size = 160 * av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ s->frame.nb_samples = 160;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
dec_mode = (buf[0] >> 3) & 0x000F;
@@ -154,8 +160,10 @@ static int amr_nb_decode_frame(AVCodecContext *avctx, void *data,
av_dlog(avctx, "packet_size=%d buf= 0x%X %X %X %X\n",
packet_size, buf[0], buf[1], buf[2], buf[3]);
/* call decoder */
- Decoder_Interface_Decode(s->dec_state, buf, data, 0);
- *data_size = out_size;
+ Decoder_Interface_Decode(s->dec_state, buf, (short *)s->frame.data[0], 0);
+
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
return packet_size;
}
@@ -168,6 +176,7 @@ AVCodec ff_libopencore_amrnb_decoder = {
.init = amr_nb_decode_init,
.close = amr_nb_decode_close,
.decode = amr_nb_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Narrow-Band"),
};
@@ -251,6 +260,7 @@ AVCodec ff_libopencore_amrnb_encoder = {
#include <opencore-amrwb/if_rom.h>
typedef struct AMRWBContext {
+ AVFrame frame;
void *state;
} AMRWBContext;
@@ -267,23 +277,27 @@ static av_cold int amr_wb_decode_init(AVCodecContext *avctx)
return AVERROR(ENOSYS);
}
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
static int amr_wb_decode_frame(AVCodecContext *avctx, void *data,
- int *data_size, AVPacket *avpkt)
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
AMRWBContext *s = avctx->priv_data;
- int mode;
- int packet_size, out_size;
+ int mode, ret;
+ int packet_size;
static const uint8_t block_size[16] = {18, 24, 33, 37, 41, 47, 51, 59, 61, 6, 6, 0, 0, 0, 1, 1};
- out_size = 320 * av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ s->frame.nb_samples = 320;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
mode = (buf[0] >> 3) & 0x000F;
@@ -295,8 +309,11 @@ static int amr_wb_decode_frame(AVCodecContext *avctx, void *data,
return AVERROR_INVALIDDATA;
}
- D_IF_decode(s->state, buf, data, _good_frame);
- *data_size = out_size;
+ D_IF_decode(s->state, buf, (short *)s->frame.data[0], _good_frame);
+
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
+
return packet_size;
}
@@ -316,6 +333,7 @@ AVCodec ff_libopencore_amrwb_decoder = {
.init = amr_wb_decode_init,
.close = amr_wb_decode_close,
.decode = amr_wb_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Wide-Band"),
};
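
The fixed 160 and 320 sample counts in the OpenCORE wrappers are simply the AMR frame duration spelled out: both AMR-NB and AMR-WB code 20 ms per frame, at 8 kHz and 16 kHz respectively. As a constants-only aside (not code from the commit):

    /* 20 ms frames: 0.020 s *  8000 Hz = 160 samples (AMR-NB)
     *               0.020 s * 16000 Hz = 320 samples (AMR-WB) */
    enum {
        AMRNB_FRAME_SAMPLES =  8000 / 50,   /* 160 */
        AMRWB_FRAME_SAMPLES = 16000 / 50,   /* 320 */
    };
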
diff --git a/libavcodec/libspeexdec.c b/libavcodec/libspeexdec.c
index 8bbae6c4f3..eba2f16949 100644
--- a/libavcodec/libspeexdec.c
+++ b/libavcodec/libspeexdec.c
@@ -25,6 +25,7 @@
#include "avcodec.h"
typedef struct {
+ AVFrame frame;
SpeexBits bits;
SpeexStereoState stereo;
void *dec_state;
@@ -89,26 +90,29 @@ static av_cold int libspeex_decode_init(AVCodecContext *avctx)
s->stereo = (SpeexStereoState)SPEEX_STEREO_STATE_INIT;
speex_decoder_ctl(s->dec_state, SPEEX_SET_HANDLER, &callback);
}
+
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
-static int libspeex_decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int libspeex_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
LibSpeexContext *s = avctx->priv_data;
- int16_t *output = data;
- int out_size, ret, consumed = 0;
-
- /* check output buffer size */
- out_size = s->frame_size * avctx->channels *
- av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+ int16_t *output;
+ int ret, consumed = 0;
+
+ /* get output buffer */
+ s->frame.nb_samples = s->frame_size;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ output = (int16_t *)s->frame.data[0];
/* if there is not enough data left for the smallest possible frame,
reset the libspeex buffer using the current packet, otherwise ignore
@@ -116,7 +120,7 @@ static int libspeex_decode_frame(AVCodecContext *avctx,
if (speex_bits_remaining(&s->bits) < 43) {
/* check for flush packet */
if (!buf || !buf_size) {
- *data_size = 0;
+ *got_frame_ptr = 0;
return buf_size;
}
/* set new buffer */
@@ -133,7 +137,9 @@ static int libspeex_decode_frame(AVCodecContext *avctx,
if (avctx->channels == 2)
speex_decode_stereo_int(output, s->frame_size, &s->stereo);
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
+
return consumed;
}
@@ -163,6 +169,6 @@ AVCodec ff_libspeex_decoder = {
.close = libspeex_decode_close,
.decode = libspeex_decode_frame,
.flush = libspeex_decode_flush,
- .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY,
+ .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("libspeex Speex"),
};
diff --git a/libavcodec/mace.c b/libavcodec/mace.c
index a55a041696..792d71d072 100644
--- a/libavcodec/mace.c
+++ b/libavcodec/mace.c
@@ -153,6 +153,7 @@ typedef struct ChannelData {
} ChannelData;
typedef struct MACEContext {
+ AVFrame frame;
ChannelData chd[2];
} MACEContext;
@@ -228,30 +229,35 @@ static void chomp6(ChannelData *chd, int16_t *output, uint8_t val,
static av_cold int mace_decode_init(AVCodecContext * avctx)
{
+ MACEContext *ctx = avctx->priv_data;
+
if (avctx->channels > 2)
return -1;
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+
+ avcodec_get_frame_defaults(&ctx->frame);
+ avctx->coded_frame = &ctx->frame;
+
return 0;
}
-static int mace_decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int mace_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- int16_t *samples = data;
+ int16_t *samples;
MACEContext *ctx = avctx->priv_data;
- int i, j, k, l;
- int out_size;
+ int i, j, k, l, ret;
int is_mace3 = (avctx->codec_id == CODEC_ID_MACE3);
- out_size = 3 * (buf_size << (1 - is_mace3)) *
- av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ ctx->frame.nb_samples = 3 * (buf_size << (1 - is_mace3)) / avctx->channels;
+ if ((ret = avctx->get_buffer(avctx, &ctx->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ samples = (int16_t *)ctx->frame.data[0];
for(i = 0; i < avctx->channels; i++) {
int16_t *output = samples + i;
@@ -277,7 +283,8 @@ static int mace_decode_frame(AVCodecContext *avctx,
}
}
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = ctx->frame;
return buf_size;
}
@@ -289,6 +296,7 @@ AVCodec ff_mace3_decoder = {
.priv_data_size = sizeof(MACEContext),
.init = mace_decode_init,
.decode = mace_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 3:1"),
};
@@ -299,6 +307,7 @@ AVCodec ff_mace6_decoder = {
.priv_data_size = sizeof(MACEContext),
.init = mace_decode_init,
.decode = mace_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 6:1"),
};
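
The mace nb_samples expression reads more easily with numbers plugged in: each input byte expands to 3 (MACE3) or 6 (MACE6) 16-bit samples, the packet carries all channels, and the per-channel count is the expanded total divided by the channel count. Illustrative values only:

    /* stereo MACE6, 128-byte packet:
     *   3 * (128 << (1 - 0)) / 2 = 3 * 256 / 2 = 384 samples per channel
     * mono MACE3, 128-byte packet:
     *   3 * (128 << (1 - 1)) / 1 = 3 * 128     = 384 samples per channel */
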
diff --git a/libavcodec/mlpdec.c b/libavcodec/mlpdec.c
index cefd0b5614..4dc2d9f3eb 100644
--- a/libavcodec/mlpdec.c
+++ b/libavcodec/mlpdec.c
@@ -120,6 +120,7 @@ typedef struct SubStream {
typedef struct MLPDecodeContext {
AVCodecContext *avctx;
+ AVFrame frame;
//! Current access unit being read has a major sync.
int is_major_sync_unit;
@@ -239,6 +240,9 @@ static av_cold int mlp_decode_init(AVCodecContext *avctx)
m->substream[substr].lossless_check_data = 0xffffffff;
dsputil_init(&m->dsp, avctx);
+ avcodec_get_frame_defaults(&m->frame);
+ avctx->coded_frame = &m->frame;
+
return 0;
}
@@ -905,13 +909,14 @@ static void rematrix_channels(MLPDecodeContext *m, unsigned int substr)
/** Write the audio data into the output buffer. */
static int output_data(MLPDecodeContext *m, unsigned int substr,
- uint8_t *data, unsigned int *data_size)
+ void *data, int *got_frame_ptr)
{
+ AVCodecContext *avctx = m->avctx;
SubStream *s = &m->substream[substr];
unsigned int i, out_ch = 0;
- int out_size;
- int32_t *data_32 = (int32_t*) data;
- int16_t *data_16 = (int16_t*) data;
+ int32_t *data_32;
+ int16_t *data_16;
+ int ret;
int is32 = (m->avctx->sample_fmt == AV_SAMPLE_FMT_S32);
if (m->avctx->channels != s->max_matrix_channel + 1) {
@@ -919,11 +924,14 @@ static int output_data(MLPDecodeContext *m, unsigned int substr,
return AVERROR_INVALIDDATA;
}
- out_size = s->blockpos * m->avctx->channels *
- av_get_bytes_per_sample(m->avctx->sample_fmt);
-
- if (*data_size < out_size)
- return AVERROR(EINVAL);
+ /* get output buffer */
+ m->frame.nb_samples = s->blockpos;
+ if ((ret = avctx->get_buffer(avctx, &m->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+ data_32 = (int32_t *)m->frame.data[0];
+ data_16 = (int16_t *)m->frame.data[0];
for (i = 0; i < s->blockpos; i++) {
for (out_ch = 0; out_ch <= s->max_matrix_channel; out_ch++) {
@@ -936,7 +944,8 @@ static int output_data(MLPDecodeContext *m, unsigned int substr,
}
}
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = m->frame;
return 0;
}
@@ -945,8 +954,8 @@ static int output_data(MLPDecodeContext *m, unsigned int substr,
* @return negative on error, 0 if not enough data is present in the input stream,
* otherwise the number of bytes consumed. */
-static int read_access_unit(AVCodecContext *avctx, void* data, int *data_size,
- AVPacket *avpkt)
+static int read_access_unit(AVCodecContext *avctx, void* data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
@@ -982,7 +991,7 @@ static int read_access_unit(AVCodecContext *avctx, void* data, int *data_size,
if (!m->params_valid) {
av_log(m->avctx, AV_LOG_WARNING,
"Stream parameters not seen; skipping frame.\n");
- *data_size = 0;
+ *got_frame_ptr = 0;
return length;
}
@@ -1127,7 +1136,7 @@ next_substr:
rematrix_channels(m, m->max_decoded_substream);
- if ((ret = output_data(m, m->max_decoded_substream, data, data_size)) < 0)
+ if ((ret = output_data(m, m->max_decoded_substream, data, got_frame_ptr)) < 0)
return ret;
return length;
@@ -1148,6 +1157,7 @@ AVCodec ff_mlp_decoder = {
.priv_data_size = sizeof(MLPDecodeContext),
.init = mlp_decode_init,
.decode = read_access_unit,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("MLP (Meridian Lossless Packing)"),
};
@@ -1159,6 +1169,7 @@ AVCodec ff_truehd_decoder = {
.priv_data_size = sizeof(MLPDecodeContext),
.init = mlp_decode_init,
.decode = read_access_unit,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("TrueHD"),
};
#endif /* CONFIG_TRUEHD_DECODER */
diff --git a/libavcodec/mpc.h b/libavcodec/mpc.h
index 6d0f7b45bb..1a6e7943af 100644
--- a/libavcodec/mpc.h
+++ b/libavcodec/mpc.h
@@ -50,6 +50,7 @@ typedef struct {
}Band;
typedef struct {
+ AVFrame frame;
DSPContext dsp;
MPADSPContext mpadsp;
GetBitContext gb;
diff --git a/libavcodec/mpc7.c b/libavcodec/mpc7.c
index 576400d720..290ecfb385 100644
--- a/libavcodec/mpc7.c
+++ b/libavcodec/mpc7.c
@@ -136,6 +136,10 @@ static av_cold int mpc7_decode_init(AVCodecContext * avctx)
}
}
vlc_initialized = 1;
+
+ avcodec_get_frame_defaults(&c->frame);
+ avctx->coded_frame = &c->frame;
+
return 0;
}
@@ -192,9 +196,8 @@ static int get_scale_idx(GetBitContext *gb, int ref)
return ref + t;
}
-static int mpc7_decode_frame(AVCodecContext * avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int mpc7_decode_frame(AVCodecContext * avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
@@ -204,7 +207,7 @@ static int mpc7_decode_frame(AVCodecContext * avctx,
int i, ch;
int mb = -1;
Band *bands = c->bands;
- int off, out_size;
+ int off, ret;
int bits_used, bits_avail;
memset(bands, 0, sizeof(*bands) * (c->maxbands + 1));
@@ -213,10 +216,11 @@ static int mpc7_decode_frame(AVCodecContext * avctx,
return AVERROR(EINVAL);
}
- out_size = (buf[1] ? c->lastframelen : MPC_FRAME_SIZE) * 4;
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ c->frame.nb_samples = buf[1] ? c->lastframelen : MPC_FRAME_SIZE;
+ if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
bits = av_malloc(((buf_size - 1) & ~3) + FF_INPUT_BUFFER_PADDING_SIZE);
@@ -276,7 +280,7 @@ static int mpc7_decode_frame(AVCodecContext * avctx,
for(ch = 0; ch < 2; ch++)
idx_to_quant(c, &gb, bands[i].res[ch], c->Q[ch] + off);
- ff_mpc_dequantize_and_synth(c, mb, data, 2);
+ ff_mpc_dequantize_and_synth(c, mb, c->frame.data[0], 2);
av_free(bits);
@@ -288,10 +292,12 @@ static int mpc7_decode_frame(AVCodecContext * avctx,
}
if(c->frames_to_skip){
c->frames_to_skip--;
- *data_size = 0;
+ *got_frame_ptr = 0;
return buf_size;
}
- *data_size = out_size;
+
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = c->frame;
return buf_size;
}
@@ -312,5 +318,6 @@ AVCodec ff_mpc7_decoder = {
.init = mpc7_decode_init,
.decode = mpc7_decode_frame,
.flush = mpc7_decode_flush,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Musepack SV7"),
};
diff --git a/libavcodec/mpc8.c b/libavcodec/mpc8.c
index b38664215b..b97f3ed62c 100644
--- a/libavcodec/mpc8.c
+++ b/libavcodec/mpc8.c
@@ -228,12 +228,15 @@ static av_cold int mpc8_decode_init(AVCodecContext * avctx)
&mpc8_q8_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
}
vlc_initialized = 1;
+
+ avcodec_get_frame_defaults(&c->frame);
+ avctx->coded_frame = &c->frame;
+
return 0;
}
-static int mpc8_decode_frame(AVCodecContext * avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int mpc8_decode_frame(AVCodecContext * avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
@@ -241,14 +244,15 @@ static int mpc8_decode_frame(AVCodecContext * avctx,
GetBitContext gb2, *gb = &gb2;
int i, j, k, ch, cnt, res, t;
Band *bands = c->bands;
- int off, out_size;
+ int off;
int maxband, keyframe;
int last[2];
- out_size = MPC_FRAME_SIZE * 2 * avctx->channels;
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ c->frame.nb_samples = MPC_FRAME_SIZE;
+ if ((res = avctx->get_buffer(avctx, &c->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return res;
}
keyframe = c->cur_frame == 0;
@@ -401,14 +405,16 @@ static int mpc8_decode_frame(AVCodecContext * avctx,
}
}
- ff_mpc_dequantize_and_synth(c, maxband, data, avctx->channels);
+ ff_mpc_dequantize_and_synth(c, maxband, c->frame.data[0], avctx->channels);
c->cur_frame++;
c->last_bits_used = get_bits_count(gb);
if(c->cur_frame >= c->frames)
c->cur_frame = 0;
- *data_size = out_size;
+
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = c->frame;
return c->cur_frame ? c->last_bits_used >> 3 : buf_size;
}
@@ -420,5 +426,6 @@ AVCodec ff_mpc8_decoder = {
.priv_data_size = sizeof(MPCContext),
.init = mpc8_decode_init,
.decode = mpc8_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Musepack SV8"),
};
diff --git a/libavcodec/mpegaudiodec.c b/libavcodec/mpegaudiodec.c
index ffd369021c..c819bc546f 100644
--- a/libavcodec/mpegaudiodec.c
+++ b/libavcodec/mpegaudiodec.c
@@ -79,6 +79,7 @@ typedef struct MPADecodeContext {
int err_recognition;
AVCodecContext* avctx;
MPADSPContext mpadsp;
+ AVFrame frame;
} MPADecodeContext;
#if CONFIG_FLOAT
@@ -474,6 +475,10 @@ static av_cold int decode_init(AVCodecContext * avctx)
if (avctx->codec_id == CODEC_ID_MP3ADU)
s->adu_mode = 1;
+
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
@@ -1695,7 +1700,7 @@ static int mp_decode_layer3(MPADecodeContext *s)
static int mp_decode_frame(MPADecodeContext *s, OUT_INT *samples,
const uint8_t *buf, int buf_size)
{
- int i, nb_frames, ch;
+ int i, nb_frames, ch, ret;
OUT_INT *samples_ptr;
init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE) * 8);
@@ -1743,8 +1748,16 @@ static int mp_decode_frame(MPADecodeContext *s, OUT_INT *samples,
assert(i <= buf_size - HEADER_SIZE && i >= 0);
memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i);
s->last_buf_size += i;
+ }
- break;
+ /* get output buffer */
+ if (!samples) {
+ s->frame.nb_samples = s->avctx->frame_size;
+ if ((ret = s->avctx->get_buffer(s->avctx, &s->frame)) < 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+ samples = (OUT_INT *)s->frame.data[0];
}
/* apply the synthesis filter */
@@ -1764,7 +1777,7 @@ static int mp_decode_frame(MPADecodeContext *s, OUT_INT *samples,
return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels;
}
-static int decode_frame(AVCodecContext * avctx, void *data, int *data_size,
+static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
@@ -1772,7 +1785,6 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size,
MPADecodeContext *s = avctx->priv_data;
uint32_t header;
int out_size;
- OUT_INT *out_samples = data;
if (buf_size < HEADER_SIZE)
return AVERROR_INVALIDDATA;
@@ -1795,10 +1807,6 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size,
avctx->bit_rate = s->bit_rate;
avctx->sub_id = s->layer;
- if (*data_size < avctx->frame_size * avctx->channels * sizeof(OUT_INT))
- return AVERROR(EINVAL);
- *data_size = 0;
-
if (s->frame_size <= 0 || s->frame_size > buf_size) {
av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
return AVERROR_INVALIDDATA;
@@ -1807,9 +1815,10 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size,
buf_size= s->frame_size;
}
- out_size = mp_decode_frame(s, out_samples, buf, buf_size);
+ out_size = mp_decode_frame(s, NULL, buf, buf_size);
if (out_size >= 0) {
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
avctx->sample_rate = s->sample_rate;
//FIXME maybe move the other codec info stuff from above here too
} else {
@@ -1818,6 +1827,7 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size,
If there is more data in the packet, just consume the bad frame
instead of returning an error, which would discard the whole
packet. */
+ *got_frame_ptr = 0;
if (buf_size == avpkt->size)
return out_size;
}
@@ -1833,15 +1843,14 @@ static void flush(AVCodecContext *avctx)
}
#if CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER
-static int decode_frame_adu(AVCodecContext *avctx, void *data, int *data_size,
- AVPacket *avpkt)
+static int decode_frame_adu(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
MPADecodeContext *s = avctx->priv_data;
uint32_t header;
int len, out_size;
- OUT_INT *out_samples = data;
len = buf_size;
@@ -1871,9 +1880,6 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data, int *data_size,
avctx->bit_rate = s->bit_rate;
avctx->sub_id = s->layer;
- if (*data_size < avctx->frame_size * avctx->channels * sizeof(OUT_INT))
- return AVERROR(EINVAL);
-
s->frame_size = len;
#if FF_API_PARSE_FRAME
@@ -1881,9 +1887,11 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data, int *data_size,
out_size = buf_size;
else
#endif
- out_size = mp_decode_frame(s, out_samples, buf, buf_size);
+ out_size = mp_decode_frame(s, NULL, buf, buf_size);
+
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
- *data_size = out_size;
return buf_size;
}
#endif /* CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER */
@@ -1894,6 +1902,7 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data, int *data_size,
* Context for MP3On4 decoder
*/
typedef struct MP3On4DecodeContext {
+ AVFrame *frame;
int frames; ///< number of mp3 frames per block (number of mp3 decoder instances)
int syncword; ///< syncword patch
const uint8_t *coff; ///< channel offsets in output buffer
@@ -1984,6 +1993,7 @@ static int decode_init_mp3on4(AVCodecContext * avctx)
// Put decoder context in place to make init_decode() happy
avctx->priv_data = s->mp3decctx[0];
decode_init(avctx);
+ s->frame = avctx->coded_frame;
// Restore mp3on4 context pointer
avctx->priv_data = s;
s->mp3decctx[0]->adu_mode = 1; // Set adu mode
@@ -2028,9 +2038,8 @@ static void flush_mp3on4(AVCodecContext *avctx)
}
-static int decode_frame_mp3on4(AVCodecContext * avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int decode_frame_mp3on4(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
@@ -2038,14 +2047,17 @@ static int decode_frame_mp3on4(AVCodecContext * avctx,
MPADecodeContext *m;
int fsize, len = buf_size, out_size = 0;
uint32_t header;
- OUT_INT *out_samples = data;
+ OUT_INT *out_samples;
OUT_INT *outptr, *bp;
- int fr, j, n, ch;
+ int fr, j, n, ch, ret;
- if (*data_size < MPA_FRAME_SIZE * avctx->channels * sizeof(OUT_INT)) {
- av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ s->frame->nb_samples = MPA_FRAME_SIZE;
+ if ((ret = avctx->get_buffer(avctx, s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ out_samples = (OUT_INT *)s->frame->data[0];
// Discard too short frames
if (buf_size < HEADER_SIZE)
@@ -2104,7 +2116,10 @@ static int decode_frame_mp3on4(AVCodecContext * avctx,
/* update codec info */
avctx->sample_rate = s->mp3decctx[0]->sample_rate;
- *data_size = out_size;
+ s->frame->nb_samples = out_size / (avctx->channels * sizeof(OUT_INT));
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = *s->frame;
+
return buf_size;
}
#endif /* CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER */
@@ -2119,7 +2134,9 @@ AVCodec ff_mp1_decoder = {
.init = decode_init,
.decode = decode_frame,
#if FF_API_PARSE_FRAME
- .capabilities = CODEC_CAP_PARSE_ONLY,
+ .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
+#else
+ .capabilities = CODEC_CAP_DR1,
#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"),
@@ -2134,7 +2151,9 @@ AVCodec ff_mp2_decoder = {
.init = decode_init,
.decode = decode_frame,
#if FF_API_PARSE_FRAME
- .capabilities = CODEC_CAP_PARSE_ONLY,
+ .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
+#else
+ .capabilities = CODEC_CAP_DR1,
#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
@@ -2149,7 +2168,9 @@ AVCodec ff_mp3_decoder = {
.init = decode_init,
.decode = decode_frame,
#if FF_API_PARSE_FRAME
- .capabilities = CODEC_CAP_PARSE_ONLY,
+ .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
+#else
+ .capabilities = CODEC_CAP_DR1,
#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
@@ -2164,7 +2185,9 @@ AVCodec ff_mp3adu_decoder = {
.init = decode_init,
.decode = decode_frame_adu,
#if FF_API_PARSE_FRAME
- .capabilities = CODEC_CAP_PARSE_ONLY,
+ .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
+#else
+ .capabilities = CODEC_CAP_DR1,
#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
@@ -2179,6 +2202,7 @@ AVCodec ff_mp3on4_decoder = {
.init = decode_init_mp3on4,
.close = decode_close_mp3on4,
.decode = decode_frame_mp3on4,
+ .capabilities = CODEC_CAP_DR1,
.flush = flush_mp3on4,
.long_name = NULL_IF_CONFIG_SMALL("MP3onMP4"),
};
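
Two details in the mpegaudiodec hunks go beyond the basic pattern: mp_decode_frame() now allocates the frame lazily, only when called with samples == NULL (the plain MP1/MP2/MP3 paths), and the MP3on4 wrapper borrows the first sub-decoder's frame and converts the byte count returned by mp_decode_frame() back into a per-channel sample count. That conversion is just the usual size computation run in reverse; with the 16-bit integer decoder as an example:

    /* mp_decode_frame() returns nb_frames * 32 * sizeof(OUT_INT) * channels
     * bytes.  Stereo MPEG-1 layer III, 16-bit output (nb_frames = 36):
     *   out_size   = 36 * 32 * 2 * 2 = 4608 bytes
     *   nb_samples = out_size / (channels * sizeof(OUT_INT))
     *              = 4608 / (2 * 2)  = 1152 samples per channel */
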
diff --git a/libavcodec/mpegaudiodec_float.c b/libavcodec/mpegaudiodec_float.c
index 9300de29b9..02c83afb4c 100644
--- a/libavcodec/mpegaudiodec_float.c
+++ b/libavcodec/mpegaudiodec_float.c
@@ -31,7 +31,9 @@ AVCodec ff_mp1float_decoder = {
.init = decode_init,
.decode = decode_frame,
#if FF_API_PARSE_FRAME
- .capabilities = CODEC_CAP_PARSE_ONLY,
+ .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
+#else
+ .capabilities = CODEC_CAP_DR1,
#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"),
@@ -46,7 +48,9 @@ AVCodec ff_mp2float_decoder = {
.init = decode_init,
.decode = decode_frame,
#if FF_API_PARSE_FRAME
- .capabilities = CODEC_CAP_PARSE_ONLY,
+ .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
+#else
+ .capabilities = CODEC_CAP_DR1,
#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
@@ -61,7 +65,9 @@ AVCodec ff_mp3float_decoder = {
.init = decode_init,
.decode = decode_frame,
#if FF_API_PARSE_FRAME
- .capabilities = CODEC_CAP_PARSE_ONLY,
+ .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
+#else
+ .capabilities = CODEC_CAP_DR1,
#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
@@ -76,7 +82,9 @@ AVCodec ff_mp3adufloat_decoder = {
.init = decode_init,
.decode = decode_frame_adu,
#if FF_API_PARSE_FRAME
- .capabilities = CODEC_CAP_PARSE_ONLY,
+ .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
+#else
+ .capabilities = CODEC_CAP_DR1,
#endif
.flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
@@ -91,6 +99,7 @@ AVCodec ff_mp3on4float_decoder = {
.init = decode_init_mp3on4,
.close = decode_close_mp3on4,
.decode = decode_frame_mp3on4,
+ .capabilities = CODEC_CAP_DR1,
.flush = flush_mp3on4,
.long_name = NULL_IF_CONFIG_SMALL("MP3onMP4"),
};
diff --git a/libavcodec/nellymoserdec.c b/libavcodec/nellymoserdec.c
index 278b6b3891..7723c5827b 100644
--- a/libavcodec/nellymoserdec.c
+++ b/libavcodec/nellymoserdec.c
@@ -47,6 +47,7 @@
typedef struct NellyMoserDecodeContext {
AVCodecContext* avctx;
+ AVFrame frame;
float *float_buf;
DECLARE_ALIGNED(16, float, state)[NELLY_BUF_LEN];
AVLFG random_state;
@@ -142,29 +143,28 @@ static av_cold int decode_init(AVCodecContext * avctx) {
ff_init_ff_sine_windows(7);
avctx->channel_layout = AV_CH_LAYOUT_MONO;
+
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
-static int decode_tag(AVCodecContext * avctx,
- void *data, int *data_size,
- AVPacket *avpkt) {
+static int decode_tag(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
+{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
NellyMoserDecodeContext *s = avctx->priv_data;
- int blocks, i, block_size;
- int16_t *samples_s16 = data;
- float *samples_flt = data;
+ int blocks, i, ret;
+ int16_t *samples_s16;
+ float *samples_flt;
- block_size = NELLY_SAMPLES * av_get_bytes_per_sample(avctx->sample_fmt);
blocks = buf_size / NELLY_BLOCK_LEN;
if (blocks <= 0) {
av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
return AVERROR_INVALIDDATA;
}
- if (*data_size < blocks * block_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
- }
if (buf_size % NELLY_BLOCK_LEN) {
av_log(avctx, AV_LOG_WARNING, "Leftover bytes: %d.\n",
buf_size % NELLY_BLOCK_LEN);
@@ -177,6 +177,15 @@ static int decode_tag(AVCodecContext * avctx,
* 44100 Hz - 8
*/
+ /* get output buffer */
+ s->frame.nb_samples = NELLY_SAMPLES * blocks;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+ samples_s16 = (int16_t *)s->frame.data[0];
+ samples_flt = (float *)s->frame.data[0];
+
for (i=0 ; i<blocks ; i++) {
if (avctx->sample_fmt == SAMPLE_FMT_FLT) {
nelly_decode_block(s, buf, samples_flt);
@@ -188,7 +197,9 @@ static int decode_tag(AVCodecContext * avctx,
}
buf += NELLY_BLOCK_LEN;
}
- *data_size = blocks * block_size;
+
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
return buf_size;
}
@@ -198,6 +209,7 @@ static av_cold int decode_end(AVCodecContext * avctx) {
av_freep(&s->float_buf);
ff_mdct_end(&s->imdct_ctx);
+
return 0;
}
@@ -209,6 +221,7 @@ AVCodec ff_nellymoser_decoder = {
.init = decode_init,
.close = decode_end,
.decode = decode_tag,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Nellymoser Asao"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_S16,
diff --git a/libavcodec/pcm.c b/libavcodec/pcm.c
index 0e9e685989..76d5c100bc 100644
--- a/libavcodec/pcm.c
+++ b/libavcodec/pcm.c
@@ -192,6 +192,7 @@ static int pcm_encode_frame(AVCodecContext *avctx,
}
typedef struct PCMDecode {
+ AVFrame frame;
short table[256];
} PCMDecode;
@@ -223,6 +224,9 @@ static av_cold int pcm_decode_init(AVCodecContext * avctx)
if (avctx->sample_fmt == AV_SAMPLE_FMT_S32)
avctx->bits_per_raw_sample = av_get_bits_per_sample(avctx->codec->id);
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
@@ -243,22 +247,20 @@ static av_cold int pcm_decode_init(AVCodecContext * avctx)
dst += size / 8; \
}
-static int pcm_decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int pcm_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *src = avpkt->data;
int buf_size = avpkt->size;
PCMDecode *s = avctx->priv_data;
- int sample_size, c, n, out_size;
+ int sample_size, c, n, ret, samples_per_block;
uint8_t *samples;
int32_t *dst_int32_t;
- samples = data;
-
sample_size = av_get_bits_per_sample(avctx->codec_id)/8;
/* av_get_bits_per_sample returns 0 for CODEC_ID_PCM_DVD */
+ samples_per_block = 1;
if (CODEC_ID_PCM_DVD == avctx->codec_id) {
if (avctx->bits_per_coded_sample != 20 &&
avctx->bits_per_coded_sample != 24) {
@@ -266,10 +268,13 @@ static int pcm_decode_frame(AVCodecContext *avctx,
return AVERROR(EINVAL);
}
/* 2 samples are interleaved per block in PCM_DVD */
+ samples_per_block = 2;
sample_size = avctx->bits_per_coded_sample * 2 / 8;
- } else if (avctx->codec_id == CODEC_ID_PCM_LXF)
+ } else if (avctx->codec_id == CODEC_ID_PCM_LXF) {
/* we process 40-bit blocks per channel for LXF */
+ samples_per_block = 2;
sample_size = 5;
+ }
if (sample_size == 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid sample_size\n");
@@ -288,14 +293,13 @@ static int pcm_decode_frame(AVCodecContext *avctx,
n = buf_size/sample_size;
- out_size = n * av_get_bytes_per_sample(avctx->sample_fmt);
- if (avctx->codec_id == CODEC_ID_PCM_DVD ||
- avctx->codec_id == CODEC_ID_PCM_LXF)
- out_size *= 2;
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "output buffer too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ s->frame.nb_samples = n * samples_per_block / avctx->channels;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ samples = s->frame.data[0];
switch(avctx->codec->id) {
case CODEC_ID_PCM_U32LE:
@@ -401,7 +405,7 @@ static int pcm_decode_frame(AVCodecContext *avctx,
case CODEC_ID_PCM_DVD:
{
const uint8_t *src8;
- dst_int32_t = data;
+ dst_int32_t = (int32_t *)s->frame.data[0];
n /= avctx->channels;
switch (avctx->bits_per_coded_sample) {
case 20:
@@ -433,7 +437,7 @@ static int pcm_decode_frame(AVCodecContext *avctx,
{
int i;
const uint8_t *src8;
- dst_int32_t = data;
+ dst_int32_t = (int32_t *)s->frame.data[0];
n /= avctx->channels;
//unpack and de-planerize
for (i = 0; i < n; i++) {
@@ -454,7 +458,10 @@ static int pcm_decode_frame(AVCodecContext *avctx,
default:
return -1;
}
- *data_size = out_size;
+
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
+
return buf_size;
}
@@ -483,6 +490,7 @@ AVCodec ff_ ## name_ ## _decoder = { \
.priv_data_size = sizeof(PCMDecode), \
.init = pcm_decode_init, \
.decode = pcm_decode_frame, \
+ .capabilities = CODEC_CAP_DR1, \
.sample_fmts = (const enum AVSampleFormat[]){sample_fmt_,AV_SAMPLE_FMT_NONE}, \
.long_name = NULL_IF_CONFIG_SMALL(long_name_), \
}
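
The new samples_per_block bookkeeping in pcm.c replaces the old "double out_size for DVD and LXF" special case with the actual layout: those two codecs pack two samples into each coded block, so the frame size comes out as n * samples_per_block / channels. A worked case with numbers chosen for illustration:

    /* 20-bit stereo PCM_DVD, 2000-byte packet:
     *   sample_size       = 20 * 2 / 8  = 5 bytes (one two-sample block)
     *   n                 = 2000 / 5    = 400 blocks
     *   samples_per_block = 2
     *   nb_samples        = 400 * 2 / 2 = 400 samples per channel */
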
diff --git a/libavcodec/qcelpdec.c b/libavcodec/qcelpdec.c
index 9e7e13118b..20a0484b42 100644
--- a/libavcodec/qcelpdec.c
+++ b/libavcodec/qcelpdec.c
@@ -56,6 +56,7 @@ typedef enum
typedef struct
{
+ AVFrame avframe;
GetBitContext gb;
qcelp_packet_rate bitrate;
QCELPFrame frame; /**< unpacked data frame */
@@ -97,6 +98,9 @@ static av_cold int qcelp_decode_init(AVCodecContext *avctx)
for(i=0; i<10; i++)
q->prev_lspf[i] = (i+1)/11.;
+ avcodec_get_frame_defaults(&q->avframe);
+ avctx->coded_frame = &q->avframe;
+
return 0;
}
@@ -682,23 +686,25 @@ static void postfilter(QCELPContext *q, float *samples, float *lpc)
160, 0.9375, &q->postfilter_agc_mem);
}
-static int qcelp_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
- AVPacket *avpkt)
+static int qcelp_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
QCELPContext *q = avctx->priv_data;
- float *outbuffer = data;
- int i, out_size;
+ float *outbuffer;
+ int i, ret;
float quantized_lspf[10], lpc[10];
float gain[16];
float *formant_mem;
- out_size = 160 * av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ q->avframe.nb_samples = 160;
+ if ((ret = avctx->get_buffer(avctx, &q->avframe)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ outbuffer = (float *)q->avframe.data[0];
if ((q->bitrate = determine_bitrate(avctx, buf_size, &buf)) == I_F_Q) {
warn_insufficient_frame_quality(avctx, "bitrate cannot be determined.");
@@ -783,7 +789,8 @@ erasure:
memcpy(q->prev_lspf, quantized_lspf, sizeof(q->prev_lspf));
q->prev_bitrate = q->bitrate;
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = q->avframe;
return buf_size;
}
@@ -795,6 +802,7 @@ AVCodec ff_qcelp_decoder =
.id = CODEC_ID_QCELP,
.init = qcelp_decode_init,
.decode = qcelp_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.priv_data_size = sizeof(QCELPContext),
.long_name = NULL_IF_CONFIG_SMALL("QCELP / PureVoice"),
};
diff --git a/libavcodec/qdm2.c b/libavcodec/qdm2.c
index 5068e675cb..9341c69281 100644
--- a/libavcodec/qdm2.c
+++ b/libavcodec/qdm2.c
@@ -130,6 +130,8 @@ typedef struct {
* QDM2 decoder context
*/
typedef struct {
+ AVFrame frame;
+
/// Parameters from codec header, do not change during playback
int nb_channels; ///< number of channels
int channels; ///< number of channels
@@ -1875,6 +1877,9 @@ static av_cold int qdm2_decode_init(AVCodecContext *avctx)
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
// dump_context(s);
return 0;
}
@@ -1952,30 +1957,27 @@ static int qdm2_decode (QDM2Context *q, const uint8_t *in, int16_t *out)
}
-static int qdm2_decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int qdm2_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
QDM2Context *s = avctx->priv_data;
- int16_t *out = data;
- int i, out_size;
+ int16_t *out;
+ int i, ret;
if(!buf)
return 0;
if(buf_size < s->checksum_size)
return -1;
- out_size = 16 * s->channels * s->frame_size *
- av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ s->frame.nb_samples = 16 * s->frame_size;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
-
- av_log(avctx, AV_LOG_DEBUG, "decode(%d): %p[%d] -> %p[%d]\n",
- buf_size, buf, s->checksum_size, data, *data_size);
+ out = (int16_t *)s->frame.data[0];
for (i = 0; i < 16; i++) {
if (qdm2_decode(s, buf, out) < 0)
@@ -1983,7 +1985,8 @@ static int qdm2_decode_frame(AVCodecContext *avctx,
out += s->channels * s->frame_size;
}
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
return s->checksum_size;
}
@@ -1997,5 +2000,6 @@ AVCodec ff_qdm2_decoder =
.init = qdm2_decode_init,
.close = qdm2_decode_close,
.decode = qdm2_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("QDesign Music Codec 2"),
};
diff --git a/libavcodec/ra144.h b/libavcodec/ra144.h
index dcdfbb8ccc..f6475d45ff 100644
--- a/libavcodec/ra144.h
+++ b/libavcodec/ra144.h
@@ -34,6 +34,7 @@
typedef struct {
AVCodecContext *avctx;
+ AVFrame frame;
LPCContext lpc_ctx;
unsigned int old_energy; ///< previous frame energy
diff --git a/libavcodec/ra144dec.c b/libavcodec/ra144dec.c
index 5fff696d83..dd8838c417 100644
--- a/libavcodec/ra144dec.c
+++ b/libavcodec/ra144dec.c
@@ -38,6 +38,10 @@ static av_cold int ra144_decode_init(AVCodecContext * avctx)
ractx->lpc_coef[1] = ractx->lpc_tables[1];
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+
+ avcodec_get_frame_defaults(&ractx->frame);
+ avctx->coded_frame = &ractx->frame;
+
return 0;
}
@@ -54,8 +58,8 @@ static void do_output_subblock(RA144Context *ractx, const uint16_t *lpc_coefs,
}
/** Uncompress one block (20 bytes -> 160*2 bytes). */
-static int ra144_decode_frame(AVCodecContext * avctx, void *vdata,
- int *data_size, AVPacket *avpkt)
+static int ra144_decode_frame(AVCodecContext * avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
@@ -64,23 +68,25 @@ static int ra144_decode_frame(AVCodecContext * avctx, void *vdata,
uint16_t block_coefs[NBLOCKS][LPC_ORDER]; // LPC coefficients of each sub-block
unsigned int lpc_refl[LPC_ORDER]; // LPC reflection coefficients of the frame
int i, j;
- int out_size;
- int16_t *data = vdata;
+ int ret;
+ int16_t *samples;
unsigned int energy;
RA144Context *ractx = avctx->priv_data;
GetBitContext gb;
- out_size = NBLOCKS * BLOCKSIZE * av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ ractx->frame.nb_samples = NBLOCKS * BLOCKSIZE;
+ if ((ret = avctx->get_buffer(avctx, &ractx->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ samples = (int16_t *)ractx->frame.data[0];
if(buf_size < FRAMESIZE) {
av_log(avctx, AV_LOG_ERROR,
"Frame too small (%d bytes). Truncated file?\n", buf_size);
- *data_size = 0;
+ *got_frame_ptr = 0;
return buf_size;
}
init_get_bits(&gb, buf, FRAMESIZE * 8);
@@ -106,7 +112,7 @@ static int ra144_decode_frame(AVCodecContext * avctx, void *vdata,
do_output_subblock(ractx, block_coefs[i], refl_rms[i], &gb);
for (j=0; j < BLOCKSIZE; j++)
- *data++ = av_clip_int16(ractx->curr_sblock[j + 10] << 2);
+ *samples++ = av_clip_int16(ractx->curr_sblock[j + 10] << 2);
}
ractx->old_energy = energy;
@@ -114,7 +120,9 @@ static int ra144_decode_frame(AVCodecContext * avctx, void *vdata,
FFSWAP(unsigned int *, ractx->lpc_coef[0], ractx->lpc_coef[1]);
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = ractx->frame;
+
return FRAMESIZE;
}
@@ -125,5 +133,6 @@ AVCodec ff_ra_144_decoder = {
.priv_data_size = sizeof(RA144Context),
.init = ra144_decode_init,
.decode = ra144_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("RealAudio 1.0 (14.4K)"),
};
diff --git a/libavcodec/ra288.c b/libavcodec/ra288.c
index eac2e2e3cd..062d9fac94 100644
--- a/libavcodec/ra288.c
+++ b/libavcodec/ra288.c
@@ -36,6 +36,7 @@
#define RA288_BLOCKS_PER_FRAME 32
typedef struct {
+ AVFrame frame;
DSPContext dsp;
DECLARE_ALIGNED(16, float, sp_lpc)[FFALIGN(36, 8)]; ///< LPC coefficients for speech data (spec: A)
DECLARE_ALIGNED(16, float, gain_lpc)[FFALIGN(10, 8)]; ///< LPC coefficients for gain (spec: GB)
@@ -62,6 +63,10 @@ static av_cold int ra288_decode_init(AVCodecContext *avctx)
RA288Context *ractx = avctx->priv_data;
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
dsputil_init(&ractx->dsp, avctx);
+
+ avcodec_get_frame_defaults(&ractx->frame);
+ avctx->coded_frame = &ractx->frame;
+
return 0;
}
@@ -165,12 +170,12 @@ static void backward_filter(RA288Context *ractx,
}
static int ra288_decode_frame(AVCodecContext * avctx, void *data,
- int *data_size, AVPacket *avpkt)
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- float *out = data;
- int i, out_size;
+ float *out;
+ int i, ret;
RA288Context *ractx = avctx->priv_data;
GetBitContext gb;
@@ -181,12 +186,13 @@ static int ra288_decode_frame(AVCodecContext * avctx, void *data,
return AVERROR_INVALIDDATA;
}
- out_size = RA288_BLOCK_SIZE * RA288_BLOCKS_PER_FRAME *
- av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ ractx->frame.nb_samples = RA288_BLOCK_SIZE * RA288_BLOCKS_PER_FRAME;
+ if ((ret = avctx->get_buffer(avctx, &ractx->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ out = (float *)ractx->frame.data[0];
init_get_bits(&gb, buf, avctx->block_align * 8);
@@ -208,7 +214,9 @@ static int ra288_decode_frame(AVCodecContext * avctx, void *data,
}
}
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = ractx->frame;
+
return avctx->block_align;
}
@@ -219,5 +227,6 @@ AVCodec ff_ra_288_decoder = {
.priv_data_size = sizeof(RA288Context),
.init = ra288_decode_init,
.decode = ra288_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("RealAudio 2.0 (28.8K)"),
};
diff --git a/libavcodec/s302m.c b/libavcodec/s302m.c
index f6f096d89f..34018aeb46 100644
--- a/libavcodec/s302m.c
+++ b/libavcodec/s302m.c
@@ -25,6 +25,10 @@
#define AES3_HEADER_LEN 4
+typedef struct S302MDecodeContext {
+ AVFrame frame;
+} S302MDecodeContext;
+
static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
int buf_size)
{
@@ -73,10 +77,12 @@ static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
}
static int s302m_decode_frame(AVCodecContext *avctx, void *data,
- int *data_size, AVPacket *avpkt)
+ int *got_frame_ptr, AVPacket *avpkt)
{
+ S302MDecodeContext *s = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
+ int block_size, ret;
int frame_size = s302m_parse_frame_header(avctx, buf, buf_size);
if (frame_size < 0)
@@ -85,11 +91,18 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
buf_size -= AES3_HEADER_LEN;
buf += AES3_HEADER_LEN;
- if (*data_size < 4 * buf_size * 8 / (avctx->bits_per_coded_sample + 4))
- return -1;
+ /* get output buffer */
+ block_size = (avctx->bits_per_coded_sample + 4) / 4;
+ s->frame.nb_samples = 2 * (buf_size / block_size) / avctx->channels;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+
+ buf_size = (s->frame.nb_samples * avctx->channels / 2) * block_size;
if (avctx->bits_per_coded_sample == 24) {
- uint32_t *o = data;
+ uint32_t *o = (uint32_t *)s->frame.data[0];
for (; buf_size > 6; buf_size -= 7) {
*o++ = (av_reverse[buf[2]] << 24) |
(av_reverse[buf[1]] << 16) |
@@ -100,9 +113,8 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
(av_reverse[buf[3] & 0x0f] << 4);
buf += 7;
}
- *data_size = (uint8_t*) o - (uint8_t*) data;
} else if (avctx->bits_per_coded_sample == 20) {
- uint32_t *o = data;
+ uint32_t *o = (uint32_t *)s->frame.data[0];
for (; buf_size > 5; buf_size -= 6) {
*o++ = (av_reverse[buf[2] & 0xf0] << 28) |
(av_reverse[buf[1]] << 20) |
@@ -112,9 +124,8 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
(av_reverse[buf[3]] << 12);
buf += 6;
}
- *data_size = (uint8_t*) o - (uint8_t*) data;
} else {
- uint16_t *o = data;
+ uint16_t *o = (uint16_t *)s->frame.data[0];
for (; buf_size > 4; buf_size -= 5) {
*o++ = (av_reverse[buf[1]] << 8) |
av_reverse[buf[0]];
@@ -123,10 +134,22 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
(av_reverse[buf[2]] >> 4);
buf += 5;
}
- *data_size = (uint8_t*) o - (uint8_t*) data;
}
- return buf - avpkt->data;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
+
+ return avpkt->size;
+}
+
+static int s302m_decode_init(AVCodecContext *avctx)
+{
+ S302MDecodeContext *s = avctx->priv_data;
+
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
+ return 0;
}
@@ -134,6 +157,9 @@ AVCodec ff_s302m_decoder = {
.name = "s302m",
.type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_S302M,
+ .priv_data_size = sizeof(S302MDecodeContext),
+ .init = s302m_decode_init,
.decode = s302m_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("SMPTE 302M"),
};
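
As a sanity check on the new S302M buffer sizing: each AES3 block carries one sample per channel pair (two samples) and occupies (bits_per_coded_sample + 4) / 4 bytes. A worked example with assumed numbers, not taken from the patch:

/* illustrative only: 24-bit AES3, stereo */
int bits_per_coded_sample = 24;
int channels   = 2;
int buf_size   = 1470;                                    /* payload after the 4-byte header */
int block_size = (bits_per_coded_sample + 4) / 4;         /* = 7 bytes per 2 samples */
int nb_samples = 2 * (buf_size / block_size) / channels;  /* = 210 samples per channel */
/* buf_size is then clamped to whole blocks: (210 * 2 / 2) * 7 = 1470 */
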
diff --git a/libavcodec/shorten.c b/libavcodec/shorten.c
index da36bd58eb..da0ef08eee 100644
--- a/libavcodec/shorten.c
+++ b/libavcodec/shorten.c
@@ -79,6 +79,7 @@ static const uint8_t is_audio_command[10] = { 1, 1, 1, 1, 0, 0, 0, 1, 1, 0 };
typedef struct ShortenContext {
AVCodecContext *avctx;
+ AVFrame frame;
GetBitContext gb;
int min_framesize, max_framesize;
@@ -112,6 +113,9 @@ static av_cold int shorten_decode_init(AVCodecContext * avctx)
s->avctx = avctx;
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
@@ -394,15 +398,13 @@ static int read_header(ShortenContext *s)
return 0;
}
-static int shorten_decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int shorten_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
ShortenContext *s = avctx->priv_data;
int i, input_buf_size = 0;
- int16_t *samples = data;
int ret;
/* allocate internal bitstream buffer */
@@ -436,7 +438,7 @@ static int shorten_decode_frame(AVCodecContext *avctx,
/* do not decode until buffer has at least max_framesize bytes or
the end of the file has been reached */
if (buf_size < s->max_framesize && avpkt->data) {
- *data_size = 0;
+ *got_frame_ptr = 0;
return input_buf_size;
}
}
@@ -448,13 +450,13 @@ static int shorten_decode_frame(AVCodecContext *avctx,
if (!s->got_header) {
if ((ret = read_header(s)) < 0)
return ret;
- *data_size = 0;
+ *got_frame_ptr = 0;
goto finish_frame;
}
/* if quit command was read previously, don't decode anything */
if (s->got_quit_command) {
- *data_size = 0;
+ *got_frame_ptr = 0;
return avpkt->size;
}
@@ -464,7 +466,7 @@ static int shorten_decode_frame(AVCodecContext *avctx,
int len;
if (get_bits_left(&s->gb) < 3+FNSIZE) {
- *data_size = 0;
+ *got_frame_ptr = 0;
break;
}
@@ -472,7 +474,7 @@ static int shorten_decode_frame(AVCodecContext *avctx,
if (cmd > FN_VERBATIM) {
av_log(avctx, AV_LOG_ERROR, "unknown shorten function %d\n", cmd);
- *data_size = 0;
+ *got_frame_ptr = 0;
break;
}
@@ -507,7 +509,7 @@ static int shorten_decode_frame(AVCodecContext *avctx,
break;
}
if (cmd == FN_BLOCKSIZE || cmd == FN_QUIT) {
- *data_size = 0;
+ *got_frame_ptr = 0;
break;
}
} else {
@@ -571,19 +573,23 @@ static int shorten_decode_frame(AVCodecContext *avctx,
/* if this is the last channel in the block, output the samples */
s->cur_chan++;
if (s->cur_chan == s->channels) {
- int out_size = s->blocksize * s->channels *
- av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ s->frame.nb_samples = s->blocksize;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
- interleave_buffer(samples, s->channels, s->blocksize, s->decoded);
- *data_size = out_size;
+ /* interleave output */
+ interleave_buffer((int16_t *)s->frame.data[0], s->channels,
+ s->blocksize, s->decoded);
+
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
}
}
}
if (s->cur_chan < s->channels)
- *data_size = 0;
+ *got_frame_ptr = 0;
finish_frame:
s->bitindex = get_bits_count(&s->gb) - 8*((get_bits_count(&s->gb))/8);
@@ -614,6 +620,7 @@ static av_cold int shorten_decode_close(AVCodecContext *avctx)
}
av_freep(&s->bitstream);
av_freep(&s->coeffs);
+
return 0;
}
@@ -625,6 +632,6 @@ AVCodec ff_shorten_decoder = {
.init = shorten_decode_init,
.close = shorten_decode_close,
.decode = shorten_decode_frame,
- .capabilities = CODEC_CAP_DELAY,
+ .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
.long_name= NULL_IF_CONFIG_SMALL("Shorten"),
};
diff --git a/libavcodec/sipr.c b/libavcodec/sipr.c
index 10a12c52a5..c832b9b1fd 100644
--- a/libavcodec/sipr.c
+++ b/libavcodec/sipr.c
@@ -507,20 +507,23 @@ static av_cold int sipr_decoder_init(AVCodecContext * avctx)
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
+ avcodec_get_frame_defaults(&ctx->frame);
+ avctx->coded_frame = &ctx->frame;
+
return 0;
}
-static int sipr_decode_frame(AVCodecContext *avctx, void *datap,
- int *data_size, AVPacket *avpkt)
+static int sipr_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
SiprContext *ctx = avctx->priv_data;
const uint8_t *buf=avpkt->data;
SiprParameters parm;
const SiprModeParam *mode_par = &modes[ctx->mode];
GetBitContext gb;
- float *data = datap;
+ float *samples;
int subframe_size = ctx->mode == MODE_16k ? L_SUBFR_16k : SUBFR_SIZE;
- int i, out_size;
+ int i, ret;
ctx->avctx = avctx;
if (avpkt->size < (mode_par->bits_per_frame >> 3)) {
@@ -530,27 +533,27 @@ static int sipr_decode_frame(AVCodecContext *avctx, void *datap,
return -1;
}
- out_size = mode_par->frames_per_packet * subframe_size *
- mode_par->subframe_count *
- av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR,
- "Error processing packet: output buffer (%d) too small\n",
- *data_size);
- return -1;
+ /* get output buffer */
+ ctx->frame.nb_samples = mode_par->frames_per_packet * subframe_size *
+ mode_par->subframe_count;
+ if ((ret = avctx->get_buffer(avctx, &ctx->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ samples = (float *)ctx->frame.data[0];
init_get_bits(&gb, buf, mode_par->bits_per_frame);
for (i = 0; i < mode_par->frames_per_packet; i++) {
decode_parameters(&parm, &gb, mode_par);
- ctx->decode_frame(ctx, &parm, data);
+ ctx->decode_frame(ctx, &parm, samples);
- data += subframe_size * mode_par->subframe_count;
+ samples += subframe_size * mode_par->subframe_count;
}
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = ctx->frame;
return mode_par->bits_per_frame >> 3;
}
@@ -562,5 +565,6 @@ AVCodec ff_sipr_decoder = {
.priv_data_size = sizeof(SiprContext),
.init = sipr_decoder_init,
.decode = sipr_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("RealAudio SIPR / ACELP.NET"),
};
diff --git a/libavcodec/smacker.c b/libavcodec/smacker.c
index 00ba4b8c5d..ba7da02622 100644
--- a/libavcodec/smacker.c
+++ b/libavcodec/smacker.c
@@ -558,31 +558,43 @@ static av_cold int decode_end(AVCodecContext *avctx)
}
+typedef struct SmackerAudioContext {
+ AVFrame frame;
+} SmackerAudioContext;
+
static av_cold int smka_decode_init(AVCodecContext *avctx)
{
+ SmackerAudioContext *s = avctx->priv_data;
+
if (avctx->channels < 1 || avctx->channels > 2) {
av_log(avctx, AV_LOG_ERROR, "invalid number of channels\n");
return AVERROR(EINVAL);
}
avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
avctx->sample_fmt = avctx->bits_per_coded_sample == 8 ? AV_SAMPLE_FMT_U8 : AV_SAMPLE_FMT_S16;
+
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
/**
* Decode Smacker audio data
*/
-static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
+static int smka_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
+ SmackerAudioContext *s = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
GetBitContext gb;
HuffContext h[4];
VLC vlc[4];
- int16_t *samples = data;
- uint8_t *samples8 = data;
+ int16_t *samples;
+ uint8_t *samples8;
int val;
- int i, res;
+ int i, res, ret;
int unp_size;
int bits, stereo;
int pred[2] = {0, 0};
@@ -598,15 +610,11 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
if(!get_bits1(&gb)){
av_log(avctx, AV_LOG_INFO, "Sound: no data\n");
- *data_size = 0;
+ *got_frame_ptr = 0;
return 1;
}
stereo = get_bits1(&gb);
bits = get_bits1(&gb);
- if (unp_size & 0xC0000000 || unp_size > *data_size) {
- av_log(avctx, AV_LOG_ERROR, "Frame is too large to fit in buffer\n");
- return -1;
- }
if (stereo ^ (avctx->channels != 1)) {
av_log(avctx, AV_LOG_ERROR, "channels mismatch\n");
return AVERROR(EINVAL);
@@ -616,6 +624,15 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
return AVERROR(EINVAL);
}
+ /* get output buffer */
+ s->frame.nb_samples = unp_size / (avctx->channels * (bits + 1));
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+ samples = (int16_t *)s->frame.data[0];
+ samples8 = s->frame.data[0];
+
memset(vlc, 0, sizeof(VLC) * 4);
memset(h, 0, sizeof(HuffContext) * 4);
// Initialize
@@ -705,7 +722,9 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
av_free(h[i].values);
}
- *data_size = unp_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
+
return buf_size;
}
@@ -725,8 +744,10 @@ AVCodec ff_smackaud_decoder = {
.name = "smackaud",
.type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_SMACKAUDIO,
+ .priv_data_size = sizeof(SmackerAudioContext),
.init = smka_decode_init,
.decode = smka_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Smacker audio"),
};
diff --git a/libavcodec/truespeech.c b/libavcodec/truespeech.c
index b7a2aa6fba..524884ddf5 100644
--- a/libavcodec/truespeech.c
+++ b/libavcodec/truespeech.c
@@ -34,6 +34,7 @@
* TrueSpeech decoder context
*/
typedef struct {
+ AVFrame frame;
DSPContext dsp;
/* input data */
uint8_t buffer[32];
@@ -69,6 +70,9 @@ static av_cold int truespeech_decode_init(AVCodecContext * avctx)
dsputil_init(&c->dsp, avctx);
+ avcodec_get_frame_defaults(&c->frame);
+ avctx->coded_frame = &c->frame;
+
return 0;
}
@@ -299,17 +303,16 @@ static void truespeech_save_prevvec(TSContext *c)
c->prevfilt[i] = c->cvector[i];
}
-static int truespeech_decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int truespeech_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
TSContext *c = avctx->priv_data;
int i, j;
- short *samples = data;
- int iterations, out_size;
+ int16_t *samples;
+ int iterations, ret;
iterations = buf_size / 32;
@@ -319,13 +322,15 @@ static int truespeech_decode_frame(AVCodecContext *avctx,
return -1;
}
- out_size = iterations * 240 * av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ c->frame.nb_samples = iterations * 240;
+ if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ samples = (int16_t *)c->frame.data[0];
- memset(samples, 0, out_size);
+ memset(samples, 0, iterations * 240 * sizeof(*samples));
for(j = 0; j < iterations; j++) {
truespeech_read_frame(c, buf);
@@ -345,7 +350,8 @@ static int truespeech_decode_frame(AVCodecContext *avctx,
truespeech_save_prevvec(c);
}
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = c->frame;
return buf_size;
}
@@ -357,5 +363,6 @@ AVCodec ff_truespeech_decoder = {
.priv_data_size = sizeof(TSContext),
.init = truespeech_decode_init,
.decode = truespeech_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("DSP Group TrueSpeech"),
};
diff --git a/libavcodec/tta.c b/libavcodec/tta.c
index 3e4adf0c11..6b76f527c4 100644
--- a/libavcodec/tta.c
+++ b/libavcodec/tta.c
@@ -56,6 +56,7 @@ typedef struct TTAChannel {
typedef struct TTAContext {
AVCodecContext *avctx;
+ AVFrame frame;
GetBitContext gb;
int format, channels, bps, data_length;
@@ -276,17 +277,19 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
return -1;
}
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
-static int tta_decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int tta_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
TTAContext *s = avctx->priv_data;
- int i, out_size;
+ int i, ret;
int cur_chan = 0, framelen = s->frame_length;
int32_t *p;
@@ -297,10 +300,11 @@ static int tta_decode_frame(AVCodecContext *avctx,
if (!s->total_frames && s->last_frame_length)
framelen = s->last_frame_length;
- out_size = framelen * s->channels * av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "Output buffer size is too small.\n");
- return -1;
+ /* get output buffer */
+ s->frame.nb_samples = framelen;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
// decode directly to output buffer for 24-bit sample format
@@ -396,19 +400,20 @@ static int tta_decode_frame(AVCodecContext *avctx,
// convert to output buffer
if (s->bps == 2) {
- int16_t *samples = data;
+ int16_t *samples = (int16_t *)s->frame.data[0];
for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
*samples++ = *p;
} else {
// shift samples for 24-bit sample format
- int32_t *samples = data;
+ int32_t *samples = (int32_t *)s->frame.data[0];
for (i = 0; i < framelen * s->channels; i++)
*samples++ <<= 8;
// reset decode buffer
s->decode_buffer = NULL;
}
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
return buf_size;
}
@@ -430,5 +435,6 @@ AVCodec ff_tta_decoder = {
.init = tta_decode_init,
.close = tta_decode_close,
.decode = tta_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("True Audio (TTA)"),
};
diff --git a/libavcodec/twinvq.c b/libavcodec/twinvq.c
index a2851562ee..22be07a5b5 100644
--- a/libavcodec/twinvq.c
+++ b/libavcodec/twinvq.c
@@ -174,6 +174,7 @@ static const ModeTab mode_44_48 = {
typedef struct TwinContext {
AVCodecContext *avctx;
+ AVFrame frame;
DSPContext dsp;
FFTContext mdct_ctx[3];
@@ -195,6 +196,7 @@ typedef struct TwinContext {
float *curr_frame; ///< non-interleaved output
float *prev_frame; ///< non-interleaved previous frame
int last_block_pos[2];
+ int discarded_packets;
float *cos_tabs[3];
@@ -676,6 +678,9 @@ static void imdct_output(TwinContext *tctx, enum FrameType ftype, int wtype,
i);
}
+ if (!out)
+ return;
+
size2 = tctx->last_block_pos[0];
size1 = mtab->size - size2;
if (tctx->avctx->channels == 2) {
@@ -811,16 +816,16 @@ static void read_and_decode_spectrum(TwinContext *tctx, GetBitContext *gb,
}
static int twin_decode_frame(AVCodecContext * avctx, void *data,
- int *data_size, AVPacket *avpkt)
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
TwinContext *tctx = avctx->priv_data;
GetBitContext gb;
const ModeTab *mtab = tctx->mtab;
- float *out = data;
+ float *out = NULL;
enum FrameType ftype;
- int window_type, out_size;
+ int window_type, ret;
static const enum FrameType wtype_to_ftype_table[] = {
FT_LONG, FT_LONG, FT_SHORT, FT_LONG,
FT_MEDIUM, FT_LONG, FT_LONG, FT_MEDIUM, FT_MEDIUM
@@ -832,11 +837,14 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data,
return AVERROR(EINVAL);
}
- out_size = mtab->size * avctx->channels *
- av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ if (tctx->discarded_packets >= 2) {
+ tctx->frame.nb_samples = mtab->size;
+ if ((ret = avctx->get_buffer(avctx, &tctx->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+ out = (float *)tctx->frame.data[0];
}
init_get_bits(&gb, buf, buf_size * 8);
@@ -856,12 +864,14 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data,
FFSWAP(float*, tctx->curr_frame, tctx->prev_frame);
- if (tctx->avctx->frame_number < 2) {
- *data_size=0;
+ if (tctx->discarded_packets < 2) {
+ tctx->discarded_packets++;
+ *got_frame_ptr = 0;
return buf_size;
}
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = tctx->frame;
return buf_size;
}
@@ -1153,6 +1163,9 @@ static av_cold int twin_decode_init(AVCodecContext *avctx)
memset_float(tctx->bark_hist[0][0], 0.1, FF_ARRAY_ELEMS(tctx->bark_hist));
+ avcodec_get_frame_defaults(&tctx->frame);
+ avctx->coded_frame = &tctx->frame;
+
return 0;
}
@@ -1164,5 +1177,6 @@ AVCodec ff_twinvq_decoder = {
.init = twin_decode_init,
.close = twin_decode_close,
.decode = twin_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("VQF TwinVQ"),
};
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index 998a12c149..c84439972c 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -222,9 +222,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
if(s->codec_id == CODEC_ID_SVQ1 || s->codec_id == CODEC_ID_VP5 ||
s->codec_id == CODEC_ID_VP6 || s->codec_id == CODEC_ID_VP6F ||
s->codec_id == CODEC_ID_VP6A) {
- linesize_align[0] =
- linesize_align[1] =
- linesize_align[2] = 16;
+ for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
+ linesize_align[i] = 16;
}
#endif
}
@@ -241,7 +240,108 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){
*width=FFALIGN(*width, align);
}
-int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
+static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
+{
+ AVCodecInternal *avci = avctx->internal;
+ InternalBuffer *buf;
+ int buf_size, ret, i, needs_extended_data;
+
+ buf_size = av_samples_get_buffer_size(NULL, avctx->channels,
+ frame->nb_samples, avctx->sample_fmt,
+ 32);
+ if (buf_size < 0)
+ return AVERROR(EINVAL);
+
+ needs_extended_data = av_sample_fmt_is_planar(avctx->sample_fmt) &&
+ avctx->channels > AV_NUM_DATA_POINTERS;
+
+ /* allocate InternalBuffer if needed */
+ if (!avci->buffer) {
+ avci->buffer = av_mallocz(sizeof(InternalBuffer));
+ if (!avci->buffer)
+ return AVERROR(ENOMEM);
+ }
+ buf = avci->buffer;
+
+ /* if there is a previously-used internal buffer, check its size and
+ channel count to see if we can reuse it */
+ if (buf->extended_data) {
+ /* if current buffer is too small, free it */
+ if (buf->extended_data[0] && buf_size > buf->audio_data_size) {
+ av_free(buf->extended_data[0]);
+ if (buf->extended_data != buf->data)
+ av_free(buf->extended_data);
+ buf->extended_data = NULL;
+ buf->data[0] = NULL;
+ }
+ /* if number of channels has changed, reset and/or free extended data
+ pointers but leave data buffer in buf->data[0] for reuse */
+ if (buf->nb_channels != avctx->channels) {
+ if (buf->extended_data != buf->data)
+ av_free(buf->extended_data);
+ buf->extended_data = NULL;
+ }
+ }
+
+ /* if there is no previous buffer or the previous buffer cannot be used
+ as-is, allocate a new buffer and/or rearrange the channel pointers */
+ if (!buf->extended_data) {
+ /* if the channel pointers will fit, just set extended_data to data,
+ otherwise allocate the extended_data channel pointers */
+ if (needs_extended_data) {
+ buf->extended_data = av_mallocz(avctx->channels *
+ sizeof(*buf->extended_data));
+ if (!buf->extended_data)
+ return AVERROR(ENOMEM);
+ } else {
+ buf->extended_data = buf->data;
+ }
+
+ /* if there is a previous buffer and it is large enough, reuse it and
+ just fill in new channel pointers and linesize, otherwise allocate
+ a new buffer */
+ if (buf->extended_data[0]) {
+ ret = av_samples_fill_arrays(buf->extended_data, &buf->linesize[0],
+ buf->extended_data[0], avctx->channels,
+ frame->nb_samples, avctx->sample_fmt,
+ 32);
+ } else {
+ ret = av_samples_alloc(buf->extended_data, &buf->linesize[0],
+ avctx->channels, frame->nb_samples,
+ avctx->sample_fmt, 32);
+ }
+ if (ret)
+ return ret;
+
+ /* if data was not used for extended_data, we need to copy as many of
+ the extended_data channel pointers as will fit */
+ if (needs_extended_data) {
+ for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
+ buf->data[i] = buf->extended_data[i];
+ }
+ buf->audio_data_size = buf_size;
+ buf->nb_channels = avctx->channels;
+ }
+
+ /* copy InternalBuffer info to the AVFrame */
+ frame->type = FF_BUFFER_TYPE_INTERNAL;
+ frame->extended_data = buf->extended_data;
+ frame->linesize[0] = buf->linesize[0];
+ memcpy(frame->data, buf->data, sizeof(frame->data));
+
+ if (avctx->pkt) frame->pkt_pts = avctx->pkt->pts;
+ else frame->pkt_pts = AV_NOPTS_VALUE;
+ frame->reordered_opaque = avctx->reordered_opaque;
+
+ if (avctx->debug & FF_DEBUG_BUFFERS)
+ av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p, "
+ "internal audio buffer used\n", frame);
+
+ return 0;
+}
+
+static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
+{
int i;
int w= s->width;
int h= s->height;
@@ -362,6 +462,7 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
pic->data[i]= buf->data[i];
pic->linesize[i]= buf->linesize[i];
}
+ pic->extended_data = pic->data;
avci->buffer_count++;
if(s->pkt) pic->pkt_pts= s->pkt->pts;
@@ -375,11 +476,25 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
return 0;
}
+int avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame)
+{
+ switch (avctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ return video_get_buffer(avctx, frame);
+ case AVMEDIA_TYPE_AUDIO:
+ return audio_get_buffer(avctx, frame);
+ default:
+ return -1;
+ }
+}
+
void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){
int i;
InternalBuffer *buf, *last;
AVCodecInternal *avci = s->internal;
+ assert(s->codec_type == AVMEDIA_TYPE_VIDEO);
+
assert(pic->type==FF_BUFFER_TYPE_INTERNAL);
assert(avci->buffer_count);
@@ -412,6 +527,8 @@ int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){
AVFrame temp_pic;
int i;
+ assert(s->codec_type == AVMEDIA_TYPE_VIDEO);
+
/* If no picture return a new buffer */
if(pic->data[0] == NULL) {
/* We will copy from buffer, so must be readable */
@@ -761,11 +878,59 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi
return ret;
}
+#if FF_API_OLD_DECODE_AUDIO
int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
int *frame_size_ptr,
AVPacket *avpkt)
{
- int ret;
+ AVFrame frame;
+ int ret, got_frame = 0;
+
+ if (avctx->get_buffer != avcodec_default_get_buffer) {
+ av_log(avctx, AV_LOG_ERROR, "A custom get_buffer() cannot be used with "
+ "avcodec_decode_audio3()\n");
+ return AVERROR(EINVAL);
+ }
+
+ ret = avcodec_decode_audio4(avctx, &frame, &got_frame, avpkt);
+
+ if (ret >= 0 && got_frame) {
+ int ch, plane_size;
+ int planar = av_sample_fmt_is_planar(avctx->sample_fmt);
+ int data_size = av_samples_get_buffer_size(&plane_size, avctx->channels,
+ frame.nb_samples,
+ avctx->sample_fmt, 1);
+ if (*frame_size_ptr < data_size) {
+ av_log(avctx, AV_LOG_ERROR, "output buffer size is too small for "
+ "the current frame (%d < %d)\n", *frame_size_ptr, data_size);
+ return AVERROR(EINVAL);
+ }
+
+ memcpy(samples, frame.extended_data[0], plane_size);
+
+ if (planar && avctx->channels > 1) {
+ uint8_t *out = ((uint8_t *)samples) + plane_size;
+ for (ch = 1; ch < avctx->channels; ch++) {
+ memcpy(out, frame.extended_data[ch], plane_size);
+ out += plane_size;
+ }
+ }
+ *frame_size_ptr = data_size;
+ } else {
+ *frame_size_ptr = 0;
+ }
+ return ret;
+}
+#endif
+
+int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
+ AVFrame *frame,
+ int *got_frame_ptr,
+ AVPacket *avpkt)
+{
+ int ret = 0;
+
+ *got_frame_ptr = 0;
avctx->pkt = avpkt;
@@ -774,23 +939,12 @@ int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *sa
return AVERROR(EINVAL);
}
- if((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size){
- //FIXME remove the check below _after_ ensuring that all audio check that the available space is enough
- if(*frame_size_ptr < AVCODEC_MAX_AUDIO_FRAME_SIZE){
- av_log(avctx, AV_LOG_ERROR, "buffer smaller than AVCODEC_MAX_AUDIO_FRAME_SIZE\n");
- return -1;
- }
- if(*frame_size_ptr < FF_MIN_BUFFER_SIZE ||
- *frame_size_ptr < avctx->channels * avctx->frame_size * sizeof(int16_t)){
- av_log(avctx, AV_LOG_ERROR, "buffer %d too small\n", *frame_size_ptr);
- return -1;
+ if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size) {
+ ret = avctx->codec->decode(avctx, frame, got_frame_ptr, avpkt);
+ if (ret >= 0 && *got_frame_ptr) {
+ avctx->frame_number++;
+ frame->pkt_dts = avpkt->dts;
}
-
- ret = avctx->codec->decode(avctx, samples, frame_size_ptr, avpkt);
- avctx->frame_number++;
- }else{
- ret= 0;
- *frame_size_ptr=0;
}
return ret;
}
@@ -1115,7 +1269,8 @@ void avcodec_flush_buffers(AVCodecContext *avctx)
avctx->codec->flush(avctx);
}
-void avcodec_default_free_buffers(AVCodecContext *s){
+static void video_free_buffers(AVCodecContext *s)
+{
AVCodecInternal *avci = s->internal;
int i, j;
@@ -1137,6 +1292,37 @@ void avcodec_default_free_buffers(AVCodecContext *s){
avci->buffer_count=0;
}
+static void audio_free_buffers(AVCodecContext *avctx)
+{
+ AVCodecInternal *avci = avctx->internal;
+ InternalBuffer *buf;
+
+ if (!avci->buffer)
+ return;
+ buf = avci->buffer;
+
+ if (buf->extended_data) {
+ av_free(buf->extended_data[0]);
+ if (buf->extended_data != buf->data)
+ av_free(buf->extended_data);
+ }
+ av_freep(&avci->buffer);
+}
+
+void avcodec_default_free_buffers(AVCodecContext *avctx)
+{
+ switch (avctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ video_free_buffers(avctx);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ audio_free_buffers(avctx);
+ break;
+ default:
+ break;
+ }
+}
+
#if FF_API_OLD_FF_PICT_TYPES
char av_get_pict_type_char(int pict_type){
return av_get_picture_type_char(pict_type);
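
From the application side, the replacement for avcodec_decode_audio3() looks roughly like the sketch below: the caller passes an AVFrame and got_frame reports whether the packet produced output. This is a minimal, assumed usage sketch (error handling and end-of-stream draining omitted), not code from this patch.

AVFrame frame;
AVPacket pkt;
int got_frame = 0, ret;

avcodec_get_frame_defaults(&frame);
/* ... obtain pkt from the demuxer ... */

ret = avcodec_decode_audio4(avctx, &frame, &got_frame, &pkt);
if (ret < 0) {
    /* decoding error */
} else if (got_frame) {
    /* frame.nb_samples samples per channel are available; packed formats
       sit in frame.data[0], planar formats expose one pointer per channel
       in frame.extended_data[ch] */
    int data_size = av_samples_get_buffer_size(NULL, avctx->channels,
                                               frame.nb_samples,
                                               avctx->sample_fmt, 1);
    /* ... consume data_size bytes of audio ... */
}
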
diff --git a/libavcodec/version.h b/libavcodec/version.h
index 7262c81544..6faf793ea1 100644
--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@ -21,7 +21,7 @@
#define AVCODEC_VERSION_H
#define LIBAVCODEC_VERSION_MAJOR 53
-#define LIBAVCODEC_VERSION_MINOR 24
+#define LIBAVCODEC_VERSION_MINOR 25
#define LIBAVCODEC_VERSION_MICRO 0
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
@@ -113,5 +113,8 @@
#ifndef FF_API_DATA_POINTERS
#define FF_API_DATA_POINTERS (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
+#ifndef FF_API_OLD_DECODE_AUDIO
+#define FF_API_OLD_DECODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
#endif /* AVCODEC_VERSION_H */
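
Applications that need to build against both the old and the new decode API can key off the minor version bumped here; one possible guard (assumed, not prescribed by the patch):

#include <libavcodec/avcodec.h>

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(53, 25, 0)
    /* new API: decoder fills an AVFrame */
    ret = avcodec_decode_audio4(avctx, &frame, &got_frame, &pkt);
#else
    /* old API: decoder writes interleaved int16_t samples into a flat buffer */
    out_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
    ret = avcodec_decode_audio3(avctx, samples, &out_size, &pkt);
#endif
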
diff --git a/libavcodec/vmdav.c b/libavcodec/vmdav.c
index 772f98c70f..89b5c2bc6a 100644
--- a/libavcodec/vmdav.c
+++ b/libavcodec/vmdav.c
@@ -473,6 +473,7 @@ static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
#define BLOCK_TYPE_SILENCE 3
typedef struct VmdAudioContext {
+ AVFrame frame;
int out_bps;
int chunk_size;
} VmdAudioContext;
@@ -514,6 +515,9 @@ static av_cold int vmdaudio_decode_init(AVCodecContext *avctx)
s->chunk_size = avctx->block_align + avctx->channels * (s->out_bps == 2);
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
av_log(avctx, AV_LOG_DEBUG, "%d channels, %d bits/sample, "
"block align = %d, sample rate = %d\n",
avctx->channels, avctx->bits_per_coded_sample, avctx->block_align,
@@ -551,22 +555,21 @@ static void decode_audio_s16(int16_t *out, const uint8_t *buf, int buf_size,
}
}
-static int vmdaudio_decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
const uint8_t *buf_end;
int buf_size = avpkt->size;
VmdAudioContext *s = avctx->priv_data;
int block_type, silent_chunks, audio_chunks;
- int nb_samples, out_size;
- uint8_t *output_samples_u8 = data;
- int16_t *output_samples_s16 = data;
+ int ret;
+ uint8_t *output_samples_u8;
+ int16_t *output_samples_s16;
if (buf_size < 16) {
av_log(avctx, AV_LOG_WARNING, "skipping small junk packet\n");
- *data_size = 0;
+ *got_frame_ptr = 0;
return buf_size;
}
@@ -597,10 +600,15 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx,
/* ensure output buffer is large enough */
audio_chunks = buf_size / s->chunk_size;
- nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) / avctx->channels;
- out_size = nb_samples * avctx->channels * s->out_bps;
- if (*data_size < out_size)
- return -1;
+
+ /* get output buffer */
+ s->frame.nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) / avctx->channels;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+ output_samples_u8 = s->frame.data[0];
+ output_samples_s16 = (int16_t *)s->frame.data[0];
/* decode silent chunks */
if (silent_chunks > 0) {
@@ -630,7 +638,9 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx,
}
}
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
+
return avpkt->size;
}
@@ -658,5 +668,6 @@ AVCodec ff_vmdaudio_decoder = {
.priv_data_size = sizeof(VmdAudioContext),
.init = vmdaudio_decode_init,
.decode = vmdaudio_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Sierra VMD audio"),
};
diff --git a/libavcodec/vorbisdec.c b/libavcodec/vorbisdec.c
index b202249e9b..381b61d060 100644
--- a/libavcodec/vorbisdec.c
+++ b/libavcodec/vorbisdec.c
@@ -121,6 +121,7 @@ typedef struct {
typedef struct vorbis_context_s {
AVCodecContext *avccontext;
+ AVFrame frame;
GetBitContext gb;
DSPContext dsp;
FmtConvertContext fmt_conv;
@@ -1033,6 +1034,9 @@ static av_cold int vorbis_decode_init(AVCodecContext *avccontext)
avccontext->sample_rate = vc->audio_samplerate;
avccontext->frame_size = FFMIN(vc->blocksize[0], vc->blocksize[1]) >> 2;
+ avcodec_get_frame_defaults(&vc->frame);
+ avccontext->coded_frame = &vc->frame;
+
return 0;
}
@@ -1605,16 +1609,15 @@ static int vorbis_parse_audio_packet(vorbis_context *vc)
// Return the decoded audio packet through the standard api
-static int vorbis_decode_frame(AVCodecContext *avccontext,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int vorbis_decode_frame(AVCodecContext *avccontext, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
vorbis_context *vc = avccontext->priv_data;
GetBitContext *gb = &(vc->gb);
const float *channel_ptrs[255];
- int i, len, out_size;
+ int i, len, ret;
av_dlog(NULL, "packet length %d \n", buf_size);
@@ -1625,18 +1628,18 @@ static int vorbis_decode_frame(AVCodecContext *avccontext,
if (!vc->first_frame) {
vc->first_frame = 1;
- *data_size = 0;
+ *got_frame_ptr = 0;
return buf_size;
}
av_dlog(NULL, "parsed %d bytes %d bits, returned %d samples (*ch*bits) \n",
get_bits_count(gb) / 8, get_bits_count(gb) % 8, len);
- out_size = len * vc->audio_channels *
- av_get_bytes_per_sample(avccontext->sample_fmt);
- if (*data_size < out_size) {
- av_log(avccontext, AV_LOG_ERROR, "output buffer is too small\n");
- return AVERROR(EINVAL);
+ /* get output buffer */
+ vc->frame.nb_samples = len;
+ if ((ret = avccontext->get_buffer(avccontext, &vc->frame)) < 0) {
+ av_log(avccontext, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
if (vc->audio_channels > 8) {
@@ -1649,12 +1652,15 @@ static int vorbis_decode_frame(AVCodecContext *avccontext,
}
if (avccontext->sample_fmt == AV_SAMPLE_FMT_FLT)
- vc->fmt_conv.float_interleave(data, channel_ptrs, len, vc->audio_channels);
+ vc->fmt_conv.float_interleave((float *)vc->frame.data[0], channel_ptrs,
+ len, vc->audio_channels);
else
- vc->fmt_conv.float_to_int16_interleave(data, channel_ptrs, len,
+ vc->fmt_conv.float_to_int16_interleave((int16_t *)vc->frame.data[0],
+ channel_ptrs, len,
vc->audio_channels);
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = vc->frame;
return buf_size;
}
@@ -1678,6 +1684,7 @@ AVCodec ff_vorbis_decoder = {
.init = vorbis_decode_init,
.close = vorbis_decode_close,
.decode = vorbis_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Vorbis"),
.channel_layouts = ff_vorbis_channel_layouts,
.sample_fmts = (const enum AVSampleFormat[]) {
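
The vorbis decoder keeps its per-channel (planar) buffers and only interleaves them into the AVFrame at the end via fmt_conv.float_interleave(); in spirit that routine does the following (a plain-C reference loop for illustration, not the DSP-optimized implementation):

/* reference behaviour of float_interleave(dst, src, len, channels) */
static void float_interleave_ref(float *dst, const float **src,
                                 unsigned int len, int channels)
{
    unsigned int i;
    int ch;
    for (i = 0; i < len; i++)
        for (ch = 0; ch < channels; ch++)
            *dst++ = src[ch][i];
}
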
diff --git a/libavcodec/wavpack.c b/libavcodec/wavpack.c
index ec46fb166a..e4b7ebe43b 100644
--- a/libavcodec/wavpack.c
+++ b/libavcodec/wavpack.c
@@ -115,8 +115,6 @@ typedef struct WavpackFrameContext {
int float_shift;
int float_max_exp;
WvChannel ch[2];
- int samples_left;
- int max_samples;
int pos;
SavedContext sc, extra_sc;
} WavpackFrameContext;
@@ -125,6 +123,7 @@ typedef struct WavpackFrameContext {
typedef struct WavpackContext {
AVCodecContext *avctx;
+ AVFrame frame;
WavpackFrameContext *fdec[WV_MAX_FRAME_DECODERS];
int fdec_num;
@@ -133,7 +132,6 @@ typedef struct WavpackContext {
int mkv_mode;
int block;
int samples;
- int samples_left;
int ch_offset;
} WavpackContext;
@@ -485,7 +483,6 @@ static float wv_get_value_float(WavpackFrameContext *s, uint32_t *crc, int S)
static void wv_reset_saved_context(WavpackFrameContext *s)
{
s->pos = 0;
- s->samples_left = 0;
s->sc.crc = s->extra_sc.crc = 0xFFFFFFFF;
}
@@ -502,8 +499,7 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, vo
float *dstfl = dst;
const int channel_pad = s->avctx->channels - 2;
- if(s->samples_left == s->samples)
- s->one = s->zero = s->zeroes = 0;
+ s->one = s->zero = s->zeroes = 0;
do{
L = wv_get_value(s, gb, 0, &last);
if(last) break;
@@ -594,13 +590,8 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, vo
dst16 += channel_pad;
}
count++;
- }while(!last && count < s->max_samples);
+ } while (!last && count < s->samples);
- if (last)
- s->samples_left = 0;
- else
- s->samples_left -= count;
- if(!s->samples_left){
wv_reset_saved_context(s);
if(crc != s->CRC){
av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
@@ -610,15 +601,7 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, vo
av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n");
return -1;
}
- }else{
- s->pos = pos;
- s->sc.crc = crc;
- s->sc.bits_used = get_bits_count(&s->gb);
- if(s->got_extra_bits){
- s->extra_sc.crc = crc_extra_bits;
- s->extra_sc.bits_used = get_bits_count(&s->gb_extra_bits);
- }
- }
+
return count * 2;
}
@@ -635,8 +618,7 @@ static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void
float *dstfl = dst;
const int channel_stride = s->avctx->channels;
- if(s->samples_left == s->samples)
- s->one = s->zero = s->zeroes = 0;
+ s->one = s->zero = s->zeroes = 0;
do{
T = wv_get_value(s, gb, 0, &last);
S = 0;
@@ -675,13 +657,8 @@ static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void
dst16 += channel_stride;
}
count++;
- }while(!last && count < s->max_samples);
+ } while (!last && count < s->samples);
- if (last)
- s->samples_left = 0;
- else
- s->samples_left -= count;
- if(!s->samples_left){
wv_reset_saved_context(s);
if(crc != s->CRC){
av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
@@ -691,15 +668,7 @@ static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void
av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n");
return -1;
}
- }else{
- s->pos = pos;
- s->sc.crc = crc;
- s->sc.bits_used = get_bits_count(&s->gb);
- if(s->got_extra_bits){
- s->extra_sc.crc = crc_extra_bits;
- s->extra_sc.bits_used = get_bits_count(&s->gb_extra_bits);
- }
- }
+
return count;
}
@@ -743,6 +712,9 @@ static av_cold int wavpack_decode_init(AVCodecContext *avctx)
s->fdec_num = 0;
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
@@ -759,7 +731,7 @@ static av_cold int wavpack_decode_end(AVCodecContext *avctx)
}
static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
- void *data, int *data_size,
+ void *data, int *got_frame_ptr,
const uint8_t *buf, int buf_size)
{
WavpackContext *wc = avctx->priv_data;
@@ -774,7 +746,7 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
int bpp, chan, chmask;
if (buf_size == 0){
- *data_size = 0;
+ *got_frame_ptr = 0;
return 0;
}
@@ -789,18 +761,16 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
return -1;
}
- if(!s->samples_left){
memset(s->decorr, 0, MAX_TERMS * sizeof(Decorr));
memset(s->ch, 0, sizeof(s->ch));
s->extra_bits = 0;
s->and = s->or = s->shift = 0;
s->got_extra_bits = 0;
- }
if(!wc->mkv_mode){
s->samples = AV_RL32(buf); buf += 4;
if(!s->samples){
- *data_size = 0;
+ *got_frame_ptr = 0;
return 0;
}
}else{
@@ -829,13 +799,6 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
wc->ch_offset += 1 + s->stereo;
- s->max_samples = *data_size / (bpp * avctx->channels);
- s->max_samples = FFMIN(s->max_samples, s->samples);
- if(s->samples_left > 0){
- s->max_samples = FFMIN(s->max_samples, s->samples_left);
- buf = buf_end;
- }
-
// parse metadata blocks
while(buf < buf_end){
id = *buf++;
@@ -1064,7 +1027,7 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
}
if(id & WP_IDF_ODD) buf++;
}
- if(!s->samples_left){
+
if(!got_terms){
av_log(avctx, AV_LOG_ERROR, "No block with decorrelation terms\n");
return -1;
@@ -1101,16 +1064,6 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
s->got_extra_bits = 0;
}
}
- s->samples_left = s->samples;
- }else{
- init_get_bits(&s->gb, orig_buf + s->sc.offset, s->sc.size);
- skip_bits_long(&s->gb, s->sc.bits_used);
- if(s->got_extra_bits){
- init_get_bits(&s->gb_extra_bits, orig_buf + s->extra_sc.offset,
- s->extra_sc.size);
- skip_bits_long(&s->gb_extra_bits, s->extra_sc.bits_used);
- }
- }
if(s->stereo_in){
if(avctx->sample_fmt == AV_SAMPLE_FMT_S16)
@@ -1167,7 +1120,7 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
}
}
- wc->samples_left = s->samples_left;
+ *got_frame_ptr = 1;
return samplecount * bpp;
}
@@ -1181,23 +1134,40 @@ static void wavpack_decode_flush(AVCodecContext *avctx)
wv_reset_saved_context(s->fdec[i]);
}
-static int wavpack_decode_frame(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int wavpack_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
WavpackContext *s = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- int frame_size;
+ int frame_size, ret;
int samplecount = 0;
s->block = 0;
- s->samples_left = 0;
s->ch_offset = 0;
+ /* determine number of samples */
if(s->mkv_mode){
s->samples = AV_RL32(buf); buf += 4;
+ } else {
+ if (s->multichannel)
+ s->samples = AV_RL32(buf + 4);
+ else
+ s->samples = AV_RL32(buf);
+ }
+ if (s->samples <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid number of samples: %d\n",
+ s->samples);
+ return AVERROR(EINVAL);
+ }
+
+ /* get output buffer */
+ s->frame.nb_samples = s->samples;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+
while(buf_size > 0){
if(!s->multichannel){
frame_size = buf_size;
@@ -1216,17 +1186,19 @@ static int wavpack_decode_frame(AVCodecContext *avctx,
wavpack_decode_flush(avctx);
return -1;
}
- if((samplecount = wavpack_decode_block(avctx, s->block, data,
- data_size, buf, frame_size)) < 0) {
+ if((samplecount = wavpack_decode_block(avctx, s->block, s->frame.data[0],
+ got_frame_ptr, buf, frame_size)) < 0) {
wavpack_decode_flush(avctx);
return -1;
}
s->block++;
buf += frame_size; buf_size -= frame_size;
}
- *data_size = samplecount * avctx->channels;
- return s->samples_left > 0 ? 0 : avpkt->size;
+ if (*got_frame_ptr)
+ *(AVFrame *)data = s->frame;
+
+ return avpkt->size;
}
AVCodec ff_wavpack_decoder = {
@@ -1238,6 +1210,6 @@ AVCodec ff_wavpack_decoder = {
.close = wavpack_decode_end,
.decode = wavpack_decode_frame,
.flush = wavpack_decode_flush,
- .capabilities = CODEC_CAP_SUBFRAMES,
+ .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("WavPack"),
};
diff --git a/libavcodec/wma.h b/libavcodec/wma.h
index f11d5507dc..4acbf04bbf 100644
--- a/libavcodec/wma.h
+++ b/libavcodec/wma.h
@@ -65,6 +65,7 @@ typedef struct CoefVLCTable {
typedef struct WMACodecContext {
AVCodecContext* avctx;
+ AVFrame frame;
GetBitContext gb;
PutBitContext pb;
int sample_rate;
diff --git a/libavcodec/wmadec.c b/libavcodec/wmadec.c
index 1e3b7e32a5..5600f9ba90 100644
--- a/libavcodec/wmadec.c
+++ b/libavcodec/wmadec.c
@@ -124,6 +124,10 @@ static int wma_decode_init(AVCodecContext * avctx)
}
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
@@ -797,14 +801,13 @@ static int wma_decode_frame(WMACodecContext *s, int16_t *samples)
return 0;
}
-static int wma_decode_superframe(AVCodecContext *avctx,
- void *data, int *data_size,
- AVPacket *avpkt)
+static int wma_decode_superframe(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
WMACodecContext *s = avctx->priv_data;
- int nb_frames, bit_offset, i, pos, len, out_size;
+ int nb_frames, bit_offset, i, pos, len, ret;
uint8_t *q;
int16_t *samples;
@@ -818,8 +821,6 @@ static int wma_decode_superframe(AVCodecContext *avctx,
return 0;
buf_size = s->block_align;
- samples = data;
-
init_get_bits(&s->gb, buf, buf_size*8);
if (s->use_bit_reservoir) {
@@ -830,12 +831,13 @@ static int wma_decode_superframe(AVCodecContext *avctx,
nb_frames = 1;
}
- out_size = nb_frames * s->frame_len * s->nb_channels *
- av_get_bytes_per_sample(avctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(s->avctx, AV_LOG_ERROR, "Insufficient output space\n");
- goto fail;
+ /* get output buffer */
+ s->frame.nb_samples = nb_frames * s->frame_len;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ samples = (int16_t *)s->frame.data[0];
if (s->use_bit_reservoir) {
bit_offset = get_bits(&s->gb, s->byte_offset_bits + 3);
@@ -903,7 +905,9 @@ static int wma_decode_superframe(AVCodecContext *avctx,
//av_log(NULL, AV_LOG_ERROR, "%d %d %d %d outbytes:%d eaten:%d\n", s->frame_len_bits, s->block_len_bits, s->frame_len, s->block_len, (int8_t *)samples - (int8_t *)data, s->block_align);
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
+
return s->block_align;
fail:
/* when error, we reset the bit reservoir */
@@ -928,6 +932,7 @@ AVCodec ff_wmav1_decoder = {
.close = ff_wma_end,
.decode = wma_decode_superframe,
.flush = flush,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 1"),
};
@@ -940,5 +945,6 @@ AVCodec ff_wmav2_decoder = {
.close = ff_wma_end,
.decode = wma_decode_superframe,
.flush = flush,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 2"),
};
diff --git a/libavcodec/wmaprodec.c b/libavcodec/wmaprodec.c
index aaae6e1f3a..c46a983602 100644
--- a/libavcodec/wmaprodec.c
+++ b/libavcodec/wmaprodec.c
@@ -167,6 +167,7 @@ typedef struct {
typedef struct WMAProDecodeCtx {
/* generic decoder variables */
AVCodecContext* avctx; ///< codec context for av_log
+ AVFrame frame; ///< AVFrame for decoded output
DSPContext dsp; ///< accelerated DSP functions
FmtConvertContext fmt_conv;
uint8_t frame_data[MAX_FRAMESIZE +
@@ -209,8 +210,6 @@ typedef struct WMAProDecodeCtx {
uint32_t frame_num; ///< current frame number (not used for decoding)
GetBitContext gb; ///< bitstream reader context
int buf_bit_size; ///< buffer size in bits
- float* samples; ///< current samplebuffer pointer
- float* samples_end; ///< maximum samplebuffer pointer
uint8_t drc_gain; ///< gain for the DRC tool
int8_t skip_frame; ///< skip output step
int8_t parsed_all_subframes; ///< all subframes decoded?
@@ -453,6 +452,10 @@ static av_cold int decode_init(AVCodecContext *avctx)
dump_context(s);
avctx->channel_layout = channel_mask;
+
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
@@ -1279,22 +1282,15 @@ static int decode_subframe(WMAProDecodeCtx *s)
*@return 0 if the trailer bit indicates that this is the last frame,
* 1 if there are additional frames
*/
-static int decode_frame(WMAProDecodeCtx *s)
+static int decode_frame(WMAProDecodeCtx *s, int *got_frame_ptr)
{
+ AVCodecContext *avctx = s->avctx;
GetBitContext* gb = &s->gb;
int more_frames = 0;
int len = 0;
- int i;
+ int i, ret;
const float *out_ptr[WMAPRO_MAX_CHANNELS];
-
- /** check for potential output buffer overflow */
- if (s->num_channels * s->samples_per_frame > s->samples_end - s->samples) {
- /** return an error if no frame could be decoded at all */
- av_log(s->avctx, AV_LOG_ERROR,
- "not enough space for the output samples\n");
- s->packet_loss = 1;
- return 0;
- }
+ float *samples;
/** get frame length */
if (s->len_prefix)
@@ -1360,10 +1356,19 @@ static int decode_frame(WMAProDecodeCtx *s)
}
}
+ /* get output buffer */
+ s->frame.nb_samples = s->samples_per_frame;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ s->packet_loss = 1;
+ return 0;
+ }
+ samples = (float *)s->frame.data[0];
+
/** interleave samples and write them to the output buffer */
for (i = 0; i < s->num_channels; i++)
out_ptr[i] = s->channel[i].out;
- s->fmt_conv.float_interleave(s->samples, out_ptr, s->samples_per_frame,
+ s->fmt_conv.float_interleave(samples, out_ptr, s->samples_per_frame,
s->num_channels);
for (i = 0; i < s->num_channels; i++) {
@@ -1375,8 +1380,10 @@ static int decode_frame(WMAProDecodeCtx *s)
if (s->skip_frame) {
s->skip_frame = 0;
- } else
- s->samples += s->num_channels * s->samples_per_frame;
+ *got_frame_ptr = 0;
+ } else {
+ *got_frame_ptr = 1;
+ }
if (s->len_prefix) {
if (len != (get_bits_count(gb) - s->frame_offset) + 2) {
@@ -1473,8 +1480,8 @@ static void save_bits(WMAProDecodeCtx *s, GetBitContext* gb, int len,
*@param avpkt input packet
*@return number of bytes that were read from the input buffer
*/
-static int decode_packet(AVCodecContext *avctx,
- void *data, int *data_size, AVPacket* avpkt)
+static int decode_packet(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket* avpkt)
{
WMAProDecodeCtx *s = avctx->priv_data;
GetBitContext* gb = &s->pgb;
@@ -1483,9 +1490,7 @@ static int decode_packet(AVCodecContext *avctx,
int num_bits_prev_frame;
int packet_sequence_number;
- s->samples = data;
- s->samples_end = (float*)((int8_t*)data + *data_size);
- *data_size = 0;
+ *got_frame_ptr = 0;
if (s->packet_done || s->packet_loss) {
s->packet_done = 0;
@@ -1532,7 +1537,7 @@ static int decode_packet(AVCodecContext *avctx,
/** decode the cross packet frame if it is valid */
if (!s->packet_loss)
- decode_frame(s);
+ decode_frame(s, got_frame_ptr);
} else if (s->num_saved_bits - s->frame_offset) {
av_dlog(avctx, "ignoring %x previously saved bits\n",
s->num_saved_bits - s->frame_offset);
@@ -1555,7 +1560,7 @@ static int decode_packet(AVCodecContext *avctx,
(frame_size = show_bits(gb, s->log2_frame_size)) &&
frame_size <= remaining_bits(s, gb)) {
save_bits(s, gb, frame_size, 0);
- s->packet_done = !decode_frame(s);
+ s->packet_done = !decode_frame(s, got_frame_ptr);
} else if (!s->len_prefix
&& s->num_saved_bits > get_bits_count(&s->gb)) {
/** when the frames do not have a length prefix, we don't know
@@ -1565,7 +1570,7 @@ static int decode_packet(AVCodecContext *avctx,
therefore we save the incoming packet first, then we append
the "previous frame" data from the next packet so that
we get a buffer that only contains full frames */
- s->packet_done = !decode_frame(s);
+ s->packet_done = !decode_frame(s, got_frame_ptr);
} else
s->packet_done = 1;
}
@@ -1577,10 +1582,14 @@ static int decode_packet(AVCodecContext *avctx,
save_bits(s, gb, remaining_bits(s, gb), 0);
}
- *data_size = (int8_t *)s->samples - (int8_t *)data;
s->packet_offset = get_bits_count(gb) & 7;
+ if (s->packet_loss)
+ return AVERROR_INVALIDDATA;
+
+ if (*got_frame_ptr)
+ *(AVFrame *)data = s->frame;
- return (s->packet_loss) ? AVERROR_INVALIDDATA : get_bits_count(gb) >> 3;
+ return get_bits_count(gb) >> 3;
}
/**
@@ -1611,7 +1620,7 @@ AVCodec ff_wmapro_decoder = {
.init = decode_init,
.close = decode_end,
.decode = decode_packet,
- .capabilities = CODEC_CAP_SUBFRAMES,
+ .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
.flush= flush,
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Professional"),
};
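
The wmaprodec.c conversion above is representative of the decoder-side pattern this patch applies throughout: set nb_samples before calling get_buffer(), write the interleaved samples into frame.data[0], raise *got_frame_ptr, and hand the frame back through the opaque data pointer. A condensed sketch of that shape (XXXContext and the decode step are placeholders, not the actual wmaprodec symbols):

    /* Decoder-side sketch of the output pattern used above. */
    static int xxx_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
    {
        XXXContext *s = avctx->priv_data;
        int ret;

        s->frame.nb_samples = s->samples_per_frame;   /* must be set before get_buffer() */
        if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }

        /* ... decode avpkt->data into (float *)s->frame.data[0] ... */

        *got_frame_ptr   = 1;
        *(AVFrame *)data = s->frame;
        return avpkt->size;
    }

Note that on an allocation failure wmaprodec deliberately sets packet_loss and returns 0 from decode_frame(); the error is then reported from decode_packet() as AVERROR_INVALIDDATA.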
diff --git a/libavcodec/wmavoice.c b/libavcodec/wmavoice.c
index d6d4cb2963..6f3a6b2372 100644
--- a/libavcodec/wmavoice.c
+++ b/libavcodec/wmavoice.c
@@ -131,6 +131,7 @@ typedef struct {
* @name Global values specified in the stream header / extradata or used all over.
* @{
*/
+ AVFrame frame;
GetBitContext gb; ///< packet bitreader. During decoder init,
///< it contains the extradata from the
///< demuxer. During decoding, it contains
@@ -438,6 +439,9 @@ static av_cold int wmavoice_decode_init(AVCodecContext *ctx)
ctx->sample_fmt = AV_SAMPLE_FMT_FLT;
+ avcodec_get_frame_defaults(&s->frame);
+ ctx->coded_frame = &s->frame;
+
return 0;
}
@@ -1725,17 +1729,17 @@ static int check_bits_for_superframe(GetBitContext *orig_gb,
* @return 0 on success, <0 on error or 1 if there was not enough data to
* fully parse the superframe
*/
-static int synth_superframe(AVCodecContext *ctx,
- float *samples, int *data_size)
+static int synth_superframe(AVCodecContext *ctx, int *got_frame_ptr)
{
WMAVoiceContext *s = ctx->priv_data;
GetBitContext *gb = &s->gb, s_gb;
- int n, res, out_size, n_samples = 480;
+ int n, res, n_samples = 480;
double lsps[MAX_FRAMES][MAX_LSPS];
const double *mean_lsf = s->lsps == 16 ?
wmavoice_mean_lsf16[s->lsp_def_mode] : wmavoice_mean_lsf10[s->lsp_def_mode];
float excitation[MAX_SIGNAL_HISTORY + MAX_SFRAMESIZE + 12];
float synth[MAX_LSPS + MAX_SFRAMESIZE];
+ float *samples;
memcpy(synth, s->synth_history,
s->lsps * sizeof(*synth));
@@ -1749,7 +1753,7 @@ static int synth_superframe(AVCodecContext *ctx,
}
if ((res = check_bits_for_superframe(gb, s)) == 1) {
- *data_size = 0;
+ *got_frame_ptr = 0;
return 1;
}
@@ -1792,13 +1796,14 @@ static int synth_superframe(AVCodecContext *ctx,
stabilize_lsps(lsps[n], s->lsps);
}
- out_size = n_samples * av_get_bytes_per_sample(ctx->sample_fmt);
- if (*data_size < out_size) {
- av_log(ctx, AV_LOG_ERROR,
- "Output buffer too small (%d given - %d needed)\n",
- *data_size, out_size);
- return -1;
+ /* get output buffer */
+ s->frame.nb_samples = 480;
+ if ((res = ctx->get_buffer(ctx, &s->frame)) < 0) {
+ av_log(ctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return res;
}
+ s->frame.nb_samples = n_samples;
+ samples = (float *)s->frame.data[0];
    /* Parse frames, optionally preceded by per-frame (independent) LSPs. */
for (n = 0; n < 3; n++) {
@@ -1820,7 +1825,7 @@ static int synth_superframe(AVCodecContext *ctx,
lsps[n], n == 0 ? s->prev_lsps : lsps[n - 1],
&excitation[s->history_nsamples + n * MAX_FRAMESIZE],
&synth[s->lsps + n * MAX_FRAMESIZE]))) {
- *data_size = 0;
+ *got_frame_ptr = 0;
return res;
}
}
@@ -1833,8 +1838,7 @@ static int synth_superframe(AVCodecContext *ctx,
skip_bits(gb, 10 * (res + 1));
}
- /* Specify nr. of output samples */
- *data_size = out_size;
+ *got_frame_ptr = 1;
/* Update history */
memcpy(s->prev_lsps, lsps[2],
@@ -1922,7 +1926,7 @@ static void copy_bits(PutBitContext *pb,
* For more information about frames, see #synth_superframe().
*/
static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
- int *data_size, AVPacket *avpkt)
+ int *got_frame_ptr, AVPacket *avpkt)
{
WMAVoiceContext *s = ctx->priv_data;
GetBitContext *gb = &s->gb;
@@ -1935,7 +1939,7 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
* capping the packet size at ctx->block_align. */
for (size = avpkt->size; size > ctx->block_align; size -= ctx->block_align);
if (!size) {
- *data_size = 0;
+ *got_frame_ptr = 0;
return 0;
}
init_get_bits(&s->gb, avpkt->data, size << 3);
@@ -1956,10 +1960,11 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
copy_bits(&s->pb, avpkt->data, size, gb, s->spillover_nbits);
flush_put_bits(&s->pb);
s->sframe_cache_size += s->spillover_nbits;
- if ((res = synth_superframe(ctx, data, data_size)) == 0 &&
- *data_size > 0) {
+ if ((res = synth_superframe(ctx, got_frame_ptr)) == 0 &&
+ *got_frame_ptr) {
cnt += s->spillover_nbits;
s->skip_bits_next = cnt & 7;
+ *(AVFrame *)data = s->frame;
return cnt >> 3;
} else
skip_bits_long (gb, s->spillover_nbits - cnt +
@@ -1974,11 +1979,12 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
s->sframe_cache_size = 0;
s->skip_bits_next = 0;
pos = get_bits_left(gb);
- if ((res = synth_superframe(ctx, data, data_size)) < 0) {
+ if ((res = synth_superframe(ctx, got_frame_ptr)) < 0) {
return res;
- } else if (*data_size > 0) {
+ } else if (*got_frame_ptr) {
int cnt = get_bits_count(gb);
s->skip_bits_next = cnt & 7;
+ *(AVFrame *)data = s->frame;
return cnt >> 3;
} else if ((s->sframe_cache_size = pos) > 0) {
/* rewind bit reader to start of last (incomplete) superframe... */
@@ -2046,7 +2052,7 @@ AVCodec ff_wmavoice_decoder = {
.init = wmavoice_decode_init,
.close = wmavoice_decode_end,
.decode = wmavoice_decode_packet,
- .capabilities = CODEC_CAP_SUBFRAMES,
+ .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
.flush = wmavoice_flush,
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Voice"),
};
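
In synth_superframe() above, frame.nb_samples is first set to 480, the superframe maximum, before get_buffer(), and only lowered to n_samples afterwards: get_buffer() sizes the allocation from nb_samples, so the upper bound has to be in place at allocation time, while the real count may be stored later. A small illustrative helper (hypothetical, not part of the patch) capturing that ordering:

    /* Illustrative only: allocate an output frame sized for max_samples, then
     * record the actual (smaller or equal) sample count. */
    static int alloc_audio_frame(AVCodecContext *avctx, AVFrame *frame,
                                 int max_samples, int actual_samples)
    {
        int ret;

        frame->nb_samples = max_samples;        /* allocation is based on this */
        if ((ret = avctx->get_buffer(avctx, frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }
        frame->nb_samples = actual_samples;     /* <= max_samples */
        return 0;
    }

ws-snd1.c below uses the same trick in the other direction, shrinking nb_samples after decoding once the real output length is known.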
diff --git a/libavcodec/ws-snd1.c b/libavcodec/ws-snd1.c
index dfbe4acab5..b2d086e073 100644
--- a/libavcodec/ws-snd1.c
+++ b/libavcodec/ws-snd1.c
@@ -37,26 +37,37 @@ static const int8_t ws_adpcm_4bit[] = {
0, 1, 2, 3, 4, 5, 6, 8
};
+typedef struct WSSndContext {
+ AVFrame frame;
+} WSSndContext;
+
static av_cold int ws_snd_decode_init(AVCodecContext *avctx)
{
+ WSSndContext *s = avctx->priv_data;
+
if (avctx->channels != 1) {
av_log_ask_for_sample(avctx, "unsupported number of channels\n");
return AVERROR(EINVAL);
}
avctx->sample_fmt = AV_SAMPLE_FMT_U8;
+
+ avcodec_get_frame_defaults(&s->frame);
+ avctx->coded_frame = &s->frame;
+
return 0;
}
static int ws_snd_decode_frame(AVCodecContext *avctx, void *data,
- int *data_size, AVPacket *avpkt)
+ int *got_frame_ptr, AVPacket *avpkt)
{
+ WSSndContext *s = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- int in_size, out_size;
+ int in_size, out_size, ret;
int sample = 128;
- uint8_t *samples = data;
+ uint8_t *samples;
uint8_t *samples_end;
if (!buf_size)
@@ -71,19 +82,24 @@ static int ws_snd_decode_frame(AVCodecContext *avctx, void *data,
in_size = AV_RL16(&buf[2]);
buf += 4;
- if (out_size > *data_size) {
- av_log(avctx, AV_LOG_ERROR, "Frame is too large to fit in buffer\n");
- return -1;
- }
if (in_size > buf_size) {
av_log(avctx, AV_LOG_ERROR, "Frame data is larger than input buffer\n");
return -1;
}
+
+ /* get output buffer */
+ s->frame.nb_samples = out_size;
+ if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+ samples = s->frame.data[0];
samples_end = samples + out_size;
if (in_size == out_size) {
memcpy(samples, buf, out_size);
- *data_size = out_size;
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
return buf_size;
}
@@ -159,7 +175,9 @@ static int ws_snd_decode_frame(AVCodecContext *avctx, void *data,
}
}
- *data_size = samples - (uint8_t *)data;
+ s->frame.nb_samples = samples - s->frame.data[0];
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = s->frame;
return buf_size;
}
@@ -168,7 +186,9 @@ AVCodec ff_ws_snd1_decoder = {
.name = "ws_snd1",
.type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_WESTWOOD_SND1,
+ .priv_data_size = sizeof(WSSndContext),
.init = ws_snd_decode_init,
.decode = ws_snd_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Westwood Audio (SND1)"),
};
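
ws-snd1.c previously had no private context at all; the patch introduces WSSndContext purely to carry the AVFrame between calls and wires it up via priv_data_size. For decoders in the same situation, the minimal boilerplate reduces to the following (illustrative names, mirroring the init code above):

    /* Minimal per-decoder state for the AVFrame-based output path. */
    typedef struct MyDecContext {
        AVFrame frame;
    } MyDecContext;

    static av_cold int mydec_init(AVCodecContext *avctx)
    {
        MyDecContext *s = avctx->priv_data;

        avctx->sample_fmt = AV_SAMPLE_FMT_U8;   /* whatever the decoder outputs */

        avcodec_get_frame_defaults(&s->frame);
        avctx->coded_frame = &s->frame;         /* coded_frame tracks the output frame */
        return 0;
    }

The corresponding AVCodec entry then only needs .priv_data_size = sizeof(MyDecContext) and CODEC_CAP_DR1 in .capabilities, as in ff_ws_snd1_decoder above.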