path: root/libavcodec
author     Mike Melanson    2003-10-01 04:39:38 +0000
committer  Mike Melanson    2003-10-01 04:39:38 +0000
commit     2fdf638b0c66c22357d56f7cb205dd241b4a8c58 (patch)
tree       cdf625b0779d58f7a4c439ac61d6ba193911a81d /libavcodec
parent     f2f6134b9e5abb0890867d47ba8c0e293d0ba2fe (diff)
New demuxers: Sega FILM/CPK, Westwood VQA & AUD; new decoders: MS RLE &
Video-1, Apple RPZA, Cinepak, Westwood IMA ADPCM

Originally committed as revision 2324 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/Makefile       3
-rw-r--r--  libavcodec/adpcm.c       22
-rw-r--r--  libavcodec/allcodecs.c    5
-rw-r--r--  libavcodec/avcodec.h     11
-rw-r--r--  libavcodec/cinepak.c    456
-rw-r--r--  libavcodec/msrle.c      219
-rw-r--r--  libavcodec/msvideo1.c   378
-rw-r--r--  libavcodec/rpza.c       310
8 files changed, 1402 insertions, 2 deletions
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index b34f0c3b31..bbad77ba69 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -17,7 +17,8 @@ OBJS= common.o utils.o mem.o allcodecs.o \
ratecontrol.o adpcm.o eval.o dv.o error_resilience.o \
fft.o mdct.o mace.o huffyuv.o cyuv.o opts.o raw.o h264.o golomb.o \
vp3.o asv1.o 4xm.o cabac.o ffv1.o ra144.o ra288.o vcr1.o cljr.o \
- roqvideo.o dpcm.o interplayvideo.o xan.o
+ roqvideo.o dpcm.o interplayvideo.o xan.o rpza.o cinepak.o msrle.o \
+ msvideo1.o
ifeq ($(AMR_NB),yes)
ifeq ($(AMR_NB_FIXED),yes)
diff --git a/libavcodec/adpcm.c b/libavcodec/adpcm.c
index a9b3195d27..84d1e39f77 100644
--- a/libavcodec/adpcm.c
+++ b/libavcodec/adpcm.c
@@ -22,7 +22,7 @@
* @file adpcm.c
* ADPCM codecs.
* First version by Francois Revol revol@free.fr
- * Fringe ADPCM codecs (e.g., DK3 and DK4)
+ * Fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
* by Mike Melanson (melanson@pcisys.net)
*
* Features and limitations:
@@ -658,6 +658,25 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
*samples++ = c->status[0].predictor - c->status[1].predictor;
}
break;
+ case CODEC_ID_ADPCM_IMA_WS:
+ /* no per-block initialization; just start decoding the data */
+ while (src < buf + buf_size) {
+
+ if (st) {
+ *samples++ = adpcm_ima_expand_nibble(&c->status[0],
+ (src[0] >> 4) & 0x0F);
+ *samples++ = adpcm_ima_expand_nibble(&c->status[1],
+ src[0] & 0x0F);
+ } else {
+ *samples++ = adpcm_ima_expand_nibble(&c->status[0],
+ (src[0] >> 4) & 0x0F);
+ *samples++ = adpcm_ima_expand_nibble(&c->status[0],
+ src[0] & 0x0F);
+ }
+
+ src++;
+ }
+ break;
default:
*data_size = 0;
return -1;
@@ -692,6 +711,7 @@ ADPCM_CODEC(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt);
ADPCM_CODEC(CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav);
ADPCM_CODEC(CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3);
ADPCM_CODEC(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4);
+ADPCM_CODEC(CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws);
ADPCM_CODEC(CODEC_ID_ADPCM_MS, adpcm_ms);
ADPCM_CODEC(CODEC_ID_ADPCM_4XM, adpcm_4xm);
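
The new CODEC_ID_ADPCM_IMA_WS case above leans on adpcm_ima_expand_nibble(), which already lives elsewhere in adpcm.c and is not part of this hunk. For reference, here is a minimal standalone sketch of standard IMA ADPCM nibble expansion; the table values are the usual IMA ones, but the names and exact clamping details are this sketch's own and may not match the helper in adpcm.c precisely.

/* Standalone sketch of standard IMA ADPCM nibble expansion, for
 * illustration only; not the adpcm.c helper itself. */
#include <stdint.h>

static const int ima_index_table[16] = {
    -1, -1, -1, -1, 2, 4, 6, 8,
    -1, -1, -1, -1, 2, 4, 6, 8
};

static const int ima_step_table[89] = {
        7,     8,     9,    10,    11,    12,    13,    14,    16,    17,
       19,    21,    23,    25,    28,    31,    34,    37,    41,    45,
       50,    55,    60,    66,    73,    80,    88,    97,   107,   118,
      130,   143,   157,   173,   190,   209,   230,   253,   279,   307,
      337,   371,   408,   449,   494,   544,   598,   658,   724,   796,
      876,   963,  1060,  1166,  1282,  1411,  1552,  1707,  1878,  2066,
     2272,  2499,  2749,  3024,  3327,  3660,  4026,  4428,  4871,  5358,
     5894,  6484,  7132,  7845,  8630,  9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};

typedef struct {
    int predictor;   /* previous output sample */
    int step_index;  /* index into ima_step_table */
} ima_state;

static int16_t ima_expand_nibble(ima_state *st, uint8_t nibble)
{
    int step = ima_step_table[st->step_index];
    int diff = step >> 3;                 /* approximates step * (n + 0.5) / 4 */

    if (nibble & 1) diff += step >> 2;
    if (nibble & 2) diff += step >> 1;
    if (nibble & 4) diff += step;
    if (nibble & 8) st->predictor -= diff;
    else            st->predictor += diff;

    /* clamp the output sample and the step index */
    if (st->predictor >  32767) st->predictor =  32767;
    if (st->predictor < -32768) st->predictor = -32768;
    st->step_index += ima_index_table[nibble];
    if (st->step_index < 0)  st->step_index = 0;
    if (st->step_index > 88) st->step_index = 88;

    return (int16_t)st->predictor;
}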
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index 8879bf1635..40af69ee82 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -123,6 +123,10 @@ void avcodec_register_all(void)
register_avcodec(&roq_decoder);
register_avcodec(&interplay_video_decoder);
register_avcodec(&xan_wc3_decoder);
+ register_avcodec(&rpza_decoder);
+ register_avcodec(&cinepak_decoder);
+ register_avcodec(&msrle_decoder);
+ register_avcodec(&msvideo1_decoder);
#ifdef CONFIG_AC3
register_avcodec(&ac3_decoder);
#endif
@@ -163,6 +167,7 @@ PCM_CODEC(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt);
PCM_CODEC(CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav);
PCM_CODEC(CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3);
PCM_CODEC(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4);
+PCM_CODEC(CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws);
PCM_CODEC(CODEC_ID_ADPCM_MS, adpcm_ms);
PCM_CODEC(CODEC_ID_ADPCM_4XM, adpcm_4xm);
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index ad19cdcd55..61b0aecd2c 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -74,6 +74,11 @@ enum CodecID {
CODEC_ID_INTERPLAY_VIDEO,
CODEC_ID_XAN_WC3,
CODEC_ID_XAN_WC4,
+ CODEC_ID_RPZA,
+ CODEC_ID_CINEPAK,
+ CODEC_ID_WS_VQA,
+ CODEC_ID_MSRLE,
+ CODEC_ID_MSVIDEO1,
/* various pcm "codecs" */
CODEC_ID_PCM_S16LE,
@@ -90,6 +95,7 @@ enum CodecID {
CODEC_ID_ADPCM_IMA_WAV,
CODEC_ID_ADPCM_IMA_DK3,
CODEC_ID_ADPCM_IMA_DK4,
+ CODEC_ID_ADPCM_IMA_WS,
CODEC_ID_ADPCM_MS,
CODEC_ID_ADPCM_4XM,
@@ -1419,6 +1425,10 @@ extern AVCodec mdec_decoder;
extern AVCodec roq_decoder;
extern AVCodec interplay_video_decoder;
extern AVCodec xan_wc3_decoder;
+extern AVCodec rpza_decoder;
+extern AVCodec cinepak_decoder;
+extern AVCodec msrle_decoder;
+extern AVCodec msvideo1_decoder;
extern AVCodec ra_144_decoder;
extern AVCodec ra_288_decoder;
extern AVCodec roq_dpcm_decoder;
@@ -1445,6 +1455,7 @@ PCM_CODEC(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt);
PCM_CODEC(CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav);
PCM_CODEC(CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3);
PCM_CODEC(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4);
+PCM_CODEC(CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws);
PCM_CODEC(CODEC_ID_ADPCM_MS, adpcm_ms);
PCM_CODEC(CODEC_ID_ADPCM_4XM, adpcm_4xm);
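
With the new CodecID values and registrations in place, a caller reaches these decoders through the normal libavcodec entry points. The sketch below is a rough usage outline, assuming the libavcodec API of this era (avcodec_register_all(), avcodec_open(), avcodec_decode_video()); the fixed 320x240 dimensions and the error handling are placeholders, since width and height normally come from a demuxer such as the new Sega FILM/CPK or Westwood VQA demuxers.

/* Rough usage sketch only; assumes the 2003-era libavcodec API and
 * placeholder dimensions. */
#include "avcodec.h"

int decode_one_cinepak_frame(uint8_t *buf, int buf_size)
{
    AVCodec *codec;
    AVCodecContext *c;
    AVFrame *picture;
    int got_picture = 0;

    avcodec_init();
    avcodec_register_all();

    codec = avcodec_find_decoder(CODEC_ID_CINEPAK);
    if (!codec)
        return -1;

    c = avcodec_alloc_context();
    picture = avcodec_alloc_frame();

    /* the demuxer normally supplies these before the first decode call */
    c->width  = 320;
    c->height = 240;

    if (avcodec_open(c, codec) < 0)
        return -1;

    avcodec_decode_video(c, picture, &got_picture, buf, buf_size);

    avcodec_close(c);
    av_free(c);
    av_free(picture);

    return got_picture ? 0 : -1;
}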
diff --git a/libavcodec/cinepak.c b/libavcodec/cinepak.c
new file mode 100644
index 0000000000..c34a2d5ff3
--- /dev/null
+++ b/libavcodec/cinepak.c
@@ -0,0 +1,456 @@
+/*
+ * Cinepak Video Decoder
+ * Copyright (C) 2003 the ffmpeg project
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+/**
+ * @file cinepak.c
+ * Cinepak video decoder
+ * by Ewald Snel <ewald@rambo.its.tudelft.nl>
+ * For more information on the Cinepak algorithm, visit:
+ * http://www.csse.monash.edu.au/~timf/
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "common.h"
+#include "avcodec.h"
+#include "dsputil.h"
+
+#define PALETTE_COUNT 256
+
+#define BE_16(x) ((((uint8_t*)(x))[0] << 8) | ((uint8_t*)(x))[1])
+#define BE_32(x) ((((uint8_t*)(x))[0] << 24) | \
+ (((uint8_t*)(x))[1] << 16) | \
+ (((uint8_t*)(x))[2] << 8) | \
+ ((uint8_t*)(x))[3])
+
+typedef struct {
+ uint8_t y0, y1, y2, y3;
+ uint8_t u, v;
+} cvid_codebook_t;
+
+#define MAX_STRIPS 32
+
+typedef struct {
+ uint16_t id;
+ uint16_t x1, y1;
+ uint16_t x2, y2;
+ cvid_codebook_t v4_codebook[256];
+ cvid_codebook_t v1_codebook[256];
+} cvid_strip_t;
+
+typedef struct CinepakContext {
+
+ AVCodecContext *avctx;
+ DSPContext dsp;
+ AVFrame frame;
+ AVFrame prev_frame;
+
+ unsigned char *data;
+ int size;
+
+ unsigned char palette[PALETTE_COUNT * 4];
+ int palette_video;
+ cvid_strip_t strips[MAX_STRIPS];
+
+} CinepakContext;
+
+static void cinepak_decode_codebook (cvid_codebook_t *codebook,
+ int chunk_id, int size, uint8_t *data)
+{
+ uint8_t *eod = (data + size);
+ uint32_t flag, mask;
+ int i, n;
+
+ /* check if this chunk contains 4- or 6-element vectors */
+ n = (chunk_id & 0x0400) ? 4 : 6;
+ flag = 0;
+ mask = 0;
+
+ for (i=0; i < 256; i++) {
+ if ((chunk_id & 0x0100) && !(mask >>= 1)) {
+ if ((data + 4) > eod)
+ break;
+
+ flag = BE_32 (data);
+ data += 4;
+ mask = 0x80000000;
+ }
+
+ if (!(chunk_id & 0x0100) || (flag & mask)) {
+ if ((data + n) > eod)
+ break;
+
+ if (n == 6) {
+ codebook[i].y0 = *data++;
+ codebook[i].y1 = *data++;
+ codebook[i].y2 = *data++;
+ codebook[i].y3 = *data++;
+ codebook[i].u = 128 + *data++;
+ codebook[i].v = 128 + *data++;
+ } else {
+ /* this codebook type indicates either greyscale or
+ * palettized video; if palettized, U & V components will
+ * not be used so it is safe to set them to 128 for the
+ * benefit of greyscale rendering in YUV420P */
+ codebook[i].y0 = *data++;
+ codebook[i].y1 = *data++;
+ codebook[i].y2 = *data++;
+ codebook[i].y3 = *data++;
+ codebook[i].u = 128;
+ codebook[i].v = 128;
+ }
+ }
+ }
+}
+
+static int cinepak_decode_vectors (CinepakContext *s, cvid_strip_t *strip,
+ int chunk_id, int size, uint8_t *data)
+{
+ uint8_t *eod = (data + size);
+ uint32_t flag, mask;
+ cvid_codebook_t *codebook;
+ unsigned int i, j, x, y;
+ uint32_t iy[4];
+ uint32_t iu[2];
+ uint32_t iv[2];
+
+ flag = 0;
+ mask = 0;
+
+ for (y=strip->y1; y < strip->y2; y+=4) {
+
+ iy[0] = strip->x1 + (y * s->frame.linesize[0]);
+ iy[1] = iy[0] + s->frame.linesize[0];
+ iy[2] = iy[1] + s->frame.linesize[0];
+ iy[3] = iy[2] + s->frame.linesize[0];
+ iu[0] = (strip->x1/2) + ((y/2) * s->frame.linesize[1]);
+ iu[1] = iu[0] + s->frame.linesize[1];
+ iv[0] = (strip->x1/2) + ((y/2) * s->frame.linesize[2]);
+ iv[1] = iv[0] + s->frame.linesize[2];
+
+ for (x=strip->x1; x < strip->x2; x+=4) {
+ if ((chunk_id & 0x0100) && !(mask >>= 1)) {
+ if ((data + 4) > eod)
+ return -1;
+
+ flag = BE_32 (data);
+ data += 4;
+ mask = 0x80000000;
+ }
+
+ if (!(chunk_id & 0x0100) || (flag & mask)) {
+ if (!(chunk_id & 0x0200) && !(mask >>= 1)) {
+ if ((data + 4) > eod)
+ return -1;
+
+ flag = BE_32 (data);
+ data += 4;
+ mask = 0x80000000;
+ }
+
+ if ((chunk_id & 0x0200) || (~flag & mask)) {
+ if (data >= eod)
+ return -1;
+
+ codebook = &strip->v1_codebook[*data++];
+ s->frame.data[0][iy[0] + 0] = codebook->y0;
+ s->frame.data[0][iy[0] + 1] = codebook->y0;
+ s->frame.data[0][iy[1] + 0] = codebook->y0;
+ s->frame.data[0][iy[1] + 1] = codebook->y0;
+ if (!s->palette_video) {
+ s->frame.data[1][iu[0]] = codebook->u;
+ s->frame.data[2][iv[0]] = codebook->v;
+ }
+
+ s->frame.data[0][iy[0] + 2] = codebook->y1;
+ s->frame.data[0][iy[0] + 3] = codebook->y1;
+ s->frame.data[0][iy[1] + 2] = codebook->y1;
+ s->frame.data[0][iy[1] + 3] = codebook->y1;
+ if (!s->palette_video) {
+ s->frame.data[1][iu[0] + 1] = codebook->u;
+ s->frame.data[2][iv[0] + 1] = codebook->v;
+ }
+
+ s->frame.data[0][iy[2] + 0] = codebook->y2;
+ s->frame.data[0][iy[2] + 1] = codebook->y2;
+ s->frame.data[0][iy[3] + 0] = codebook->y2;
+ s->frame.data[0][iy[3] + 1] = codebook->y2;
+ if (!s->palette_video) {
+ s->frame.data[1][iu[1]] = codebook->u;
+ s->frame.data[2][iv[1]] = codebook->v;
+ }
+
+ s->frame.data[0][iy[2] + 2] = codebook->y3;
+ s->frame.data[0][iy[2] + 3] = codebook->y3;
+ s->frame.data[0][iy[3] + 2] = codebook->y3;
+ s->frame.data[0][iy[3] + 3] = codebook->y3;
+ if (!s->palette_video) {
+ s->frame.data[1][iu[1] + 1] = codebook->u;
+ s->frame.data[2][iv[1] + 1] = codebook->v;
+ }
+
+ } else if (flag & mask) {
+ if ((data + 4) > eod)
+ return -1;
+
+ codebook = &strip->v4_codebook[*data++];
+ s->frame.data[0][iy[0] + 0] = codebook->y0;
+ s->frame.data[0][iy[0] + 1] = codebook->y1;
+ s->frame.data[0][iy[1] + 0] = codebook->y2;
+ s->frame.data[0][iy[1] + 1] = codebook->y3;
+ if (!s->palette_video) {
+ s->frame.data[1][iu[0]] = codebook->u;
+ s->frame.data[2][iv[0]] = codebook->v;
+ }
+
+ codebook = &strip->v4_codebook[*data++];
+ s->frame.data[0][iy[0] + 2] = codebook->y0;
+ s->frame.data[0][iy[0] + 3] = codebook->y1;
+ s->frame.data[0][iy[1] + 2] = codebook->y2;
+ s->frame.data[0][iy[1] + 3] = codebook->y3;
+ if (!s->palette_video) {
+ s->frame.data[1][iu[0] + 1] = codebook->u;
+ s->frame.data[2][iv[0] + 1] = codebook->v;
+ }
+
+ codebook = &strip->v4_codebook[*data++];
+ s->frame.data[0][iy[2] + 0] = codebook->y0;
+ s->frame.data[0][iy[2] + 1] = codebook->y1;
+ s->frame.data[0][iy[3] + 0] = codebook->y2;
+ s->frame.data[0][iy[3] + 1] = codebook->y3;
+ if (!s->palette_video) {
+ s->frame.data[1][iu[1]] = codebook->u;
+ s->frame.data[2][iv[1]] = codebook->v;
+ }
+
+ codebook = &strip->v4_codebook[*data++];
+ s->frame.data[0][iy[2] + 2] = codebook->y0;
+ s->frame.data[0][iy[2] + 3] = codebook->y1;
+ s->frame.data[0][iy[3] + 2] = codebook->y2;
+ s->frame.data[0][iy[3] + 3] = codebook->y3;
+ if (!s->palette_video) {
+ s->frame.data[1][iu[1] + 1] = codebook->u;
+ s->frame.data[2][iv[1] + 1] = codebook->v;
+ }
+
+ }
+ } else {
+ /* copy from the previous frame */
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 4; j++) {
+ s->frame.data[0][iy[i] + j] =
+ s->prev_frame.data[0][iy[i] + j];
+ }
+ }
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 2; j++) {
+ s->frame.data[1][iu[i] + j] =
+ s->prev_frame.data[1][iu[i] + j];
+ s->frame.data[2][iv[i] + j] =
+ s->prev_frame.data[2][iv[i] + j];
+ }
+ }
+ }
+
+ iy[0] += 4; iy[1] += 4;
+ iy[2] += 4; iy[3] += 4;
+ iu[0] += 2; iu[1] += 2;
+ iv[0] += 2; iv[1] += 2;
+ }
+ }
+
+ return 0;
+}
+
+static int cinepak_decode_strip (CinepakContext *s,
+ cvid_strip_t *strip, uint8_t *data, int size)
+{
+ uint8_t *eod = (data + size);
+ int chunk_id, chunk_size;
+
+ /* coordinate sanity checks */
+ if (strip->x1 >= s->avctx->width || strip->x2 > s->avctx->width ||
+ strip->y1 >= s->avctx->height || strip->y2 > s->avctx->height ||
+ strip->x1 >= strip->x2 || strip->y1 >= strip->y2)
+ return -1;
+
+ while ((data + 4) <= eod) {
+ chunk_id = BE_16 (&data[0]);
+ chunk_size = BE_16 (&data[2]) - 4;
+ data += 4;
+ chunk_size = ((data + chunk_size) > eod) ? (eod - data) : chunk_size;
+
+ switch (chunk_id) {
+
+ case 0x2000:
+ case 0x2100:
+ case 0x2400:
+ case 0x2500:
+ cinepak_decode_codebook (strip->v4_codebook, chunk_id,
+ chunk_size, data);
+ break;
+
+ case 0x2200:
+ case 0x2300:
+ case 0x2600:
+ case 0x2700:
+ cinepak_decode_codebook (strip->v1_codebook, chunk_id,
+ chunk_size, data);
+ break;
+
+ case 0x3000:
+ case 0x3100:
+ case 0x3200:
+ return cinepak_decode_vectors (s, strip, chunk_id,
+ chunk_size, data);
+ }
+
+ data += chunk_size;
+ }
+
+ return -1;
+}
+
+static int cinepak_decode (CinepakContext *s)
+{
+ uint8_t *eod = (s->data + s->size);
+ int i, result, strip_size, frame_flags, num_strips;
+ int y0 = 0;
+
+ if (s->size < 10)
+ return -1;
+
+ frame_flags = s->data[0];
+ num_strips = BE_16 (&s->data[8]);
+ s->data += 10;
+
+ if (num_strips > MAX_STRIPS)
+ num_strips = MAX_STRIPS;
+
+ for (i=0; i < num_strips; i++) {
+ if ((s->data + 12) > eod)
+ return -1;
+
+ s->strips[i].id = BE_16 (s->data);
+ s->strips[i].y1 = y0;
+ s->strips[i].x1 = 0;
+ s->strips[i].y2 = y0 + BE_16 (&s->data[8]);
+ s->strips[i].x2 = s->avctx->width;
+
+ strip_size = BE_16 (&s->data[2]) - 12;
+ s->data += 12;
+ strip_size = ((s->data + strip_size) > eod) ? (eod - s->data) : strip_size;
+
+ if ((i > 0) && !(frame_flags & 0x01)) {
+ memcpy (s->strips[i].v4_codebook, s->strips[i-1].v4_codebook,
+ sizeof(s->strips[i].v4_codebook));
+ memcpy (s->strips[i].v1_codebook, s->strips[i-1].v1_codebook,
+ sizeof(s->strips[i].v1_codebook));
+ }
+
+ result = cinepak_decode_strip (s, &s->strips[i], s->data, strip_size);
+
+ if (result != 0)
+ return result;
+
+ s->data += strip_size;
+ y0 = s->strips[i].y2;
+ }
+ return 0;
+}
+
+static int cinepak_decode_init(AVCodecContext *avctx)
+{
+ CinepakContext *s = (CinepakContext *)avctx->priv_data;
+/*
+ int i;
+ unsigned char r, g, b;
+ unsigned char *raw_palette;
+ unsigned int *palette32;
+*/
+
+ s->avctx = avctx;
+
+ /* check for paletted data */
+ s->palette_video = 0;
+
+
+ avctx->pix_fmt = PIX_FMT_YUV420P;
+ avctx->has_b_frames = 0;
+ dsputil_init(&s->dsp, avctx);
+
+ s->frame.data[0] = s->prev_frame.data[0] = NULL;
+
+ return 0;
+}
+
+static int cinepak_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ CinepakContext *s = (CinepakContext *)avctx->priv_data;
+
+ s->data = buf;
+ s->size = buf_size;
+
+ if (avctx->get_buffer(avctx, &s->frame)) {
+ printf (" Cinepak: get_buffer() failed\n");
+ return -1;
+ }
+
+ cinepak_decode(s);
+
+ if (s->prev_frame.data[0])
+ avctx->release_buffer(avctx, &s->prev_frame);
+
+ /* shuffle frames */
+ s->prev_frame = s->frame;
+
+ *data_size = sizeof(AVFrame);
+ *(AVFrame*)data = s->frame;
+
+ /* report that the buffer was completely consumed */
+ return buf_size;
+}
+
+static int cinepak_decode_end(AVCodecContext *avctx)
+{
+ CinepakContext *s = (CinepakContext *)avctx->priv_data;
+
+ if (s->prev_frame.data[0])
+ avctx->release_buffer(avctx, &s->prev_frame);
+
+ return 0;
+}
+
+AVCodec cinepak_decoder = {
+ "cinepak",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_CINEPAK,
+ sizeof(CinepakContext),
+ cinepak_decode_init,
+ NULL,
+ cinepak_decode_end,
+ cinepak_decode_frame,
+ CODEC_CAP_DR1,
+};
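
To make the indexing in cinepak_decode_vectors() easier to follow, here is an illustration-only sketch of the spatial layout it implements: in V1 mode a single codebook vector spreads its four luma values over the 2x2 quadrants of a 4x4 block, while V4 mode reads four vectors and each one fills one quadrant with its full 2x2 of luma samples. The struct mirrors cinepak.c above; the helper names are invented for the sketch and are not part of the patch.

/* Illustration only: how the vector decoder above lays out one 4x4 luma
 * block of the YUV420P frame. */
#include <stdint.h>

typedef struct {
    uint8_t y0, y1, y2, y3;
    uint8_t u, v;
} cvid_codebook_t;

/* quadrant order: top-left, top-right, bottom-left, bottom-right */
static const int quad_dx[4] = { 0, 2, 0, 2 };
static const int quad_dy[4] = { 0, 0, 2, 2 };

/* V4 mode: four vectors per block, each filling one 2x2 quadrant */
static void paint_v4_block(uint8_t *luma, int stride,
                           const cvid_codebook_t *cb[4])
{
    int q;
    for (q = 0; q < 4; q++) {
        uint8_t *p = luma + quad_dy[q] * stride + quad_dx[q];
        p[0]          = cb[q]->y0;
        p[1]          = cb[q]->y1;
        p[stride]     = cb[q]->y2;
        p[stride + 1] = cb[q]->y3;
    }
}

/* V1 mode: one vector per block, each of its four luma values covering
 * a whole 2x2 quadrant (y0 = TL, y1 = TR, y2 = BL, y3 = BR) */
static void paint_v1_block(uint8_t *luma, int stride,
                           const cvid_codebook_t *cb)
{
    const uint8_t y[4] = { cb->y0, cb->y1, cb->y2, cb->y3 };
    int q;
    for (q = 0; q < 4; q++) {
        uint8_t *p = luma + quad_dy[q] * stride + quad_dx[q];
        p[0] = p[1] = p[stride] = p[stride + 1] = y[q];
    }
}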
diff --git a/libavcodec/msrle.c b/libavcodec/msrle.c
new file mode 100644
index 0000000000..8943ef3cdf
--- /dev/null
+++ b/libavcodec/msrle.c
@@ -0,0 +1,219 @@
+/*
+ * Microsoft RLE Video Decoder
+ * Copyright (C) 2003 the ffmpeg project
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/**
+ * @file msrle.c
+ * MS RLE Video Decoder by Mike Melanson (melanson@pcisys.net)
+ * For more information about the MS RLE format, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ *
+ * The MS RLE decoder outputs PAL8 colorspace data.
+ *
+ * Note that this decoder expects the palette colors from the end of the
+ * BITMAPINFO header to be passed in through extradata.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "common.h"
+#include "avcodec.h"
+#include "dsputil.h"
+
+typedef struct MsrleContext {
+ AVCodecContext *avctx;
+ AVFrame frame;
+ AVFrame prev_frame;
+
+ unsigned char *buf;
+ int size;
+
+ unsigned int palette[256];
+} MsrleContext;
+
+#define FETCH_NEXT_STREAM_BYTE() \
+ if (stream_ptr >= s->size) \
+ { \
+ printf(" MS RLE: stream ptr just went out of bounds (1)\n"); \
+ return; \
+ } \
+ stream_byte = s->buf[stream_ptr++];
+
+static void msrle_decode_pal8(MsrleContext *s)
+{
+ int stream_ptr = 0;
+ unsigned char rle_code;
+ unsigned char extra_byte;
+ unsigned char stream_byte;
+ int pixel_ptr = 0;
+ int row_dec = s->frame.linesize[0];
+ int row_ptr = (s->avctx->height - 1) * row_dec;
+ int frame_size = row_dec * s->avctx->height;
+
+ while (row_ptr >= 0) {
+ FETCH_NEXT_STREAM_BYTE();
+ rle_code = stream_byte;
+ if (rle_code == 0) {
+ /* fetch the next byte to see how to handle escape code */
+ FETCH_NEXT_STREAM_BYTE();
+ if (stream_byte == 0) {
+ /* line is done, go to the next one */
+ row_ptr -= row_dec;
+ pixel_ptr = 0;
+ } else if (stream_byte == 1) {
+ /* decode is done */
+ return;
+ } else if (stream_byte == 2) {
+ /* reposition frame decode coordinates */
+ FETCH_NEXT_STREAM_BYTE();
+ pixel_ptr += stream_byte;
+ FETCH_NEXT_STREAM_BYTE();
+ row_ptr -= stream_byte * row_dec;
+ } else {
+ /* copy pixels from encoded stream */
+ if ((row_ptr + pixel_ptr + stream_byte > frame_size) ||
+ (row_ptr < 0)) {
+ printf(" MS RLE: frame ptr just went out of bounds (1)\n");
+ return;
+ }
+
+ rle_code = stream_byte;
+ extra_byte = stream_byte & 0x01;
+ if (stream_ptr + rle_code + extra_byte > s->size) {
+ printf(" MS RLE: stream ptr just went out of bounds (2)\n");
+ return;
+ }
+
+ while (rle_code--) {
+ FETCH_NEXT_STREAM_BYTE();
+ s->frame.data[0][row_ptr + pixel_ptr] = stream_byte;
+ pixel_ptr++;
+ }
+
+ /* if the RLE code is odd, skip a byte in the stream */
+ if (extra_byte)
+ stream_ptr++;
+ }
+ } else {
+ /* decode a run of data */
+ if ((row_ptr + pixel_ptr + stream_byte > frame_size) ||
+ (row_ptr < 0)) {
+ printf(" MS RLE: frame ptr just went out of bounds (2)\n");
+ return;
+ }
+
+ FETCH_NEXT_STREAM_BYTE();
+
+ while(rle_code--) {
+ s->frame.data[0][row_ptr + pixel_ptr] = stream_byte;
+ pixel_ptr++;
+ }
+ }
+ }
+
+ /* make the palette available */
+ memcpy(s->frame.data[1], s->palette, 256 * 4);
+
+ /* one last sanity check on the way out */
+ if (stream_ptr < s->size)
+ printf(" MS RLE: ended frame decode with bytes left over (%d < %d)\n",
+ stream_ptr, s->size);
+}
+
+static int msrle_decode_init(AVCodecContext *avctx)
+{
+ MsrleContext *s = (MsrleContext *)avctx->priv_data;
+ int i, j;
+ unsigned char *palette;
+
+ s->avctx = avctx;
+
+ avctx->pix_fmt = PIX_FMT_PAL8;
+ avctx->has_b_frames = 0;
+ s->frame.data[0] = s->prev_frame.data[0] = NULL;
+
+ /* convert palette */
+ palette = (unsigned char *)s->avctx->extradata;
+ memset (s->palette, 0, 256 * 4);
+ for (i = 0, j = 0; i < s->avctx->extradata_size / 4; i++, j += 4)
+ s->palette[i] =
+ (palette[j + 2] << 16) |
+ (palette[j + 1] << 8) |
+ (palette[j + 0] << 0);
+
+ return 0;
+}
+
+static int msrle_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ MsrleContext *s = (MsrleContext *)avctx->priv_data;
+
+ s->buf = buf;
+ s->size = buf_size;
+
+ if (avctx->get_buffer(avctx, &s->frame)) {
+ printf (" MS RLE: get_buffer() failed\n");
+ return -1;
+ }
+
+ /* grossly inefficient, but...oh well; note there is no previous frame
+ * to copy from the very first time through */
+ if (s->prev_frame.data[0])
+ memcpy(s->frame.data[0], s->prev_frame.data[0],
+ s->frame.linesize[0] * s->avctx->height);
+
+ msrle_decode_pal8(s);
+
+ if (s->prev_frame.data[0])
+ avctx->release_buffer(avctx, &s->prev_frame);
+
+ /* shuffle frames */
+ s->prev_frame = s->frame;
+
+ *data_size = sizeof(AVFrame);
+ *(AVFrame*)data = s->frame;
+
+ /* report that the buffer was completely consumed */
+ return buf_size;
+}
+
+static int msrle_decode_end(AVCodecContext *avctx)
+{
+ MsrleContext *s = (MsrleContext *)avctx->priv_data;
+
+ /* release the last frame */
+ if (s->prev_frame.data[0])
+ avctx->release_buffer(avctx, &s->prev_frame);
+
+ return 0;
+}
+
+AVCodec msrle_decoder = {
+ "msrle",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_MSRLE,
+ sizeof(MsrleContext),
+ msrle_decode_init,
+ NULL,
+ msrle_decode_end,
+ msrle_decode_frame,
+ CODEC_CAP_DR1,
+};
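
For reference, the 8-bit MS RLE stream that msrle_decode_pal8() walks is a sequence of byte pairs: a non-zero first byte is a run (count, palette index), while a zero first byte starts an escape (0 = end of line, 1 = end of bitmap, 2 = position delta, 3 and up = that many literal pixels padded to an even byte count). The sketch below shows the two data-carrying forms from the encoder's side; the helper names are invented for illustration and do not exist in libavcodec.

/* Illustration only: the byte pairs msrle_decode_pal8() consumes. */
#include <stdint.h>

/* run: <count> <palette index>, count in 1..255 */
static int put_run(uint8_t *out, uint8_t count, uint8_t color)
{
    out[0] = count;
    out[1] = color;
    return 2;
}

/* absolute mode: 0x00 <count> (count >= 3) followed by <count> literal
 * palette indices, plus one pad byte when count is odd so the stream
 * stays 16-bit aligned -- the decoder above skips that pad byte */
static int put_literals(uint8_t *out, const uint8_t *px, uint8_t count)
{
    int n = 0, i;
    out[n++] = 0x00;
    out[n++] = count;
    for (i = 0; i < count; i++)
        out[n++] = px[i];
    if (count & 1)
        out[n++] = 0x00;
    return n;
}

/* remaining escapes handled by the decoder above:
 *   0x00 0x00        end of line
 *   0x00 0x01        end of bitmap
 *   0x00 0x02 dx dy  move the decode position dx pixels and dy lines
 */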
diff --git a/libavcodec/msvideo1.c b/libavcodec/msvideo1.c
new file mode 100644
index 0000000000..71d8af9870
--- /dev/null
+++ b/libavcodec/msvideo1.c
@@ -0,0 +1,378 @@
+/*
+ * Microsoft Video-1 Decoder
+ * Copyright (C) 2003 the ffmpeg project
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+/**
+ * @file msvideo1.c
+ * Microsoft Video-1 Decoder by Mike Melanson (melanson@pcisys.net)
+ * For more information about the MS Video-1 format, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ *
+ * This decoder outputs either PAL8 or RGB555 data, depending on whether
+ * an RGB palette was passed through extradata; if the extradata is
+ * present, the output is PAL8, otherwise RGB555.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "common.h"
+#include "avcodec.h"
+#include "dsputil.h"
+
+#define PALETTE_COUNT 256
+#define LE_16(x) ((((uint8_t*)(x))[1] << 8) | ((uint8_t*)(x))[0])
+#define CHECK_STREAM_PTR(n) \
+ if ((stream_ptr + n) > s->size ) { \
+ printf (" MS Video-1 warning: stream_ptr out of bounds (%d >= %d)\n", \
+ stream_ptr + n, s->size); \
+ return; \
+ }
+
+#define COPY_PREV_BLOCK() \
+ pixel_ptr = block_ptr; \
+ for (pixel_y = 0; pixel_y < 4; pixel_y++) { \
+ for (pixel_x = 0; pixel_x < 4; pixel_x++, pixel_ptr++) \
+ pixels[pixel_ptr] = prev_pixels[pixel_ptr]; \
+ pixel_ptr -= row_dec; \
+ }
+
+typedef struct Msvideo1Context {
+
+ AVCodecContext *avctx;
+ DSPContext dsp;
+ AVFrame frame;
+ AVFrame prev_frame;
+
+ unsigned char *buf;
+ int size;
+
+ int mode_8bit; /* if it's not 8-bit, it's 16-bit */
+ unsigned char palette[PALETTE_COUNT * 4];
+
+} Msvideo1Context;
+
+static int msvideo1_decode_init(AVCodecContext *avctx)
+{
+ Msvideo1Context *s = (Msvideo1Context *)avctx->priv_data;
+ int i;
+ unsigned char r, g, b;
+ unsigned char *raw_palette;
+ unsigned int *palette32;
+
+ s->avctx = avctx;
+
+ /* figure out the colorspace based on the presence of a palette in
+ * extradata */
+ if (s->avctx->extradata_size) {
+ s->mode_8bit = 1;
+ /* load up the palette */
+ palette32 = (unsigned int *)s->palette;
+ raw_palette = (unsigned char *)s->avctx->extradata;
+ for (i = 0; i < s->avctx->extradata_size / 4; i++) {
+ b = *raw_palette++;
+ g = *raw_palette++;
+ r = *raw_palette++;
+ raw_palette++;
+ palette32[i] = (r << 16) | (g << 8) | (b);
+ }
+ avctx->pix_fmt = PIX_FMT_PAL8;
+ } else {
+ s->mode_8bit = 0;
+ avctx->pix_fmt = PIX_FMT_RGB555;
+ }
+
+ avctx->has_b_frames = 0;
+ dsputil_init(&s->dsp, avctx);
+
+ s->frame.data[0] = s->prev_frame.data[0] = NULL;
+
+ return 0;
+}
+
+static void msvideo1_decode_8bit(Msvideo1Context *s)
+{
+ int block_ptr, pixel_ptr;
+ int total_blocks;
+ int pixel_x, pixel_y; /* pixel width and height iterators */
+ int block_x, block_y; /* block width and height iterators */
+ int blocks_wide, blocks_high; /* width and height in 4x4 blocks */
+ int block_inc;
+ int row_dec;
+
+ /* decoding parameters */
+ int stream_ptr;
+ unsigned char byte_a, byte_b;
+ unsigned short flags;
+ int skip_blocks;
+ unsigned char colors[8];
+ unsigned char *pixels = s->frame.data[0];
+ unsigned char *prev_pixels = s->prev_frame.data[0];
+ int stride = s->frame.linesize[0];
+
+ stream_ptr = 0;
+ skip_blocks = 0;
+ blocks_wide = s->avctx->width / 4;
+ blocks_high = s->avctx->height / 4;
+ total_blocks = blocks_wide * blocks_high;
+ block_inc = 4;
+ row_dec = stride + 4;
+
+ for (block_y = blocks_high; block_y > 0; block_y--) {
+ block_ptr = ((block_y * 4) - 1) * stride;
+ for (block_x = blocks_wide; block_x > 0; block_x--) {
+ /* check if this block should be skipped */
+ if (skip_blocks) {
+ COPY_PREV_BLOCK();
+ block_ptr += block_inc;
+ skip_blocks--;
+ total_blocks--;
+ continue;
+ }
+
+ pixel_ptr = block_ptr;
+
+ /* get the next two bytes in the encoded data stream */
+ CHECK_STREAM_PTR(2);
+ byte_a = s->buf[stream_ptr++];
+ byte_b = s->buf[stream_ptr++];
+
+ /* check if the decode is finished */
+ if ((byte_a == 0) && (byte_b == 0) && (total_blocks == 0))
+ return;
+ else if ((byte_b & 0xFC) == 0x84) {
+ /* skip code, but don't count the current block */
+ skip_blocks = ((byte_b - 0x84) << 8) + byte_a - 1;
+ COPY_PREV_BLOCK();
+ } else if (byte_b < 0x80) {
+ /* 2-color encoding */
+ flags = (byte_b << 8) | byte_a;
+
+ CHECK_STREAM_PTR(2);
+ colors[0] = s->buf[stream_ptr++];
+ colors[1] = s->buf[stream_ptr++];
+
+ for (pixel_y = 0; pixel_y < 4; pixel_y++) {
+ for (pixel_x = 0; pixel_x < 4; pixel_x++, flags >>= 1)
+ pixels[pixel_ptr++] = colors[(flags & 0x1) ^ 1];
+ pixel_ptr -= row_dec;
+ }
+ } else if (byte_b >= 0x90) {
+ /* 8-color encoding */
+ flags = (byte_b << 8) | byte_a;
+
+ CHECK_STREAM_PTR(8);
+ memcpy(colors, &s->buf[stream_ptr], 8);
+ stream_ptr += 8;
+
+ for (pixel_y = 0; pixel_y < 4; pixel_y++) {
+ for (pixel_x = 0; pixel_x < 4; pixel_x++, flags >>= 1)
+ pixels[pixel_ptr++] =
+ colors[((pixel_y & 0x2) << 1) +
+ (pixel_x & 0x2) + ((flags & 0x1) ^ 1)];
+ pixel_ptr -= row_dec;
+ }
+ } else {
+ /* 1-color encoding */
+ colors[0] = byte_a;
+
+ for (pixel_y = 0; pixel_y < 4; pixel_y++) {
+ for (pixel_x = 0; pixel_x < 4; pixel_x++)
+ pixels[pixel_ptr++] = colors[0];
+ pixel_ptr -= row_dec;
+ }
+ }
+
+ block_ptr += block_inc;
+ total_blocks--;
+ }
+ }
+
+ /* make the palette available on the way out */
+ if (s->avctx->pix_fmt == PIX_FMT_PAL8)
+ memcpy(s->frame.data[1], s->palette, PALETTE_COUNT * 4);
+}
+
+static void msvideo1_decode_16bit(Msvideo1Context *s)
+{
+ int block_ptr, pixel_ptr;
+ int total_blocks;
+ int pixel_x, pixel_y; /* pixel width and height iterators */
+ int block_x, block_y; /* block width and height iterators */
+ int blocks_wide, blocks_high; /* width and height in 4x4 blocks */
+ int block_inc;
+ int row_dec;
+
+ /* decoding parameters */
+ int stream_ptr;
+ unsigned char byte_a, byte_b;
+ unsigned short flags;
+ int skip_blocks;
+ unsigned short colors[8];
+ unsigned short *pixels = (unsigned short *)s->frame.data[0];
+ unsigned short *prev_pixels = (unsigned short *)s->prev_frame.data[0];
+ int stride = s->frame.linesize[0] / 2;
+
+ stream_ptr = 0;
+ skip_blocks = 0;
+ blocks_wide = s->avctx->width / 4;
+ blocks_high = s->avctx->height / 4;
+ total_blocks = blocks_wide * blocks_high;
+ block_inc = 4;
+ row_dec = stride + 4;
+
+ for (block_y = blocks_high; block_y > 0; block_y--) {
+ block_ptr = ((block_y * 4) - 1) * stride;
+ for (block_x = blocks_wide; block_x > 0; block_x--) {
+ /* check if this block should be skipped */
+ if (skip_blocks) {
+ COPY_PREV_BLOCK();
+ block_ptr += block_inc;
+ skip_blocks--;
+ total_blocks--;
+ continue;
+ }
+
+ pixel_ptr = block_ptr;
+
+ /* get the next two bytes in the encoded data stream */
+ CHECK_STREAM_PTR(2);
+ byte_a = s->buf[stream_ptr++];
+ byte_b = s->buf[stream_ptr++];
+
+ /* check if the decode is finished */
+ if ((byte_a == 0) && (byte_b == 0) && (total_blocks == 0)) {
+ return;
+ } else if ((byte_b & 0xFC) == 0x84) {
+ /* skip code, but don't count the current block */
+ skip_blocks = ((byte_b - 0x84) << 8) + byte_a - 1;
+ COPY_PREV_BLOCK();
+ } else if (byte_b < 0x80) {
+ /* 2- or 8-color encoding modes */
+ flags = (byte_b << 8) | byte_a;
+
+ CHECK_STREAM_PTR(4);
+ colors[0] = LE_16(&s->buf[stream_ptr]);
+ stream_ptr += 2;
+ colors[1] = LE_16(&s->buf[stream_ptr]);
+ stream_ptr += 2;
+
+ if (colors[0] & 0x8000) {
+ /* 8-color encoding */
+ CHECK_STREAM_PTR(12);
+ colors[2] = LE_16(&s->buf[stream_ptr]);
+ stream_ptr += 2;
+ colors[3] = LE_16(&s->buf[stream_ptr]);
+ stream_ptr += 2;
+ colors[4] = LE_16(&s->buf[stream_ptr]);
+ stream_ptr += 2;
+ colors[5] = LE_16(&s->buf[stream_ptr]);
+ stream_ptr += 2;
+ colors[6] = LE_16(&s->buf[stream_ptr]);
+ stream_ptr += 2;
+ colors[7] = LE_16(&s->buf[stream_ptr]);
+ stream_ptr += 2;
+
+ for (pixel_y = 0; pixel_y < 4; pixel_y++) {
+ for (pixel_x = 0; pixel_x < 4; pixel_x++, flags >>= 1)
+ pixels[pixel_ptr++] =
+ colors[((pixel_y & 0x2) << 1) +
+ (pixel_x & 0x2) + ((flags & 0x1) ^ 1)];
+ pixel_ptr -= row_dec;
+ }
+ } else {
+ /* 2-color encoding */
+ for (pixel_y = 0; pixel_y < 4; pixel_y++) {
+ for (pixel_x = 0; pixel_x < 4; pixel_x++, flags >>= 1)
+ pixels[pixel_ptr++] = colors[(flags & 0x1) ^ 1];
+ pixel_ptr -= row_dec;
+ }
+ }
+ } else {
+ /* otherwise, it's a 1-color block */
+ colors[0] = (byte_b << 8) | byte_a;
+
+ for (pixel_y = 0; pixel_y < 4; pixel_y++) {
+ for (pixel_x = 0; pixel_x < 4; pixel_x++)
+ pixels[pixel_ptr++] = colors[0];
+ pixel_ptr -= row_dec;
+ }
+ }
+
+ block_ptr += block_inc;
+ total_blocks--;
+ }
+ }
+}
+
+static int msvideo1_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ Msvideo1Context *s = (Msvideo1Context *)avctx->priv_data;
+
+ s->buf = buf;
+ s->size = buf_size;
+
+ if (avctx->get_buffer(avctx, &s->frame)) {
+ printf (" MS Video-1 Video: get_buffer() failed\n");
+ return -1;
+ }
+
+ if (s->mode_8bit)
+ msvideo1_decode_8bit(s);
+ else
+ msvideo1_decode_16bit(s);
+
+ if (s->prev_frame.data[0])
+ avctx->release_buffer(avctx, &s->prev_frame);
+
+ /* shuffle frames */
+ s->prev_frame = s->frame;
+
+ *data_size = sizeof(AVFrame);
+ *(AVFrame*)data = s->frame;
+
+ /* report that the buffer was completely consumed */
+ return buf_size;
+}
+
+static int msvideo1_decode_end(AVCodecContext *avctx)
+{
+ Msvideo1Context *s = (Msvideo1Context *)avctx->priv_data;
+
+ if (s->prev_frame.data[0])
+ avctx->release_buffer(avctx, &s->prev_frame);
+
+ return 0;
+}
+
+AVCodec msvideo1_decoder = {
+ "msvideo1",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_MSVIDEO1,
+ sizeof(Msvideo1Context),
+ msvideo1_decode_init,
+ NULL,
+ msvideo1_decode_end,
+ msvideo1_decode_frame,
+ CODEC_CAP_DR1,
+};
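
The 8-bit path above sorts each 4x4 block by the second byte of its leading 16-bit word: 0x84-0x87 marks a skip run, values below 0x80 select the 2-color mode with a 16-bit pixel mask, 0x90 and above select the 8-color mode (two colors per 2x2 quadrant, same mask), and the remaining 0x80-0x8F range is a solid 1-color fill. The classifier below restates that decision tree for illustration; the enum and function names are invented for this sketch.

/* Illustrative classifier for the 8-bit MS Video-1 block codes handled
 * above; not part of the patch. */
enum msvideo1_block_type {
    BLOCK_END_OF_STREAM,
    BLOCK_SKIP,       /* reuse the 4x4 block from the previous frame   */
    BLOCK_2_COLOR,    /* 16-bit mask selects between two palette idx   */
    BLOCK_8_COLOR,    /* two colors per 2x2 quadrant, same 16-bit mask */
    BLOCK_1_COLOR     /* solid fill with byte_a                        */
};

static enum msvideo1_block_type classify(unsigned char byte_a,
                                         unsigned char byte_b,
                                         int blocks_left)
{
    if (byte_a == 0 && byte_b == 0 && blocks_left == 0)
        return BLOCK_END_OF_STREAM;
    if ((byte_b & 0xFC) == 0x84)   /* skip count = ((byte_b - 0x84) << 8) + byte_a */
        return BLOCK_SKIP;
    if (byte_b < 0x80)
        return BLOCK_2_COLOR;
    if (byte_b >= 0x90)
        return BLOCK_8_COLOR;
    return BLOCK_1_COLOR;          /* byte_b in 0x80..0x8F, not a skip code */
}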
diff --git a/libavcodec/rpza.c b/libavcodec/rpza.c
new file mode 100644
index 0000000000..8027cfd9ab
--- /dev/null
+++ b/libavcodec/rpza.c
@@ -0,0 +1,310 @@
+/*
+ * QuickTime Video (RPZA) Decoder
+ * Copyright (C) 2003 the ffmpeg project
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+/**
+ * @file rpza.c
+ * QT RPZA Video Decoder by Roberto Togni <rtogni@bresciaonline.it>
+ * For more information about the RPZA format, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ *
+ * The RPZA decoder outputs RGB555 colorspace data.
+ *
+ * Note that this decoder reads big endian RGB555 pixel values from the
+ * bytestream, arranges them in the host's endian order, and outputs
+ * them to the final rendered map in the same host endian order. This is
+ * intended behavior as the ffmpeg documentation states that RGB555 pixels
+ * shall be stored in native CPU endianness.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "common.h"
+#include "avcodec.h"
+#include "dsputil.h"
+
+typedef struct RpzaContext {
+
+ AVCodecContext *avctx;
+ DSPContext dsp;
+ AVFrame frame;
+ AVFrame prev_frame;
+
+ unsigned char *buf;
+ int size;
+
+} RpzaContext;
+
+#define BE_16(x) ((((uint8_t*)(x))[0] << 8) | ((uint8_t*)(x))[1])
+#define BE_32(x) ((((uint8_t*)(x))[0] << 24) | \
+ (((uint8_t*)(x))[1] << 16) | \
+ (((uint8_t*)(x))[2] << 8) | \
+ ((uint8_t*)(x))[3])
+
+#define ADVANCE_BLOCK() \
+{ \
+ pixel_ptr += 4; \
+ if (pixel_ptr >= width) \
+ { \
+ pixel_ptr = 0; \
+ row_ptr += stride * 4; \
+ } \
+ total_blocks--; \
+ if (total_blocks < 0) \
+ { \
+ printf("warning: block counter just went negative (this should not happen)\n"); \
+ return; \
+ } \
+}
+
+static void rpza_decode_stream(RpzaContext *s)
+{
+ int width = s->avctx->width;
+ int stride = s->frame.linesize[0] / 2;
+ int row_inc = stride - 4;
+ int stream_ptr = 0;
+ int chunk_size;
+ unsigned char opcode;
+ int n_blocks;
+ unsigned short colorA = 0, colorB;
+ unsigned short color4[4];
+ unsigned char index, idx;
+ unsigned short ta, tb;
+ unsigned short *pixels = (unsigned short *)s->frame.data[0];
+ unsigned short *prev_pixels = (unsigned short *)s->prev_frame.data[0];
+
+ int row_ptr = 0;
+ int pixel_ptr = 0;
+ int block_ptr;
+ int pixel_x, pixel_y;
+ int total_blocks;
+
+ /* First byte is always 0xe1. Warn if it's different */
+ if (s->buf[stream_ptr] != 0xe1)
+ printf("First chunk byte is 0x%02x instead of 0x1e\n",
+ s->buf[stream_ptr]);
+
+ /* Get chunk size, ignoring first byte */
+ chunk_size = BE_32(&s->buf[stream_ptr]) & 0x00FFFFFF;
+ stream_ptr += 4;
+
+ /* If length mismatch use size from MOV file and try to decode anyway */
+ if (chunk_size != s->size)
+ printf("MOV chunk size != encoded chunk size; using MOV chunk size\n");
+
+ chunk_size = s->size;
+
+ /* Number of 4x4 blocks in frame. */
+ total_blocks = (s->avctx->width * s->avctx->height) / (4 * 4);
+
+ /* Process chunk data */
+ while (stream_ptr < chunk_size) {
+ opcode = s->buf[stream_ptr++]; /* Get opcode */
+
+ n_blocks = (opcode & 0x1f) + 1; /* Extract block counter from opcode */
+
+ /* If opcode MSbit is 0, we need more data to decide what to do */
+ if ((opcode & 0x80) == 0) {
+ colorA = (opcode << 8) | (s->buf[stream_ptr++]);
+ opcode = 0;
+ if ((s->buf[stream_ptr] & 0x80) != 0) {
+ /* Must behave as opcode 110xxxxx, using colorA computed
+ * above. Use fake opcode 0x20 to enter switch block at
+ * the right place */
+ opcode = 0x20;
+ n_blocks = 1;
+ }
+ }
+
+ switch (opcode & 0xe0) {
+
+ /* Skip blocks */
+ case 0x80:
+ while (n_blocks--) {
+ block_ptr = row_ptr + pixel_ptr;
+ for (pixel_y = 0; pixel_y < 4; pixel_y++) {
+ for (pixel_x = 0; pixel_x < 4; pixel_x++){
+ pixels[block_ptr] = prev_pixels[block_ptr];
+ block_ptr++;
+ }
+ block_ptr += row_inc;
+ }
+ ADVANCE_BLOCK();
+ }
+ break;
+
+ /* Fill blocks with one color */
+ case 0xa0:
+ colorA = BE_16 (&s->buf[stream_ptr]);
+ stream_ptr += 2;
+ while (n_blocks--) {
+ block_ptr = row_ptr + pixel_ptr;
+ for (pixel_y = 0; pixel_y < 4; pixel_y++) {
+ for (pixel_x = 0; pixel_x < 4; pixel_x++){
+ pixels[block_ptr] = colorA;
+ block_ptr++;
+ }
+ block_ptr += row_inc;
+ }
+ ADVANCE_BLOCK();
+ }
+ break;
+
+ /* Fill blocks with 4 colors */
+ case 0xc0:
+ colorA = BE_16 (&s->buf[stream_ptr]);
+ stream_ptr += 2;
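+ /* fall through: the 4-color path continues at 0x20; the fake 0x20 opcode arrives here with colorA already read before the switch */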
+ case 0x20:
+ colorB = BE_16 (&s->buf[stream_ptr]);
+ stream_ptr += 2;
+
+ /* sort out the colors */
+ color4[0] = colorB;
+ color4[1] = 0;
+ color4[2] = 0;
+ color4[3] = colorA;
+
+ /* red components */
+ ta = (colorA >> 10) & 0x1F;
+ tb = (colorB >> 10) & 0x1F;
+ color4[1] |= ((11 * ta + 21 * tb) >> 5) << 10;
+ color4[2] |= ((21 * ta + 11 * tb) >> 5) << 10;
+
+ /* green components */
+ ta = (colorA >> 5) & 0x1F;
+ tb = (colorB >> 5) & 0x1F;
+ color4[1] |= ((11 * ta + 21 * tb) >> 5) << 5;
+ color4[2] |= ((21 * ta + 11 * tb) >> 5) << 5;
+
+ /* blue components */
+ ta = colorA & 0x1F;
+ tb = colorB & 0x1F;
+ color4[1] |= ((11 * ta + 21 * tb) >> 5);
+ color4[2] |= ((21 * ta + 11 * tb) >> 5);
+
+ while (n_blocks--) {
+ block_ptr = row_ptr + pixel_ptr;
+ for (pixel_y = 0; pixel_y < 4; pixel_y++) {
+ index = s->buf[stream_ptr++];
+ for (pixel_x = 0; pixel_x < 4; pixel_x++){
+ idx = (index >> (2 * (3 - pixel_x))) & 0x03;
+ pixels[block_ptr] = color4[idx];
+ block_ptr++;
+ }
+ block_ptr += row_inc;
+ }
+ ADVANCE_BLOCK();
+ }
+ break;
+
+ /* Fill block with 16 colors */
+ case 0x00:
+ block_ptr = row_ptr + pixel_ptr;
+ for (pixel_y = 0; pixel_y < 4; pixel_y++) {
+ for (pixel_x = 0; pixel_x < 4; pixel_x++){
+ /* We already have color of upper left pixel */
+ if ((pixel_y != 0) || (pixel_x !=0)) {
+ colorA = BE_16 (&s->buf[stream_ptr]);
+ stream_ptr += 2;
+ }
+ pixels[block_ptr] = colorA;
+ block_ptr++;
+ }
+ block_ptr += row_inc;
+ }
+ ADVANCE_BLOCK();
+ break;
+
+ /* Unknown opcode */
+ default:
+ printf("Unknown opcode %d in rpza chunk."
+ " Skip remaining %d bytes of chunk data.\n", opcode,
+ chunk_size - stream_ptr);
+ return;
+ } /* Opcode switch */
+ }
+}
+
+static int rpza_decode_init(AVCodecContext *avctx)
+{
+ RpzaContext *s = (RpzaContext *)avctx->priv_data;
+
+ s->avctx = avctx;
+ avctx->pix_fmt = PIX_FMT_RGB555;
+ avctx->has_b_frames = 0;
+ dsputil_init(&s->dsp, avctx);
+
+ s->frame.data[0] = s->prev_frame.data[0] = NULL;
+
+ return 0;
+}
+
+static int rpza_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ RpzaContext *s = (RpzaContext *)avctx->priv_data;
+
+ s->buf = buf;
+ s->size = buf_size;
+
+ if (avctx->get_buffer(avctx, &s->frame)) {
+ printf (" RPZA Video: get_buffer() failed\n");
+ return -1;
+ }
+
+ rpza_decode_stream(s);
+
+ if (s->prev_frame.data[0])
+ avctx->release_buffer(avctx, &s->prev_frame);
+
+ /* shuffle frames */
+ s->prev_frame = s->frame;
+
+ *data_size = sizeof(AVFrame);
+ *(AVFrame*)data = s->frame;
+
+ /* always report that the buffer was completely consumed */
+ return buf_size;
+}
+
+static int rpza_decode_end(AVCodecContext *avctx)
+{
+ RpzaContext *s = (RpzaContext *)avctx->priv_data;
+
+ if (s->prev_frame.data[0])
+ avctx->release_buffer(avctx, &s->prev_frame);
+
+ return 0;
+}
+
+AVCodec rpza_decoder = {
+ "rpza",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_RPZA,
+ sizeof(RpzaContext),
+ rpza_decode_init,
+ NULL,
+ rpza_decode_end,
+ rpza_decode_frame,
+ CODEC_CAP_DR1,
+};
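
The 4-color path above derives the two intermediate colors from colorA and colorB with fixed 11/21 and 21/11 weights applied per 5-bit channel. The same arithmetic as a standalone helper is shown below; the function name is invented for this sketch.

/* Standalone version of the RGB555 endpoint blend used by the 4-color
 * path in rpza_decode_stream(); illustration only. */
#include <stdint.h>

static uint16_t rpza_blend(uint16_t colorA, uint16_t colorB,
                           int wA, int wB)      /* wA + wB == 32 */
{
    uint16_t out = 0;
    int shift;

    for (shift = 0; shift <= 10; shift += 5) {  /* blue, green, red fields */
        int ca = (colorA >> shift) & 0x1F;
        int cb = (colorB >> shift) & 0x1F;
        out |= (uint16_t)(((wA * ca + wB * cb) >> 5) << shift);
    }
    return out;
}

/* color4[] as built by the decoder:
 *   color4[0] = colorB;
 *   color4[1] = rpza_blend(colorA, colorB, 11, 21);
 *   color4[2] = rpza_blend(colorA, colorB, 21, 11);
 *   color4[3] = colorA;
 */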