author | Kostya Shishkov | 2012-01-01 17:44:08 +0100 |
---|---|---|
committer | Kostya Shishkov | 2012-01-03 17:08:49 +0100 |
commit | 490dcda6b6051066ad7d83979d81db5506f9b3eb | |
tree | 5cd5fbacfb47c47991a0143bd42a5db5d4acc6d5 /libavcodec/utvideo.c | |
parent | c04a954da6ce3816870b821f43d5543d940b1fb2 | |
utvideo: proper median prediction for interlaced videos
Diffstat (limited to 'libavcodec/utvideo.c')
-rw-r--r-- | libavcodec/utvideo.c | 101 |
1 file changed, 94 insertions(+), 7 deletions(-)
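Background for the patch below: UtVideo's median predictor reconstructs each sample from its left neighbour A, its top neighbour B and the gradient A + B - C (C being the top-left neighbour), adding the coded residual to the median of those three values. The following is a minimal standalone sketch of that rule; `median3()` is a hypothetical helper written for this note (FFmpeg's own `mid_pred()` computes the same value), not code taken from utvideo.c.

```c
#include <stdint.h>
#include <stdio.h>

/* Median of three values -- the prediction used by PRED_MEDIAN mode.
 * Illustrative stand-in for FFmpeg's mid_pred(); same result, own code. */
static int median3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }  /* ensure a <= b          */
    if (b > c)
        b = c;                               /* b = min(b, c)          */
    return a > b ? a : b;                    /* max(a, min(b, c))      */
}

int main(void)
{
    /* Restore one residual the way median prediction does:
     * A = left, B = top, C = top-left, pred = median(A, B, A + B - C). */
    uint8_t A = 100, B = 104, C = 97, residual = 3;
    uint8_t pred  = (uint8_t)median3(A, B, (uint8_t)(A + B - C));
    uint8_t pixel = (uint8_t)(residual + pred);  /* 8-bit wraparound, as in the decoder */
    printf("prediction = %d, restored sample = %d\n", pred, pixel);
    return 0;
}
```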
diff --git a/libavcodec/utvideo.c b/libavcodec/utvideo.c
index 4c3b2a1621..d105d29148 100644
--- a/libavcodec/utvideo.c
+++ b/libavcodec/utvideo.c
@@ -282,6 +282,77 @@ static void restore_median(uint8_t *src, int step, int stride,
     }
 }
 
+/* UtVideo interlaced mode treats every two lines as a single one,
+ * so restoring function should take care of possible padding between
+ * two parts of the same "line".
+ */
+static void restore_median_il(uint8_t *src, int step, int stride,
+                              int width, int height, int slices, int rmode)
+{
+    int i, j, slice;
+    int A, B, C;
+    uint8_t *bsrc;
+    int slice_start, slice_height;
+    const int cmask = ~(rmode ? 3 : 1);
+    const int stride2 = stride << 1;
+
+    for (slice = 0; slice < slices; slice++) {
+        slice_start = ((slice * height) / slices) & cmask;
+        slice_height = ((((slice + 1) * height) / slices) & cmask) - slice_start;
+        slice_height >>= 1;
+
+        bsrc = src + slice_start * stride;
+
+        // first line - left neighbour prediction
+        bsrc[0] += 0x80;
+        A = bsrc[0];
+        for (i = step; i < width * step; i += step) {
+            bsrc[i] += A;
+            A = bsrc[i];
+        }
+        for (i = 0; i < width * step; i += step) {
+            bsrc[stride + i] += A;
+            A = bsrc[stride + i];
+        }
+        bsrc += stride2;
+        if (slice_height == 1)
+            continue;
+        // second line - first element has top prediction, the rest uses median
+        C = bsrc[-stride2];
+        bsrc[0] += C;
+        A = bsrc[0];
+        for (i = step; i < width * step; i += step) {
+            B = bsrc[i - stride2];
+            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
+            C = B;
+            A = bsrc[i];
+        }
+        for (i = 0; i < width * step; i += step) {
+            B = bsrc[i - stride];
+            bsrc[stride + i] += mid_pred(A, B, (uint8_t)(A + B - C));
+            C = B;
+            A = bsrc[stride + i];
+        }
+        bsrc += stride2;
+        // the rest of lines use continuous median prediction
+        for (j = 2; j < slice_height; j++) {
+            for (i = 0; i < width * step; i += step) {
+                B = bsrc[i - stride2];
+                bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
+                C = B;
+                A = bsrc[i];
+            }
+            for (i = 0; i < width * step; i += step) {
+                B = bsrc[i - stride];
+                bsrc[i + stride] += mid_pred(A, B, (uint8_t)(A + B - C));
+                C = B;
+                A = bsrc[i + stride];
+            }
+            bsrc += stride2;
+        }
+    }
+}
+
 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
@@ -381,10 +452,18 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
                                c->frame_pred == PRED_LEFT);
             if (ret)
                 return ret;
-            if (c->frame_pred == PRED_MEDIAN)
-                restore_median(c->pic.data[i], 1, c->pic.linesize[i],
-                               avctx->width >> !!i, avctx->height >> !!i,
-                               c->slices, !i);
+            if (c->frame_pred == PRED_MEDIAN) {
+                if (!c->interlaced) {
+                    restore_median(c->pic.data[i], 1, c->pic.linesize[i],
+                                   avctx->width >> !!i, avctx->height >> !!i,
+                                   c->slices, !i);
+                } else {
+                    restore_median_il(c->pic.data[i], 1, c->pic.linesize[i],
+                                      avctx->width >> !!i,
+                                      avctx->height >> !!i,
+                                      c->slices, !i);
+                }
+            }
         }
         break;
     case PIX_FMT_YUV422P:
@@ -395,9 +474,17 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
                                c->frame_pred == PRED_LEFT);
             if (ret)
                 return ret;
-            if (c->frame_pred == PRED_MEDIAN)
-                restore_median(c->pic.data[i], 1, c->pic.linesize[i],
-                               avctx->width >> !!i, avctx->height, c->slices, 0);
+            if (c->frame_pred == PRED_MEDIAN) {
+                if (!c->interlaced) {
+                    restore_median(c->pic.data[i], 1, c->pic.linesize[i],
+                                   avctx->width >> !!i, avctx->height,
+                                   c->slices, 0);
+                } else {
+                    restore_median_il(c->pic.data[i], 1, c->pic.linesize[i],
+                                      avctx->width >> !!i, avctx->height,
+                                      c->slices, 0);
+                }
+            }
         }
         break;
     }
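A note on what restore_median_il() changes relative to restore_median(): inside each slice, two physical rows are walked as one logical line, so the running left neighbour A carries over from the end of the first row of a pair into the second, while the top neighbours B and C are fetched two physical rows (stride2) up instead of one. The cmask rounding of slice_start likewise appears to keep every slice aligned to whole row pairs. The sketch below is a hypothetical index-mapping helper written to illustrate that layout; it is not part of utvideo.c.

```c
#include <stddef.h>
#include <stdio.h>

/* Hypothetical helper: a "logical line" is two physical rows of `width`
 * samples, possibly with padding between them. Positions 0..width-1 live
 * in the first row; width..2*width-1 live in the second row, which starts
 * `stride` bytes after the first. Offsets are relative to the pair start. */
static ptrdiff_t il_offset(int x, int width, int step, int stride)
{
    if (x < width)                                  /* first half of the pair  */
        return (ptrdiff_t)x * step;
    return stride + (ptrdiff_t)(x - width) * step;  /* second half, past padding */
}

/* The vertical neighbour of any sample sits one logical line above it,
 * i.e. two physical rows (stride * 2) earlier in memory. */
static ptrdiff_t il_top_offset(int x, int width, int step, int stride)
{
    return il_offset(x, width, step, stride) - 2 * (ptrdiff_t)stride;
}

int main(void)
{
    /* Example: a 640-sample-wide plane stored with a 704-byte stride, step 1. */
    int width = 640, step = 1, stride = 704;
    printf("x=639 -> offset %td, x=640 -> offset %td\n",
           il_offset(639, width, step, stride),
           il_offset(640, width, step, stride));
    printf("top neighbour of x=640 -> offset %td (previous row pair)\n",
           il_top_offset(640, width, step, stride));
    return 0;
}
```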