path: root/libavcodec/svq1.c
author     Diego Biurrun    2005-12-17 18:14:38 +0000
committer  Diego Biurrun    2005-12-17 18:14:38 +0000
commit     115329f16062074e11ccf3b89ead6176606c9696 (patch)
tree       e98aa993905a702688bf821737ab9a443969fc28    /libavcodec/svq1.c
parent     d76319b1ab716320f6e6a4d690b85fe4504ebd5b (diff)
COSMETICS: Remove all trailing whitespace.
Originally committed as revision 4749 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec/svq1.c')
-rw-r--r--  libavcodec/svq1.c | 128
1 file changed, 64 insertions(+), 64 deletions(-)
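A whitespace-only cleanup of this kind can usually be reproduced mechanically. The commit does not record what tooling was used for revision 4749, so the one-liner below (which assumes GNU sed for in-place editing) is only an illustrative sketch, not the command the author actually ran:

    sed -i 's/[[:blank:]]*$//' libavcodec/svq1.c    # delete trailing spaces and tabs on every line

Run against the pre-commit file, a command like this should yield the same kind of 64-line change shown in the diff below.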
diff --git a/libavcodec/svq1.c b/libavcodec/svq1.c
index b94472e343..2107a44296 100644
--- a/libavcodec/svq1.c
+++ b/libavcodec/svq1.c
@@ -1,8 +1,8 @@
/*
- *
+ *
* Copyright (C) 2002 the xine project
* Copyright (C) 2002 the ffmpeg project
- *
+ *
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
@@ -71,7 +71,7 @@ typedef struct SVQ1Context {
AVFrame last_picture;
PutBitContext pb;
GetBitContext gb;
-
+
PutBitContext reorder_pb[6]; //why ooh why this sick breadth first order, everything is slower and more complex
int frame_width;
@@ -84,7 +84,7 @@ typedef struct SVQ1Context {
/* U & V plane (C planes) block dimensions */
int c_block_width;
int c_block_height;
-
+
uint16_t *mb_type;
uint32_t *dummy;
int16_t (*motion_val8[3])[2];
@@ -353,7 +353,7 @@ static int svq1_decode_motion_vector (GetBitContext *bitbuf, svq1_pmv_t *mv, svq
/* get motion code */
diff = get_vlc2(bitbuf, svq1_motion_component.table, 7, 2);
- if(diff<0)
+ if(diff<0)
return -1;
else if(diff){
if(get_bits1(bitbuf)) diff= -diff;
@@ -415,7 +415,7 @@ static int svq1_motion_inter_block (MpegEncContext *s, GetBitContext *bitbuf,
motion[0].y =
motion[(x / 8) + 2].y =
motion[(x / 8) + 3].y = mv.y;
-
+
if(y + (mv.y >> 1)<0)
mv.y= 0;
if(x + (mv.x >> 1)<0)
@@ -427,7 +427,7 @@ static int svq1_motion_inter_block (MpegEncContext *s, GetBitContext *bitbuf,
if(x + (mv.x >> 1)<0 || y + (mv.y >> 1)<0 || x + (mv.x >> 1) + 16 > w || y + (mv.y >> 1) + 16> h)
av_log(s->avctx, AV_LOG_INFO, "%d %d %d %d\n", x, y, x + (mv.x >> 1), y + (mv.y >> 1));
#endif
-
+
src = &previous[(x + (mv.x >> 1)) + (y + (mv.y >> 1))*pitch];
dst = current;
@@ -497,7 +497,7 @@ static int svq1_motion_inter_4v_block (MpegEncContext *s, GetBitContext *bitbuf,
for (i=0; i < 4; i++) {
int mvx= pmv[i]->x + (i&1)*16;
int mvy= pmv[i]->y + (i>>1)*16;
-
+
///XXX /FIXME cliping or padding?
if(y + (mvy >> 1)<0)
mvy= 0;
@@ -512,7 +512,7 @@ static int svq1_motion_inter_4v_block (MpegEncContext *s, GetBitContext *bitbuf,
#endif
src = &previous[(x + (mvx >> 1)) + (y + (mvy >> 1))*pitch];
dst = current;
-
+
s->dsp.put_pixels_tab[1][((mvy & 1) << 1) | (mvx & 1)](dst,src,pitch,8);
/* select next block */
@@ -639,9 +639,9 @@ static int svq1_decode_frame_header (GetBitContext *bitbuf,MpegEncContext *s) {
/* frame type */
s->pict_type= get_bits (bitbuf, 2)+1;
- if(s->pict_type==4)
+ if(s->pict_type==4)
return -1;
-
+
if (s->pict_type == I_TYPE) {
/* unknown fields */
@@ -702,18 +702,18 @@ static int svq1_decode_frame_header (GetBitContext *bitbuf,MpegEncContext *s) {
skip_bits (bitbuf, 8);
}
}
-
+
return 0;
}
-static int svq1_decode_frame(AVCodecContext *avctx,
+static int svq1_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
uint8_t *buf, int buf_size)
{
MpegEncContext *s=avctx->priv_data;
uint8_t *current, *previous;
int result, i, x, y, width, height;
- AVFrame *pict = data;
+ AVFrame *pict = data;
/* initialize bit buffer */
init_get_bits(&s->gb,buf,buf_size*8);
@@ -742,16 +742,16 @@ static int svq1_decode_frame(AVCodecContext *avctx,
#endif
return result;
}
-
+
//FIXME this avoids some confusion for "B frames" without 2 references
//this should be removed after libavcodec can handle more flexible picture types & ordering
if(s->pict_type==B_TYPE && s->last_picture_ptr==NULL) return buf_size;
-
+
if(avctx->hurry_up && s->pict_type==B_TYPE) return buf_size;
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
|| avctx->skip_frame >= AVDISCARD_ALL)
- return buf_size;
+ return buf_size;
if(MPV_frame_start(s, avctx) < 0)
return -1;
@@ -818,12 +818,12 @@ static int svq1_decode_frame(AVCodecContext *avctx,
}
}
}
-
+
*pict = *(AVFrame*)&s->current_picture;
MPV_frame_end(s);
-
+
*data_size=sizeof(AVFrame);
return buf_size;
}
@@ -911,7 +911,7 @@ static void svq1_write_header(SVQ1Context *s, int frame_type)
break;
}
}
-
+
if (i == 7)
{
put_bits(&s->pb, 3, 7);
@@ -984,14 +984,14 @@ static int encode_block(SVQ1Context *s, uint8_t *src, uint8_t *ref, uint8_t *dec
int best_vector_sum=-999, best_vector_mean=-999;
const int stage= count-1;
const int8_t *vector;
-
+
for(i=0; i<16; i++){
int sum= codebook_sum[stage*16 + i];
int sqr=0;
int diff, mean, score;
-
+
vector = codebook + stage*size*16 + i*size;
-
+
for(j=0; j<size; j++){
int v= vector[j];
sqr += (v - block[stage][j])*(v - block[stage][j]);
@@ -1015,11 +1015,11 @@ static int encode_block(SVQ1Context *s, uint8_t *src, uint8_t *ref, uint8_t *dec
block[stage+1][j] = block[stage][j] - vector[j];
}
block_sum[stage+1]= block_sum[stage] - best_vector_sum;
- best_vector_score +=
+ best_vector_score +=
lambda*(+ 1 + 4*count
+ multistage_vlc[1+count][1]
+ mean_vlc[best_vector_mean][1]);
-
+
if(best_vector_score < best_score){
best_score= best_vector_score;
best_count= count;
@@ -1027,7 +1027,7 @@ static int encode_block(SVQ1Context *s, uint8_t *src, uint8_t *ref, uint8_t *dec
}
}
}
-
+
split=0;
if(best_score > threshold && level){
int score=0;
@@ -1040,7 +1040,7 @@ static int encode_block(SVQ1Context *s, uint8_t *src, uint8_t *ref, uint8_t *dec
score += encode_block(s, src , ref , decoded , stride, level-1, threshold>>1, lambda, intra);
score += encode_block(s, src + offset, ref + offset, decoded + offset, stride, level-1, threshold>>1, lambda, intra);
score += lambda;
-
+
if(score < best_score){
best_score= score;
split=1;
@@ -1058,9 +1058,9 @@ static int encode_block(SVQ1Context *s, uint8_t *src, uint8_t *ref, uint8_t *dec
assert(best_mean >= -256 && best_mean<256);
assert(best_count >=0 && best_count<7);
assert(level<4 || best_count==0);
-
+
/* output the encoding */
- put_bits(&s->reorder_pb[level],
+ put_bits(&s->reorder_pb[level],
multistage_vlc[1 + best_count][1],
multistage_vlc[1 + best_count][0]);
put_bits(&s->reorder_pb[level], mean_vlc[best_mean][1],
@@ -1070,7 +1070,7 @@ static int encode_block(SVQ1Context *s, uint8_t *src, uint8_t *ref, uint8_t *dec
assert(best_vector[i]>=0 && best_vector[i]<16);
put_bits(&s->reorder_pb[level], 4, best_vector[i]);
}
-
+
for(y=0; y<h; y++){
for(x=0; x<w; x++){
decoded[x + y*stride]= src[x + y*stride] - block[best_count][x + w*y] + best_mean;
@@ -1107,8 +1107,8 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
s->m.last_picture_ptr = &s->m.last_picture;
s->m.last_picture.data[0]= ref_plane;
s->m.linesize=
- s->m.last_picture.linesize[0]=
- s->m.new_picture.linesize[0]=
+ s->m.last_picture.linesize[0]=
+ s->m.new_picture.linesize[0]=
s->m.current_picture.linesize[0]= stride;
s->m.width= width;
s->m.height= height;
@@ -1123,37 +1123,37 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
s->m.flags= s->avctx->flags;
// s->m.out_format = FMT_H263;
// s->m.unrestricted_mv= 1;
-
+
s->m.lambda= s->picture.quality;
s->m.qscale= (s->m.lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
s->m.lambda2= (s->m.lambda*s->m.lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
-
+
if(!s->motion_val8[plane]){
s->motion_val8 [plane]= av_mallocz((s->m.b8_stride*block_height*2 + 2)*2*sizeof(int16_t));
s->motion_val16[plane]= av_mallocz((s->m.mb_stride*(block_height + 2) + 1)*2*sizeof(int16_t));
}
s->m.mb_type= s->mb_type;
-
+
//dummies, to avoid segfaults
s->m.current_picture.mb_mean= (uint8_t *)s->dummy;
s->m.current_picture.mb_var= (uint16_t*)s->dummy;
s->m.current_picture.mc_mb_var= (uint16_t*)s->dummy;
s->m.current_picture.mb_type= s->dummy;
-
+
s->m.current_picture.motion_val[0]= s->motion_val8[plane] + 2;
s->m.p_mv_table= s->motion_val16[plane] + s->m.mb_stride + 1;
s->m.dsp= s->dsp; //move
ff_init_me(&s->m);
-
+
s->m.me.dia_size= s->avctx->dia_size;
s->m.first_slice_line=1;
for (y = 0; y < block_height; y++) {
uint8_t src[stride*16];
-
+
s->m.new_picture.data[0]= src - y*16*stride; //ugly
s->m.mb_y= y;
-
+
for(i=0; i<16 && i + 16*y<height; i++){
memcpy(&src[i*stride], &src_plane[(i+16*y)*src_stride], width);
for(x=width; x<16*block_width; x++)
@@ -1161,25 +1161,25 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
}
for(; i<16 && i + 16*y<16*block_height; i++)
memcpy(&src[i*stride], &src[(i-1)*stride], 16*block_width);
-
+
for (x = 0; x < block_width; x++) {
s->m.mb_x= x;
ff_init_block_index(&s->m);
ff_update_block_index(&s->m);
-
+
ff_estimate_p_frame_motion(&s->m, x, y);
}
s->m.first_slice_line=0;
}
-
+
ff_fix_long_p_mvs(&s->m);
ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code, CANDIDATE_MB_TYPE_INTER, 0);
}
-
+
s->m.first_slice_line=1;
for (y = 0; y < block_height; y++) {
uint8_t src[stride*16];
-
+
for(i=0; i<16 && i + 16*y<height; i++){
memcpy(&src[i*stride], &src_plane[(i+16*y)*src_stride], width);
for(x=width; x<16*block_width; x++)
@@ -1197,7 +1197,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
uint8_t *ref= ref_plane + offset;
int score[4]={0,0,0,0}, best;
uint8_t temp[16*stride];
-
+
if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3000){ //FIXME check size
av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
return -1;
@@ -1206,7 +1206,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
s->m.mb_x= x;
ff_init_block_index(&s->m);
ff_update_block_index(&s->m);
-
+
if(s->picture.pict_type == I_TYPE || (s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTRA)){
for(i=0; i<6; i++){
init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i], 7*32);
@@ -1223,9 +1223,9 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
}
}else
score[0]= INT_MAX;
-
+
best=0;
-
+
if(s->picture.pict_type == P_TYPE){
const uint8_t *vlc= svq1_block_type_vlc[SVQ1_BLOCK_INTER];
int mx, my, pred_x, pred_y, dxy;
@@ -1237,8 +1237,8 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i], 7*32);
put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
-
- s->m.pb= s->reorder_pb[5];
+
+ s->m.pb= s->reorder_pb[5];
mx= motion_ptr[0];
my= motion_ptr[1];
assert(mx>=-32 && mx<=31);
@@ -1249,11 +1249,11 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
ff_h263_encode_motion(&s->m, my - pred_y, 1);
s->reorder_pb[5]= s->m.pb;
score[1] += lambda*put_bits_count(&s->reorder_pb[5]);
-
+
dxy= (mx&1) + 2*(my&1);
-
+
s->dsp.put_pixels_tab[0][dxy](temp+16, ref + (mx>>1) + stride*(my>>1), stride, 16);
-
+
score[1]+= encode_block(s, src+16*x, temp+16, decoded, stride, 5, 64, lambda, 0);
best= score[1] <= score[0];
@@ -1282,7 +1282,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
motion_ptr[2+2*s->m.b8_stride] = motion_ptr[3+2*s->m.b8_stride]=0;
}
}
-
+
s->rd_total += score[best];
for(i=5; i>=0; i--){
@@ -1315,17 +1315,17 @@ static int svq1_encode_init(AVCodecContext *avctx)
s->avctx= avctx;
s->m.avctx= avctx;
- s->m.me.scratchpad= av_mallocz((avctx->width+64)*2*16*2*sizeof(uint8_t));
+ s->m.me.scratchpad= av_mallocz((avctx->width+64)*2*16*2*sizeof(uint8_t));
s->m.me.map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
s->m.me.score_map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
s->mb_type = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int16_t));
s->dummy = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int32_t));
h263_encode_init(&s->m); //mv_penalty
-
+
return 0;
}
-static int svq1_encode_frame(AVCodecContext *avctx, unsigned char *buf,
+static int svq1_encode_frame(AVCodecContext *avctx, unsigned char *buf,
int buf_size, void *data)
{
SVQ1Context * const s = avctx->priv_data;
@@ -1338,16 +1338,16 @@ static int svq1_encode_frame(AVCodecContext *avctx, unsigned char *buf,
av_log(avctx, AV_LOG_ERROR, "unsupported pixel format\n");
return -1;
}
-
+
if(!s->current_picture.data[0]){
avctx->get_buffer(avctx, &s->current_picture);
avctx->get_buffer(avctx, &s->last_picture);
}
-
+
temp= s->current_picture;
s->current_picture= s->last_picture;
s->last_picture= temp;
-
+
init_put_bits(&s->pb, buf, buf_size);
*p = *pict;
@@ -1358,7 +1358,7 @@ static int svq1_encode_frame(AVCodecContext *avctx, unsigned char *buf,
for(i=0; i<3; i++){
if(svq1_encode_plane(s, i,
s->picture.data[i], s->last_picture.data[i], s->current_picture.data[i],
- s->frame_width / (i?4:1), s->frame_height / (i?4:1),
+ s->frame_width / (i?4:1), s->frame_height / (i?4:1),
s->picture.linesize[i], s->current_picture.linesize[i]) < 0)
return -1;
}
@@ -1366,7 +1366,7 @@ static int svq1_encode_frame(AVCodecContext *avctx, unsigned char *buf,
// align_put_bits(&s->pb);
while(put_bits_count(&s->pb) & 31)
put_bits(&s->pb, 1, 0);
-
+
flush_put_bits(&s->pb);
return (put_bits_count(&s->pb) / 8);
@@ -1378,8 +1378,8 @@ static int svq1_encode_end(AVCodecContext *avctx)
int i;
av_log(avctx, AV_LOG_DEBUG, "RD: %f\n", s->rd_total/(double)(avctx->width*avctx->height*avctx->frame_number));
-
- av_freep(&s->m.me.scratchpad);
+
+ av_freep(&s->m.me.scratchpad);
av_freep(&s->m.me.map);
av_freep(&s->m.me.score_map);
av_freep(&s->mb_type);