author      Stephen Hutchinson    2013-10-28 16:52:45 -0400
committer   Michael Niedermayer   2013-10-28 22:25:26 +0100
commit      2c18bfe6af847eb01ad37a690ba39617b712b3d9 (patch)
tree        d3cb599847b67f092031990d7bf7751e34ee979e  /libavformat/avisynth.c
parent      1549122d26988f32eca4e990e2b0334788c7ea8c (diff)
avisynth: Cosmetics
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libavformat/avisynth.c')
-rw-r--r--   libavformat/avisynth.c   201
1 file changed, 116 insertions, 85 deletions
diff --git a/libavformat/avisynth.c b/libavformat/avisynth.c
index 4bfb51c651..60f627bb28 100644
--- a/libavformat/avisynth.c
+++ b/libavformat/avisynth.c
@@ -19,9 +19,9 @@
*/
#include "libavutil/internal.h"
+#include "libavcodec/internal.h"
#include "avformat.h"
#include "internal.h"
-#include "libavcodec/internal.h"
/* Enable function pointer definitions for runtime loading. */
#define AVSC_NO_DECLSPEC
@@ -43,11 +43,11 @@
#else
#include <dlfcn.h>
#include "compat/avisynth/avxsynth_c.h"
- #if defined (__APPLE__)
- #define AVISYNTH_LIB "libavxsynth.dylib"
- #else
- #define AVISYNTH_LIB "libavxsynth.so"
- #endif
+ #if defined (__APPLE__)
+ #define AVISYNTH_LIB "libavxsynth.dylib"
+ #else
+ #define AVISYNTH_LIB "libavxsynth.so"
+ #endif
#define LoadLibrary(x) dlopen(x, RTLD_NOW | RTLD_GLOBAL)
#define GetProcAddress dlsym
@@ -64,7 +64,7 @@
typedef struct {
void *library;
-#define AVSC_DECLARE_FUNC(name) name##_func name
+#define AVSC_DECLARE_FUNC(name) name ## _func name
AVSC_DECLARE_FUNC(avs_bit_blt);
AVSC_DECLARE_FUNC(avs_clip_get_error);
AVSC_DECLARE_FUNC(avs_create_script_environment);
@@ -102,21 +102,23 @@ struct AviSynthContext {
};
typedef struct AviSynthContext AviSynthContext;
-static const int avs_planes_packed[1] = {0};
-static const int avs_planes_grey[1] = {AVS_PLANAR_Y};
-static const int avs_planes_yuv[3] = {AVS_PLANAR_Y, AVS_PLANAR_U, AVS_PLANAR_V};
+static const int avs_planes_packed[1] = { 0 };
+static const int avs_planes_grey[1] = { AVS_PLANAR_Y };
+static const int avs_planes_yuv[3] = { AVS_PLANAR_Y, AVS_PLANAR_U,
+ AVS_PLANAR_V };
/* A conflict between C++ global objects, atexit, and dynamic loading requires
* us to register our own atexit handler to prevent double freeing. */
static AviSynthLibrary *avs_library = NULL;
-static int avs_atexit_called = 0;
+static int avs_atexit_called = 0;
/* Linked list of AviSynthContexts. An atexit handler destroys this list. */
static AviSynthContext *avs_ctx_list = NULL;
static av_cold void avisynth_atexit_handler(void);
-static av_cold int avisynth_load_library(void) {
+static av_cold int avisynth_load_library(void)
+{
avs_library = av_mallocz(sizeof(AviSynthLibrary));
if (!avs_library)
return AVERROR_UNKNOWN;
@@ -125,12 +127,12 @@ static av_cold int avisynth_load_library(void) {
if (!avs_library->library)
goto init_fail;
-#define LOAD_AVS_FUNC(name, continue_on_fail) \
-{ \
- avs_library->name = (void*)GetProcAddress(avs_library->library, #name); \
- if(!continue_on_fail && !avs_library->name) \
- goto fail; \
-}
+#define LOAD_AVS_FUNC(name, continue_on_fail) \
+ avs_library->name = \
+ (void *)GetProcAddress(avs_library->library, #name); \
+ if (!continue_on_fail && !avs_library->name) \
+ goto fail;
+
LOAD_AVS_FUNC(avs_bit_blt, 0);
LOAD_AVS_FUNC(avs_clip_get_error, 0);
LOAD_AVS_FUNC(avs_create_script_environment, 0);
@@ -160,14 +162,14 @@ init_fail:
/* Note that avisynth_context_create and avisynth_context_destroy
* do not allocate or free the actual context! That is taken care of
* by libavformat. */
-static av_cold int avisynth_context_create(AVFormatContext *s) {
+static av_cold int avisynth_context_create(AVFormatContext *s)
+{
AviSynthContext *avs = (AviSynthContext *)s->priv_data;
int ret;
- if (!avs_library) {
+ if (!avs_library)
if (ret = avisynth_load_library())
return ret;
- }
avs->env = avs_library->avs_create_script_environment(3);
if (avs_library->avs_get_error) {
@@ -181,16 +183,17 @@ static av_cold int avisynth_context_create(AVFormatContext *s) {
if (!avs_ctx_list) {
avs_ctx_list = avs;
} else {
- avs->next = avs_ctx_list;
+ avs->next = avs_ctx_list;
avs_ctx_list = avs;
}
return 0;
}
-static av_cold void avisynth_context_destroy(AviSynthContext *avs) {
+static av_cold void avisynth_context_destroy(AviSynthContext *avs)
+{
if (avs_atexit_called)
- return;
+ return;
if (avs == avs_ctx_list) {
avs_ctx_list = avs->next;
@@ -211,7 +214,8 @@ static av_cold void avisynth_context_destroy(AviSynthContext *avs) {
}
}
-static av_cold void avisynth_atexit_handler(void) {
+static av_cold void avisynth_atexit_handler(void)
+{
AviSynthContext *avs = avs_ctx_list;
while (avs) {
@@ -226,38 +230,41 @@ static av_cold void avisynth_atexit_handler(void) {
}
/* Create AVStream from audio and video data. */
-static int avisynth_create_stream_video(AVFormatContext *s, AVStream *st) {
+static int avisynth_create_stream_video(AVFormatContext *s, AVStream *st)
+{
AviSynthContext *avs = s->priv_data;
int planar = 0; // 0: packed, 1: YUV, 2: Y8
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
- st->codec->codec_id = CODEC_ID_RAWVIDEO;
- st->codec->width = avs->vi->width;
- st->codec->height = avs->vi->height;
-
- st->time_base = (AVRational) {avs->vi->fps_denominator, avs->vi->fps_numerator};
- st->avg_frame_rate = (AVRational) {avs->vi->fps_numerator, avs->vi->fps_denominator};
- st->start_time = 0;
- st->duration = avs->vi->num_frames;
- st->nb_frames = avs->vi->num_frames;
+ st->codec->codec_id = CODEC_ID_RAWVIDEO;
+ st->codec->width = avs->vi->width;
+ st->codec->height = avs->vi->height;
+
+ st->time_base = (AVRational) { avs->vi->fps_denominator,
+ avs->vi->fps_numerator };
+ st->avg_frame_rate = (AVRational) { avs->vi->fps_numerator,
+ avs->vi->fps_denominator };
+ st->start_time = 0;
+ st->duration = avs->vi->num_frames;
+ st->nb_frames = avs->vi->num_frames;
switch (avs->vi->pixel_type) {
#ifdef USING_AVISYNTH
case AVS_CS_YV24:
st->codec->pix_fmt = AV_PIX_FMT_YUV444P;
- planar = 1;
+ planar = 1;
break;
case AVS_CS_YV16:
st->codec->pix_fmt = AV_PIX_FMT_YUV422P;
- planar = 1;
+ planar = 1;
break;
case AVS_CS_YV411:
st->codec->pix_fmt = AV_PIX_FMT_YUV411P;
- planar = 1;
+ planar = 1;
break;
case AVS_CS_Y8:
st->codec->pix_fmt = AV_PIX_FMT_GRAY8;
- planar = 2;
+ planar = 2;
break;
#endif
case AVS_CS_BGR24:
@@ -271,14 +278,15 @@ static int avisynth_create_stream_video(AVFormatContext *s, AVStream *st) {
break;
case AVS_CS_YV12:
st->codec->pix_fmt = AV_PIX_FMT_YUV420P;
- planar = 1;
+ planar = 1;
break;
case AVS_CS_I420: // Is this even used anywhere?
st->codec->pix_fmt = AV_PIX_FMT_YUV420P;
- planar = 1;
+ planar = 1;
break;
default:
- av_log(s, AV_LOG_ERROR, "unknown AviSynth colorspace %d\n", avs->vi->pixel_type);
+ av_log(s, AV_LOG_ERROR,
+ "unknown AviSynth colorspace %d\n", avs->vi->pixel_type);
avs->error = 1;
return AVERROR_UNKNOWN;
}
@@ -286,26 +294,28 @@ static int avisynth_create_stream_video(AVFormatContext *s, AVStream *st) {
switch (planar) {
case 2: // Y8
avs->n_planes = 1;
- avs->planes = avs_planes_grey;
+ avs->planes = avs_planes_grey;
break;
case 1: // YUV
avs->n_planes = 3;
- avs->planes = avs_planes_yuv;
+ avs->planes = avs_planes_yuv;
break;
default:
avs->n_planes = 1;
- avs->planes = avs_planes_packed;
+ avs->planes = avs_planes_packed;
}
return 0;
}
-static int avisynth_create_stream_audio(AVFormatContext *s, AVStream *st) {
+static int avisynth_create_stream_audio(AVFormatContext *s, AVStream *st)
+{
AviSynthContext *avs = s->priv_data;
- st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
+ st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->sample_rate = avs->vi->audio_samples_per_second;
- st->codec->channels = avs->vi->nchannels;
- st->time_base = (AVRational) {1, avs->vi->audio_samples_per_second};
+ st->codec->channels = avs->vi->nchannels;
+ st->time_base = (AVRational) { 1,
+ avs->vi->audio_samples_per_second };
switch (avs->vi->sample_type) {
case AVS_SAMPLE_INT8:
@@ -324,14 +334,16 @@ static int avisynth_create_stream_audio(AVFormatContext *s, AVStream *st) {
st->codec->codec_id = CODEC_ID_PCM_F32LE;
break;
default:
- av_log(s, AV_LOG_ERROR, "unknown AviSynth sample type %d\n", avs->vi->sample_type);
+ av_log(s, AV_LOG_ERROR,
+ "unknown AviSynth sample type %d\n", avs->vi->sample_type);
avs->error = 1;
return AVERROR_UNKNOWN;
}
return 0;
}
-static int avisynth_create_stream(AVFormatContext *s) {
+static int avisynth_create_stream(AVFormatContext *s)
+{
AviSynthContext *avs = s->priv_data;
AVStream *st;
int ret;
@@ -356,7 +368,8 @@ static int avisynth_create_stream(AVFormatContext *s) {
return 0;
}
-static int avisynth_open_file(AVFormatContext *s) {
+static int avisynth_open_file(AVFormatContext *s)
+{
AviSynthContext *avs = (AviSynthContext *)s->priv_data;
AVS_Value arg, val;
int ret;
@@ -371,7 +384,8 @@ static int avisynth_open_file(AVFormatContext *s) {
#ifdef USING_AVISYNTH
/* Convert UTF-8 to ANSI code page */
MultiByteToWideChar(CP_UTF8, 0, s->filename, -1, filename_wc, MAX_PATH * 4);
- WideCharToMultiByte(CP_THREAD_ACP, 0, filename_wc, -1, filename_ansi, MAX_PATH * 4, NULL, NULL);
+ WideCharToMultiByte(CP_THREAD_ACP, 0, filename_wc, -1, filename_ansi,
+ MAX_PATH * 4, NULL, NULL);
arg = avs_new_value_string(filename_ansi);
#else
arg = avs_new_value_string(s->filename);
@@ -389,7 +403,7 @@ static int avisynth_open_file(AVFormatContext *s) {
}
avs->clip = avs_library->avs_take_clip(val, avs->env);
- avs->vi = avs_library->avs_get_video_info(avs->clip);
+ avs->vi = avs_library->avs_get_video_info(avs->clip);
/* Release the AVS_Value as it will go out of scope. */
avs_library->avs_release_value(val);
@@ -404,7 +418,9 @@ fail:
return ret;
}
-static void avisynth_next_stream(AVFormatContext *s, AVStream **st, AVPacket *pkt, int *discard) {
+static void avisynth_next_stream(AVFormatContext *s, AVStream **st,
+ AVPacket *pkt, int *discard)
+{
AviSynthContext *avs = s->priv_data;
pkt->stream_index = avs->curr_stream++;
@@ -420,7 +436,9 @@ static void avisynth_next_stream(AVFormatContext *s, AVStream **st, AVPacket *pk
}
/* Copy AviSynth clip data into an AVPacket. */
-static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt, int discard) {
+static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt,
+ int discard)
+{
AviSynthContext *avs = s->priv_data;
AVS_VideoFrame *frame;
unsigned char *dst_p;
@@ -436,26 +454,26 @@ static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt, int dis
if (discard)
return 0;
- pkt->pts = n;
- pkt->dts = n;
+ pkt->pts = n;
+ pkt->dts = n;
pkt->duration = 1;
// Define the bpp values for the new AviSynth 2.6 colorspaces
- if (avs_is_yv24(avs->vi)) {
+ if (avs_is_yv24(avs->vi))
bits = 24;
- } else if (avs_is_yv16(avs->vi)) {
+ else if (avs_is_yv16(avs->vi))
bits = 16;
- } else if (avs_is_yv411(avs->vi)) {
+ else if (avs_is_yv411(avs->vi))
bits = 12;
- } else if (avs_is_y8(avs->vi)) {
+ else if (avs_is_y8(avs->vi))
bits = 8;
- } else {
+ else
bits = avs_bits_per_pixel(avs->vi);
- }
/* Without the cast to int64_t, calculation overflows at about 9k x 9k
* resolution. */
- pkt->size = (((int64_t)avs->vi->width * (int64_t)avs->vi->height) * bits) / 8;
+ pkt->size = (((int64_t)avs->vi->width *
+ (int64_t)avs->vi->height) * bits) / 8;
if (!pkt->size)
return AVERROR_UNKNOWN;
pkt->data = av_malloc(pkt->size);
@@ -479,14 +497,14 @@ static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt, int dis
#ifdef USING_AVISYNTH
if (avs_library->avs_get_version(avs->clip) == 3) {
- rowsize = avs_get_row_size_p_25(frame, plane);
+ rowsize = avs_get_row_size_p_25(frame, plane);
planeheight = avs_get_height_p_25(frame, plane);
} else {
- rowsize = avs_get_row_size_p(frame, plane);
+ rowsize = avs_get_row_size_p(frame, plane);
planeheight = avs_get_height_p(frame, plane);
}
#else
- rowsize = avs_get_row_size_p(frame, plane);
+ rowsize = avs_get_row_size_p(frame, plane);
planeheight = avs_get_height_p(frame, plane);
#endif
@@ -496,7 +514,8 @@ static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt, int dis
pitch = -pitch;
}
- avs_library->avs_bit_blt(avs->env, dst_p, rowsize, src_p, pitch, rowsize, planeheight);
+ avs_library->avs_bit_blt(avs->env, dst_p, rowsize, src_p, pitch,
+ rowsize, planeheight);
dst_p += rowsize * planeheight;
}
@@ -504,7 +523,9 @@ static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt, int dis
return 0;
}
-static int avisynth_read_packet_audio(AVFormatContext *s, AVPacket *pkt, int discard) {
+static int avisynth_read_packet_audio(AVFormatContext *s, AVPacket *pkt,
+ int discard)
+{
AviSynthContext *avs = s->priv_data;
AVRational fps, samplerate;
int samples;
@@ -514,14 +535,15 @@ static int avisynth_read_packet_audio(AVFormatContext *s, AVPacket *pkt, int dis
if (avs->curr_sample >= avs->vi->num_audio_samples)
return AVERROR_EOF;
- fps.num = avs->vi->fps_numerator;
- fps.den = avs->vi->fps_denominator;
+ fps.num = avs->vi->fps_numerator;
+ fps.den = avs->vi->fps_denominator;
samplerate.num = avs->vi->audio_samples_per_second;
samplerate.den = 1;
if (avs_has_video(avs->vi)) {
if (avs->curr_frame < avs->vi->num_frames)
- samples = av_rescale_q(avs->curr_frame, samplerate, fps) - avs->curr_sample;
+ samples = av_rescale_q(avs->curr_frame, samplerate, fps) -
+ avs->curr_sample;
else
samples = av_rescale_q(1, samplerate, fps);
} else {
@@ -539,16 +561,17 @@ static int avisynth_read_packet_audio(AVFormatContext *s, AVPacket *pkt, int dis
samples = avs->vi->num_audio_samples - avs->curr_sample;
/* This must happen even if the stream is discarded to prevent desync. */
- n = avs->curr_sample;
+ n = avs->curr_sample;
avs->curr_sample += samples;
if (discard)
return 0;
- pkt->pts = n;
- pkt->dts = n;
+ pkt->pts = n;
+ pkt->dts = n;
pkt->duration = samples;
- pkt->size = avs_bytes_per_channel_sample(avs->vi) * samples * avs->vi->nchannels;
+ pkt->size = avs_bytes_per_channel_sample(avs->vi) *
+ samples * avs->vi->nchannels;
if (!pkt->size)
return AVERROR_UNKNOWN;
pkt->data = av_malloc(pkt->size);
@@ -566,7 +589,8 @@ static int avisynth_read_packet_audio(AVFormatContext *s, AVPacket *pkt, int dis
return 0;
}
-static av_cold int avisynth_read_header(AVFormatContext *s) {
+static av_cold int avisynth_read_header(AVFormatContext *s)
+{
int ret;
// Calling library must implement a lock for thread-safe opens.
@@ -582,7 +606,8 @@ static av_cold int avisynth_read_header(AVFormatContext *s) {
return 0;
}
-static int avisynth_read_packet(AVFormatContext *s, AVPacket *pkt) {
+static int avisynth_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
AviSynthContext *avs = s->priv_data;
AVStream *st;
int discard = 0;
@@ -613,7 +638,8 @@ static int avisynth_read_packet(AVFormatContext *s, AVPacket *pkt) {
}
}
-static av_cold int avisynth_read_close(AVFormatContext *s) {
+static av_cold int avisynth_read_close(AVFormatContext *s)
+{
if (avpriv_lock_avformat())
return AVERROR_UNKNOWN;
@@ -622,7 +648,9 @@ static av_cold int avisynth_read_close(AVFormatContext *s) {
return 0;
}
-static int avisynth_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) {
+static int avisynth_read_seek(AVFormatContext *s, int stream_index,
+ int64_t timestamp, int flags)
+{
AviSynthContext *avs = s->priv_data;
AVStream *st;
AVRational fps, samplerate;
@@ -630,13 +658,16 @@ static int avisynth_read_seek(AVFormatContext *s, int stream_index, int64_t time
if (avs->error)
return AVERROR_UNKNOWN;
- fps = (AVRational) {avs->vi->fps_numerator, avs->vi->fps_denominator};
- samplerate = (AVRational) {avs->vi->audio_samples_per_second, 1};
+ fps = (AVRational) { avs->vi->fps_numerator,
+ avs->vi->fps_denominator };
+ samplerate = (AVRational) { avs->vi->audio_samples_per_second, 1 };
st = s->streams[stream_index];
if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
/* AviSynth frame counts are signed int. */
- if ((timestamp >= avs->vi->num_frames) || (timestamp > INT_MAX) || (timestamp < 0))
+ if ((timestamp >= avs->vi->num_frames) ||
+ (timestamp > INT_MAX) ||
+ (timestamp < 0))
return AVERROR_EOF;
avs->curr_frame = timestamp;
if (avs_has_audio(avs->vi))
@@ -646,7 +677,7 @@ static int avisynth_read_seek(AVFormatContext *s, int stream_index, int64_t time
return AVERROR_EOF;
/* Force frame granularity for seeking. */
if (avs_has_video(avs->vi)) {
- avs->curr_frame = av_rescale_q(timestamp, fps, samplerate);
+ avs->curr_frame = av_rescale_q(timestamp, fps, samplerate);
avs->curr_sample = av_rescale_q(avs->curr_frame, samplerate, fps);
} else {
avs->curr_sample = timestamp;