Diffstat (limited to 'libavdevice')
-rw-r--r--  libavdevice/Makefile                 |   19
-rw-r--r--  libavdevice/alldevices.c             |   19
-rw-r--r--  libavdevice/alsa-audio-common.c      |   85
-rw-r--r--  libavdevice/alsa-audio-dec.c         |   62
-rw-r--r--  libavdevice/alsa-audio-enc.c         |   25
-rw-r--r--  libavdevice/alsa-audio.h             |   21
-rw-r--r--  libavdevice/avdevice.c               |   14
-rw-r--r--  libavdevice/avdevice.h               |   10
-rw-r--r--  libavdevice/bktr.c                   |   29
-rw-r--r--  libavdevice/caca.c                   |  240
-rw-r--r--  libavdevice/dshow.c                  | 1095
-rw-r--r--  libavdevice/dshow_capture.h          |  279
-rw-r--r--  libavdevice/dshow_common.c           |  190
-rw-r--r--  libavdevice/dshow_enummediatypes.c   |  103
-rw-r--r--  libavdevice/dshow_enumpins.c         |  105
-rw-r--r--  libavdevice/dshow_filter.c           |  202
-rw-r--r--  libavdevice/dshow_pin.c              |  362
-rw-r--r--  libavdevice/dv1394.c                 |   12
-rw-r--r--  libavdevice/dv1394.h                 |    8
-rw-r--r--  libavdevice/fbdev.c                  |   45
-rw-r--r--  libavdevice/iec61883.c               |  497
-rw-r--r--  libavdevice/jack_audio.c             |   14
-rw-r--r--  libavdevice/lavfi.c                  |  435
-rw-r--r--  libavdevice/libcdio.c                |    2
-rw-r--r--  libavdevice/libdc1394.c              |    8
-rw-r--r--  libavdevice/openal-dec.c             |  252
-rw-r--r--  libavdevice/oss_audio.c              |   17
-rw-r--r--  libavdevice/pulse.c                  |    2
-rw-r--r--  libavdevice/sdl.c                    |  236
-rw-r--r--  libavdevice/sndio_common.c           |   10
-rw-r--r--  libavdevice/sndio_common.h           |   10
-rw-r--r--  libavdevice/sndio_dec.c              |    8
-rw-r--r--  libavdevice/sndio_enc.c              |   11
-rw-r--r--  libavdevice/timefilter.c             |   50
-rw-r--r--  libavdevice/timefilter.h             |   27
-rw-r--r--  libavdevice/v4l.c                    |  363
-rw-r--r--  libavdevice/v4l2-common.c            |   96
-rw-r--r--  libavdevice/v4l2-common.h            |   62
-rw-r--r--  libavdevice/v4l2.c                   |  792
-rw-r--r--  libavdevice/v4l2enc.c                |  110
-rw-r--r--  libavdevice/version.h                |   16
-rw-r--r--  libavdevice/vfwcap.c                 |   53
-rw-r--r--  libavdevice/x11grab.c                |   81
-rw-r--r--  libavdevice/xv.c                     |  217
44 files changed, 5649 insertions, 645 deletions
diff --git a/libavdevice/Makefile b/libavdevice/Makefile
index 8fdae2d44e..1c6a463c6a 100644
--- a/libavdevice/Makefile
+++ b/libavdevice/Makefile
@@ -1,5 +1,8 @@
+include $(SUBDIR)../config.mak
+
NAME = avdevice
FFLIBS = avformat avcodec avutil
+FFLIBS-$(CONFIG_LAVFI_INDEV) += avfilter
HEADERS = avdevice.h \
version.h \
@@ -11,26 +14,38 @@ OBJS-$(HAVE_MSVCRT) += file_open.o
# input/output devices
OBJS-$(CONFIG_ALSA_INDEV) += alsa-audio-common.o \
- alsa-audio-dec.o
+ alsa-audio-dec.o timefilter.o
OBJS-$(CONFIG_ALSA_OUTDEV) += alsa-audio-common.o \
alsa-audio-enc.o
OBJS-$(CONFIG_BKTR_INDEV) += bktr.o
+OBJS-$(CONFIG_CACA_OUTDEV) += caca.o
+OBJS-$(CONFIG_DSHOW_INDEV) += dshow.o dshow_enummediatypes.o \
+ dshow_enumpins.o dshow_filter.o \
+ dshow_pin.o dshow_common.o
OBJS-$(CONFIG_DV1394_INDEV) += dv1394.o
OBJS-$(CONFIG_FBDEV_INDEV) += fbdev.o
+OBJS-$(CONFIG_IEC61883_INDEV) += iec61883.o
OBJS-$(CONFIG_JACK_INDEV) += jack_audio.o timefilter.o
+OBJS-$(CONFIG_LAVFI_INDEV) += lavfi.o
+OBJS-$(CONFIG_OPENAL_INDEV) += openal-dec.o
OBJS-$(CONFIG_OSS_INDEV) += oss_audio.o
OBJS-$(CONFIG_OSS_OUTDEV) += oss_audio.o
OBJS-$(CONFIG_PULSE_INDEV) += pulse.o
+OBJS-$(CONFIG_SDL_OUTDEV) += sdl.o
OBJS-$(CONFIG_SNDIO_INDEV) += sndio_common.o sndio_dec.o
OBJS-$(CONFIG_SNDIO_OUTDEV) += sndio_common.o sndio_enc.o
-OBJS-$(CONFIG_V4L2_INDEV) += v4l2.o
+OBJS-$(CONFIG_V4L2_INDEV) += v4l2.o v4l2-common.o timefilter.o
+OBJS-$(CONFIG_V4L2_OUTDEV) += v4l2enc.o v4l2-common.o
+OBJS-$(CONFIG_V4L_INDEV) += v4l.o
OBJS-$(CONFIG_VFWCAP_INDEV) += vfwcap.o
OBJS-$(CONFIG_X11GRAB_INDEV) += x11grab.o
+OBJS-$(CONFIG_XV_OUTDEV) += xv.o
# external libraries
OBJS-$(CONFIG_LIBCDIO_INDEV) += libcdio.o
OBJS-$(CONFIG_LIBDC1394_INDEV) += libdc1394.o
+SKIPHEADERS-$(CONFIG_DSHOW_INDEV) += dshow_capture.h
SKIPHEADERS-$(HAVE_ALSA_ASOUNDLIB_H) += alsa-audio.h
SKIPHEADERS-$(HAVE_SNDIO_H) += sndio_common.h
diff --git a/libavdevice/alldevices.c b/libavdevice/alldevices.c
index 155f7a8ee6..fc8d3ce03e 100644
--- a/libavdevice/alldevices.c
+++ b/libavdevice/alldevices.c
@@ -1,25 +1,24 @@
/*
* Register all the grabbing devices.
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
-#include "libavformat/avformat.h"
#include "avdevice.h"
#define REGISTER_OUTDEV(X, x) \
@@ -49,15 +48,23 @@ void avdevice_register_all(void)
/* devices */
REGISTER_INOUTDEV(ALSA, alsa);
REGISTER_INDEV (BKTR, bktr);
+ REGISTER_OUTDEV (CACA, caca);
+ REGISTER_INDEV (DSHOW, dshow);
REGISTER_INDEV (DV1394, dv1394);
REGISTER_INDEV (FBDEV, fbdev);
+ REGISTER_INDEV (IEC61883, iec61883);
REGISTER_INDEV (JACK, jack);
+ REGISTER_INDEV (LAVFI, lavfi);
+ REGISTER_INDEV (OPENAL, openal);
REGISTER_INOUTDEV(OSS, oss);
REGISTER_INDEV (PULSE, pulse);
+ REGISTER_OUTDEV (SDL, sdl);
REGISTER_INOUTDEV(SNDIO, sndio);
- REGISTER_INDEV (V4L2, v4l2);
+ REGISTER_INOUTDEV(V4L2, v4l2);
+// REGISTER_INDEV (V4L, v4l
REGISTER_INDEV (VFWCAP, vfwcap);
REGISTER_INDEV (X11GRAB, x11grab);
+ REGISTER_OUTDEV (XV, xv);
/* external libraries */
REGISTER_INDEV (LIBCDIO, libcdio);
diff --git a/libavdevice/alsa-audio-common.c b/libavdevice/alsa-audio-common.c
index 21f1594241..4e63397380 100644
--- a/libavdevice/alsa-audio-common.c
+++ b/libavdevice/alsa-audio-common.c
@@ -3,20 +3,20 @@
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -29,7 +29,7 @@
*/
#include <alsa/asoundlib.h>
-#include "libavformat/avformat.h"
+#include "avdevice.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
@@ -62,48 +62,45 @@ static av_cold snd_pcm_format_t codec_id_to_pcm_format(int codec_id)
}
}
-#define REORDER_OUT_50(NAME, TYPE) \
-static void alsa_reorder_ ## NAME ## _out_50(const void *in_v, void *out_v, int n) \
-{ \
- const TYPE *in = in_v; \
- TYPE *out = out_v; \
-\
- while (n-- > 0) { \
+#define MAKE_REORDER_FUNC(NAME, TYPE, CHANNELS, LAYOUT, MAP) \
+static void alsa_reorder_ ## NAME ## _ ## LAYOUT(const void *in_v, \
+ void *out_v, \
+ int n) \
+{ \
+ const TYPE *in = in_v; \
+ TYPE *out = out_v; \
+ \
+ while (n-- > 0) { \
+ MAP \
+ in += CHANNELS; \
+ out += CHANNELS; \
+ } \
+}
+
+#define MAKE_REORDER_FUNCS(CHANNELS, LAYOUT, MAP) \
+ MAKE_REORDER_FUNC(int8, int8_t, CHANNELS, LAYOUT, MAP) \
+ MAKE_REORDER_FUNC(int16, int16_t, CHANNELS, LAYOUT, MAP) \
+ MAKE_REORDER_FUNC(int32, int32_t, CHANNELS, LAYOUT, MAP) \
+ MAKE_REORDER_FUNC(f32, float, CHANNELS, LAYOUT, MAP)
+
+MAKE_REORDER_FUNCS(5, out_50, \
out[0] = in[0]; \
out[1] = in[1]; \
out[2] = in[3]; \
out[3] = in[4]; \
out[4] = in[2]; \
- in += 5; \
- out += 5; \
- } \
-}
+ );
-#define REORDER_OUT_51(NAME, TYPE) \
-static void alsa_reorder_ ## NAME ## _out_51(const void *in_v, void *out_v, int n) \
-{ \
- const TYPE *in = in_v; \
- TYPE *out = out_v; \
-\
- while (n-- > 0) { \
+MAKE_REORDER_FUNCS(6, out_51, \
out[0] = in[0]; \
out[1] = in[1]; \
out[2] = in[4]; \
out[3] = in[5]; \
out[4] = in[2]; \
out[5] = in[3]; \
- in += 6; \
- out += 6; \
- } \
-}
+ );
-#define REORDER_OUT_71(NAME, TYPE) \
-static void alsa_reorder_ ## NAME ## _out_71(const void *in_v, void *out_v, int n) \
-{ \
- const TYPE *in = in_v; \
- TYPE *out = out_v; \
-\
- while (n-- > 0) { \
+MAKE_REORDER_FUNCS(8, out_71, \
out[0] = in[0]; \
out[1] = in[1]; \
out[2] = in[4]; \
@@ -112,23 +109,7 @@ static void alsa_reorder_ ## NAME ## _out_71(const void *in_v, void *out_v, int
out[5] = in[3]; \
out[6] = in[6]; \
out[7] = in[7]; \
- in += 8; \
- out += 8; \
- } \
-}
-
-REORDER_OUT_50(int8, int8_t)
-REORDER_OUT_51(int8, int8_t)
-REORDER_OUT_71(int8, int8_t)
-REORDER_OUT_50(int16, int16_t)
-REORDER_OUT_51(int16, int16_t)
-REORDER_OUT_71(int16, int16_t)
-REORDER_OUT_50(int32, int32_t)
-REORDER_OUT_51(int32, int32_t)
-REORDER_OUT_71(int32, int32_t)
-REORDER_OUT_50(f32, float)
-REORDER_OUT_51(f32, float)
-REORDER_OUT_71(f32, float)
+ );
#define FORMAT_I8 0
#define FORMAT_I16 1
@@ -320,6 +301,8 @@ av_cold int ff_alsa_close(AVFormatContext *s1)
AlsaData *s = s1->priv_data;
av_freep(&s->reorder_buf);
+ if (CONFIG_ALSA_INDEV)
+ ff_timefilter_destroy(s->timefilter);
snd_pcm_close(s->h);
return 0;
}
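For reference (not part of the patch): MAKE_REORDER_FUNCS() above generates one reorder function per sample type and layout, replacing the twelve hand-written variants. A minimal sketch of a single expansion, reconstructed from the macro body in the hunk and assuming the usual FFmpeg-native vs. ALSA 5.1 channel orders:

    #include <stdint.h>

    /* What MAKE_REORDER_FUNC(int16, int16_t, 6, out_51, ...) expands to:
     * copy n frames of 6-channel s16 audio, moving the back pair and the
     * center/LFE pair into the slots ALSA expects. */
    static void alsa_reorder_int16_out_51(const void *in_v, void *out_v, int n)
    {
        const int16_t *in = in_v;
        int16_t *out = out_v;

        while (n-- > 0) {
            out[0] = in[0];   /* front left             */
            out[1] = in[1];   /* front right            */
            out[2] = in[4];   /* back left  -> slot 2   */
            out[3] = in[5];   /* back right -> slot 3   */
            out[4] = in[2];   /* center     -> slot 4   */
            out[5] = in[3];   /* LFE        -> slot 5   */
            in  += 6;
            out += 6;
        }
    }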
diff --git a/libavdevice/alsa-audio-dec.c b/libavdevice/alsa-audio-dec.c
index 5b32ed980c..03154b0b7c 100644
--- a/libavdevice/alsa-audio-dec.c
+++ b/libavdevice/alsa-audio-dec.c
@@ -3,20 +3,20 @@
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -46,10 +46,12 @@
*/
#include <alsa/asoundlib.h>
-#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/opt.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/time.h"
+#include "avdevice.h"
#include "alsa-audio.h"
static av_cold int audio_read_header(AVFormatContext *s1)
@@ -58,7 +60,6 @@ static av_cold int audio_read_header(AVFormatContext *s1)
AVStream *st;
int ret;
enum AVCodecID codec_id;
- snd_pcm_sw_params_t *sw_params;
st = avformat_new_stream(s1, NULL);
if (!st) {
@@ -74,35 +75,17 @@ static av_cold int audio_read_header(AVFormatContext *s1)
return AVERROR(EIO);
}
- if (snd_pcm_type(s->h) != SND_PCM_TYPE_HW)
- av_log(s1, AV_LOG_WARNING,
- "capture with some ALSA plugins, especially dsnoop, "
- "may hang.\n");
-
- ret = snd_pcm_sw_params_malloc(&sw_params);
- if (ret < 0) {
- av_log(s1, AV_LOG_ERROR, "cannot allocate software parameters structure (%s)\n",
- snd_strerror(ret));
- goto fail;
- }
-
- snd_pcm_sw_params_current(s->h, sw_params);
- snd_pcm_sw_params_set_tstamp_mode(s->h, sw_params, SND_PCM_TSTAMP_ENABLE);
-
- ret = snd_pcm_sw_params(s->h, sw_params);
- snd_pcm_sw_params_free(sw_params);
- if (ret < 0) {
- av_log(s1, AV_LOG_ERROR, "cannot install ALSA software parameters (%s)\n",
- snd_strerror(ret));
- goto fail;
- }
-
/* take real parameters */
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = codec_id;
st->codec->sample_rate = s->sample_rate;
st->codec->channels = s->channels;
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+ /* microseconds instead of seconds, MHz instead of Hz */
+ s->timefilter = ff_timefilter_new(1000000.0 / s->sample_rate,
+ s->period_size, 1.5E-6);
+ if (!s->timefilter)
+ goto fail;
return 0;
@@ -114,16 +97,15 @@ fail:
static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
AlsaData *s = s1->priv_data;
- AVStream *st = s1->streams[0];
int res;
- snd_htimestamp_t timestamp;
- snd_pcm_uframes_t ts_delay;
+ int64_t dts;
+ snd_pcm_sframes_t delay = 0;
- if (av_new_packet(pkt, s->period_size) < 0) {
+ if (av_new_packet(pkt, s->period_size * s->frame_size) < 0) {
return AVERROR(EIO);
}
- while ((res = snd_pcm_readi(s->h, pkt->data, pkt->size / s->frame_size)) < 0) {
+ while ((res = snd_pcm_readi(s->h, pkt->data, s->period_size)) < 0) {
if (res == -EAGAIN) {
av_free_packet(pkt);
@@ -136,14 +118,14 @@ static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
return AVERROR(EIO);
}
+ ff_timefilter_reset(s->timefilter);
}
- snd_pcm_htimestamp(s->h, &ts_delay, &timestamp);
- ts_delay += res;
- pkt->pts = timestamp.tv_sec * 1000000LL
- + (timestamp.tv_nsec * st->codec->sample_rate
- - ts_delay * 1000000000LL + st->codec->sample_rate * 500LL)
- / (st->codec->sample_rate * 1000LL);
+ dts = av_gettime();
+ snd_pcm_delay(s->h, &delay);
+ dts -= av_rescale(delay + res, 1000000, s->sample_rate);
+ pkt->pts = ff_timefilter_update(s->timefilter, dts, s->last_period);
+ s->last_period = res;
pkt->size = res * s->frame_size;
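For reference (not part of the patch): the new timestamping above drops ALSA's htimestamp in favor of a wall-clock estimate that is then smoothed by the timefilter. A minimal sketch of the raw DTS computation, using only the calls visible in the hunk; raw_capture_dts() is a hypothetical helper name:

    #include <alsa/asoundlib.h>
    #include "libavutil/mathematics.h"
    #include "libavutil/time.h"

    /* Hypothetical helper: estimate when the first sample of the packet was
     * captured, as "now" minus the duration of the frames still queued in
     * the driver plus the frames just read. This is the value the hunk
     * feeds into ff_timefilter_update(). */
    static int64_t raw_capture_dts(snd_pcm_t *h, int frames_read, int sample_rate)
    {
        snd_pcm_sframes_t delay = 0;
        int64_t now = av_gettime();              /* wall clock in microseconds */

        snd_pcm_delay(h, &delay);                /* frames still buffered by ALSA */
        return now - av_rescale(delay + frames_read, 1000000, sample_rate);
    }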
diff --git a/libavdevice/alsa-audio-enc.c b/libavdevice/alsa-audio-enc.c
index bb4575fa02..0f4e4a2c7a 100644
--- a/libavdevice/alsa-audio-enc.c
+++ b/libavdevice/alsa-audio-enc.c
@@ -3,20 +3,20 @@
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -38,8 +38,10 @@
*/
#include <alsa/asoundlib.h>
-#include "libavformat/avformat.h"
+#include "libavutil/time.h"
+#include "libavformat/internal.h"
+#include "avdevice.h"
#include "alsa-audio.h"
static av_cold int audio_write_header(AVFormatContext *s1)
@@ -61,6 +63,7 @@ static av_cold int audio_write_header(AVFormatContext *s1)
st->codec->sample_rate, sample_rate);
goto fail;
}
+ avpriv_set_pts_info(st, 64, 1, sample_rate);
return res;
@@ -101,6 +104,17 @@ static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
return 0;
}
+static void
+audio_get_output_timestamp(AVFormatContext *s1, int stream,
+ int64_t *dts, int64_t *wall)
+{
+ AlsaData *s = s1->priv_data;
+ snd_pcm_sframes_t delay = 0;
+ *wall = av_gettime();
+ snd_pcm_delay(s->h, &delay);
+ *dts = s1->streams[0]->cur_dts - delay;
+}
+
AVOutputFormat ff_alsa_muxer = {
.name = "alsa",
.long_name = NULL_IF_CONFIG_SMALL("ALSA audio output"),
@@ -110,5 +124,6 @@ AVOutputFormat ff_alsa_muxer = {
.write_header = audio_write_header,
.write_packet = audio_write_packet,
.write_trailer = ff_alsa_close,
+ .get_output_timestamp = audio_get_output_timestamp,
.flags = AVFMT_NOFILE,
};
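For reference (not part of the patch): the .get_output_timestamp callback added above is what libavformat exposes through av_get_output_timestamp(). A minimal caller-side sketch, assuming an already opened ALSA muxer context; report_output_timestamp() is a hypothetical name:

    #include <inttypes.h>
    #include "libavformat/avformat.h"

    /* Hypothetical caller: ask the device which timestamp is being played
     * right now (cur_dts minus the ALSA delay, per the callback above) and
     * the wall-clock time that estimate corresponds to. */
    static void report_output_timestamp(AVFormatContext *oc)
    {
        int64_t dts, wall;

        if (av_get_output_timestamp(oc, 0, &dts, &wall) >= 0)
            av_log(oc, AV_LOG_INFO,
                   "playing dts %"PRId64" (stream time base) at wall clock %"PRId64" us\n",
                   dts, wall);
    }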
diff --git a/libavdevice/alsa-audio.h b/libavdevice/alsa-audio.h
index 26eaee6acf..44b7c72fc0 100644
--- a/libavdevice/alsa-audio.h
+++ b/libavdevice/alsa-audio.h
@@ -3,20 +3,20 @@
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -32,23 +32,28 @@
#include <alsa/asoundlib.h>
#include "config.h"
-#include "libavformat/avformat.h"
#include "libavutil/log.h"
+#include "timefilter.h"
+#include "avdevice.h"
/* XXX: we make the assumption that the soundcard accepts this format */
/* XXX: find better solution with "preinit" method, needed also in
other formats */
#define DEFAULT_CODEC_ID AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE)
-#define ALSA_BUFFER_SIZE_MAX 32768
+typedef void (*ff_reorder_func)(const void *, void *, int);
+
+#define ALSA_BUFFER_SIZE_MAX 65536
typedef struct AlsaData {
AVClass *class;
snd_pcm_t *h;
- int frame_size; ///< preferred size for reads and writes
- int period_size; ///< bytes per sample * channels
+ int frame_size; ///< bytes per sample * channels
+ int period_size; ///< preferred size for reads and writes, in frames
int sample_rate; ///< sample rate set by user
int channels; ///< number of channels set by user
+ int last_period;
+ TimeFilter *timefilter;
void (*reorder_func)(const void *, void *, int);
void *reorder_buf;
int reorder_buf_size; ///< in frames
diff --git a/libavdevice/avdevice.c b/libavdevice/avdevice.c
index 5a5c762c8b..b9b18f2576 100644
--- a/libavdevice/avdevice.c
+++ b/libavdevice/avdevice.c
@@ -1,36 +1,38 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/avassert.h"
#include "avdevice.h"
#include "config.h"
unsigned avdevice_version(void)
{
+ av_assert0(LIBAVDEVICE_VERSION_MICRO >= 100);
return LIBAVDEVICE_VERSION_INT;
}
const char * avdevice_configuration(void)
{
- return LIBAV_CONFIGURATION;
+ return FFMPEG_CONFIGURATION;
}
const char * avdevice_license(void)
{
#define LICENSE_PREFIX "libavdevice license: "
- return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
+ return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
}
diff --git a/libavdevice/avdevice.h b/libavdevice/avdevice.h
index 39166a570a..93a044f270 100644
--- a/libavdevice/avdevice.h
+++ b/libavdevice/avdevice.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -43,6 +43,8 @@
* @}
*/
+#include "libavformat/avformat.h"
+
/**
* Return the LIBAVDEVICE_VERSION_INT constant.
*/
diff --git a/libavdevice/bktr.c b/libavdevice/bktr.c
index 06f4d860c2..4e25aa6ad8 100644
--- a/libavdevice/bktr.c
+++ b/libavdevice/bktr.c
@@ -7,24 +7,23 @@
* and
* simple_grab.c Copyright (c) 1999 Roger Hardiman
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/internal.h"
#include "libavutil/log.h"
@@ -50,6 +49,7 @@
#include <sys/time.h>
#include <signal.h>
#include <stdint.h>
+#include "avdevice.h"
typedef struct {
AVClass *class;
@@ -58,7 +58,6 @@ typedef struct {
int width, height;
uint64_t per_frame;
int standard;
- char *video_size; /**< String describing video size, set by a private option. */
char *framerate; /**< Set by a private option. */
} VideoData;
@@ -104,7 +103,7 @@ static av_cold int bktr_init(const char *video_device, int width, int height,
long ioctl_frequency;
char *arg;
int c;
- struct sigaction act = { 0 }, old;
+ struct sigaction act = { {0} }, old;
if (idev < 0 || idev > 4)
{
@@ -248,15 +247,9 @@ static int grab_read_header(AVFormatContext *s1)
{
VideoData *s = s1->priv_data;
AVStream *st;
- int width, height;
AVRational framerate;
int ret = 0;
- if ((ret = av_parse_video_size(&width, &height, s->video_size)) < 0) {
- av_log(s1, AV_LOG_ERROR, "Could not parse video size '%s'.\n", s->video_size);
- goto out;
- }
-
if (!s->framerate)
switch (s->standard) {
case PAL: s->framerate = av_strdup("pal"); break;
@@ -279,20 +272,18 @@ static int grab_read_header(AVFormatContext *s1)
}
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in use */
- s->width = width;
- s->height = height;
s->per_frame = ((uint64_t)1000000 * framerate.den) / framerate.num;
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->pix_fmt = AV_PIX_FMT_YUV420P;
st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
- st->codec->width = width;
- st->codec->height = height;
+ st->codec->width = s->width;
+ st->codec->height = s->height;
st->codec->time_base.den = framerate.num;
st->codec->time_base.num = framerate.den;
- if (bktr_init(s1->filename, width, height, s->standard,
+ if (bktr_init(s1->filename, s->width, s->height, s->standard,
&s->video_fd, &s->tuner_fd, -1, 0.0) < 0) {
ret = AVERROR(EIO);
goto out;
@@ -333,7 +324,7 @@ static const AVOption options[] = {
{ "PALN", "", 0, AV_OPT_TYPE_CONST, {.i64 = PALN}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "PALM", "", 0, AV_OPT_TYPE_CONST, {.i64 = PALM}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "NTSCJ", "", 0, AV_OPT_TYPE_CONST, {.i64 = NTSCJ}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
- { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), AV_OPT_TYPE_STRING, {.str = "vga"}, 0, 0, DEC },
+ { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = "vga"}, 0, 0, DEC },
{ "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
{ NULL },
};
diff --git a/libavdevice/caca.c b/libavdevice/caca.c
new file mode 100644
index 0000000000..0a7470120f
--- /dev/null
+++ b/libavdevice/caca.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2012 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <caca.h>
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avdevice.h"
+
+typedef struct CACAContext {
+ AVClass *class;
+ AVFormatContext *ctx;
+ char *window_title;
+ int window_width, window_height;
+
+ caca_canvas_t *canvas;
+ caca_display_t *display;
+ caca_dither_t *dither;
+
+ char *algorithm, *antialias;
+ char *charset, *color;
+ char *driver;
+
+ char *list_dither;
+ int list_drivers;
+} CACAContext;
+
+static int caca_write_trailer(AVFormatContext *s)
+{
+ CACAContext *c = s->priv_data;
+
+ av_freep(&c->window_title);
+
+ if (c->display) {
+ caca_free_display(c->display);
+ c->display = NULL;
+ }
+ if (c->dither) {
+ caca_free_dither(c->dither);
+ c->dither = NULL;
+ }
+ if (c->canvas) {
+ caca_free_canvas(c->canvas);
+ c->canvas = NULL;
+ }
+ return 0;
+}
+
+static void list_drivers(CACAContext *c)
+{
+ const char *const *drivers = caca_get_display_driver_list();
+ int i;
+
+ av_log(c->ctx, AV_LOG_INFO, "Available drivers:\n");
+ for (i = 0; drivers[i]; i += 2)
+ av_log(c->ctx, AV_LOG_INFO, "%s : %s\n", drivers[i], drivers[i + 1]);
+}
+
+#define DEFINE_LIST_DITHER(thing, thing_str) \
+static void list_dither_## thing(CACAContext *c) \
+{ \
+ const char *const *thing = caca_get_dither_## thing ##_list(c->dither); \
+ int i; \
+ \
+ av_log(c->ctx, AV_LOG_INFO, "Available %s:\n", thing_str); \
+ for (i = 0; thing[i]; i += 2) \
+ av_log(c->ctx, AV_LOG_INFO, "%s : %s\n", thing[i], thing[i + 1]); \
+}
+
+DEFINE_LIST_DITHER(color, "colors");
+DEFINE_LIST_DITHER(charset, "charsets");
+DEFINE_LIST_DITHER(algorithm, "algorithms");
+DEFINE_LIST_DITHER(antialias, "antialias");
+
+static int caca_write_header(AVFormatContext *s)
+{
+ CACAContext *c = s->priv_data;
+ AVStream *st = s->streams[0];
+ AVCodecContext *encctx = st->codec;
+ int ret, bpp;
+
+ c->ctx = s;
+ if (c->list_drivers) {
+ list_drivers(c);
+ return AVERROR_EXIT;
+ }
+ if (c->list_dither) {
+ if (!strcmp(c->list_dither, "colors")) {
+ list_dither_color(c);
+ } else if (!strcmp(c->list_dither, "charsets")) {
+ list_dither_charset(c);
+ } else if (!strcmp(c->list_dither, "algorithms")) {
+ list_dither_algorithm(c);
+ } else if (!strcmp(c->list_dither, "antialiases")) {
+ list_dither_antialias(c);
+ } else {
+ av_log(s, AV_LOG_ERROR,
+ "Invalid argument '%s', for 'list_dither' option\n"
+ "Argument must be one of 'algorithms, 'antialiases', 'charsets', 'colors'\n",
+ c->list_dither);
+ return AVERROR(EINVAL);
+ }
+ return AVERROR_EXIT;
+ }
+
+ if ( s->nb_streams > 1
+ || encctx->codec_type != AVMEDIA_TYPE_VIDEO
+ || encctx->codec_id != AV_CODEC_ID_RAWVIDEO) {
+ av_log(s, AV_LOG_ERROR, "Only supports one rawvideo stream\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (encctx->pix_fmt != AV_PIX_FMT_RGB24) {
+ av_log(s, AV_LOG_ERROR,
+ "Unsupported pixel format '%s', choose rgb24\n",
+ av_get_pix_fmt_name(encctx->pix_fmt));
+ return AVERROR(EINVAL);
+ }
+
+ c->canvas = caca_create_canvas(c->window_width, c->window_height);
+ if (!c->canvas) {
+ av_log(s, AV_LOG_ERROR, "Failed to create canvas\n");
+ ret = AVERROR(errno);
+ goto fail;
+ }
+
+ bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(encctx->pix_fmt));
+ c->dither = caca_create_dither(bpp, encctx->width, encctx->height,
+ bpp / 8 * encctx->width,
+ 0x0000ff, 0x00ff00, 0xff0000, 0);
+ if (!c->dither) {
+ av_log(s, AV_LOG_ERROR, "Failed to create dither\n");
+ ret = AVERROR(errno);
+ goto fail;
+ }
+
+#define CHECK_DITHER_OPT(opt) \
+ if (caca_set_dither_##opt(c->dither, c->opt) < 0) { \
+ ret = AVERROR(errno); \
+ av_log(s, AV_LOG_ERROR, "Failed to set value '%s' for option '%s'\n", \
+ c->opt, #opt); \
+ goto fail; \
+ }
+ CHECK_DITHER_OPT(algorithm);
+ CHECK_DITHER_OPT(antialias);
+ CHECK_DITHER_OPT(charset);
+ CHECK_DITHER_OPT(color);
+
+ c->display = caca_create_display_with_driver(c->canvas, c->driver);
+ if (!c->display) {
+ av_log(s, AV_LOG_ERROR, "Failed to create display\n");
+ list_drivers(c);
+ ret = AVERROR(errno);
+ goto fail;
+ }
+
+ if (!c->window_width || !c->window_height) {
+ c->window_width = caca_get_canvas_width(c->canvas);
+ c->window_height = caca_get_canvas_height(c->canvas);
+ }
+
+ if (!c->window_title)
+ c->window_title = av_strdup(s->filename);
+ caca_set_display_title(c->display, c->window_title);
+ caca_set_display_time(c->display, av_rescale_q(1, st->codec->time_base, AV_TIME_BASE_Q));
+
+ return 0;
+
+fail:
+ caca_write_trailer(s);
+ return ret;
+}
+
+static int caca_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ CACAContext *c = s->priv_data;
+
+ caca_dither_bitmap(c->canvas, 0, 0, c->window_width, c->window_height, c->dither, pkt->data);
+ caca_refresh_display(c->display);
+
+ return 0;
+}
+
+#define OFFSET(x) offsetof(CACAContext,x)
+#define ENC AV_OPT_FLAG_ENCODING_PARAM
+
+static const AVOption options[] = {
+ { "window_size", "set window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL }, 0, 0, ENC},
+ { "window_title", "set window title", OFFSET(window_title), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, ENC },
+ { "driver", "set display driver", OFFSET(driver), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, ENC },
+ { "algorithm", "set dithering algorithm", OFFSET(algorithm), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
+ { "antialias", "set antialias method", OFFSET(antialias), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
+ { "charset", "set charset used to render output", OFFSET(charset), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
+ { "color", "set color used to render output", OFFSET(color), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
+ { "list_drivers", "list available drivers", OFFSET(list_drivers), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, ENC, "list_drivers" },
+ { "true", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, 0, 0, ENC, "list_drivers" },
+ { "false", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, 0, 0, ENC, "list_drivers" },
+ { "list_dither", "list available dither options", OFFSET(list_dither), AV_OPT_TYPE_STRING, {.dbl=0}, 0, 1, ENC, "list_dither" },
+ { "algorithms", NULL, 0, AV_OPT_TYPE_CONST, {.str = "algorithms"}, 0, 0, ENC, "list_dither" },
+ { "antialiases", NULL, 0, AV_OPT_TYPE_CONST, {.str = "antialiases"},0, 0, ENC, "list_dither" },
+ { "charsets", NULL, 0, AV_OPT_TYPE_CONST, {.str = "charsets"}, 0, 0, ENC, "list_dither" },
+ { "colors", NULL, 0, AV_OPT_TYPE_CONST, {.str = "colors"}, 0, 0, ENC, "list_dither" },
+ { NULL },
+};
+
+static const AVClass caca_class = {
+ .class_name = "caca_outdev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVOutputFormat ff_caca_muxer = {
+ .name = "caca",
+ .long_name = NULL_IF_CONFIG_SMALL("caca (color ASCII art) output device"),
+ .priv_data_size = sizeof(CACAContext),
+ .audio_codec = AV_CODEC_ID_NONE,
+ .video_codec = AV_CODEC_ID_RAWVIDEO,
+ .write_header = caca_write_header,
+ .write_packet = caca_write_packet,
+ .write_trailer = caca_write_trailer,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &caca_class,
+};
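For reference (not part of the patch): a minimal sketch of opening the new caca output device through the libavformat API rather than the ffmpeg CLI. The option values ("ncurses", "80x25") and the stream parameters are placeholders a real program would choose itself:

    #include "libavdevice/avdevice.h"
    #include "libavformat/avformat.h"
    #include "libavutil/dict.h"

    /* Hypothetical sketch: set up the single rgb24 rawvideo stream that
     * caca_write_header() accepts and pass two of the private options
     * declared above through the options dictionary. */
    static int open_caca_output(AVFormatContext **poc)
    {
        AVDictionary *opts = NULL;
        AVStream *st;
        int ret;

        avdevice_register_all();

        ret = avformat_alloc_output_context2(poc, NULL, "caca", "my window");
        if (ret < 0)
            return ret;

        st = avformat_new_stream(*poc, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = AV_CODEC_ID_RAWVIDEO;
        st->codec->pix_fmt    = AV_PIX_FMT_RGB24;
        st->codec->width      = 320;
        st->codec->height     = 240;
        st->codec->time_base  = (AVRational){ 1, 25 };

        av_dict_set(&opts, "driver",      "ncurses", 0);  /* assumed driver name */
        av_dict_set(&opts, "window_size", "80x25",   0);

        ret = avformat_write_header(*poc, &opts);
        av_dict_free(&opts);
        return ret;
    }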
diff --git a/libavdevice/dshow.c b/libavdevice/dshow.c
new file mode 100644
index 0000000000..37efabe52a
--- /dev/null
+++ b/libavdevice/dshow.c
@@ -0,0 +1,1095 @@
+/*
+ * Directshow capture interface
+ * Copyright (c) 2010 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "libavformat/internal.h"
+#include "libavformat/riff.h"
+#include "avdevice.h"
+#include "dshow_capture.h"
+#include "libavcodec/raw.h"
+
+struct dshow_ctx {
+ const AVClass *class;
+
+ IGraphBuilder *graph;
+
+ char *device_name[2];
+ int video_device_number;
+ int audio_device_number;
+
+ int list_options;
+ int list_devices;
+ int audio_buffer_size;
+
+ IBaseFilter *device_filter[2];
+ IPin *device_pin[2];
+ libAVFilter *capture_filter[2];
+ libAVPin *capture_pin[2];
+
+ HANDLE mutex;
+ HANDLE event[2]; /* event[0] is set by DirectShow
+ * event[1] is set by callback() */
+ AVPacketList *pktl;
+
+ int eof;
+
+ int64_t curbufsize;
+ unsigned int video_frame_num;
+
+ IMediaControl *control;
+ IMediaEvent *media_event;
+
+ enum AVPixelFormat pixel_format;
+ enum AVCodecID video_codec_id;
+ char *framerate;
+
+ int requested_width;
+ int requested_height;
+ AVRational requested_framerate;
+
+ int sample_rate;
+ int sample_size;
+ int channels;
+};
+
+static enum AVPixelFormat dshow_pixfmt(DWORD biCompression, WORD biBitCount)
+{
+ switch(biCompression) {
+ case BI_BITFIELDS:
+ case BI_RGB:
+ switch(biBitCount) { /* 1-8 are untested */
+ case 1:
+ return AV_PIX_FMT_MONOWHITE;
+ case 4:
+ return AV_PIX_FMT_RGB4;
+ case 8:
+ return AV_PIX_FMT_RGB8;
+ case 16:
+ return AV_PIX_FMT_RGB555;
+ case 24:
+ return AV_PIX_FMT_BGR24;
+ case 32:
+ return AV_PIX_FMT_RGB32;
+ }
+ }
+ return avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, biCompression); // all others
+}
+
+static int
+dshow_read_close(AVFormatContext *s)
+{
+ struct dshow_ctx *ctx = s->priv_data;
+ AVPacketList *pktl;
+
+ if (ctx->control) {
+ IMediaControl_Stop(ctx->control);
+ IMediaControl_Release(ctx->control);
+ }
+
+ if (ctx->media_event)
+ IMediaEvent_Release(ctx->media_event);
+
+ if (ctx->graph) {
+ IEnumFilters *fenum;
+ int r;
+ r = IGraphBuilder_EnumFilters(ctx->graph, &fenum);
+ if (r == S_OK) {
+ IBaseFilter *f;
+ IEnumFilters_Reset(fenum);
+ while (IEnumFilters_Next(fenum, 1, &f, NULL) == S_OK) {
+ if (IGraphBuilder_RemoveFilter(ctx->graph, f) == S_OK)
+ IEnumFilters_Reset(fenum); /* When a filter is removed,
+ * the list must be reset. */
+ IBaseFilter_Release(f);
+ }
+ IEnumFilters_Release(fenum);
+ }
+ IGraphBuilder_Release(ctx->graph);
+ }
+
+ if (ctx->capture_pin[VideoDevice])
+ libAVPin_Release(ctx->capture_pin[VideoDevice]);
+ if (ctx->capture_pin[AudioDevice])
+ libAVPin_Release(ctx->capture_pin[AudioDevice]);
+ if (ctx->capture_filter[VideoDevice])
+ libAVFilter_Release(ctx->capture_filter[VideoDevice]);
+ if (ctx->capture_filter[AudioDevice])
+ libAVFilter_Release(ctx->capture_filter[AudioDevice]);
+
+ if (ctx->device_pin[VideoDevice])
+ IPin_Release(ctx->device_pin[VideoDevice]);
+ if (ctx->device_pin[AudioDevice])
+ IPin_Release(ctx->device_pin[AudioDevice]);
+ if (ctx->device_filter[VideoDevice])
+ IBaseFilter_Release(ctx->device_filter[VideoDevice]);
+ if (ctx->device_filter[AudioDevice])
+ IBaseFilter_Release(ctx->device_filter[AudioDevice]);
+
+ if (ctx->device_name[0])
+ av_free(ctx->device_name[0]);
+ if (ctx->device_name[1])
+ av_free(ctx->device_name[1]);
+
+ if(ctx->mutex)
+ CloseHandle(ctx->mutex);
+ if(ctx->event[0])
+ CloseHandle(ctx->event[0]);
+ if(ctx->event[1])
+ CloseHandle(ctx->event[1]);
+
+ pktl = ctx->pktl;
+ while (pktl) {
+ AVPacketList *next = pktl->next;
+ av_destruct_packet(&pktl->pkt);
+ av_free(pktl);
+ pktl = next;
+ }
+
+ CoUninitialize();
+
+ return 0;
+}
+
+static char *dup_wchar_to_utf8(wchar_t *w)
+{
+ char *s = NULL;
+ int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
+ s = av_malloc(l);
+ if (s)
+ WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
+ return s;
+}
+
+static int shall_we_drop(AVFormatContext *s)
+{
+ struct dshow_ctx *ctx = s->priv_data;
+ static const uint8_t dropscore[] = {62, 75, 87, 100};
+ const int ndropscores = FF_ARRAY_ELEMS(dropscore);
+ unsigned int buffer_fullness = (ctx->curbufsize*100)/s->max_picture_buffer;
+
+ if(dropscore[++ctx->video_frame_num%ndropscores] <= buffer_fullness) {
+ av_log(s, AV_LOG_ERROR,
+ "real-time buffer %d%% full! frame dropped!\n", buffer_fullness);
+ return 1;
+ }
+
+ return 0;
+}
+
+static void
+callback(void *priv_data, int index, uint8_t *buf, int buf_size, int64_t time)
+{
+ AVFormatContext *s = priv_data;
+ struct dshow_ctx *ctx = s->priv_data;
+ AVPacketList **ppktl, *pktl_next;
+
+// dump_videohdr(s, vdhdr);
+
+ WaitForSingleObject(ctx->mutex, INFINITE);
+
+ if(shall_we_drop(s))
+ goto fail;
+
+ pktl_next = av_mallocz(sizeof(AVPacketList));
+ if(!pktl_next)
+ goto fail;
+
+ if(av_new_packet(&pktl_next->pkt, buf_size) < 0) {
+ av_free(pktl_next);
+ goto fail;
+ }
+
+ pktl_next->pkt.stream_index = index;
+ pktl_next->pkt.pts = time;
+ memcpy(pktl_next->pkt.data, buf, buf_size);
+
+ for(ppktl = &ctx->pktl ; *ppktl ; ppktl = &(*ppktl)->next);
+ *ppktl = pktl_next;
+
+ ctx->curbufsize += buf_size;
+
+ SetEvent(ctx->event[1]);
+ ReleaseMutex(ctx->mutex);
+
+ return;
+fail:
+ ReleaseMutex(ctx->mutex);
+ return;
+}
+
+/**
+ * Cycle through available devices using the device enumerator devenum,
+ * retrieve the device with type specified by devtype and return the
+ * pointer to the object found in *pfilter.
+ * If pfilter is NULL, list all device names.
+ */
+static int
+dshow_cycle_devices(AVFormatContext *avctx, ICreateDevEnum *devenum,
+ enum dshowDeviceType devtype, IBaseFilter **pfilter)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ IBaseFilter *device_filter = NULL;
+ IEnumMoniker *classenum = NULL;
+ IMoniker *m = NULL;
+ const char *device_name = ctx->device_name[devtype];
+ int skip = (devtype == VideoDevice) ? ctx->video_device_number
+ : ctx->audio_device_number;
+ int r;
+
+ const GUID *device_guid[2] = { &CLSID_VideoInputDeviceCategory,
+ &CLSID_AudioInputDeviceCategory };
+ const char *devtypename = (devtype == VideoDevice) ? "video" : "audio";
+
+ r = ICreateDevEnum_CreateClassEnumerator(devenum, device_guid[devtype],
+ (IEnumMoniker **) &classenum, 0);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not enumerate %s devices.\n",
+ devtypename);
+ return AVERROR(EIO);
+ }
+
+ while (!device_filter && IEnumMoniker_Next(classenum, 1, &m, NULL) == S_OK) {
+ IPropertyBag *bag = NULL;
+ char *buf = NULL;
+ VARIANT var;
+
+ r = IMoniker_BindToStorage(m, 0, 0, &IID_IPropertyBag, (void *) &bag);
+ if (r != S_OK)
+ goto fail1;
+
+ var.vt = VT_BSTR;
+ r = IPropertyBag_Read(bag, L"FriendlyName", &var, NULL);
+ if (r != S_OK)
+ goto fail1;
+
+ buf = dup_wchar_to_utf8(var.bstrVal);
+
+ if (pfilter) {
+ if (strcmp(device_name, buf))
+ goto fail1;
+
+ if (!skip--)
+ IMoniker_BindToObject(m, 0, 0, &IID_IBaseFilter, (void *) &device_filter);
+ } else {
+ av_log(avctx, AV_LOG_INFO, " \"%s\"\n", buf);
+ }
+
+fail1:
+ if (buf)
+ av_free(buf);
+ if (bag)
+ IPropertyBag_Release(bag);
+ IMoniker_Release(m);
+ }
+
+ IEnumMoniker_Release(classenum);
+
+ if (pfilter) {
+ if (!device_filter) {
+ av_log(avctx, AV_LOG_ERROR, "Could not find %s device.\n",
+ devtypename);
+ return AVERROR(EIO);
+ }
+ *pfilter = device_filter;
+ }
+
+ return 0;
+}
+
+/**
+ * Cycle through available formats using the specified pin,
+ * try to set parameters specified through AVOptions and if successful
+ * return 1 in *pformat_set.
+ * If pformat_set is NULL, list all pin capabilities.
+ */
+static void
+dshow_cycle_formats(AVFormatContext *avctx, enum dshowDeviceType devtype,
+ IPin *pin, int *pformat_set)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ IAMStreamConfig *config = NULL;
+ AM_MEDIA_TYPE *type = NULL;
+ int format_set = 0;
+ void *caps = NULL;
+ int i, n, size;
+
+ if (IPin_QueryInterface(pin, &IID_IAMStreamConfig, (void **) &config) != S_OK)
+ return;
+ if (IAMStreamConfig_GetNumberOfCapabilities(config, &n, &size) != S_OK)
+ goto end;
+
+ caps = av_malloc(size);
+ if (!caps)
+ goto end;
+
+ for (i = 0; i < n && !format_set; i++) {
+ IAMStreamConfig_GetStreamCaps(config, i, &type, (void *) caps);
+
+#if DSHOWDEBUG
+ ff_print_AM_MEDIA_TYPE(type);
+#endif
+
+ if (devtype == VideoDevice) {
+ VIDEO_STREAM_CONFIG_CAPS *vcaps = caps;
+ BITMAPINFOHEADER *bih;
+ int64_t *fr;
+#if DSHOWDEBUG
+ ff_print_VIDEO_STREAM_CONFIG_CAPS(vcaps);
+#endif
+ if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo)) {
+ VIDEOINFOHEADER *v = (void *) type->pbFormat;
+ fr = &v->AvgTimePerFrame;
+ bih = &v->bmiHeader;
+ } else if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo2)) {
+ VIDEOINFOHEADER2 *v = (void *) type->pbFormat;
+ fr = &v->AvgTimePerFrame;
+ bih = &v->bmiHeader;
+ } else {
+ goto next;
+ }
+ if (!pformat_set) {
+ enum AVPixelFormat pix_fmt = dshow_pixfmt(bih->biCompression, bih->biBitCount);
+ if (pix_fmt == AV_PIX_FMT_NONE) {
+ enum AVCodecID codec_id = ff_codec_get_id(avformat_get_riff_video_tags(), bih->biCompression);
+ AVCodec *codec = avcodec_find_decoder(codec_id);
+ if (codec_id == AV_CODEC_ID_NONE || !codec) {
+ av_log(avctx, AV_LOG_INFO, " unknown compression type 0x%X", (int) bih->biCompression);
+ } else {
+ av_log(avctx, AV_LOG_INFO, " vcodec=%s", codec->name);
+ }
+ } else {
+ av_log(avctx, AV_LOG_INFO, " pixel_format=%s", av_get_pix_fmt_name(pix_fmt));
+ }
+ av_log(avctx, AV_LOG_INFO, " min s=%ldx%ld fps=%g max s=%ldx%ld fps=%g\n",
+ vcaps->MinOutputSize.cx, vcaps->MinOutputSize.cy,
+ 1e7 / vcaps->MaxFrameInterval,
+ vcaps->MaxOutputSize.cx, vcaps->MaxOutputSize.cy,
+ 1e7 / vcaps->MinFrameInterval);
+ continue;
+ }
+ if (ctx->video_codec_id != AV_CODEC_ID_RAWVIDEO) {
+ if (ctx->video_codec_id != ff_codec_get_id(avformat_get_riff_video_tags(), bih->biCompression))
+ goto next;
+ }
+ if (ctx->pixel_format != AV_PIX_FMT_NONE &&
+ ctx->pixel_format != dshow_pixfmt(bih->biCompression, bih->biBitCount)) {
+ goto next;
+ }
+ if (ctx->framerate) {
+ int64_t framerate = ((int64_t) ctx->requested_framerate.den*10000000)
+ / ctx->requested_framerate.num;
+ if (framerate > vcaps->MaxFrameInterval ||
+ framerate < vcaps->MinFrameInterval)
+ goto next;
+ *fr = framerate;
+ }
+ if (ctx->requested_width && ctx->requested_height) {
+ if (ctx->requested_width > vcaps->MaxOutputSize.cx ||
+ ctx->requested_width < vcaps->MinOutputSize.cx ||
+ ctx->requested_height > vcaps->MaxOutputSize.cy ||
+ ctx->requested_height < vcaps->MinOutputSize.cy)
+ goto next;
+ bih->biWidth = ctx->requested_width;
+ bih->biHeight = ctx->requested_height;
+ }
+ } else {
+ AUDIO_STREAM_CONFIG_CAPS *acaps = caps;
+ WAVEFORMATEX *fx;
+#if DSHOWDEBUG
+ ff_print_AUDIO_STREAM_CONFIG_CAPS(acaps);
+#endif
+ if (IsEqualGUID(&type->formattype, &FORMAT_WaveFormatEx)) {
+ fx = (void *) type->pbFormat;
+ } else {
+ goto next;
+ }
+ if (!pformat_set) {
+ av_log(avctx, AV_LOG_INFO, " min ch=%lu bits=%lu rate=%6lu max ch=%lu bits=%lu rate=%6lu\n",
+ acaps->MinimumChannels, acaps->MinimumBitsPerSample, acaps->MinimumSampleFrequency,
+ acaps->MaximumChannels, acaps->MaximumBitsPerSample, acaps->MaximumSampleFrequency);
+ continue;
+ }
+ if (ctx->sample_rate) {
+ if (ctx->sample_rate > acaps->MaximumSampleFrequency ||
+ ctx->sample_rate < acaps->MinimumSampleFrequency)
+ goto next;
+ fx->nSamplesPerSec = ctx->sample_rate;
+ }
+ if (ctx->sample_size) {
+ if (ctx->sample_size > acaps->MaximumBitsPerSample ||
+ ctx->sample_size < acaps->MinimumBitsPerSample)
+ goto next;
+ fx->wBitsPerSample = ctx->sample_size;
+ }
+ if (ctx->channels) {
+ if (ctx->channels > acaps->MaximumChannels ||
+ ctx->channels < acaps->MinimumChannels)
+ goto next;
+ fx->nChannels = ctx->channels;
+ }
+ }
+ if (IAMStreamConfig_SetFormat(config, type) != S_OK)
+ goto next;
+ format_set = 1;
+next:
+ if (type->pbFormat)
+ CoTaskMemFree(type->pbFormat);
+ CoTaskMemFree(type);
+ }
+end:
+ IAMStreamConfig_Release(config);
+ if (caps)
+ av_free(caps);
+ if (pformat_set)
+ *pformat_set = format_set;
+}
+
+/**
+ * Set audio device buffer size in milliseconds (which can directly impact
+ * latency, depending on the device).
+ */
+static int
+dshow_set_audio_buffer_size(AVFormatContext *avctx, IPin *pin)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ IAMBufferNegotiation *buffer_negotiation = NULL;
+ ALLOCATOR_PROPERTIES props = { -1, -1, -1, -1 };
+ IAMStreamConfig *config = NULL;
+ AM_MEDIA_TYPE *type = NULL;
+ int ret = AVERROR(EIO);
+
+ if (IPin_QueryInterface(pin, &IID_IAMStreamConfig, (void **) &config) != S_OK)
+ goto end;
+ if (IAMStreamConfig_GetFormat(config, &type) != S_OK)
+ goto end;
+ if (!IsEqualGUID(&type->formattype, &FORMAT_WaveFormatEx))
+ goto end;
+
+ props.cbBuffer = (((WAVEFORMATEX *) type->pbFormat)->nAvgBytesPerSec)
+ * ctx->audio_buffer_size / 1000;
+
+ if (IPin_QueryInterface(pin, &IID_IAMBufferNegotiation, (void **) &buffer_negotiation) != S_OK)
+ goto end;
+ if (IAMBufferNegotiation_SuggestAllocatorProperties(buffer_negotiation, &props) != S_OK)
+ goto end;
+
+ ret = 0;
+
+end:
+ if (buffer_negotiation)
+ IAMBufferNegotiation_Release(buffer_negotiation);
+ if (type) {
+ if (type->pbFormat)
+ CoTaskMemFree(type->pbFormat);
+ CoTaskMemFree(type);
+ }
+ if (config)
+ IAMStreamConfig_Release(config);
+
+ return ret;
+}
+
+/**
+ * Cycle through available pins using the device_filter device, of type
+ * devtype, retrieve the first output pin and return the pointer to the
+ * object found in *ppin.
+ * If ppin is NULL, cycle through all pins listing audio/video capabilities.
+ */
+static int
+dshow_cycle_pins(AVFormatContext *avctx, enum dshowDeviceType devtype,
+ IBaseFilter *device_filter, IPin **ppin)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ IEnumPins *pins = 0;
+ IPin *device_pin = NULL;
+ IPin *pin;
+ int r;
+
+ const GUID *mediatype[2] = { &MEDIATYPE_Video, &MEDIATYPE_Audio };
+ const char *devtypename = (devtype == VideoDevice) ? "video" : "audio";
+
+ int set_format = (devtype == VideoDevice && (ctx->framerate ||
+ (ctx->requested_width && ctx->requested_height) ||
+ ctx->pixel_format != AV_PIX_FMT_NONE ||
+ ctx->video_codec_id != AV_CODEC_ID_RAWVIDEO))
+ || (devtype == AudioDevice && (ctx->channels || ctx->sample_rate));
+ int format_set = 0;
+
+ r = IBaseFilter_EnumPins(device_filter, &pins);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not enumerate pins.\n");
+ return AVERROR(EIO);
+ }
+
+ if (!ppin) {
+ av_log(avctx, AV_LOG_INFO, "DirectShow %s device options\n",
+ devtypename);
+ }
+ while (!device_pin && IEnumPins_Next(pins, 1, &pin, NULL) == S_OK) {
+ IKsPropertySet *p = NULL;
+ IEnumMediaTypes *types = NULL;
+ PIN_INFO info = {0};
+ AM_MEDIA_TYPE *type;
+ GUID category;
+ DWORD r2;
+
+ IPin_QueryPinInfo(pin, &info);
+ IBaseFilter_Release(info.pFilter);
+
+ if (info.dir != PINDIR_OUTPUT)
+ goto next;
+ if (IPin_QueryInterface(pin, &IID_IKsPropertySet, (void **) &p) != S_OK)
+ goto next;
+ if (IKsPropertySet_Get(p, &AMPROPSETID_Pin, AMPROPERTY_PIN_CATEGORY,
+ NULL, 0, &category, sizeof(GUID), &r2) != S_OK)
+ goto next;
+ if (!IsEqualGUID(&category, &PIN_CATEGORY_CAPTURE))
+ goto next;
+
+ if (!ppin) {
+ char *buf = dup_wchar_to_utf8(info.achName);
+ av_log(avctx, AV_LOG_INFO, " Pin \"%s\"\n", buf);
+ av_free(buf);
+ dshow_cycle_formats(avctx, devtype, pin, NULL);
+ goto next;
+ }
+ if (set_format) {
+ dshow_cycle_formats(avctx, devtype, pin, &format_set);
+ if (!format_set) {
+ goto next;
+ }
+ }
+ if (devtype == AudioDevice && ctx->audio_buffer_size) {
+ if (dshow_set_audio_buffer_size(avctx, pin) < 0)
+ goto next;
+ }
+
+ if (IPin_EnumMediaTypes(pin, &types) != S_OK)
+ goto next;
+
+ IEnumMediaTypes_Reset(types);
+ while (!device_pin && IEnumMediaTypes_Next(types, 1, &type, NULL) == S_OK) {
+ if (IsEqualGUID(&type->majortype, mediatype[devtype])) {
+ device_pin = pin;
+ goto next;
+ }
+ CoTaskMemFree(type);
+ }
+
+next:
+ if (types)
+ IEnumMediaTypes_Release(types);
+ if (p)
+ IKsPropertySet_Release(p);
+ if (device_pin != pin)
+ IPin_Release(pin);
+ }
+
+ IEnumPins_Release(pins);
+
+ if (ppin) {
+ if (set_format && !format_set) {
+ av_log(avctx, AV_LOG_ERROR, "Could not set %s options\n", devtypename);
+ return AVERROR(EIO);
+ }
+ if (!device_pin) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Could not find output pin from %s capture device.\n", devtypename);
+ return AVERROR(EIO);
+ }
+ *ppin = device_pin;
+ }
+
+ return 0;
+}
+
+/**
+ * List options for device with type devtype.
+ *
+ * @param devenum device enumerator used for accessing the device
+ */
+static int
+dshow_list_device_options(AVFormatContext *avctx, ICreateDevEnum *devenum,
+ enum dshowDeviceType devtype)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ IBaseFilter *device_filter = NULL;
+ int r;
+
+ if ((r = dshow_cycle_devices(avctx, devenum, devtype, &device_filter)) < 0)
+ return r;
+ ctx->device_filter[devtype] = device_filter;
+ if ((r = dshow_cycle_pins(avctx, devtype, device_filter, NULL)) < 0)
+ return r;
+
+ return 0;
+}
+
+static int
+dshow_open_device(AVFormatContext *avctx, ICreateDevEnum *devenum,
+ enum dshowDeviceType devtype)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ IBaseFilter *device_filter = NULL;
+ IGraphBuilder *graph = ctx->graph;
+ IPin *device_pin = NULL;
+ libAVPin *capture_pin = NULL;
+ libAVFilter *capture_filter = NULL;
+ int ret = AVERROR(EIO);
+ int r;
+
+ const wchar_t *filter_name[2] = { L"Audio capture filter", L"Video capture filter" };
+
+ if ((r = dshow_cycle_devices(avctx, devenum, devtype, &device_filter)) < 0) {
+ ret = r;
+ goto error;
+ }
+
+ ctx->device_filter [devtype] = device_filter;
+
+ r = IGraphBuilder_AddFilter(graph, device_filter, NULL);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not add device filter to graph.\n");
+ goto error;
+ }
+
+ if ((r = dshow_cycle_pins(avctx, devtype, device_filter, &device_pin)) < 0) {
+ ret = r;
+ goto error;
+ }
+ ctx->device_pin[devtype] = device_pin;
+
+ capture_filter = libAVFilter_Create(avctx, callback, devtype);
+ if (!capture_filter) {
+ av_log(avctx, AV_LOG_ERROR, "Could not create grabber filter.\n");
+ goto error;
+ }
+ ctx->capture_filter[devtype] = capture_filter;
+
+ r = IGraphBuilder_AddFilter(graph, (IBaseFilter *) capture_filter,
+ filter_name[devtype]);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not add capture filter to graph\n");
+ goto error;
+ }
+
+ libAVPin_AddRef(capture_filter->pin);
+ capture_pin = capture_filter->pin;
+ ctx->capture_pin[devtype] = capture_pin;
+
+ r = IGraphBuilder_ConnectDirect(graph, device_pin, (IPin *) capture_pin, NULL);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not connect pins\n");
+ goto error;
+ }
+
+ ret = 0;
+
+error:
+ return ret;
+}
+
+static enum AVCodecID waveform_codec_id(enum AVSampleFormat sample_fmt)
+{
+ switch (sample_fmt) {
+ case AV_SAMPLE_FMT_U8: return AV_CODEC_ID_PCM_U8;
+ case AV_SAMPLE_FMT_S16: return AV_CODEC_ID_PCM_S16LE;
+ case AV_SAMPLE_FMT_S32: return AV_CODEC_ID_PCM_S32LE;
+ default: return AV_CODEC_ID_NONE; /* Should never happen. */
+ }
+}
+
+static enum AVSampleFormat sample_fmt_bits_per_sample(int bits)
+{
+ switch (bits) {
+ case 8: return AV_SAMPLE_FMT_U8;
+ case 16: return AV_SAMPLE_FMT_S16;
+ case 32: return AV_SAMPLE_FMT_S32;
+ default: return AV_SAMPLE_FMT_NONE; /* Should never happen. */
+ }
+}
+
+static int
+dshow_add_device(AVFormatContext *avctx,
+ enum dshowDeviceType devtype)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ AM_MEDIA_TYPE type;
+ AVCodecContext *codec;
+ AVStream *st;
+ int ret = AVERROR(EIO);
+
+ st = avformat_new_stream(avctx, NULL);
+ if (!st) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
+ st->id = devtype;
+
+ ctx->capture_filter[devtype]->stream_index = st->index;
+
+ libAVPin_ConnectionMediaType(ctx->capture_pin[devtype], &type);
+
+ codec = st->codec;
+ if (devtype == VideoDevice) {
+ BITMAPINFOHEADER *bih = NULL;
+ AVRational time_base;
+
+ if (IsEqualGUID(&type.formattype, &FORMAT_VideoInfo)) {
+ VIDEOINFOHEADER *v = (void *) type.pbFormat;
+ time_base = (AVRational) { v->AvgTimePerFrame, 10000000 };
+ bih = &v->bmiHeader;
+ } else if (IsEqualGUID(&type.formattype, &FORMAT_VideoInfo2)) {
+ VIDEOINFOHEADER2 *v = (void *) type.pbFormat;
+ time_base = (AVRational) { v->AvgTimePerFrame, 10000000 };
+ bih = &v->bmiHeader;
+ }
+ if (!bih) {
+ av_log(avctx, AV_LOG_ERROR, "Could not get media type.\n");
+ goto error;
+ }
+
+ codec->time_base = time_base;
+ codec->codec_type = AVMEDIA_TYPE_VIDEO;
+ codec->width = bih->biWidth;
+ codec->height = bih->biHeight;
+ codec->pix_fmt = dshow_pixfmt(bih->biCompression, bih->biBitCount);
+ if (bih->biCompression == MKTAG('H', 'D', 'Y', 'C')) {
+ av_log(avctx, AV_LOG_DEBUG, "attempt to use full range for HDYC...\n");
+ codec->color_range = AVCOL_RANGE_MPEG; // just in case it needs this...
+ }
+ if (codec->pix_fmt == AV_PIX_FMT_NONE) {
+ codec->codec_id = ff_codec_get_id(avformat_get_riff_video_tags(), bih->biCompression);
+ if (codec->codec_id == AV_CODEC_ID_NONE) {
+ av_log(avctx, AV_LOG_ERROR, "Unknown compression type. "
+ "Please report type 0x%X.\n", (int) bih->biCompression);
+ return AVERROR_PATCHWELCOME;
+ }
+ codec->bits_per_coded_sample = bih->biBitCount;
+ } else {
+ codec->codec_id = AV_CODEC_ID_RAWVIDEO;
+ if (bih->biCompression == BI_RGB || bih->biCompression == BI_BITFIELDS) {
+ codec->bits_per_coded_sample = bih->biBitCount;
+ codec->extradata = av_malloc(9 + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (codec->extradata) {
+ codec->extradata_size = 9;
+ memcpy(codec->extradata, "BottomUp", 9);
+ }
+ }
+ }
+ } else {
+ WAVEFORMATEX *fx = NULL;
+
+ if (IsEqualGUID(&type.formattype, &FORMAT_WaveFormatEx)) {
+ fx = (void *) type.pbFormat;
+ }
+ if (!fx) {
+ av_log(avctx, AV_LOG_ERROR, "Could not get media type.\n");
+ goto error;
+ }
+
+ codec->codec_type = AVMEDIA_TYPE_AUDIO;
+ codec->sample_fmt = sample_fmt_bits_per_sample(fx->wBitsPerSample);
+ codec->codec_id = waveform_codec_id(codec->sample_fmt);
+ codec->sample_rate = fx->nSamplesPerSec;
+ codec->channels = fx->nChannels;
+ }
+
+ avpriv_set_pts_info(st, 64, 1, 10000000);
+
+ ret = 0;
+
+error:
+ return ret;
+}
+
+static int parse_device_name(AVFormatContext *avctx)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ char **device_name = ctx->device_name;
+ char *name = av_strdup(avctx->filename);
+ char *tmp = name;
+ int ret = 1;
+ char *type;
+
+ while ((type = strtok(tmp, "="))) {
+ char *token = strtok(NULL, ":");
+ tmp = NULL;
+
+ if (!strcmp(type, "video")) {
+ device_name[0] = token;
+ } else if (!strcmp(type, "audio")) {
+ device_name[1] = token;
+ } else {
+ device_name[0] = NULL;
+ device_name[1] = NULL;
+ break;
+ }
+ }
+
+ if (!device_name[0] && !device_name[1]) {
+ ret = 0;
+ } else {
+ if (device_name[0])
+ device_name[0] = av_strdup(device_name[0]);
+ if (device_name[1])
+ device_name[1] = av_strdup(device_name[1]);
+ }
+
+ av_free(name);
+ return ret;
+}
+
+static int dshow_read_header(AVFormatContext *avctx)
+{
+ struct dshow_ctx *ctx = avctx->priv_data;
+ IGraphBuilder *graph = NULL;
+ ICreateDevEnum *devenum = NULL;
+ IMediaControl *control = NULL;
+ IMediaEvent *media_event = NULL;
+ HANDLE media_event_handle;
+ HANDLE proc;
+ int ret = AVERROR(EIO);
+ int r;
+
+ CoInitialize(0);
+
+ if (!ctx->list_devices && !parse_device_name(avctx)) {
+ av_log(avctx, AV_LOG_ERROR, "Malformed dshow input string.\n");
+ goto error;
+ }
+
+ ctx->video_codec_id = avctx->video_codec_id ? avctx->video_codec_id
+ : AV_CODEC_ID_RAWVIDEO;
+ if (ctx->pixel_format != AV_PIX_FMT_NONE) {
+ if (ctx->video_codec_id != AV_CODEC_ID_RAWVIDEO) {
+ av_log(avctx, AV_LOG_ERROR, "Pixel format may only be set when "
+ "video codec is not set or set to rawvideo\n");
+ ret = AVERROR(EINVAL);
+ goto error;
+ }
+ }
+ if (ctx->framerate) {
+ r = av_parse_video_rate(&ctx->requested_framerate, ctx->framerate);
+ if (r < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", ctx->framerate);
+ goto error;
+ }
+ }
+
+ r = CoCreateInstance(&CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,
+ &IID_IGraphBuilder, (void **) &graph);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not create capture graph.\n");
+ goto error;
+ }
+ ctx->graph = graph;
+
+ r = CoCreateInstance(&CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER,
+ &IID_ICreateDevEnum, (void **) &devenum);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not enumerate system devices.\n");
+ goto error;
+ }
+
+ if (ctx->list_devices) {
+ av_log(avctx, AV_LOG_INFO, "DirectShow video devices\n");
+ dshow_cycle_devices(avctx, devenum, VideoDevice, NULL);
+ av_log(avctx, AV_LOG_INFO, "DirectShow audio devices\n");
+ dshow_cycle_devices(avctx, devenum, AudioDevice, NULL);
+ ret = AVERROR_EXIT;
+ goto error;
+ }
+ if (ctx->list_options) {
+ if (ctx->device_name[VideoDevice])
+ dshow_list_device_options(avctx, devenum, VideoDevice);
+ if (ctx->device_name[AudioDevice])
+ dshow_list_device_options(avctx, devenum, AudioDevice);
+ ret = AVERROR_EXIT;
+ goto error;
+ }
+
+ if (ctx->device_name[VideoDevice]) {
+ if ((r = dshow_open_device(avctx, devenum, VideoDevice)) < 0 ||
+ (r = dshow_add_device(avctx, VideoDevice)) < 0) {
+ ret = r;
+ goto error;
+ }
+ }
+ if (ctx->device_name[AudioDevice]) {
+ if ((r = dshow_open_device(avctx, devenum, AudioDevice)) < 0 ||
+ (r = dshow_add_device(avctx, AudioDevice)) < 0) {
+ ret = r;
+ goto error;
+ }
+ }
+
+ ctx->mutex = CreateMutex(NULL, 0, NULL);
+ if (!ctx->mutex) {
+ av_log(avctx, AV_LOG_ERROR, "Could not create Mutex\n");
+ goto error;
+ }
+ ctx->event[1] = CreateEvent(NULL, 1, 0, NULL);
+ if (!ctx->event[1]) {
+ av_log(avctx, AV_LOG_ERROR, "Could not create Event\n");
+ goto error;
+ }
+
+ r = IGraphBuilder_QueryInterface(graph, &IID_IMediaControl, (void **) &control);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not get media control.\n");
+ goto error;
+ }
+ ctx->control = control;
+
+ r = IGraphBuilder_QueryInterface(graph, &IID_IMediaEvent, (void **) &media_event);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not get media event.\n");
+ goto error;
+ }
+ ctx->media_event = media_event;
+
+ r = IMediaEvent_GetEventHandle(media_event, (void *) &media_event_handle);
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not get media event handle.\n");
+ goto error;
+ }
+ proc = GetCurrentProcess();
+ r = DuplicateHandle(proc, media_event_handle, proc, &ctx->event[0],
+ 0, 0, DUPLICATE_SAME_ACCESS);
+ if (!r) {
+ av_log(avctx, AV_LOG_ERROR, "Could not duplicate media event handle.\n");
+ goto error;
+ }
+
+ r = IMediaControl_Run(control);
+ if (r == S_FALSE) {
+ OAFilterState pfs;
+ r = IMediaControl_GetState(control, 0, &pfs);
+ }
+ if (r != S_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Could not run filter\n");
+ goto error;
+ }
+
+ ret = 0;
+
+error:
+
+ if (devenum)
+ ICreateDevEnum_Release(devenum);
+
+ if (ret < 0)
+ dshow_read_close(avctx);
+
+ return ret;
+}
+
+/**
+ * Checks media events from DirectShow and returns -1 on error or EOF. Also
+ * purges all events left in the event queue, so that the event notification
+ * handle does not stay signaled.
+ */
+static int dshow_check_event_queue(IMediaEvent *media_event)
+{
+ LONG_PTR p1, p2;
+ long code;
+ int ret = 0;
+
+ while (IMediaEvent_GetEvent(media_event, &code, &p1, &p2, 0) != E_ABORT) {
+ if (code == EC_COMPLETE || code == EC_DEVICE_LOST || code == EC_ERRORABORT)
+ ret = -1;
+ IMediaEvent_FreeEventParams(media_event, code, p1, p2);
+ }
+
+ return ret;
+}
+
+static int dshow_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ struct dshow_ctx *ctx = s->priv_data;
+ AVPacketList *pktl = NULL;
+
+ while (!ctx->eof && !pktl) {
+ WaitForSingleObject(ctx->mutex, INFINITE);
+ pktl = ctx->pktl;
+ if (pktl) {
+ *pkt = pktl->pkt;
+ ctx->pktl = ctx->pktl->next;
+ av_free(pktl);
+ ctx->curbufsize -= pkt->size;
+ }
+ ResetEvent(ctx->event[1]);
+ ReleaseMutex(ctx->mutex);
+ if (!pktl) {
+ if (dshow_check_event_queue(ctx->media_event) < 0) {
+ ctx->eof = 1;
+ } else if (s->flags & AVFMT_FLAG_NONBLOCK) {
+ return AVERROR(EAGAIN);
+ } else {
+ WaitForMultipleObjects(2, ctx->event, 0, INFINITE);
+ }
+ }
+ }
+
+ return ctx->eof ? AVERROR(EIO) : pkt->size;
+}
+
+#define OFFSET(x) offsetof(struct dshow_ctx, x)
+#define DEC AV_OPT_FLAG_DECODING_PARAM
+static const AVOption options[] = {
+ { "video_size", "set video size given a string such as 640x480 or hd720.", OFFSET(requested_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, DEC },
+ { "pixel_format", "set video pixel format", OFFSET(pixel_format), AV_OPT_TYPE_PIXEL_FMT, {.i64 = AV_PIX_FMT_NONE}, -1, AV_PIX_FMT_NB-1, DEC },
+ { "framerate", "set video frame rate", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+ { "sample_rate", "set audio sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC },
+ { "sample_size", "set audio sample size", OFFSET(sample_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 16, DEC },
+ { "channels", "set number of audio channels, such as 1 or 2", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC },
+ { "list_devices", "list available devices", OFFSET(list_devices), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, DEC, "list_devices" },
+ { "true", "", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, DEC, "list_devices" },
+ { "false", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, DEC, "list_devices" },
+ { "list_options", "list available options for specified device", OFFSET(list_options), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, DEC, "list_options" },
+ { "true", "", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, DEC, "list_options" },
+ { "false", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, DEC, "list_options" },
+ { "video_device_number", "set video device number for devices with same name (starts at 0)", OFFSET(video_device_number), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC },
+ { "audio_device_number", "set audio device number for devices with same name (starts at 0)", OFFSET(audio_device_number), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC },
+ { "audio_buffer_size", "set audio device buffer latency size in milliseconds (default is the device's default)", OFFSET(audio_buffer_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC },
+ { NULL },
+};
+
+static const AVClass dshow_class = {
+ .class_name = "dshow indev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVInputFormat ff_dshow_demuxer = {
+ .name = "dshow",
+ .long_name = NULL_IF_CONFIG_SMALL("DirectShow capture"),
+ .priv_data_size = sizeof(struct dshow_ctx),
+ .read_header = dshow_read_header,
+ .read_packet = dshow_read_packet,
+ .read_close = dshow_read_close,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &dshow_class,
+};
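A minimal sketch (not part of this patch) of driving the demuxer registered above through the public libavformat API of this era; the device name "Integrated Camera" and the option values are placeholders, and the option names come from the AVOption table above:

#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

static int open_dshow_camera(AVFormatContext **fmt)
{
    AVInputFormat *dshow;
    AVDictionary *opts = NULL;
    int ret;

    av_register_all();
    avdevice_register_all();            /* registers ff_dshow_demuxer */

    dshow = av_find_input_format("dshow");
    if (!dshow)
        return AVERROR_DEMUXER_NOT_FOUND;

    /* Option names match the AVOption table above. */
    av_dict_set(&opts, "video_size", "640x480", 0);
    av_dict_set(&opts, "framerate",  "30",      0);

    /* "video=<name>[:audio=<name>]" is the syntax parse_device_name() expects. */
    ret = avformat_open_input(fmt, "video=Integrated Camera", dshow, &opts);
    av_dict_free(&opts);
    return ret;
}

Once avformat_open_input() succeeds, av_read_frame() returns the packets queued by dshow_read_packet() above.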
diff --git a/libavdevice/dshow_capture.h b/libavdevice/dshow_capture.h
new file mode 100644
index 0000000000..aff5019b30
--- /dev/null
+++ b/libavdevice/dshow_capture.h
@@ -0,0 +1,279 @@
+/*
+ * DirectShow capture interface
+ * Copyright (c) 2010 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVDEVICE_DSHOW_H
+#define AVDEVICE_DSHOW_H
+
+#define DSHOWDEBUG 0
+
+#include "avdevice.h"
+
+#define COBJMACROS
+#include <windows.h>
+#define NO_DSHOW_STRSAFE
+#include <dshow.h>
+#include <dvdmedia.h>
+
+/* EC_DEVICE_LOST is not defined in MinGW dshow headers. */
+#ifndef EC_DEVICE_LOST
+#define EC_DEVICE_LOST 0x1f
+#endif
+
+long ff_copy_dshow_media_type(AM_MEDIA_TYPE *dst, const AM_MEDIA_TYPE *src);
+void ff_print_VIDEO_STREAM_CONFIG_CAPS(const VIDEO_STREAM_CONFIG_CAPS *caps);
+void ff_print_AUDIO_STREAM_CONFIG_CAPS(const AUDIO_STREAM_CONFIG_CAPS *caps);
+void ff_print_AM_MEDIA_TYPE(const AM_MEDIA_TYPE *type);
+void ff_printGUID(const GUID *g);
+
+#if DSHOWDEBUG
+extern const AVClass *ff_dshow_context_class_ptr;
+#define dshowdebug(...) av_log(&ff_dshow_context_class_ptr, AV_LOG_DEBUG, __VA_ARGS__)
+#else
+#define dshowdebug(...)
+#endif
+
+static inline void nothing(void *foo)
+{
+}
+
+struct GUIDoffset {
+ const GUID *iid;
+ int offset;
+};
+
+enum dshowDeviceType {
+ VideoDevice = 0,
+ AudioDevice = 1,
+};
+
+#define DECLARE_QUERYINTERFACE(class, ...) \
+long WINAPI \
+class##_QueryInterface(class *this, const GUID *riid, void **ppvObject) \
+{ \
+ struct GUIDoffset ifaces[] = __VA_ARGS__; \
+ int i; \
+ dshowdebug(AV_STRINGIFY(class)"_QueryInterface(%p, %p, %p)\n", this, riid, ppvObject); \
+ ff_printGUID(riid); \
+ if (!ppvObject) \
+ return E_POINTER; \
+ for (i = 0; i < sizeof(ifaces)/sizeof(ifaces[0]); i++) { \
+ if (IsEqualGUID(riid, ifaces[i].iid)) { \
+ void *obj = (void *) ((uint8_t *) this + ifaces[i].offset); \
+ class##_AddRef(this); \
+ dshowdebug("\tfound %d with offset %d\n", i, ifaces[i].offset); \
+ *ppvObject = (void *) obj; \
+ return S_OK; \
+ } \
+ } \
+ dshowdebug("\tE_NOINTERFACE\n"); \
+ *ppvObject = NULL; \
+ return E_NOINTERFACE; \
+}
+#define DECLARE_ADDREF(class) \
+unsigned long WINAPI \
+class##_AddRef(class *this) \
+{ \
+ dshowdebug(AV_STRINGIFY(class)"_AddRef(%p)\t%ld\n", this, this->ref+1); \
+ return InterlockedIncrement(&this->ref); \
+}
+#define DECLARE_RELEASE(class) \
+unsigned long WINAPI \
+class##_Release(class *this) \
+{ \
+ long ref = InterlockedDecrement(&this->ref); \
+ dshowdebug(AV_STRINGIFY(class)"_Release(%p)\t%ld\n", this, ref); \
+ if (!ref) \
+ class##_Destroy(this); \
+ return ref; \
+}
+
+#define DECLARE_DESTROY(class, func) \
+void class##_Destroy(class *this) \
+{ \
+ dshowdebug(AV_STRINGIFY(class)"_Destroy(%p)\n", this); \
+ func(this); \
+ if (this) { \
+ if (this->vtbl) \
+ CoTaskMemFree(this->vtbl); \
+ CoTaskMemFree(this); \
+ } \
+}
+#define DECLARE_CREATE(class, setup, ...) \
+class *class##_Create(__VA_ARGS__) \
+{ \
+ class *this = CoTaskMemAlloc(sizeof(class)); \
+ void *vtbl = CoTaskMemAlloc(sizeof(*this->vtbl)); \
+ dshowdebug(AV_STRINGIFY(class)"_Create(%p)\n", this); \
+ if (!this || !vtbl) \
+ goto fail; \
+ ZeroMemory(this, sizeof(class)); \
+ ZeroMemory(vtbl, sizeof(*this->vtbl)); \
+ this->ref = 1; \
+ this->vtbl = vtbl; \
+ if (!setup) \
+ goto fail; \
+ dshowdebug("created "AV_STRINGIFY(class)" %p\n", this); \
+ return this; \
+fail: \
+ class##_Destroy(this); \
+ dshowdebug("could not create "AV_STRINGIFY(class)"\n"); \
+ return NULL; \
+}
+
+#define SETVTBL(vtbl, class, fn) \
+ do { (vtbl)->fn = (void *) class##_##fn; } while(0)
+
+/*****************************************************************************
+ * Forward Declarations
+ ****************************************************************************/
+typedef struct libAVPin libAVPin;
+typedef struct libAVMemInputPin libAVMemInputPin;
+typedef struct libAVEnumPins libAVEnumPins;
+typedef struct libAVEnumMediaTypes libAVEnumMediaTypes;
+typedef struct libAVFilter libAVFilter;
+
+/*****************************************************************************
+ * libAVPin
+ ****************************************************************************/
+struct libAVPin {
+ IPinVtbl *vtbl;
+ long ref;
+ libAVFilter *filter;
+ IPin *connectedto;
+ AM_MEDIA_TYPE type;
+ IMemInputPinVtbl *imemvtbl;
+};
+
+long WINAPI libAVPin_QueryInterface (libAVPin *, const GUID *, void **);
+unsigned long WINAPI libAVPin_AddRef (libAVPin *);
+unsigned long WINAPI libAVPin_Release (libAVPin *);
+long WINAPI libAVPin_Connect (libAVPin *, IPin *, const AM_MEDIA_TYPE *);
+long WINAPI libAVPin_ReceiveConnection (libAVPin *, IPin *, const AM_MEDIA_TYPE *);
+long WINAPI libAVPin_Disconnect (libAVPin *);
+long WINAPI libAVPin_ConnectedTo (libAVPin *, IPin **);
+long WINAPI libAVPin_ConnectionMediaType (libAVPin *, AM_MEDIA_TYPE *);
+long WINAPI libAVPin_QueryPinInfo (libAVPin *, PIN_INFO *);
+long WINAPI libAVPin_QueryDirection (libAVPin *, PIN_DIRECTION *);
+long WINAPI libAVPin_QueryId (libAVPin *, wchar_t **);
+long WINAPI libAVPin_QueryAccept (libAVPin *, const AM_MEDIA_TYPE *);
+long WINAPI libAVPin_EnumMediaTypes (libAVPin *, IEnumMediaTypes **);
+long WINAPI libAVPin_QueryInternalConnections(libAVPin *, IPin **, unsigned long *);
+long WINAPI libAVPin_EndOfStream (libAVPin *);
+long WINAPI libAVPin_BeginFlush (libAVPin *);
+long WINAPI libAVPin_EndFlush (libAVPin *);
+long WINAPI libAVPin_NewSegment (libAVPin *, REFERENCE_TIME, REFERENCE_TIME, double);
+
+long WINAPI libAVMemInputPin_QueryInterface (libAVMemInputPin *, const GUID *, void **);
+unsigned long WINAPI libAVMemInputPin_AddRef (libAVMemInputPin *);
+unsigned long WINAPI libAVMemInputPin_Release (libAVMemInputPin *);
+long WINAPI libAVMemInputPin_GetAllocator (libAVMemInputPin *, IMemAllocator **);
+long WINAPI libAVMemInputPin_NotifyAllocator (libAVMemInputPin *, IMemAllocator *, BOOL);
+long WINAPI libAVMemInputPin_GetAllocatorRequirements(libAVMemInputPin *, ALLOCATOR_PROPERTIES *);
+long WINAPI libAVMemInputPin_Receive (libAVMemInputPin *, IMediaSample *);
+long WINAPI libAVMemInputPin_ReceiveMultiple (libAVMemInputPin *, IMediaSample **, long, long *);
+long WINAPI libAVMemInputPin_ReceiveCanBlock (libAVMemInputPin *);
+
+void libAVPin_Destroy(libAVPin *);
+libAVPin *libAVPin_Create (libAVFilter *filter);
+
+void libAVMemInputPin_Destroy(libAVMemInputPin *);
+
+/*****************************************************************************
+ * libAVEnumPins
+ ****************************************************************************/
+struct libAVEnumPins {
+ IEnumPinsVtbl *vtbl;
+ long ref;
+ int pos;
+ libAVPin *pin;
+ libAVFilter *filter;
+};
+
+long WINAPI libAVEnumPins_QueryInterface(libAVEnumPins *, const GUID *, void **);
+unsigned long WINAPI libAVEnumPins_AddRef (libAVEnumPins *);
+unsigned long WINAPI libAVEnumPins_Release (libAVEnumPins *);
+long WINAPI libAVEnumPins_Next (libAVEnumPins *, unsigned long, IPin **, unsigned long *);
+long WINAPI libAVEnumPins_Skip (libAVEnumPins *, unsigned long);
+long WINAPI libAVEnumPins_Reset (libAVEnumPins *);
+long WINAPI libAVEnumPins_Clone (libAVEnumPins *, libAVEnumPins **);
+
+void libAVEnumPins_Destroy(libAVEnumPins *);
+libAVEnumPins *libAVEnumPins_Create (libAVPin *pin, libAVFilter *filter);
+
+/*****************************************************************************
+ * libAVEnumMediaTypes
+ ****************************************************************************/
+struct libAVEnumMediaTypes {
+ IEnumPinsVtbl *vtbl;
+ long ref;
+ int pos;
+ AM_MEDIA_TYPE type;
+};
+
+long WINAPI libAVEnumMediaTypes_QueryInterface(libAVEnumMediaTypes *, const GUID *, void **);
+unsigned long WINAPI libAVEnumMediaTypes_AddRef (libAVEnumMediaTypes *);
+unsigned long WINAPI libAVEnumMediaTypes_Release (libAVEnumMediaTypes *);
+long WINAPI libAVEnumMediaTypes_Next (libAVEnumMediaTypes *, unsigned long, AM_MEDIA_TYPE **, unsigned long *);
+long WINAPI libAVEnumMediaTypes_Skip (libAVEnumMediaTypes *, unsigned long);
+long WINAPI libAVEnumMediaTypes_Reset (libAVEnumMediaTypes *);
+long WINAPI libAVEnumMediaTypes_Clone (libAVEnumMediaTypes *, libAVEnumMediaTypes **);
+
+void libAVEnumMediaTypes_Destroy(libAVEnumMediaTypes *);
+libAVEnumMediaTypes *libAVEnumMediaTypes_Create(const AM_MEDIA_TYPE *type);
+
+/*****************************************************************************
+ * libAVFilter
+ ****************************************************************************/
+struct libAVFilter {
+ IBaseFilterVtbl *vtbl;
+ long ref;
+ const wchar_t *name;
+ libAVPin *pin;
+ FILTER_INFO info;
+ FILTER_STATE state;
+ IReferenceClock *clock;
+ enum dshowDeviceType type;
+ void *priv_data;
+ int stream_index;
+ int64_t start_time;
+ void (*callback)(void *priv_data, int index, uint8_t *buf, int buf_size, int64_t time);
+};
+
+long WINAPI libAVFilter_QueryInterface (libAVFilter *, const GUID *, void **);
+unsigned long WINAPI libAVFilter_AddRef (libAVFilter *);
+unsigned long WINAPI libAVFilter_Release (libAVFilter *);
+long WINAPI libAVFilter_GetClassID (libAVFilter *, CLSID *);
+long WINAPI libAVFilter_Stop (libAVFilter *);
+long WINAPI libAVFilter_Pause (libAVFilter *);
+long WINAPI libAVFilter_Run (libAVFilter *, REFERENCE_TIME);
+long WINAPI libAVFilter_GetState (libAVFilter *, DWORD, FILTER_STATE *);
+long WINAPI libAVFilter_SetSyncSource (libAVFilter *, IReferenceClock *);
+long WINAPI libAVFilter_GetSyncSource (libAVFilter *, IReferenceClock **);
+long WINAPI libAVFilter_EnumPins (libAVFilter *, IEnumPins **);
+long WINAPI libAVFilter_FindPin (libAVFilter *, const wchar_t *, IPin **);
+long WINAPI libAVFilter_QueryFilterInfo(libAVFilter *, FILTER_INFO *);
+long WINAPI libAVFilter_JoinFilterGraph(libAVFilter *, IFilterGraph *, const wchar_t *);
+long WINAPI libAVFilter_QueryVendorInfo(libAVFilter *, wchar_t **);
+
+void libAVFilter_Destroy(libAVFilter *);
+libAVFilter *libAVFilter_Create (void *, void *, enum dshowDeviceType);
+
+#endif /* AVDEVICE_DSHOW_H */
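A standalone sketch (invented type names, not code from this patch) of the pointer arithmetic that DECLARE_QUERYINTERFACE()'s {GUID, offset} table and the imemvtbl member of libAVPin rely on: a secondary interface is embedded at a fixed offset inside the object, and its methods recover the owning object by subtracting that offset again, as dshow_pin.c does with imemoffset:

#include <assert.h>
#include <stddef.h>

typedef struct Outer {
    void *primary_vtbl;    /* stands in for IPinVtbl *          */
    long  ref;             /* shared reference count            */
    void *secondary_vtbl;  /* stands in for IMemInputPinVtbl *  */
} Outer;

#define SECONDARY_OFFSET offsetof(Outer, secondary_vtbl)

int main(void)
{
    Outer obj = { 0 };

    /* QueryInterface hands out the embedded interface as object + offset... */
    void *secondary = (char *) &obj + SECONDARY_OFFSET;

    /* ...and the secondary interface's methods get the owner back by
     * subtracting the same offset. */
    Outer *owner = (Outer *) ((char *) secondary - SECONDARY_OFFSET);

    assert(owner == &obj);
    return 0;
}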
diff --git a/libavdevice/dshow_common.c b/libavdevice/dshow_common.c
new file mode 100644
index 0000000000..f7f0dfbdbb
--- /dev/null
+++ b/libavdevice/dshow_common.c
@@ -0,0 +1,190 @@
+/*
+ * DirectShow capture interface
+ * Copyright (c) 2010 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "dshow_capture.h"
+
+long ff_copy_dshow_media_type(AM_MEDIA_TYPE *dst, const AM_MEDIA_TYPE *src)
+{
+ uint8_t *pbFormat = NULL;
+
+ if (src->cbFormat) {
+ pbFormat = CoTaskMemAlloc(src->cbFormat);
+ if (!pbFormat)
+ return E_OUTOFMEMORY;
+ memcpy(pbFormat, src->pbFormat, src->cbFormat);
+ }
+
+ *dst = *src;
+ dst->pUnk = NULL;
+ dst->pbFormat = pbFormat;
+
+ return S_OK;
+}
+
+void ff_printGUID(const GUID *g)
+{
+#if DSHOWDEBUG
+ const uint32_t *d = (const uint32_t *) &g->Data1;
+ const uint16_t *w = (const uint16_t *) &g->Data2;
+ const uint8_t *c = (const uint8_t *) &g->Data4;
+
+ dshowdebug("0x%08x 0x%04x 0x%04x %02x%02x%02x%02x%02x%02x%02x%02x",
+ d[0], w[0], w[1],
+ c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
+#endif
+}
+
+static const char *dshow_context_to_name(void *ptr)
+{
+ return "dshow";
+}
+static const AVClass ff_dshow_context_class = { "DirectShow", dshow_context_to_name };
+const AVClass *ff_dshow_context_class_ptr = &ff_dshow_context_class;
+
+#define dstruct(pctx, sname, var, type) \
+ dshowdebug(" "#var":\t%"type"\n", sname->var)
+
+#if DSHOWDEBUG
+static void dump_bih(void *s, BITMAPINFOHEADER *bih)
+{
+ dshowdebug(" BITMAPINFOHEADER\n");
+ dstruct(s, bih, biSize, "lu");
+ dstruct(s, bih, biWidth, "ld");
+ dstruct(s, bih, biHeight, "ld");
+ dstruct(s, bih, biPlanes, "d");
+ dstruct(s, bih, biBitCount, "d");
+ dstruct(s, bih, biCompression, "lu");
+ dshowdebug(" biCompression:\t\"%.4s\"\n",
+ (char*) &bih->biCompression);
+ dstruct(s, bih, biSizeImage, "lu");
+ dstruct(s, bih, biXPelsPerMeter, "lu");
+ dstruct(s, bih, biYPelsPerMeter, "lu");
+ dstruct(s, bih, biClrUsed, "lu");
+ dstruct(s, bih, biClrImportant, "lu");
+}
+#endif
+
+void ff_print_VIDEO_STREAM_CONFIG_CAPS(const VIDEO_STREAM_CONFIG_CAPS *caps)
+{
+#if DSHOWDEBUG
+ dshowdebug(" VIDEO_STREAM_CONFIG_CAPS\n");
+ dshowdebug(" guid\t");
+ ff_printGUID(&caps->guid);
+ dshowdebug("\n");
+ dshowdebug(" VideoStandard\t%lu\n", caps->VideoStandard);
+ dshowdebug(" InputSize %ld\t%ld\n", caps->InputSize.cx, caps->InputSize.cy);
+ dshowdebug(" MinCroppingSize %ld\t%ld\n", caps->MinCroppingSize.cx, caps->MinCroppingSize.cy);
+ dshowdebug(" MaxCroppingSize %ld\t%ld\n", caps->MaxCroppingSize.cx, caps->MaxCroppingSize.cy);
+ dshowdebug(" CropGranularityX\t%d\n", caps->CropGranularityX);
+ dshowdebug(" CropGranularityY\t%d\n", caps->CropGranularityY);
+ dshowdebug(" CropAlignX\t%d\n", caps->CropAlignX);
+ dshowdebug(" CropAlignY\t%d\n", caps->CropAlignY);
+ dshowdebug(" MinOutputSize %ld\t%ld\n", caps->MinOutputSize.cx, caps->MinOutputSize.cy);
+ dshowdebug(" MaxOutputSize %ld\t%ld\n", caps->MaxOutputSize.cx, caps->MaxOutputSize.cy);
+ dshowdebug(" OutputGranularityX\t%d\n", caps->OutputGranularityX);
+ dshowdebug(" OutputGranularityY\t%d\n", caps->OutputGranularityY);
+ dshowdebug(" StretchTapsX\t%d\n", caps->StretchTapsX);
+ dshowdebug(" StretchTapsY\t%d\n", caps->StretchTapsY);
+ dshowdebug(" ShrinkTapsX\t%d\n", caps->ShrinkTapsX);
+ dshowdebug(" ShrinkTapsY\t%d\n", caps->ShrinkTapsY);
+ dshowdebug(" MinFrameInterval\t%"PRId64"\n", caps->MinFrameInterval);
+ dshowdebug(" MaxFrameInterval\t%"PRId64"\n", caps->MaxFrameInterval);
+ dshowdebug(" MinBitsPerSecond\t%ld\n", caps->MinBitsPerSecond);
+ dshowdebug(" MaxBitsPerSecond\t%ld\n", caps->MaxBitsPerSecond);
+#endif
+}
+
+void ff_print_AUDIO_STREAM_CONFIG_CAPS(const AUDIO_STREAM_CONFIG_CAPS *caps)
+{
+#if DSHOWDEBUG
+ dshowdebug(" AUDIO_STREAM_CONFIG_CAPS\n");
+ dshowdebug(" guid\t");
+ ff_printGUID(&caps->guid);
+ dshowdebug("\n");
+ dshowdebug(" MinimumChannels\t%lu\n", caps->MinimumChannels);
+ dshowdebug(" MaximumChannels\t%lu\n", caps->MaximumChannels);
+ dshowdebug(" ChannelsGranularity\t%lu\n", caps->ChannelsGranularity);
+ dshowdebug(" MinimumBitsPerSample\t%lu\n", caps->MinimumBitsPerSample);
+ dshowdebug(" MaximumBitsPerSample\t%lu\n", caps->MaximumBitsPerSample);
+ dshowdebug(" BitsPerSampleGranularity\t%lu\n", caps->BitsPerSampleGranularity);
+ dshowdebug(" MinimumSampleFrequency\t%lu\n", caps->MinimumSampleFrequency);
+ dshowdebug(" MaximumSampleFrequency\t%lu\n", caps->MaximumSampleFrequency);
+ dshowdebug(" SampleFrequencyGranularity\t%lu\n", caps->SampleFrequencyGranularity);
+#endif
+}
+
+void ff_print_AM_MEDIA_TYPE(const AM_MEDIA_TYPE *type)
+{
+#if DSHOWDEBUG
+ dshowdebug(" majortype\t");
+ ff_printGUID(&type->majortype);
+ dshowdebug("\n");
+ dshowdebug(" subtype\t");
+ ff_printGUID(&type->subtype);
+ dshowdebug("\n");
+ dshowdebug(" bFixedSizeSamples\t%d\n", type->bFixedSizeSamples);
+ dshowdebug(" bTemporalCompression\t%d\n", type->bTemporalCompression);
+ dshowdebug(" lSampleSize\t%lu\n", type->lSampleSize);
+ dshowdebug(" formattype\t");
+ ff_printGUID(&type->formattype);
+ dshowdebug("\n");
+ dshowdebug(" pUnk\t%p\n", type->pUnk);
+ dshowdebug(" cbFormat\t%lu\n", type->cbFormat);
+ dshowdebug(" pbFormat\t%p\n", type->pbFormat);
+
+ if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo)) {
+ VIDEOINFOHEADER *v = (void *) type->pbFormat;
+ dshowdebug(" rcSource: left %ld top %ld right %ld bottom %ld\n",
+ v->rcSource.left, v->rcSource.top, v->rcSource.right, v->rcSource.bottom);
+ dshowdebug(" rcTarget: left %ld top %ld right %ld bottom %ld\n",
+ v->rcTarget.left, v->rcTarget.top, v->rcTarget.right, v->rcTarget.bottom);
+ dshowdebug(" dwBitRate: %lu\n", v->dwBitRate);
+ dshowdebug(" dwBitErrorRate: %lu\n", v->dwBitErrorRate);
+ dshowdebug(" AvgTimePerFrame: %"PRId64"\n", v->AvgTimePerFrame);
+ dump_bih(NULL, &v->bmiHeader);
+ } else if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo2)) {
+ VIDEOINFOHEADER2 *v = (void *) type->pbFormat;
+ dshowdebug(" rcSource: left %ld top %ld right %ld bottom %ld\n",
+ v->rcSource.left, v->rcSource.top, v->rcSource.right, v->rcSource.bottom);
+ dshowdebug(" rcTarget: left %ld top %ld right %ld bottom %ld\n",
+ v->rcTarget.left, v->rcTarget.top, v->rcTarget.right, v->rcTarget.bottom);
+ dshowdebug(" dwBitRate: %lu\n", v->dwBitRate);
+ dshowdebug(" dwBitErrorRate: %lu\n", v->dwBitErrorRate);
+ dshowdebug(" AvgTimePerFrame: %"PRId64"\n", v->AvgTimePerFrame);
+ dshowdebug(" dwInterlaceFlags: %lu\n", v->dwInterlaceFlags);
+ dshowdebug(" dwCopyProtectFlags: %lu\n", v->dwCopyProtectFlags);
+ dshowdebug(" dwPictAspectRatioX: %lu\n", v->dwPictAspectRatioX);
+ dshowdebug(" dwPictAspectRatioY: %lu\n", v->dwPictAspectRatioY);
+// dshowdebug(" dwReserved1: %lu\n", v->u.dwReserved1); /* mingw-w64 is buggy and doesn't name unnamed unions */
+ dshowdebug(" dwReserved2: %lu\n", v->dwReserved2);
+ dump_bih(NULL, &v->bmiHeader);
+ } else if (IsEqualGUID(&type->formattype, &FORMAT_WaveFormatEx)) {
+ WAVEFORMATEX *fx = (void *) type->pbFormat;
+ dshowdebug(" wFormatTag: %u\n", fx->wFormatTag);
+ dshowdebug(" nChannels: %u\n", fx->nChannels);
+ dshowdebug(" nSamplesPerSec: %lu\n", fx->nSamplesPerSec);
+ dshowdebug(" nAvgBytesPerSec: %lu\n", fx->nAvgBytesPerSec);
+ dshowdebug(" nBlockAlign: %u\n", fx->nBlockAlign);
+ dshowdebug(" wBitsPerSample: %u\n", fx->wBitsPerSample);
+ dshowdebug(" cbSize: %u\n", fx->cbSize);
+ }
+#endif
+}
diff --git a/libavdevice/dshow_enummediatypes.c b/libavdevice/dshow_enummediatypes.c
new file mode 100644
index 0000000000..aaed58b449
--- /dev/null
+++ b/libavdevice/dshow_enummediatypes.c
@@ -0,0 +1,103 @@
+/*
+ * DirectShow capture interface
+ * Copyright (c) 2010 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "dshow_capture.h"
+
+DECLARE_QUERYINTERFACE(libAVEnumMediaTypes,
+ { {&IID_IUnknown,0}, {&IID_IEnumPins,0} })
+DECLARE_ADDREF(libAVEnumMediaTypes)
+DECLARE_RELEASE(libAVEnumMediaTypes)
+
+long WINAPI
+libAVEnumMediaTypes_Next(libAVEnumMediaTypes *this, unsigned long n,
+ AM_MEDIA_TYPE **types, unsigned long *fetched)
+{
+ int count = 0;
+ dshowdebug("libAVEnumMediaTypes_Next(%p)\n", this);
+ if (!types)
+ return E_POINTER;
+ if (!this->pos && n == 1) {
+ if (!IsEqualGUID(&this->type.majortype, &GUID_NULL)) {
+ AM_MEDIA_TYPE *type = av_malloc(sizeof(AM_MEDIA_TYPE));
+ ff_copy_dshow_media_type(type, &this->type);
+ *types = type;
+ count = 1;
+ }
+ this->pos = 1;
+ }
+ if (fetched)
+ *fetched = count;
+ if (!count)
+ return S_FALSE;
+ return S_OK;
+}
+long WINAPI
+libAVEnumMediaTypes_Skip(libAVEnumMediaTypes *this, unsigned long n)
+{
+ dshowdebug("libAVEnumMediaTypes_Skip(%p)\n", this);
+ if (n) /* Any skip will always fall outside of the only valid type. */
+ return S_FALSE;
+ return S_OK;
+}
+long WINAPI
+libAVEnumMediaTypes_Reset(libAVEnumMediaTypes *this)
+{
+ dshowdebug("libAVEnumMediaTypes_Reset(%p)\n", this);
+ this->pos = 0;
+ return S_OK;
+}
+long WINAPI
+libAVEnumMediaTypes_Clone(libAVEnumMediaTypes *this, libAVEnumMediaTypes **enums)
+{
+ libAVEnumMediaTypes *new;
+ dshowdebug("libAVEnumMediaTypes_Clone(%p)\n", this);
+ if (!enums)
+ return E_POINTER;
+ new = libAVEnumMediaTypes_Create(&this->type);
+ if (!new)
+ return E_OUTOFMEMORY;
+ new->pos = this->pos;
+ *enums = new;
+ return S_OK;
+}
+
+static int
+libAVEnumMediaTypes_Setup(libAVEnumMediaTypes *this, const AM_MEDIA_TYPE *type)
+{
+ IEnumPinsVtbl *vtbl = this->vtbl;
+ SETVTBL(vtbl, libAVEnumMediaTypes, QueryInterface);
+ SETVTBL(vtbl, libAVEnumMediaTypes, AddRef);
+ SETVTBL(vtbl, libAVEnumMediaTypes, Release);
+ SETVTBL(vtbl, libAVEnumMediaTypes, Next);
+ SETVTBL(vtbl, libAVEnumMediaTypes, Skip);
+ SETVTBL(vtbl, libAVEnumMediaTypes, Reset);
+ SETVTBL(vtbl, libAVEnumMediaTypes, Clone);
+
+ if (!type) {
+ this->type.majortype = GUID_NULL;
+ } else {
+ ff_copy_dshow_media_type(&this->type, type);
+ }
+
+ return 1;
+}
+DECLARE_CREATE(libAVEnumMediaTypes, libAVEnumMediaTypes_Setup(this, type), const AM_MEDIA_TYPE *type)
+DECLARE_DESTROY(libAVEnumMediaTypes, nothing)
diff --git a/libavdevice/dshow_enumpins.c b/libavdevice/dshow_enumpins.c
new file mode 100644
index 0000000000..e5c11cb54e
--- /dev/null
+++ b/libavdevice/dshow_enumpins.c
@@ -0,0 +1,105 @@
+/*
+ * DirectShow capture interface
+ * Copyright (c) 2010 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "dshow_capture.h"
+
+DECLARE_QUERYINTERFACE(libAVEnumPins,
+ { {&IID_IUnknown,0}, {&IID_IEnumPins,0} })
+DECLARE_ADDREF(libAVEnumPins)
+DECLARE_RELEASE(libAVEnumPins)
+
+long WINAPI
+libAVEnumPins_Next(libAVEnumPins *this, unsigned long n, IPin **pins,
+ unsigned long *fetched)
+{
+ int count = 0;
+ dshowdebug("libAVEnumPins_Next(%p)\n", this);
+ if (!pins)
+ return E_POINTER;
+ if (!this->pos && n == 1) {
+ libAVPin_AddRef(this->pin);
+ *pins = (IPin *) this->pin;
+ count = 1;
+ this->pos = 1;
+ }
+ if (fetched)
+ *fetched = count;
+ if (!count)
+ return S_FALSE;
+ return S_OK;
+}
+long WINAPI
+libAVEnumPins_Skip(libAVEnumPins *this, unsigned long n)
+{
+ dshowdebug("libAVEnumPins_Skip(%p)\n", this);
+ if (n) /* Any skip will always fall outside of the only valid pin. */
+ return S_FALSE;
+ return S_OK;
+}
+long WINAPI
+libAVEnumPins_Reset(libAVEnumPins *this)
+{
+ dshowdebug("libAVEnumPins_Reset(%p)\n", this);
+ this->pos = 0;
+ return S_OK;
+}
+long WINAPI
+libAVEnumPins_Clone(libAVEnumPins *this, libAVEnumPins **pins)
+{
+ libAVEnumPins *new;
+ dshowdebug("libAVEnumPins_Clone(%p)\n", this);
+ if (!pins)
+ return E_POINTER;
+ new = libAVEnumPins_Create(this->pin, this->filter);
+ if (!new)
+ return E_OUTOFMEMORY;
+ new->pos = this->pos;
+ *pins = new;
+ return S_OK;
+}
+
+static int
+libAVEnumPins_Setup(libAVEnumPins *this, libAVPin *pin, libAVFilter *filter)
+{
+ IEnumPinsVtbl *vtbl = this->vtbl;
+ SETVTBL(vtbl, libAVEnumPins, QueryInterface);
+ SETVTBL(vtbl, libAVEnumPins, AddRef);
+ SETVTBL(vtbl, libAVEnumPins, Release);
+ SETVTBL(vtbl, libAVEnumPins, Next);
+ SETVTBL(vtbl, libAVEnumPins, Skip);
+ SETVTBL(vtbl, libAVEnumPins, Reset);
+ SETVTBL(vtbl, libAVEnumPins, Clone);
+
+ this->pin = pin;
+ this->filter = filter;
+ libAVFilter_AddRef(this->filter);
+
+ return 1;
+}
+static int
+libAVEnumPins_Cleanup(libAVEnumPins *this)
+{
+ libAVFilter_Release(this->filter);
+ return 1;
+}
+DECLARE_CREATE(libAVEnumPins, libAVEnumPins_Setup(this, pin, filter),
+ libAVPin *pin, libAVFilter *filter)
+DECLARE_DESTROY(libAVEnumPins, libAVEnumPins_Cleanup)
diff --git a/libavdevice/dshow_filter.c b/libavdevice/dshow_filter.c
new file mode 100644
index 0000000000..7360adcfcd
--- /dev/null
+++ b/libavdevice/dshow_filter.c
@@ -0,0 +1,202 @@
+/*
+ * DirectShow capture interface
+ * Copyright (c) 2010 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "dshow_capture.h"
+
+DECLARE_QUERYINTERFACE(libAVFilter,
+ { {&IID_IUnknown,0}, {&IID_IBaseFilter,0} })
+DECLARE_ADDREF(libAVFilter)
+DECLARE_RELEASE(libAVFilter)
+
+long WINAPI
+libAVFilter_GetClassID(libAVFilter *this, CLSID *id)
+{
+ dshowdebug("libAVFilter_GetClassID(%p)\n", this);
+ /* I'm not creating a ClassID just for this. */
+ return E_FAIL;
+}
+long WINAPI
+libAVFilter_Stop(libAVFilter *this)
+{
+ dshowdebug("libAVFilter_Stop(%p)\n", this);
+ this->state = State_Stopped;
+ return S_OK;
+}
+long WINAPI
+libAVFilter_Pause(libAVFilter *this)
+{
+ dshowdebug("libAVFilter_Pause(%p)\n", this);
+ this->state = State_Paused;
+ return S_OK;
+}
+long WINAPI
+libAVFilter_Run(libAVFilter *this, REFERENCE_TIME start)
+{
+ dshowdebug("libAVFilter_Run(%p) %"PRId64"\n", this, start);
+ this->state = State_Running;
+ this->start_time = start;
+ return S_OK;
+}
+long WINAPI
+libAVFilter_GetState(libAVFilter *this, DWORD ms, FILTER_STATE *state)
+{
+ dshowdebug("libAVFilter_GetState(%p)\n", this);
+ if (!state)
+ return E_POINTER;
+ *state = this->state;
+ return S_OK;
+}
+long WINAPI
+libAVFilter_SetSyncSource(libAVFilter *this, IReferenceClock *clock)
+{
+ dshowdebug("libAVFilter_SetSyncSource(%p)\n", this);
+
+ if (this->clock != clock) {
+ if (this->clock)
+ IReferenceClock_Release(this->clock);
+ this->clock = clock;
+ if (clock)
+ IReferenceClock_AddRef(clock);
+ }
+
+ return S_OK;
+}
+long WINAPI
+libAVFilter_GetSyncSource(libAVFilter *this, IReferenceClock **clock)
+{
+ dshowdebug("libAVFilter_GetSyncSource(%p)\n", this);
+
+ if (!clock)
+ return E_POINTER;
+ if (this->clock)
+ IReferenceClock_AddRef(this->clock);
+ *clock = this->clock;
+
+ return S_OK;
+}
+long WINAPI
+libAVFilter_EnumPins(libAVFilter *this, IEnumPins **enumpin)
+{
+ libAVEnumPins *new;
+ dshowdebug("libAVFilter_EnumPins(%p)\n", this);
+
+ if (!enumpin)
+ return E_POINTER;
+ new = libAVEnumPins_Create(this->pin, this);
+ if (!new)
+ return E_OUTOFMEMORY;
+
+ *enumpin = (IEnumPins *) new;
+ return S_OK;
+}
+long WINAPI
+libAVFilter_FindPin(libAVFilter *this, const wchar_t *id, IPin **pin)
+{
+ libAVPin *found = NULL;
+ dshowdebug("libAVFilter_FindPin(%p)\n", this);
+
+ if (!id || !pin)
+ return E_POINTER;
+ if (!wcscmp(id, L"In")) {
+ found = this->pin;
+ libAVPin_AddRef(found);
+ }
+ *pin = (IPin *) found;
+ if (!found)
+ return VFW_E_NOT_FOUND;
+
+ return S_OK;
+}
+long WINAPI
+libAVFilter_QueryFilterInfo(libAVFilter *this, FILTER_INFO *info)
+{
+ dshowdebug("libAVFilter_QueryFilterInfo(%p)\n", this);
+
+ if (!info)
+ return E_POINTER;
+ if (this->info.pGraph)
+ IFilterGraph_AddRef(this->info.pGraph);
+ *info = this->info;
+
+ return S_OK;
+}
+long WINAPI
+libAVFilter_JoinFilterGraph(libAVFilter *this, IFilterGraph *graph,
+ const wchar_t *name)
+{
+ dshowdebug("libAVFilter_JoinFilterGraph(%p)\n", this);
+
+ this->info.pGraph = graph;
+ if (name)
+ wcscpy(this->info.achName, name);
+
+ return S_OK;
+}
+long WINAPI
+libAVFilter_QueryVendorInfo(libAVFilter *this, wchar_t **info)
+{
+ dshowdebug("libAVFilter_QueryVendorInfo(%p)\n", this);
+
+ if (!info)
+ return E_POINTER;
+ *info = wcsdup(L"libAV");
+
+ return S_OK;
+}
+
+static int
+libAVFilter_Setup(libAVFilter *this, void *priv_data, void *callback,
+ enum dshowDeviceType type)
+{
+ IBaseFilterVtbl *vtbl = this->vtbl;
+ SETVTBL(vtbl, libAVFilter, QueryInterface);
+ SETVTBL(vtbl, libAVFilter, AddRef);
+ SETVTBL(vtbl, libAVFilter, Release);
+ SETVTBL(vtbl, libAVFilter, GetClassID);
+ SETVTBL(vtbl, libAVFilter, Stop);
+ SETVTBL(vtbl, libAVFilter, Pause);
+ SETVTBL(vtbl, libAVFilter, Run);
+ SETVTBL(vtbl, libAVFilter, GetState);
+ SETVTBL(vtbl, libAVFilter, SetSyncSource);
+ SETVTBL(vtbl, libAVFilter, GetSyncSource);
+ SETVTBL(vtbl, libAVFilter, EnumPins);
+ SETVTBL(vtbl, libAVFilter, FindPin);
+ SETVTBL(vtbl, libAVFilter, QueryFilterInfo);
+ SETVTBL(vtbl, libAVFilter, JoinFilterGraph);
+ SETVTBL(vtbl, libAVFilter, QueryVendorInfo);
+
+ this->pin = libAVPin_Create(this);
+
+ this->priv_data = priv_data;
+ this->callback = callback;
+ this->type = type;
+
+ return 1;
+}
+static int
+libAVFilter_Cleanup(libAVFilter *this)
+{
+ libAVPin_Release(this->pin);
+ return 1;
+}
+DECLARE_CREATE(libAVFilter, libAVFilter_Setup(this, priv_data, callback, type),
+ void *priv_data, void *callback, enum dshowDeviceType type)
+DECLARE_DESTROY(libAVFilter, libAVFilter_Cleanup)
diff --git a/libavdevice/dshow_pin.c b/libavdevice/dshow_pin.c
new file mode 100644
index 0000000000..30e4d9585c
--- /dev/null
+++ b/libavdevice/dshow_pin.c
@@ -0,0 +1,362 @@
+/*
+ * DirectShow capture interface
+ * Copyright (c) 2010 Ramiro Polla
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "dshow_capture.h"
+
+#include <stddef.h>
+#define imemoffset offsetof(libAVPin, imemvtbl)
+
+DECLARE_QUERYINTERFACE(libAVPin,
+ { {&IID_IUnknown,0}, {&IID_IPin,0}, {&IID_IMemInputPin,imemoffset} })
+DECLARE_ADDREF(libAVPin)
+DECLARE_RELEASE(libAVPin)
+
+long WINAPI
+libAVPin_Connect(libAVPin *this, IPin *pin, const AM_MEDIA_TYPE *type)
+{
+ dshowdebug("libAVPin_Connect(%p, %p, %p)\n", this, pin, type);
+ /* Input pins receive connections. */
+ return S_FALSE;
+}
+long WINAPI
+libAVPin_ReceiveConnection(libAVPin *this, IPin *pin,
+ const AM_MEDIA_TYPE *type)
+{
+ enum dshowDeviceType devtype = this->filter->type;
+ dshowdebug("libAVPin_ReceiveConnection(%p)\n", this);
+
+ if (!pin)
+ return E_POINTER;
+ if (this->connectedto)
+ return VFW_E_ALREADY_CONNECTED;
+
+ ff_print_AM_MEDIA_TYPE(type);
+ if (devtype == VideoDevice) {
+ if (!IsEqualGUID(&type->majortype, &MEDIATYPE_Video))
+ return VFW_E_TYPE_NOT_ACCEPTED;
+ } else {
+ if (!IsEqualGUID(&type->majortype, &MEDIATYPE_Audio))
+ return VFW_E_TYPE_NOT_ACCEPTED;
+ }
+
+ IPin_AddRef(pin);
+ this->connectedto = pin;
+
+ ff_copy_dshow_media_type(&this->type, type);
+
+ return S_OK;
+}
+long WINAPI
+libAVPin_Disconnect(libAVPin *this)
+{
+ dshowdebug("libAVPin_Disconnect(%p)\n", this);
+
+ if (this->filter->state != State_Stopped)
+ return VFW_E_NOT_STOPPED;
+ if (!this->connectedto)
+ return S_FALSE;
+ IPin_Release(this->connectedto);
+ this->connectedto = NULL;
+
+ return S_OK;
+}
+long WINAPI
+libAVPin_ConnectedTo(libAVPin *this, IPin **pin)
+{
+ dshowdebug("libAVPin_ConnectedTo(%p)\n", this);
+
+ if (!pin)
+ return E_POINTER;
+ if (!this->connectedto)
+ return VFW_E_NOT_CONNECTED;
+ IPin_AddRef(this->connectedto);
+ *pin = this->connectedto;
+
+ return S_OK;
+}
+long WINAPI
+libAVPin_ConnectionMediaType(libAVPin *this, AM_MEDIA_TYPE *type)
+{
+ dshowdebug("libAVPin_ConnectionMediaType(%p)\n", this);
+
+ if (!type)
+ return E_POINTER;
+ if (!this->connectedto)
+ return VFW_E_NOT_CONNECTED;
+
+ return ff_copy_dshow_media_type(type, &this->type);
+}
+long WINAPI
+libAVPin_QueryPinInfo(libAVPin *this, PIN_INFO *info)
+{
+ dshowdebug("libAVPin_QueryPinInfo(%p)\n", this);
+
+ if (!info)
+ return E_POINTER;
+
+ if (this->filter)
+ libAVFilter_AddRef(this->filter);
+
+ info->pFilter = (IBaseFilter *) this->filter;
+ info->dir = PINDIR_INPUT;
+ wcscpy(info->achName, L"Capture");
+
+ return S_OK;
+}
+long WINAPI
+libAVPin_QueryDirection(libAVPin *this, PIN_DIRECTION *dir)
+{
+ dshowdebug("libAVPin_QueryDirection(%p)\n", this);
+ if (!dir)
+ return E_POINTER;
+ *dir = PINDIR_INPUT;
+ return S_OK;
+}
+long WINAPI
+libAVPin_QueryId(libAVPin *this, wchar_t **id)
+{
+ dshowdebug("libAVPin_QueryId(%p)\n", this);
+
+ if (!id)
+ return E_POINTER;
+
+ *id = wcsdup(L"libAV Pin");
+
+ return S_OK;
+}
+long WINAPI
+libAVPin_QueryAccept(libAVPin *this, const AM_MEDIA_TYPE *type)
+{
+ dshowdebug("libAVPin_QueryAccept(%p)\n", this);
+ return S_FALSE;
+}
+long WINAPI
+libAVPin_EnumMediaTypes(libAVPin *this, IEnumMediaTypes **enumtypes)
+{
+ const AM_MEDIA_TYPE *type = NULL;
+ libAVEnumMediaTypes *new;
+ dshowdebug("libAVPin_EnumMediaTypes(%p)\n", this);
+
+ if (!enumtypes)
+ return E_POINTER;
+ new = libAVEnumMediaTypes_Create(type);
+ if (!new)
+ return E_OUTOFMEMORY;
+
+ *enumtypes = (IEnumMediaTypes *) new;
+ return S_OK;
+}
+long WINAPI
+libAVPin_QueryInternalConnections(libAVPin *this, IPin **pin,
+ unsigned long *npin)
+{
+ dshowdebug("libAVPin_QueryInternalConnections(%p)\n", this);
+ return E_NOTIMPL;
+}
+long WINAPI
+libAVPin_EndOfStream(libAVPin *this)
+{
+ dshowdebug("libAVPin_EndOfStream(%p)\n", this);
+ /* I don't care. */
+ return S_OK;
+}
+long WINAPI
+libAVPin_BeginFlush(libAVPin *this)
+{
+ dshowdebug("libAVPin_BeginFlush(%p)\n", this);
+ /* I don't care. */
+ return S_OK;
+}
+long WINAPI
+libAVPin_EndFlush(libAVPin *this)
+{
+ dshowdebug("libAVPin_EndFlush(%p)\n", this);
+ /* I don't care. */
+ return S_OK;
+}
+long WINAPI
+libAVPin_NewSegment(libAVPin *this, REFERENCE_TIME start, REFERENCE_TIME stop,
+ double rate)
+{
+ dshowdebug("libAVPin_NewSegment(%p)\n", this);
+ /* I don't care. */
+ return S_OK;
+}
+
+static int
+libAVPin_Setup(libAVPin *this, libAVFilter *filter)
+{
+ IPinVtbl *vtbl = this->vtbl;
+ IMemInputPinVtbl *imemvtbl;
+
+ if (!filter)
+ return 0;
+
+ imemvtbl = av_malloc(sizeof(IMemInputPinVtbl));
+ if (!imemvtbl)
+ return 0;
+
+ SETVTBL(imemvtbl, libAVMemInputPin, QueryInterface);
+ SETVTBL(imemvtbl, libAVMemInputPin, AddRef);
+ SETVTBL(imemvtbl, libAVMemInputPin, Release);
+ SETVTBL(imemvtbl, libAVMemInputPin, GetAllocator);
+ SETVTBL(imemvtbl, libAVMemInputPin, NotifyAllocator);
+ SETVTBL(imemvtbl, libAVMemInputPin, GetAllocatorRequirements);
+ SETVTBL(imemvtbl, libAVMemInputPin, Receive);
+ SETVTBL(imemvtbl, libAVMemInputPin, ReceiveMultiple);
+ SETVTBL(imemvtbl, libAVMemInputPin, ReceiveCanBlock);
+
+ this->imemvtbl = imemvtbl;
+
+ SETVTBL(vtbl, libAVPin, QueryInterface);
+ SETVTBL(vtbl, libAVPin, AddRef);
+ SETVTBL(vtbl, libAVPin, Release);
+ SETVTBL(vtbl, libAVPin, Connect);
+ SETVTBL(vtbl, libAVPin, ReceiveConnection);
+ SETVTBL(vtbl, libAVPin, Disconnect);
+ SETVTBL(vtbl, libAVPin, ConnectedTo);
+ SETVTBL(vtbl, libAVPin, ConnectionMediaType);
+ SETVTBL(vtbl, libAVPin, QueryPinInfo);
+ SETVTBL(vtbl, libAVPin, QueryDirection);
+ SETVTBL(vtbl, libAVPin, QueryId);
+ SETVTBL(vtbl, libAVPin, QueryAccept);
+ SETVTBL(vtbl, libAVPin, EnumMediaTypes);
+ SETVTBL(vtbl, libAVPin, QueryInternalConnections);
+ SETVTBL(vtbl, libAVPin, EndOfStream);
+ SETVTBL(vtbl, libAVPin, BeginFlush);
+ SETVTBL(vtbl, libAVPin, EndFlush);
+ SETVTBL(vtbl, libAVPin, NewSegment);
+
+ this->filter = filter;
+
+ return 1;
+}
+DECLARE_CREATE(libAVPin, libAVPin_Setup(this, filter), libAVFilter *filter)
+DECLARE_DESTROY(libAVPin, nothing)
+
+/*****************************************************************************
+ * libAVMemInputPin
+ ****************************************************************************/
+long WINAPI
+libAVMemInputPin_QueryInterface(libAVMemInputPin *this, const GUID *riid,
+ void **ppvObject)
+{
+ libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
+ dshowdebug("libAVMemInputPin_QueryInterface(%p)\n", this);
+ return libAVPin_QueryInterface(pin, riid, ppvObject);
+}
+unsigned long WINAPI
+libAVMemInputPin_AddRef(libAVMemInputPin *this)
+{
+ libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
+ dshowdebug("libAVMemInputPin_AddRef(%p)\n", this);
+ return libAVPin_AddRef(pin);
+}
+unsigned long WINAPI
+libAVMemInputPin_Release(libAVMemInputPin *this)
+{
+ libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
+ dshowdebug("libAVMemInputPin_Release(%p)\n", this);
+ return libAVPin_Release(pin);
+}
+long WINAPI
+libAVMemInputPin_GetAllocator(libAVMemInputPin *this, IMemAllocator **alloc)
+{
+ dshowdebug("libAVMemInputPin_GetAllocator(%p)\n", this);
+ return VFW_E_NO_ALLOCATOR;
+}
+long WINAPI
+libAVMemInputPin_NotifyAllocator(libAVMemInputPin *this, IMemAllocator *alloc,
+ BOOL rdwr)
+{
+ dshowdebug("libAVMemInputPin_NotifyAllocator(%p)\n", this);
+ return S_OK;
+}
+long WINAPI
+libAVMemInputPin_GetAllocatorRequirements(libAVMemInputPin *this,
+ ALLOCATOR_PROPERTIES *props)
+{
+ dshowdebug("libAVMemInputPin_GetAllocatorRequirements(%p)\n", this);
+ return E_NOTIMPL;
+}
+long WINAPI
+libAVMemInputPin_Receive(libAVMemInputPin *this, IMediaSample *sample)
+{
+ libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
+ enum dshowDeviceType devtype = pin->filter->type;
+ void *priv_data;
+ uint8_t *buf;
+ int buf_size;
+ int index;
+ int64_t curtime;
+
+ dshowdebug("libAVMemInputPin_Receive(%p)\n", this);
+
+ if (!sample)
+ return E_POINTER;
+
+ if (devtype == VideoDevice) {
+ /* PTS from video devices is unreliable. */
+ IReferenceClock *clock = pin->filter->clock;
+ IReferenceClock_GetTime(clock, &curtime);
+ } else {
+ int64_t dummy;
+ IMediaSample_GetTime(sample, &curtime, &dummy);
+ curtime += pin->filter->start_time;
+ }
+
+ buf_size = IMediaSample_GetActualDataLength(sample);
+ IMediaSample_GetPointer(sample, &buf);
+ priv_data = pin->filter->priv_data;
+ index = pin->filter->stream_index;
+
+ pin->filter->callback(priv_data, index, buf, buf_size, curtime);
+
+ return S_OK;
+}
+long WINAPI
+libAVMemInputPin_ReceiveMultiple(libAVMemInputPin *this,
+ IMediaSample **samples, long n, long *nproc)
+{
+ int i;
+ dshowdebug("libAVMemInputPin_ReceiveMultiple(%p)\n", this);
+
+ for (i = 0; i < n; i++)
+ libAVMemInputPin_Receive(this, samples[i]);
+
+ *nproc = n;
+ return S_OK;
+}
+long WINAPI
+libAVMemInputPin_ReceiveCanBlock(libAVMemInputPin *this)
+{
+ dshowdebug("libAVMemInputPin_ReceiveCanBlock(%p)\n", this);
+ /* I swear I will not block. */
+ return S_FALSE;
+}
+
+void
+libAVMemInputPin_Destroy(libAVMemInputPin *this)
+{
+ libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
+ dshowdebug("libAVMemInputPin_Destroy(%p)\n", this);
+ libAVPin_Destroy(pin);
+}
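Each media sample ends up in the callback declared in dshow_capture.h, void (*callback)(void *priv_data, int index, uint8_t *buf, int buf_size, int64_t time). The real callback lives in dshow.c and queues the buffer as an AVPacket; the following is only a hypothetical stand-in that shows the contract (the buffer is owned by the IMediaSample, so a real consumer must copy it before returning):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical consumer with the same signature as the libAVFilter callback.
 * "buf" is only valid during the call, so a real consumer (such as dshow.c)
 * must copy it before returning. */
static void sample_callback(void *priv_data, int index, uint8_t *buf,
                            int buf_size, int64_t time)
{
    (void) priv_data;
    (void) buf;
    printf("stream %d: %d bytes at time %"PRId64" (in 100 ns units)\n",
           index, buf_size, time);
}

int main(void)
{
    uint8_t dummy[16] = { 0 };
    sample_callback(NULL, 0, dummy, sizeof(dummy), INT64_C(1234567));
    return 0;
}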
diff --git a/libavdevice/dv1394.c b/libavdevice/dv1394.c
index d259e1a14f..0af5ea53c7 100644
--- a/libavdevice/dv1394.c
+++ b/libavdevice/dv1394.c
@@ -2,20 +2,20 @@
* Linux DV1394 interface
* Copyright (c) 2003 Max Krasnyansky <maxk@qualcomm.com>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -30,7 +30,7 @@
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
-#include "libavformat/avformat.h"
+#include "avdevice.h"
#include "libavformat/dv.h"
#include "dv1394.h"
@@ -186,7 +186,7 @@ restart_poll:
size = avpriv_dv_produce_packet(dv->dv_demux, pkt,
dv->ring + (dv->index * DV1394_PAL_FRAME_SIZE),
- DV1394_PAL_FRAME_SIZE);
+ DV1394_PAL_FRAME_SIZE, -1);
dv->index = (dv->index + 1) % DV1394_RING_FRAMES;
dv->done++; dv->avail--;
diff --git a/libavdevice/dv1394.h b/libavdevice/dv1394.h
index 9710ff56ea..b76d633ef6 100644
--- a/libavdevice/dv1394.h
+++ b/libavdevice/dv1394.h
@@ -8,20 +8,20 @@
* Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
* Peter Schlaile <udbz@rz.uni-karlsruhe.de>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavdevice/fbdev.c b/libavdevice/fbdev.c
index 22c53a3c1d..1156fb50a6 100644
--- a/libavdevice/fbdev.c
+++ b/libavdevice/fbdev.c
@@ -3,20 +3,20 @@
* Copyright (c) 2009 Giliard B. de Freitas <giliarde@gmail.com>
* Copyright (C) 2002 Gunnar Monell <gmo@linux.nu>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -41,7 +41,7 @@
#include "libavutil/time.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
-#include "libavformat/avformat.h"
+#include "avdevice.h"
#include "libavformat/internal.h"
struct rgb_pixfmt_map_entry {
@@ -50,7 +50,7 @@ struct rgb_pixfmt_map_entry {
enum AVPixelFormat pixfmt;
};
-static struct rgb_pixfmt_map_entry rgb_pixfmt_map[] = {
+static const struct rgb_pixfmt_map_entry rgb_pixfmt_map[] = {
// bpp, red_offset, green_offset, blue_offset, alpha_offset, pixfmt
{ 32, 0, 8, 16, 24, AV_PIX_FMT_RGBA },
{ 32, 16, 8, 0, 24, AV_PIX_FMT_BGRA },
@@ -58,6 +58,7 @@ static struct rgb_pixfmt_map_entry rgb_pixfmt_map[] = {
{ 32, 3, 2, 8, 0, AV_PIX_FMT_ABGR },
{ 24, 0, 8, 16, 0, AV_PIX_FMT_RGB24 },
{ 24, 16, 8, 0, 0, AV_PIX_FMT_BGR24 },
+ { 16, 11, 5, 0, 16, AV_PIX_FMT_RGB565 },
};
static enum AVPixelFormat get_pixfmt_from_fb_varinfo(struct fb_var_screeninfo *varinfo)
@@ -65,7 +66,7 @@ static enum AVPixelFormat get_pixfmt_from_fb_varinfo(struct fb_var_screeninfo *v
int i;
for (i = 0; i < FF_ARRAY_ELEMS(rgb_pixfmt_map); i++) {
- struct rgb_pixfmt_map_entry *entry = &rgb_pixfmt_map[i];
+ const struct rgb_pixfmt_map_entry *entry = &rgb_pixfmt_map[i];
if (entry->bits_per_pixel == varinfo->bits_per_pixel &&
entry->red_offset == varinfo->red.offset &&
entry->green_offset == varinfo->green.offset &&
@@ -80,7 +81,6 @@ typedef struct {
AVClass *class; ///< class for private options
int frame_size; ///< size in bytes of a grabbed frame
AVRational framerate_q; ///< framerate
- char *framerate; ///< framerate string set by a private option
int64_t time_frame; ///< time for the next frame to output (in 1/1000000 units)
int fd; ///< framebuffer device file descriptor
@@ -101,12 +101,6 @@ static av_cold int fbdev_read_header(AVFormatContext *avctx)
enum AVPixelFormat pix_fmt;
int ret, flags = O_RDONLY;
- ret = av_parse_video_rate(&fbdev->framerate_q, fbdev->framerate);
- if (ret < 0) {
- av_log(avctx, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", fbdev->framerate);
- return ret;
- }
-
if (!(st = avformat_new_stream(avctx, NULL)))
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in microseconds */
@@ -163,7 +157,7 @@ static av_cold int fbdev_read_header(AVFormatContext *avctx)
st->codec->width = fbdev->width;
st->codec->height = fbdev->height;
st->codec->pix_fmt = pix_fmt;
- st->codec->time_base = (AVRational){fbdev->framerate_q.den, fbdev->framerate_q.num};
+ st->codec->time_base = av_inv_q(fbdev->framerate_q);
st->codec->bit_rate =
fbdev->width * fbdev->height * fbdev->bytes_per_pixel * av_q2d(fbdev->framerate_q) * 8;
@@ -192,20 +186,22 @@ static int fbdev_read_packet(AVFormatContext *avctx, AVPacket *pkt)
fbdev->time_frame = av_gettime();
/* wait based on the frame rate */
- curtime = av_gettime();
- delay = fbdev->time_frame - curtime;
- av_dlog(avctx,
- "time_frame:%"PRId64" curtime:%"PRId64" delay:%"PRId64"\n",
- fbdev->time_frame, curtime, delay);
- if (delay > 0) {
+ while (1) {
+ curtime = av_gettime();
+ delay = fbdev->time_frame - curtime;
+ av_dlog(avctx,
+ "time_frame:%"PRId64" curtime:%"PRId64" delay:%"PRId64"\n",
+ fbdev->time_frame, curtime, delay);
+ if (delay <= 0) {
+ fbdev->time_frame += INT64_C(1000000) / av_q2d(fbdev->framerate_q);
+ break;
+ }
if (avctx->flags & AVFMT_FLAG_NONBLOCK)
return AVERROR(EAGAIN);
ts.tv_sec = delay / 1000000;
ts.tv_nsec = (delay % 1000000) * 1000;
while (nanosleep(&ts, &ts) < 0 && errno == EINTR);
}
- /* compute the time of the next frame */
- fbdev->time_frame += INT64_C(1000000) / av_q2d(fbdev->framerate_q);
if ((ret = av_new_packet(pkt, fbdev->frame_size)) < 0)
return ret;
@@ -222,7 +218,6 @@ static int fbdev_read_packet(AVFormatContext *avctx, AVPacket *pkt)
fbdev->varinfo.yoffset * fbdev->fixinfo.line_length;
pout = pkt->data;
- // TODO it'd be nice if the lines were aligned
for (i = 0; i < fbdev->height; i++) {
memcpy(pout, pin, fbdev->frame_linesize);
pin += fbdev->fixinfo.line_length;
@@ -245,7 +240,7 @@ static av_cold int fbdev_read_close(AVFormatContext *avctx)
#define OFFSET(x) offsetof(FBDevContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
- { "framerate","", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, DEC },
+ { "framerate","", OFFSET(framerate_q), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, DEC },
{ NULL },
};
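
The fbdev hunks above do two things: the framerate option is now parsed directly into an AVRational by AV_OPT_TYPE_VIDEO_RATE (so the manual av_parse_video_rate() call goes away), and the read loop no longer advances the frame deadline unconditionally but catches up inside a loop before grabbing. A minimal standalone sketch of that pacing scheme, without the AVFMT_FLAG_NONBLOCK early return (now_us() is an illustrative stand-in for av_gettime(), not part of the patch):

    #include <errno.h>
    #include <stdint.h>
    #include <sys/time.h>
    #include <time.h>

    /* Illustrative stand-in for av_gettime(): wall clock in microseconds. */
    static int64_t now_us(void)
    {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
    }

    /* Block until the next frame deadline, then advance it by one frame period. */
    static void wait_next_frame(int64_t *time_frame, double fps)
    {
        for (;;) {
            int64_t delay = *time_frame - now_us();
            if (delay <= 0) {
                *time_frame += (int64_t)(1000000 / fps); /* schedule the next frame */
                return;
            }
            struct timespec ts = { delay / 1000000, (delay % 1000000) * 1000 };
            while (nanosleep(&ts, &ts) < 0 && errno == EINTR)
                ; /* restart an interrupted sleep with the remaining time */
        }
    }
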
diff --git a/libavdevice/iec61883.c b/libavdevice/iec61883.c
new file mode 100644
index 0000000000..a63566e9d4
--- /dev/null
+++ b/libavdevice/iec61883.c
@@ -0,0 +1,497 @@
+/*
+ * Copyright (c) 2012 Georg Lippitsch <georg.lippitsch@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * libiec61883 interface
+ */
+
+#include <sys/poll.h>
+#include <libraw1394/raw1394.h>
+#include <libavc1394/avc1394.h>
+#include <libavc1394/rom1394.h>
+#include <libiec61883/iec61883.h>
+#include "libavformat/dv.h"
+#include "libavformat/mpegts.h"
+#include "libavutil/opt.h"
+#include "avdevice.h"
+
+#define THREADS HAVE_PTHREADS
+
+#if THREADS
+#include <pthread.h>
+#endif
+
+#define MOTDCT_SPEC_ID 0x00005068
+#define IEC61883_AUTO 0
+#define IEC61883_DV 1
+#define IEC61883_HDV 2
+
+/**
+ * For DV, one packet corresponds exactly to one frame.
+ * For HDV, each packet carries MPEG2 transport stream data.
+ * The queue is implemented as a linked list.
+ */
+typedef struct DVPacket {
+ uint8_t *buf; ///< actual buffer data
+ int len; ///< size of buffer allocated
+ struct DVPacket *next; ///< next DVPacket
+} DVPacket;
+
+struct iec61883_data {
+ AVClass *class;
+ raw1394handle_t raw1394; ///< handle for libraw1394
+ iec61883_dv_fb_t iec61883_dv; ///< handle for libiec61883 when used with DV
+ iec61883_mpeg2_t iec61883_mpeg2; ///< handle for libiec61883 when used with HDV
+
+ DVDemuxContext *dv_demux; ///< generic DV muxing/demuxing context
+ MpegTSContext *mpeg_demux; ///< generic HDV muxing/demuxing context
+
+ DVPacket *queue_first; ///< first element of packet queue
+ DVPacket *queue_last; ///< last element of packet queue
+
+ char *device_guid; ///< to select one of multiple DV devices
+
+ int packets; ///< Number of packets queued
+ int max_packets; ///< Max. number of packets in queue
+
+ int bandwidth; ///< returned by libiec61883
+ int channel; ///< returned by libiec61883
+ int input_port; ///< returned by libiec61883
+ int type; ///< Stream type, to distinguish DV/HDV
+ int node; ///< returned by libiec61883
+ int output_port; ///< returned by libiec61883
+ int thread_loop; ///< Condition for thread while-loop
+ int receiving; ///< True as soon as data from the device is available
+ int receive_error; ///< Set in the receive task in case of error
+ int eof; ///< True as soon as no more data is available
+
+ struct pollfd raw1394_poll; ///< to poll for new data from libraw1394
+
+ /** Parse function for DV/HDV differs, so this is set before packets arrive */
+ int (*parse_queue)(struct iec61883_data *dv, AVPacket *pkt);
+
+#if THREADS
+ pthread_t receive_task_thread;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+#endif
+};
+
+static int iec61883_callback(unsigned char *data, int length,
+ int complete, void *callback_data)
+{
+ struct iec61883_data *dv = callback_data;
+ DVPacket *packet;
+ int ret;
+
+#if THREADS
+ pthread_mutex_lock(&dv->mutex);
+#endif
+
+ if (dv->packets >= dv->max_packets) {
+ av_log(NULL, AV_LOG_ERROR, "DV packet queue overrun, dropping.\n");
+ ret = 0;
+ goto exit;
+ }
+
+ packet = av_mallocz(sizeof(*packet));
+ if (!packet) {
+ ret = -1;
+ goto exit;
+ }
+
+ packet->buf = av_malloc(length);
+ if (!packet->buf) {
+ av_free(packet);
+ ret = -1;
+ goto exit;
+ }
+ packet->len = length;
+
+ memcpy(packet->buf, data, length);
+
+ if (dv->queue_first) {
+ dv->queue_last->next = packet;
+ dv->queue_last = packet;
+ } else {
+ dv->queue_first = packet;
+ dv->queue_last = packet;
+ }
+ dv->packets++;
+
+ ret = 0;
+
+exit:
+#if THREADS
+ pthread_cond_broadcast(&dv->cond);
+ pthread_mutex_unlock(&dv->mutex);
+#endif
+ return ret;
+}
+
+static void *iec61883_receive_task(void *opaque)
+{
+ struct iec61883_data *dv = (struct iec61883_data *)opaque;
+ int result;
+
+#if THREADS
+ while (dv->thread_loop)
+#endif
+ {
+ while ((result = poll(&dv->raw1394_poll, 1, 200)) < 0) {
+ if (!(errno == EAGAIN || errno == EINTR)) {
+ av_log(NULL, AV_LOG_ERROR, "Raw1394 poll error occurred.\n");
+ dv->receive_error = AVERROR(EIO);
+ return NULL;
+ }
+ }
+ if (result > 0 && ((dv->raw1394_poll.revents & POLLIN)
+ || (dv->raw1394_poll.revents & POLLPRI))) {
+ dv->receiving = 1;
+ raw1394_loop_iterate(dv->raw1394);
+ } else if (dv->receiving) {
+ av_log(NULL, AV_LOG_ERROR, "No more input data available\n");
+#if THREADS
+ pthread_mutex_lock(&dv->mutex);
+ dv->eof = 1;
+ pthread_cond_broadcast(&dv->cond);
+ pthread_mutex_unlock(&dv->mutex);
+#else
+ dv->eof = 1;
+#endif
+ return NULL;
+ }
+ }
+
+ return NULL;
+}
+
+static int iec61883_parse_queue_dv(struct iec61883_data *dv, AVPacket *pkt)
+{
+ DVPacket *packet;
+ int size;
+
+ size = avpriv_dv_get_packet(dv->dv_demux, pkt);
+ if (size > 0)
+ return size;
+
+ packet = dv->queue_first;
+ if (!packet)
+ return -1;
+
+ size = avpriv_dv_produce_packet(dv->dv_demux, pkt,
+ packet->buf, packet->len, -1);
+ pkt->destruct = av_destruct_packet;
+ dv->queue_first = packet->next;
+ av_free(packet);
+ dv->packets--;
+
+ if (size > 0)
+ return size;
+
+ return -1;
+}
+
+static int iec61883_parse_queue_hdv(struct iec61883_data *dv, AVPacket *pkt)
+{
+ DVPacket *packet;
+ int size;
+
+ while (dv->queue_first) {
+ packet = dv->queue_first;
+ size = ff_mpegts_parse_packet(dv->mpeg_demux, pkt, packet->buf,
+ packet->len);
+ dv->queue_first = packet->next;
+ av_free(packet->buf);
+ av_free(packet);
+ dv->packets--;
+
+ if (size > 0)
+ return size;
+ }
+
+ return -1;
+}
+
+static int iec61883_read_header(AVFormatContext *context)
+{
+ struct iec61883_data *dv = context->priv_data;
+ struct raw1394_portinfo pinf[16];
+ rom1394_directory rom_dir;
+ char *endptr;
+ int inport;
+ int nb_ports;
+ int port = -1;
+ int response;
+ int i, j = 0;
+ uint64_t guid = 0;
+
+ dv->input_port = -1;
+ dv->output_port = -1;
+ dv->channel = -1;
+
+ dv->raw1394 = raw1394_new_handle();
+
+ if (!dv->raw1394) {
+ av_log(context, AV_LOG_ERROR, "Failed to open IEEE1394 interface.\n");
+ return AVERROR(EIO);
+ }
+
+ if ((nb_ports = raw1394_get_port_info(dv->raw1394, pinf, 16)) < 0) {
+ av_log(context, AV_LOG_ERROR, "Failed to get number of IEEE1394 ports.\n");
+ goto fail;
+ }
+
+ inport = strtol(context->filename, &endptr, 10);
+ if (endptr != context->filename && *endptr == '\0') {
+ av_log(context, AV_LOG_INFO, "Selecting IEEE1394 port: %d\n", inport);
+ j = inport;
+ nb_ports = inport + 1;
+ } else if (strcmp(context->filename, "auto")) {
+ av_log(context, AV_LOG_ERROR, "Invalid input \"%s\", you should specify "
+ "\"auto\" for auto-detection, or the port number.\n", context->filename);
+ goto fail;
+ }
+
+ if (dv->device_guid) {
+ if (sscanf(dv->device_guid, "%llx", (long long unsigned int *)&guid) != 1) {
+ av_log(context, AV_LOG_ERROR, "Invalid dvguid parameter: %s\n",
+ dv->device_guid);
+ goto fail;
+ }
+ }
+
+ for (; j < nb_ports && port==-1; ++j) {
+ raw1394_destroy_handle(dv->raw1394);
+
+ if (!(dv->raw1394 = raw1394_new_handle_on_port(j))) {
+ av_log(context, AV_LOG_ERROR, "Failed setting IEEE1394 port.\n");
+ goto fail;
+ }
+
+ for (i=0; i<raw1394_get_nodecount(dv->raw1394); ++i) {
+
+ /* Select device explicitly by GUID */
+
+ if (guid > 1) {
+ if (guid == rom1394_get_guid(dv->raw1394, i)) {
+ dv->node = i;
+ port = j;
+ break;
+ }
+ } else {
+
+ /* Select first AV/C tape recorder player node */
+
+ if (rom1394_get_directory(dv->raw1394, i, &rom_dir) < 0)
+ continue;
+ if (((rom1394_get_node_type(&rom_dir) == ROM1394_NODE_TYPE_AVC) &&
+ avc1394_check_subunit_type(dv->raw1394, i, AVC1394_SUBUNIT_TYPE_VCR)) ||
+ (rom_dir.unit_spec_id == MOTDCT_SPEC_ID)) {
+ rom1394_free_directory(&rom_dir);
+ dv->node = i;
+ port = j;
+ break;
+ }
+ rom1394_free_directory(&rom_dir);
+ }
+ }
+ }
+
+ if (port == -1) {
+ av_log(context, AV_LOG_ERROR, "No AV/C devices found.\n");
+ goto fail;
+ }
+
+ /* Provide bus sanity for multiple connections */
+
+ iec61883_cmp_normalize_output(dv->raw1394, 0xffc0 | dv->node);
+
+ /* Find out if device is DV or HDV */
+
+ if (dv->type == IEC61883_AUTO) {
+ response = avc1394_transaction(dv->raw1394, dv->node,
+ AVC1394_CTYPE_STATUS |
+ AVC1394_SUBUNIT_TYPE_TAPE_RECORDER |
+ AVC1394_SUBUNIT_ID_0 |
+ AVC1394_VCR_COMMAND_OUTPUT_SIGNAL_MODE |
+ 0xFF, 2);
+ response = AVC1394_GET_OPERAND0(response);
+ dv->type = (response == 0x10 || response == 0x90 || response == 0x1A || response == 0x9A) ?
+ IEC61883_HDV : IEC61883_DV;
+ }
+
+ /* Connect to device, and do initialization */
+
+ dv->channel = iec61883_cmp_connect(dv->raw1394, dv->node, &dv->output_port,
+ raw1394_get_local_id(dv->raw1394),
+ &dv->input_port, &dv->bandwidth);
+
+ if (dv->channel < 0)
+ dv->channel = 63;
+
+ if (!dv->max_packets)
+ dv->max_packets = 100;
+
+ if (dv->type == IEC61883_HDV) {
+
+ /* Init HDV receive */
+
+ if (!avformat_new_stream(context, NULL))
+ goto fail;
+
+ dv->mpeg_demux = ff_mpegts_parse_open(context);
+ if (!dv->mpeg_demux)
+ goto fail;
+
+ dv->parse_queue = iec61883_parse_queue_hdv;
+
+ dv->iec61883_mpeg2 = iec61883_mpeg2_recv_init(dv->raw1394,
+ (iec61883_mpeg2_recv_t)iec61883_callback,
+ dv);
+
+ dv->max_packets *= 766;
+ } else {
+
+ /* Init DV receive */
+
+ dv->dv_demux = avpriv_dv_init_demux(context);
+ if (!dv->dv_demux)
+ goto fail;
+
+ dv->parse_queue = iec61883_parse_queue_dv;
+
+ dv->iec61883_dv = iec61883_dv_fb_init(dv->raw1394, iec61883_callback, dv);
+ }
+
+ dv->raw1394_poll.fd = raw1394_get_fd(dv->raw1394);
+ dv->raw1394_poll.events = POLLIN | POLLERR | POLLHUP | POLLPRI;
+
+ /* Actually start receiving */
+
+ if (dv->type == IEC61883_HDV)
+ iec61883_mpeg2_recv_start(dv->iec61883_mpeg2, dv->channel);
+ else
+ iec61883_dv_fb_start(dv->iec61883_dv, dv->channel);
+
+#if THREADS
+ dv->thread_loop = 1;
+ pthread_mutex_init(&dv->mutex, NULL);
+ pthread_cond_init(&dv->cond, NULL);
+ pthread_create(&dv->receive_task_thread, NULL, iec61883_receive_task, dv);
+#endif
+
+ return 0;
+
+fail:
+ raw1394_destroy_handle(dv->raw1394);
+ return AVERROR(EIO);
+}
+
+static int iec61883_read_packet(AVFormatContext *context, AVPacket *pkt)
+{
+ struct iec61883_data *dv = context->priv_data;
+ int size;
+
+ /**
+ * Try to parse frames from queue
+ */
+
+#if THREADS
+ pthread_mutex_lock(&dv->mutex);
+ while ((size = dv->parse_queue(dv, pkt)) == -1)
+ if (!dv->eof)
+ pthread_cond_wait(&dv->cond, &dv->mutex);
+ else
+ break;
+ pthread_mutex_unlock(&dv->mutex);
+#else
+ while ((size = dv->parse_queue(dv, pkt)) == -1) {
+ iec61883_receive_task((void *)dv);
+ if (dv->receive_error)
+ return dv->receive_error;
+ }
+#endif
+
+ return size;
+}
+
+static int iec61883_close(AVFormatContext *context)
+{
+ struct iec61883_data *dv = context->priv_data;
+
+#if THREADS
+ dv->thread_loop = 0;
+ pthread_join(dv->receive_task_thread, NULL);
+ pthread_cond_destroy(&dv->cond);
+ pthread_mutex_destroy(&dv->mutex);
+#endif
+
+ if (dv->type == IEC61883_HDV) {
+ iec61883_mpeg2_recv_stop(dv->iec61883_mpeg2);
+ iec61883_mpeg2_close(dv->iec61883_mpeg2);
+ ff_mpegts_parse_close(dv->mpeg_demux);
+ } else {
+ iec61883_dv_fb_stop(dv->iec61883_dv);
+ iec61883_dv_fb_close(dv->iec61883_dv);
+ }
+ while (dv->queue_first) {
+ DVPacket *packet = dv->queue_first;
+ dv->queue_first = packet->next;
+ av_free(packet->buf);
+ av_free(packet);
+ }
+
+ iec61883_cmp_disconnect(dv->raw1394, dv->node, dv->output_port,
+ raw1394_get_local_id(dv->raw1394),
+ dv->input_port, dv->channel, dv->bandwidth);
+
+ raw1394_destroy_handle(dv->raw1394);
+
+ return 0;
+}
+
+static const AVOption options[] = {
+ { "dvtype", "override autodetection of DV/HDV", offsetof(struct iec61883_data, type), AV_OPT_TYPE_INT, {.i64 = IEC61883_AUTO}, IEC61883_AUTO, IEC61883_HDV, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
+ { "auto", "auto detect DV/HDV", 0, AV_OPT_TYPE_CONST, {.i64 = IEC61883_AUTO}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
+ { "dv", "force device being treated as DV device", 0, AV_OPT_TYPE_CONST, {.i64 = IEC61883_DV}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
+ { "hdv" , "force device being treated as HDV device", 0, AV_OPT_TYPE_CONST, {.i64 = IEC61883_HDV}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
+ { "dvbuffer", "set queue buffer size (in packets)", offsetof(struct iec61883_data, max_packets), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
+ { "dvguid", "select one of multiple DV devices by its GUID", offsetof(struct iec61883_data, device_guid), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM },
+ { NULL },
+};
+
+static const AVClass iec61883_class = {
+ .class_name = "iec61883 indev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVInputFormat ff_iec61883_demuxer = {
+ .name = "iec61883",
+ .long_name = NULL_IF_CONFIG_SMALL("libiec61883 (new DV1394) A/V input device"),
+ .priv_data_size = sizeof(struct iec61883_data),
+ .read_header = iec61883_read_header,
+ .read_packet = iec61883_read_packet,
+ .read_close = iec61883_close,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &iec61883_class,
+};
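
The new iec61883 input device is built around a producer/consumer queue: iec61883_callback() appends buffers from the raw1394 thread, read_packet() drains them, and a mutex/condition pair signals new data or EOF when pthreads are available. A reduced standalone sketch of that pattern (node_t, q_push and q_pop are illustrative names, not part of the patch):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct node { void *buf; int len; struct node *next; } node_t;

    typedef struct {
        node_t *first, *last;
        int count, max, eof;
        pthread_mutex_t lock;
        pthread_cond_t  cond;
    } queue_t;

    /* Producer side, e.g. called from a capture callback. */
    static int q_push(queue_t *q, const void *data, int len)
    {
        int ret = -1;
        pthread_mutex_lock(&q->lock);
        if (q->count < q->max) {
            node_t *n = malloc(sizeof(*n));
            if (n && (n->buf = malloc(len))) {
                memcpy(n->buf, data, len);
                n->len  = len;
                n->next = NULL;
                if (q->last) q->last->next = n; else q->first = n;
                q->last = n;
                q->count++;
                ret = 0;
            } else {
                free(n); /* allocation failed, drop the buffer */
            }
        }            /* queue full: drop as well */
        pthread_cond_broadcast(&q->cond); /* wake the reader in any case */
        pthread_mutex_unlock(&q->lock);
        return ret;
    }

    /* Consumer side: blocks until data or EOF; returns NULL on EOF. */
    static node_t *q_pop(queue_t *q)
    {
        node_t *n;
        pthread_mutex_lock(&q->lock);
        while (!q->first && !q->eof)
            pthread_cond_wait(&q->cond, &q->lock);
        n = q->first;
        if (n) {
            q->first = n->next;
            if (!q->first) q->last = NULL;
            q->count--;
        }
        pthread_mutex_unlock(&q->lock);
        return n;
    }
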
diff --git a/libavdevice/jack_audio.c b/libavdevice/jack_audio.c
index 280f24dc48..bd6a770dd0 100644
--- a/libavdevice/jack_audio.c
+++ b/libavdevice/jack_audio.c
@@ -3,20 +3,20 @@
* Copyright (c) 2009 Samalyse
* Author: Olivier Guilyardi <olivier samalyse com>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -32,6 +32,7 @@
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "timefilter.h"
+#include "avdevice.h"
/**
* Size of the internal FIFO buffers as a number of audio packets
@@ -151,7 +152,6 @@ static int start_jack(AVFormatContext *context)
JackData *self = context->priv_data;
jack_status_t status;
int i, test;
- double o, period;
/* Register as a JACK client, using the context filename as client name. */
self->client = jack_client_open(context->filename, JackNullOption, &status);
@@ -187,9 +187,7 @@ static int start_jack(AVFormatContext *context)
jack_set_xrun_callback(self->client, xrun_callback, self);
/* Create time filter */
- period = (double) self->buffer_size / self->sample_rate;
- o = 2 * M_PI * 1.5 * period; /// bandwidth: 1.5Hz
- self->timefilter = ff_timefilter_new (1.0 / self->sample_rate, sqrt(2 * o), o * o);
+ self->timefilter = ff_timefilter_new (1.0 / self->sample_rate, self->buffer_size, 1.5);
/* Create FIFO buffers */
self->filled_pkts = av_fifo_alloc(FIFO_PACKETS_NUM * sizeof(AVPacket));
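
The JACK change above stops precomputing the loop-filter gains by hand; the reworked ff_timefilter_new() now takes the sample duration, the update period and a loop bandwidth (1.5 Hz here) and derives the feedback factors itself (see the timefilter.c hunks further down). A rough usage sketch, based only on the signatures visible in this patch:

    #include "libavutil/time.h" /* av_gettime() */
    #include "timefilter.h"

    /* Sketch only: signatures as introduced by this patch. */
    static TimeFilter *make_filter(int sample_rate, int buffer_size)
    {
        return ff_timefilter_new(1.0 / sample_rate, /* time_base: duration of one sample */
                                 buffer_size,       /* period: samples per update */
                                 1.5);              /* loop bandwidth in Hz */
    }

    /* Feed the arrival time (in seconds) of each period of buffer_size samples;
     * the filter returns a jitter-smoothed timestamp for it. */
    static double smoothed_buffer_time(TimeFilter *tf, int buffer_size)
    {
        return ff_timefilter_update(tf, av_gettime() * 1e-6, buffer_size);
    }

    /* Interpolate a timestamp for a sample that lies 'delta' samples
     * after the last update. */
    static double sample_time(TimeFilter *tf, double delta)
    {
        return ff_timefilter_eval(tf, delta);
    }
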
diff --git a/libavdevice/lavfi.c b/libavdevice/lavfi.c
new file mode 100644
index 0000000000..559f721c8f
--- /dev/null
+++ b/libavdevice/lavfi.c
@@ -0,0 +1,435 @@
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * libavfilter virtual input device
+ */
+
+/* #define DEBUG */
+
+#include <float.h> /* DBL_MIN, DBL_MAX */
+
+#include "libavutil/bprint.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/file.h"
+#include "libavutil/log.h"
+#include "libavutil/mem.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavfilter/avfilter.h"
+#include "libavfilter/avfiltergraph.h"
+#include "libavfilter/buffersink.h"
+#include "libavformat/internal.h"
+#include "avdevice.h"
+
+typedef struct {
+ AVClass *class; ///< class for private options
+ char *graph_str;
+ char *graph_filename;
+ char *dump_graph;
+ AVFilterGraph *graph;
+ AVFilterContext **sinks;
+ int *sink_stream_map;
+ int *sink_eof;
+ int *stream_sink_map;
+ AVFrame *decoded_frame;
+} LavfiContext;
+
+static int *create_all_formats(int n)
+{
+ int i, j, *fmts, count = 0;
+
+ for (i = 0; i < n; i++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
+ count++;
+ }
+
+ if (!(fmts = av_malloc((count+1) * sizeof(int))))
+ return NULL;
+ for (j = 0, i = 0; i < n; i++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
+ fmts[j++] = i;
+ }
+ fmts[j] = -1;
+ return fmts;
+}
+
+av_cold static int lavfi_read_close(AVFormatContext *avctx)
+{
+ LavfiContext *lavfi = avctx->priv_data;
+
+ av_freep(&lavfi->sink_stream_map);
+ av_freep(&lavfi->sink_eof);
+ av_freep(&lavfi->stream_sink_map);
+ av_freep(&lavfi->sinks);
+ avfilter_graph_free(&lavfi->graph);
+ av_frame_free(&lavfi->decoded_frame);
+
+ return 0;
+}
+
+av_cold static int lavfi_read_header(AVFormatContext *avctx)
+{
+ LavfiContext *lavfi = avctx->priv_data;
+ AVFilterInOut *input_links = NULL, *output_links = NULL, *inout;
+ AVFilter *buffersink, *abuffersink;
+ int *pix_fmts = create_all_formats(AV_PIX_FMT_NB);
+ enum AVMediaType type;
+ int ret = 0, i, n;
+
+#define FAIL(ERR) { ret = ERR; goto end; }
+
+ if (!pix_fmts)
+ FAIL(AVERROR(ENOMEM));
+
+ avfilter_register_all();
+
+ buffersink = avfilter_get_by_name("buffersink");
+ abuffersink = avfilter_get_by_name("abuffersink");
+
+ if (lavfi->graph_filename && lavfi->graph_str) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Only one of the graph or graph_file options must be specified\n");
+ FAIL(AVERROR(EINVAL));
+ }
+
+ if (lavfi->graph_filename) {
+ uint8_t *file_buf, *graph_buf;
+ size_t file_bufsize;
+ ret = av_file_map(lavfi->graph_filename,
+ &file_buf, &file_bufsize, 0, avctx);
+ if (ret < 0)
+ goto end;
+
+ /* create a 0-terminated string based on the read file */
+ graph_buf = av_malloc(file_bufsize + 1);
+ if (!graph_buf) {
+ av_file_unmap(file_buf, file_bufsize);
+ FAIL(AVERROR(ENOMEM));
+ }
+ memcpy(graph_buf, file_buf, file_bufsize);
+ graph_buf[file_bufsize] = 0;
+ av_file_unmap(file_buf, file_bufsize);
+ lavfi->graph_str = graph_buf;
+ }
+
+ if (!lavfi->graph_str)
+ lavfi->graph_str = av_strdup(avctx->filename);
+
+ /* parse the graph, create a stream for each open output */
+ if (!(lavfi->graph = avfilter_graph_alloc()))
+ FAIL(AVERROR(ENOMEM));
+
+ if ((ret = avfilter_graph_parse_ptr(lavfi->graph, lavfi->graph_str,
+ &input_links, &output_links, avctx)) < 0)
+ FAIL(ret);
+
+ if (input_links) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Open inputs in the filtergraph are not acceptable\n");
+ FAIL(AVERROR(EINVAL));
+ }
+
+ /* count the outputs */
+ for (n = 0, inout = output_links; inout; n++, inout = inout->next);
+
+ if (!(lavfi->sink_stream_map = av_malloc(sizeof(int) * n)))
+ FAIL(AVERROR(ENOMEM));
+ if (!(lavfi->sink_eof = av_mallocz(sizeof(int) * n)))
+ FAIL(AVERROR(ENOMEM));
+ if (!(lavfi->stream_sink_map = av_malloc(sizeof(int) * n)))
+ FAIL(AVERROR(ENOMEM));
+
+ for (i = 0; i < n; i++)
+ lavfi->stream_sink_map[i] = -1;
+
+ /* parse the output link names - they need to be of the form out0, out1, ...
+ * create a mapping between them and the streams */
+ for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
+ int stream_idx;
+ if (!strcmp(inout->name, "out"))
+ stream_idx = 0;
+ else if (sscanf(inout->name, "out%d\n", &stream_idx) != 1) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid outpad name '%s'\n", inout->name);
+ FAIL(AVERROR(EINVAL));
+ }
+
+ if ((unsigned)stream_idx >= n) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid index was specified in output '%s', "
+ "must be a non-negative value < %d\n",
+ inout->name, n);
+ FAIL(AVERROR(EINVAL));
+ }
+
+ /* is an audio or video output? */
+ type = inout->filter_ctx->output_pads[inout->pad_idx].type;
+ if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Output '%s' is not a video or audio output, not yet supported\n", inout->name);
+ FAIL(AVERROR(EINVAL));
+ }
+
+ if (lavfi->stream_sink_map[stream_idx] != -1) {
+ av_log(avctx, AV_LOG_ERROR,
+ "An output with stream index %d was already specified\n",
+ stream_idx);
+ FAIL(AVERROR(EINVAL));
+ }
+ lavfi->sink_stream_map[i] = stream_idx;
+ lavfi->stream_sink_map[stream_idx] = i;
+ }
+
+ /* for each open output create a corresponding stream */
+ for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
+ AVStream *st;
+ if (!(st = avformat_new_stream(avctx, NULL)))
+ FAIL(AVERROR(ENOMEM));
+ st->id = i;
+ }
+
+ /* create a sink for each output and connect them to the graph */
+ lavfi->sinks = av_malloc(sizeof(AVFilterContext *) * avctx->nb_streams);
+ if (!lavfi->sinks)
+ FAIL(AVERROR(ENOMEM));
+
+ for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
+ AVFilterContext *sink;
+
+ type = inout->filter_ctx->output_pads[inout->pad_idx].type;
+
+ if ((type == AVMEDIA_TYPE_VIDEO && !buffersink) ||
+ (type == AVMEDIA_TYPE_AUDIO && !abuffersink)) {
+ av_log(avctx, AV_LOG_ERROR, "Missing required buffersink filter, aborting.\n");
+ FAIL(AVERROR_FILTER_NOT_FOUND);
+ }
+
+ if (type == AVMEDIA_TYPE_VIDEO) {
+ ret = avfilter_graph_create_filter(&sink, buffersink,
+ inout->name, NULL,
+ NULL, lavfi->graph);
+ if (ret >= 0)
+ ret = av_opt_set_int_list(sink, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0)
+ goto end;
+ } else if (type == AVMEDIA_TYPE_AUDIO) {
+ enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_U8,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_S32,
+ AV_SAMPLE_FMT_FLT,
+ AV_SAMPLE_FMT_DBL, -1 };
+
+ ret = avfilter_graph_create_filter(&sink, abuffersink,
+ inout->name, NULL,
+ NULL, lavfi->graph);
+ if (ret >= 0)
+ ret = av_opt_set_int_list(sink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0)
+ goto end;
+ }
+
+ lavfi->sinks[i] = sink;
+ if ((ret = avfilter_link(inout->filter_ctx, inout->pad_idx, sink, 0)) < 0)
+ FAIL(ret);
+ }
+
+ /* configure the graph */
+ if ((ret = avfilter_graph_config(lavfi->graph, avctx)) < 0)
+ FAIL(ret);
+
+ if (lavfi->dump_graph) {
+ char *dump = avfilter_graph_dump(lavfi->graph, lavfi->dump_graph);
+ fputs(dump, stderr);
+ fflush(stderr);
+ av_free(dump);
+ }
+
+ /* fill each stream with the information in the corresponding sink */
+ for (i = 0; i < avctx->nb_streams; i++) {
+ AVFilterLink *link = lavfi->sinks[lavfi->stream_sink_map[i]]->inputs[0];
+ AVStream *st = avctx->streams[i];
+ st->codec->codec_type = link->type;
+ avpriv_set_pts_info(st, 64, link->time_base.num, link->time_base.den);
+ if (link->type == AVMEDIA_TYPE_VIDEO) {
+ st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
+ st->codec->pix_fmt = link->format;
+ st->codec->time_base = link->time_base;
+ st->codec->width = link->w;
+ st->codec->height = link->h;
+ st ->sample_aspect_ratio =
+ st->codec->sample_aspect_ratio = link->sample_aspect_ratio;
+ avctx->probesize = FFMAX(avctx->probesize,
+ link->w * link->h *
+ av_get_padded_bits_per_pixel(av_pix_fmt_desc_get(link->format)) *
+ 30);
+ } else if (link->type == AVMEDIA_TYPE_AUDIO) {
+ st->codec->codec_id = av_get_pcm_codec(link->format, -1);
+ st->codec->channels = avfilter_link_get_channels(link);
+ st->codec->sample_fmt = link->format;
+ st->codec->sample_rate = link->sample_rate;
+ st->codec->time_base = link->time_base;
+ st->codec->channel_layout = link->channel_layout;
+ if (st->codec->codec_id == AV_CODEC_ID_NONE)
+ av_log(avctx, AV_LOG_ERROR,
+ "Could not find PCM codec for sample format %s.\n",
+ av_get_sample_fmt_name(link->format));
+ }
+ }
+
+ if (!(lavfi->decoded_frame = av_frame_alloc()))
+ FAIL(AVERROR(ENOMEM));
+
+end:
+ av_free(pix_fmts);
+ avfilter_inout_free(&input_links);
+ avfilter_inout_free(&output_links);
+ if (ret < 0)
+ lavfi_read_close(avctx);
+ return ret;
+}
+
+static int lavfi_read_packet(AVFormatContext *avctx, AVPacket *pkt)
+{
+ LavfiContext *lavfi = avctx->priv_data;
+ double min_pts = DBL_MAX;
+ int stream_idx, min_pts_sink_idx = 0;
+ AVFrame *frame = lavfi->decoded_frame;
+ AVPicture pict;
+ AVDictionary *frame_metadata;
+ int ret, i;
+ int size = 0;
+
+ /* iterate through all the graph sinks. Select the sink with the
+ * minimum PTS */
+ for (i = 0; i < avctx->nb_streams; i++) {
+ AVRational tb = lavfi->sinks[i]->inputs[0]->time_base;
+ double d;
+ int ret;
+
+ if (lavfi->sink_eof[i])
+ continue;
+
+ ret = av_buffersink_get_frame_flags(lavfi->sinks[i], frame,
+ AV_BUFFERSINK_FLAG_PEEK);
+ if (ret == AVERROR_EOF) {
+ av_dlog(avctx, "EOF sink_idx:%d\n", i);
+ lavfi->sink_eof[i] = 1;
+ continue;
+ } else if (ret < 0)
+ return ret;
+ d = av_rescale_q(frame->pts, tb, AV_TIME_BASE_Q);
+ av_dlog(avctx, "sink_idx:%d time:%f\n", i, d);
+ av_frame_unref(frame);
+
+ if (d < min_pts) {
+ min_pts = d;
+ min_pts_sink_idx = i;
+ }
+ }
+ if (min_pts == DBL_MAX)
+ return AVERROR_EOF;
+
+ av_dlog(avctx, "min_pts_sink_idx:%i\n", min_pts_sink_idx);
+
+ av_buffersink_get_frame_flags(lavfi->sinks[min_pts_sink_idx], frame, 0);
+ stream_idx = lavfi->sink_stream_map[min_pts_sink_idx];
+
+ if (frame->width /* FIXME best way of testing a video */) {
+ size = avpicture_get_size(frame->format, frame->width, frame->height);
+ if ((ret = av_new_packet(pkt, size)) < 0)
+ return ret;
+
+ memcpy(pict.data, frame->data, 4*sizeof(frame->data[0]));
+ memcpy(pict.linesize, frame->linesize, 4*sizeof(frame->linesize[0]));
+
+ avpicture_layout(&pict, frame->format, frame->width, frame->height,
+ pkt->data, size);
+ } else if (av_frame_get_channels(frame) /* FIXME test audio */) {
+ size = frame->nb_samples * av_get_bytes_per_sample(frame->format) *
+ av_frame_get_channels(frame);
+ if ((ret = av_new_packet(pkt, size)) < 0)
+ return ret;
+ memcpy(pkt->data, frame->data[0], size);
+ }
+
+ frame_metadata = av_frame_get_metadata(frame);
+ if (frame_metadata) {
+ uint8_t *metadata;
+ AVDictionaryEntry *e = NULL;
+ AVBPrint meta_buf;
+
+ av_bprint_init(&meta_buf, 0, AV_BPRINT_SIZE_UNLIMITED);
+ while ((e = av_dict_get(frame_metadata, "", e, AV_DICT_IGNORE_SUFFIX))) {
+ av_bprintf(&meta_buf, "%s", e->key);
+ av_bprint_chars(&meta_buf, '\0', 1);
+ av_bprintf(&meta_buf, "%s", e->value);
+ av_bprint_chars(&meta_buf, '\0', 1);
+ }
+ if (!av_bprint_is_complete(&meta_buf) ||
+ !(metadata = av_packet_new_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA,
+ meta_buf.len))) {
+ av_bprint_finalize(&meta_buf, NULL);
+ return AVERROR(ENOMEM);
+ }
+ memcpy(metadata, meta_buf.str, meta_buf.len);
+ av_bprint_finalize(&meta_buf, NULL);
+ }
+
+ pkt->stream_index = stream_idx;
+ pkt->pts = frame->pts;
+ pkt->pos = av_frame_get_pkt_pos(frame);
+ pkt->size = size;
+ av_frame_unref(frame);
+ return size;
+}
+
+#define OFFSET(x) offsetof(LavfiContext, x)
+
+#define DEC AV_OPT_FLAG_DECODING_PARAM
+
+static const AVOption options[] = {
+ { "graph", "set libavfilter graph", OFFSET(graph_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+ { "graph_file","set libavfilter graph filename", OFFSET(graph_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC},
+ { "dumpgraph", "dump graph to stderr", OFFSET(dump_graph), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+ { NULL },
+};
+
+static const AVClass lavfi_class = {
+ .class_name = "lavfi indev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVInputFormat ff_lavfi_demuxer = {
+ .name = "lavfi",
+ .long_name = NULL_IF_CONFIG_SMALL("Libavfilter virtual input device"),
+ .priv_data_size = sizeof(LavfiContext),
+ .read_header = lavfi_read_header,
+ .read_packet = lavfi_read_packet,
+ .read_close = lavfi_read_close,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &lavfi_class,
+};
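
lavfi_read_packet() exports per-frame metadata as AV_PKT_DATA_STRINGS_METADATA side data, which is just a flat sequence of NUL-terminated key/value strings built with an AVBPrint. A standalone sketch of that layout without the FFmpeg helpers (pack_kv() is an illustrative helper, not part of the patch):

    #include <stdlib.h>
    #include <string.h>

    /* Pack n key/value pairs as "key\0value\0key\0value\0...".  Returns a
     * malloc()ed buffer and stores its length in *out_len, or NULL on error. */
    static char *pack_kv(const char *const *keys, const char *const *vals,
                         int n, size_t *out_len)
    {
        size_t len = 0, pos = 0;
        char *buf;
        int i;

        for (i = 0; i < n; i++)
            len += strlen(keys[i]) + 1 + strlen(vals[i]) + 1;

        if (!(buf = malloc(len)))
            return NULL;
        for (i = 0; i < n; i++) {
            size_t kl = strlen(keys[i]) + 1, vl = strlen(vals[i]) + 1;
            memcpy(buf + pos, keys[i], kl); pos += kl;
            memcpy(buf + pos, vals[i], vl); pos += vl;
        }
        *out_len = len;
        return buf;
    }
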
diff --git a/libavdevice/libcdio.c b/libavdevice/libcdio.c
index 06ddb4a784..a824bc3882 100644
--- a/libavdevice/libcdio.c
+++ b/libavdevice/libcdio.c
@@ -41,7 +41,7 @@
#include "libavformat/internal.h"
typedef struct CDIOContext {
- AVClass *class;
+ const AVClass *class;
cdrom_drive_t *drive;
cdrom_paranoia_t *paranoia;
int32_t last_sector;
diff --git a/libavdevice/libdc1394.c b/libavdevice/libdc1394.c
index f030e3e31d..80cb1bee11 100644
--- a/libavdevice/libdc1394.c
+++ b/libavdevice/libdc1394.c
@@ -3,20 +3,20 @@
* Copyright (c) 2004 Roman Shaposhnik
* Copyright (c) 2008 Alessandro Sappia
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavdevice/openal-dec.c b/libavdevice/openal-dec.c
new file mode 100644
index 0000000000..93633ff7d2
--- /dev/null
+++ b/libavdevice/openal-dec.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2011 Jonathan Baldwin
+ *
+ * This file is part of FFmpeg.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * @file
+ * OpenAL 1.1 capture device for libavdevice
+ **/
+
+#include <AL/al.h>
+#include <AL/alc.h>
+
+#include "libavutil/opt.h"
+#include "libavutil/time.h"
+#include "libavformat/internal.h"
+#include "avdevice.h"
+
+typedef struct {
+ AVClass *class;
+ /** OpenAL capture device context. **/
+ ALCdevice *device;
+ /** The number of channels in the captured audio. **/
+ int channels;
+ /** The sample rate (in Hz) of the captured audio. **/
+ int sample_rate;
+ /** The sample size (in bits) of the captured audio. **/
+ int sample_size;
+ /** The OpenAL sample format of the captured audio. **/
+ ALCenum sample_format;
+ /** The number of bytes between two consecutive samples of the same channel/component. **/
+ ALCint sample_step;
+ /** If true, print a list of capture devices on this system and exit. **/
+ int list_devices;
+} al_data;
+
+typedef struct {
+ ALCenum al_fmt;
+ enum AVCodecID codec_id;
+ int channels;
+} al_format_info;
+
+#define LOWEST_AL_FORMAT FFMIN(FFMIN(AL_FORMAT_MONO8,AL_FORMAT_MONO16),FFMIN(AL_FORMAT_STEREO8,AL_FORMAT_STEREO16))
+
+/**
+ * Get information about an AL_FORMAT value.
+ * @param al_fmt the AL_FORMAT value to find information about.
+ * @return A pointer to a structure containing information about the AL_FORMAT value.
+ */
+static inline al_format_info* get_al_format_info(ALCenum al_fmt)
+{
+ static al_format_info info_table[] = {
+ [AL_FORMAT_MONO8-LOWEST_AL_FORMAT] = {AL_FORMAT_MONO8, AV_CODEC_ID_PCM_U8, 1},
+ [AL_FORMAT_MONO16-LOWEST_AL_FORMAT] = {AL_FORMAT_MONO16, AV_NE (AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE), 1},
+ [AL_FORMAT_STEREO8-LOWEST_AL_FORMAT] = {AL_FORMAT_STEREO8, AV_CODEC_ID_PCM_U8, 2},
+ [AL_FORMAT_STEREO16-LOWEST_AL_FORMAT] = {AL_FORMAT_STEREO16, AV_NE (AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE), 2},
+ };
+
+ return &info_table[al_fmt-LOWEST_AL_FORMAT];
+}
+
+/**
+ * Get the OpenAL error code, translated into an av/errno error code.
+ * @param device The ALC device to check for errors.
+ * @param error_msg_ret A pointer to a char* in which to return the error message, or NULL if desired.
+ * @return The error code, or 0 if there is no error.
+ */
+static inline int al_get_error(ALCdevice *device, const char** error_msg_ret)
+{
+ ALCenum error = alcGetError(device);
+ if (error_msg_ret)
+ *error_msg_ret = (const char*) alcGetString(device, error);
+ switch (error) {
+ case ALC_NO_ERROR:
+ return 0;
+ case ALC_INVALID_DEVICE:
+ return AVERROR(ENODEV);
+ case ALC_INVALID_CONTEXT:
+ case ALC_INVALID_ENUM:
+ case ALC_INVALID_VALUE:
+ return AVERROR(EINVAL);
+ case ALC_OUT_OF_MEMORY:
+ return AVERROR(ENOMEM);
+ default:
+ return AVERROR(EIO);
+ }
+}
+
+/**
+ * Print out a list of OpenAL capture devices on this system.
+ */
+static inline void print_al_capture_devices(void *log_ctx)
+{
+ const char *devices;
+
+ if (!(devices = alcGetString(NULL, ALC_CAPTURE_DEVICE_SPECIFIER)))
+ return;
+
+ av_log(log_ctx, AV_LOG_INFO, "List of OpenAL capture devices on this system:\n");
+
+ for (; *devices != '\0'; devices += strlen(devices) + 1)
+ av_log(log_ctx, AV_LOG_INFO, " %s\n", devices);
+}
+
+static int read_header(AVFormatContext *ctx)
+{
+ al_data *ad = ctx->priv_data;
+ static const ALCenum sample_formats[2][2] = {
+ { AL_FORMAT_MONO8, AL_FORMAT_STEREO8 },
+ { AL_FORMAT_MONO16, AL_FORMAT_STEREO16 }
+ };
+ int error = 0;
+ const char *error_msg = NULL;
+ AVStream *st = NULL;
+ AVCodecContext *codec = NULL;
+
+ if (ad->list_devices) {
+ print_al_capture_devices(ctx);
+ return AVERROR_EXIT;
+ }
+
+ ad->sample_format = sample_formats[ad->sample_size/8-1][ad->channels-1];
+
+ /* Open device for capture */
+ ad->device =
+ alcCaptureOpenDevice(ctx->filename[0] ? ctx->filename : NULL,
+ ad->sample_rate,
+ ad->sample_format,
+ ad->sample_rate); /* Maximum 1 second of sample data to be read at once */
+
+ if (error = al_get_error(ad->device, &error_msg)) goto fail;
+
+ /* Create stream */
+ if (!(st = avformat_new_stream(ctx, NULL))) {
+ error = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ /* We work in microseconds */
+ avpriv_set_pts_info(st, 64, 1, 1000000);
+
+ /* Set codec parameters */
+ codec = st->codec;
+ codec->codec_type = AVMEDIA_TYPE_AUDIO;
+ codec->sample_rate = ad->sample_rate;
+ codec->channels = get_al_format_info(ad->sample_format)->channels;
+ codec->codec_id = get_al_format_info(ad->sample_format)->codec_id;
+
+ /* This is needed to read the audio data */
+ ad->sample_step = (av_get_bits_per_sample(get_al_format_info(ad->sample_format)->codec_id) *
+ get_al_format_info(ad->sample_format)->channels) / 8;
+
+ /* Finally, start the capture process */
+ alcCaptureStart(ad->device);
+
+ return 0;
+
+fail:
+ /* Handle failure */
+ if (ad->device)
+ alcCaptureCloseDevice(ad->device);
+ if (error_msg)
+ av_log(ctx, AV_LOG_ERROR, "Cannot open device: %s\n", error_msg);
+ return error;
+}
+
+static int read_packet(AVFormatContext* ctx, AVPacket *pkt)
+{
+ al_data *ad = ctx->priv_data;
+ int error=0;
+ const char *error_msg;
+ ALCint nb_samples;
+
+ /* Get number of samples available */
+ alcGetIntegerv(ad->device, ALC_CAPTURE_SAMPLES, (ALCsizei) sizeof(ALCint), &nb_samples);
+ if (error = al_get_error(ad->device, &error_msg)) goto fail;
+
+ /* Create a packet of appropriate size */
+ av_new_packet(pkt, nb_samples*ad->sample_step);
+ pkt->pts = av_gettime();
+
+ /* Fill the packet with the available samples */
+ alcCaptureSamples(ad->device, pkt->data, nb_samples);
+ if (error = al_get_error(ad->device, &error_msg)) goto fail;
+
+ return pkt->size;
+fail:
+ /* Handle failure */
+ if (pkt->data)
+ av_destruct_packet(pkt);
+ if (error_msg)
+ av_log(ctx, AV_LOG_ERROR, "Error: %s\n", error_msg);
+ return error;
+}
+
+static int read_close(AVFormatContext* ctx)
+{
+ al_data *ad = ctx->priv_data;
+
+ if (ad->device) {
+ alcCaptureStop(ad->device);
+ alcCaptureCloseDevice(ad->device);
+ }
+ return 0;
+}
+
+#define OFFSET(x) offsetof(al_data, x)
+
+static const AVOption options[] = {
+ {"channels", "set number of channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, AV_OPT_FLAG_DECODING_PARAM },
+ {"sample_rate", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, 192000, AV_OPT_FLAG_DECODING_PARAM },
+ {"sample_size", "set sample size", OFFSET(sample_size), AV_OPT_TYPE_INT, {.i64=16}, 8, 16, AV_OPT_FLAG_DECODING_PARAM },
+ {"list_devices", "list available devices", OFFSET(list_devices), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
+ {"true", "", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
+ {"false", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
+ {NULL},
+};
+
+static const AVClass class = {
+ .class_name = "openal",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT
+};
+
+AVInputFormat ff_openal_demuxer = {
+ .name = "openal",
+ .long_name = NULL_IF_CONFIG_SMALL("OpenAL audio capture device"),
+ .priv_data_size = sizeof(al_data),
+ .read_probe = NULL,
+ .read_header = read_header,
+ .read_packet = read_packet,
+ .read_close = read_close,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &class
+};
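
The OpenAL input device wraps the standard ALC capture calls: open a capture device with a format and internal buffer size, start it, poll ALC_CAPTURE_SAMPLES for available frames, then read them with alcCaptureSamples(). A minimal standalone capture loop using only those calls (device name, chunk sizes and the loop count are arbitrary choices for illustration):

    #include <AL/al.h>
    #include <AL/alc.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        const ALCuint rate = 44100;
        ALCdevice *dev = alcCaptureOpenDevice(NULL, rate, AL_FORMAT_STEREO16, rate);
        int i;

        if (!dev)
            return 1;

        alcCaptureStart(dev);
        for (i = 0; i < 100; i++) { /* grab roughly one second, then quit */
            ALCint avail = 0;
            short buf[4096 * 2];    /* up to 4096 stereo 16-bit frames */

            alcGetIntegerv(dev, ALC_CAPTURE_SAMPLES, 1, &avail);
            if (avail > 4096)
                avail = 4096;
            if (avail > 0) {
                alcCaptureSamples(dev, buf, avail);
                printf("read %d frames\n", (int)avail);
            }
            usleep(10000);
        }
        alcCaptureStop(dev);
        alcCaptureCloseDevice(dev);
        return 0;
    }
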
diff --git a/libavdevice/oss_audio.c b/libavdevice/oss_audio.c
index f1cc91f725..916908c425 100644
--- a/libavdevice/oss_audio.c
+++ b/libavdevice/oss_audio.c
@@ -2,20 +2,20 @@
* Linux audio play and grab interface
* Copyright (c) 2000, 2001 Fabrice Bellard
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -39,7 +39,7 @@
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavcodec/avcodec.h"
-#include "libavformat/avformat.h"
+#include "avdevice.h"
#include "libavformat/internal.h"
#define AUDIO_BLOCK_SIZE 4096
@@ -77,8 +77,11 @@ static int audio_open(AVFormatContext *s1, int is_output, const char *audio_devi
}
/* non blocking mode */
- if (!is_output)
- fcntl(audio_fd, F_SETFL, O_NONBLOCK);
+ if (!is_output) {
+ if (fcntl(audio_fd, F_SETFL, O_NONBLOCK) < 0) {
+ av_log(s1, AV_LOG_WARNING, "%s: Could not enable non block mode (%s)\n", audio_device, strerror(errno));
+ }
+ }
s->frame_size = AUDIO_BLOCK_SIZE;
diff --git a/libavdevice/pulse.c b/libavdevice/pulse.c
index a8e710d279..86fdc22837 100644
--- a/libavdevice/pulse.c
+++ b/libavdevice/pulse.c
@@ -162,7 +162,7 @@ static av_cold int pulse_close(AVFormatContext *s)
static const AVOption options[] = {
{ "server", "pulse server name", OFFSET(server), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, D },
- { "name", "application name", OFFSET(name), AV_OPT_TYPE_STRING, {.str = "libav"}, 0, 0, D },
+ { "name", "application name", OFFSET(name), AV_OPT_TYPE_STRING, {.str = LIBAVFORMAT_IDENT}, 0, 0, D },
{ "stream_name", "stream description", OFFSET(stream_name), AV_OPT_TYPE_STRING, {.str = "record"}, 0, 0, D },
{ "sample_rate", "sample rate in Hz", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 1, INT_MAX, D },
{ "channels", "number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, D },
diff --git a/libavdevice/sdl.c b/libavdevice/sdl.c
new file mode 100644
index 0000000000..e708dfdf20
--- /dev/null
+++ b/libavdevice/sdl.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * libSDL output device
+ */
+
+#include <SDL.h>
+#include "libavutil/avstring.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "avdevice.h"
+
+typedef struct {
+ AVClass *class;
+ SDL_Surface *surface;
+ SDL_Overlay *overlay;
+ char *window_title;
+ char *icon_title;
+ int window_width, window_height; /**< size of the window */
+ int window_fullscreen;
+ int overlay_width, overlay_height; /**< size of the video in the window */
+ int overlay_x, overlay_y;
+ int overlay_fmt;
+ int sdl_was_already_inited;
+} SDLContext;
+
+static const struct sdl_overlay_pix_fmt_entry {
+ enum AVPixelFormat pix_fmt; int overlay_fmt;
+} sdl_overlay_pix_fmt_map[] = {
+ { AV_PIX_FMT_YUV420P, SDL_IYUV_OVERLAY },
+ { AV_PIX_FMT_YUYV422, SDL_YUY2_OVERLAY },
+ { AV_PIX_FMT_UYVY422, SDL_UYVY_OVERLAY },
+ { AV_PIX_FMT_NONE, 0 },
+};
+
+static int sdl_write_trailer(AVFormatContext *s)
+{
+ SDLContext *sdl = s->priv_data;
+
+ av_freep(&sdl->window_title);
+ av_freep(&sdl->icon_title);
+
+ if (sdl->overlay) {
+ SDL_FreeYUVOverlay(sdl->overlay);
+ sdl->overlay = NULL;
+ }
+ if (!sdl->sdl_was_already_inited)
+ SDL_Quit();
+
+ return 0;
+}
+
+static int sdl_write_header(AVFormatContext *s)
+{
+ SDLContext *sdl = s->priv_data;
+ AVStream *st = s->streams[0];
+ AVCodecContext *encctx = st->codec;
+ AVRational sar, dar; /* sample and display aspect ratios */
+ int i, ret;
+ int flags = SDL_SWSURFACE | (sdl->window_fullscreen ? SDL_FULLSCREEN : 0);
+
+ if (!sdl->window_title)
+ sdl->window_title = av_strdup(s->filename);
+ if (!sdl->icon_title)
+ sdl->icon_title = av_strdup(sdl->window_title);
+
+ if (SDL_WasInit(SDL_INIT_VIDEO)) {
+ av_log(s, AV_LOG_ERROR,
+ "SDL video subsystem was already inited, aborting\n");
+ sdl->sdl_was_already_inited = 1;
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ if (SDL_Init(SDL_INIT_VIDEO) != 0) {
+ av_log(s, AV_LOG_ERROR, "Unable to initialize SDL: %s\n", SDL_GetError());
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ if ( s->nb_streams > 1
+ || encctx->codec_type != AVMEDIA_TYPE_VIDEO
+ || encctx->codec_id != AV_CODEC_ID_RAWVIDEO) {
+ av_log(s, AV_LOG_ERROR, "Only supports one rawvideo stream\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ for (i = 0; sdl_overlay_pix_fmt_map[i].pix_fmt != AV_PIX_FMT_NONE; i++) {
+ if (sdl_overlay_pix_fmt_map[i].pix_fmt == encctx->pix_fmt) {
+ sdl->overlay_fmt = sdl_overlay_pix_fmt_map[i].overlay_fmt;
+ break;
+ }
+ }
+
+ if (!sdl->overlay_fmt) {
+ av_log(s, AV_LOG_ERROR,
+ "Unsupported pixel format '%s', choose one of yuv420p, yuyv422, or uyvy422\n",
+ av_get_pix_fmt_name(encctx->pix_fmt));
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ /* compute overlay width and height from the codec context information */
+ sar = st->sample_aspect_ratio.num ? st->sample_aspect_ratio : (AVRational){ 1, 1 };
+ dar = av_mul_q(sar, (AVRational){ encctx->width, encctx->height });
+
+ /* we assume the screen has a 1:1 sample aspect ratio */
+ if (sdl->window_width && sdl->window_height) {
+ /* fit in the window */
+ if (av_cmp_q(dar, (AVRational){ sdl->window_width, sdl->window_height }) > 0) {
+ /* fit in width */
+ sdl->overlay_width = sdl->window_width;
+ sdl->overlay_height = av_rescale(sdl->overlay_width, dar.den, dar.num);
+ } else {
+ /* fit in height */
+ sdl->overlay_height = sdl->window_height;
+ sdl->overlay_width = av_rescale(sdl->overlay_height, dar.num, dar.den);
+ }
+ } else {
+ if (sar.num > sar.den) {
+ sdl->overlay_width = encctx->width;
+ sdl->overlay_height = av_rescale(sdl->overlay_width, dar.den, dar.num);
+ } else {
+ sdl->overlay_height = encctx->height;
+ sdl->overlay_width = av_rescale(sdl->overlay_height, dar.num, dar.den);
+ }
+ sdl->window_width = sdl->overlay_width;
+ sdl->window_height = sdl->overlay_height;
+ }
+ sdl->overlay_x = (sdl->window_width - sdl->overlay_width ) / 2;
+ sdl->overlay_y = (sdl->window_height - sdl->overlay_height) / 2;
+
+ SDL_WM_SetCaption(sdl->window_title, sdl->icon_title);
+ sdl->surface = SDL_SetVideoMode(sdl->window_width, sdl->window_height,
+ 24, flags);
+ if (!sdl->surface) {
+ av_log(s, AV_LOG_ERROR, "Unable to set video mode: %s\n", SDL_GetError());
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ sdl->overlay = SDL_CreateYUVOverlay(encctx->width, encctx->height,
+ sdl->overlay_fmt, sdl->surface);
+ if (!sdl->overlay || sdl->overlay->pitches[0] < encctx->width) {
+ av_log(s, AV_LOG_ERROR,
+ "SDL does not support an overlay with size of %dx%d pixels\n",
+ encctx->width, encctx->height);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ av_log(s, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d\n",
+ encctx->width, encctx->height, av_get_pix_fmt_name(encctx->pix_fmt), sar.num, sar.den,
+ sdl->overlay_width, sdl->overlay_height);
+ return 0;
+
+fail:
+ sdl_write_trailer(s);
+ return ret;
+}
+
+static int sdl_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ SDLContext *sdl = s->priv_data;
+ AVCodecContext *encctx = s->streams[0]->codec;
+ SDL_Rect rect = { sdl->overlay_x, sdl->overlay_y, sdl->overlay_width, sdl->overlay_height };
+ AVPicture pict;
+ int i;
+
+ avpicture_fill(&pict, pkt->data, encctx->pix_fmt, encctx->width, encctx->height);
+
+ SDL_FillRect(sdl->surface, &sdl->surface->clip_rect,
+ SDL_MapRGB(sdl->surface->format, 0, 0, 0));
+ SDL_LockYUVOverlay(sdl->overlay);
+ for (i = 0; i < 3; i++) {
+ sdl->overlay->pixels [i] = pict.data [i];
+ sdl->overlay->pitches[i] = pict.linesize[i];
+ }
+ SDL_DisplayYUVOverlay(sdl->overlay, &rect);
+ SDL_UnlockYUVOverlay(sdl->overlay);
+
+ SDL_UpdateRect(sdl->surface, rect.x, rect.y, rect.w, rect.h);
+
+ return 0;
+}
+
+#define OFFSET(x) offsetof(SDLContext,x)
+
+static const AVOption options[] = {
+ { "window_title", "set SDL window title", OFFSET(window_title), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
+ { "icon_title", "set SDL iconified window title", OFFSET(icon_title) , AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
+ { "window_size", "set SDL window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE,{.str=NULL}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
+ { "window_fullscreen", "set SDL window fullscreen", OFFSET(window_fullscreen), AV_OPT_TYPE_INT,{.i64=0}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
+ { NULL },
+};
+
+static const AVClass sdl_class = {
+ .class_name = "sdl outdev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVOutputFormat ff_sdl_muxer = {
+ .name = "sdl",
+ .long_name = NULL_IF_CONFIG_SMALL("SDL output device"),
+ .priv_data_size = sizeof(SDLContext),
+ .audio_codec = AV_CODEC_ID_NONE,
+ .video_codec = AV_CODEC_ID_RAWVIDEO,
+ .write_header = sdl_write_header,
+ .write_packet = sdl_write_packet,
+ .write_trailer = sdl_write_trailer,
+ .flags = AVFMT_NOFILE | AVFMT_VARIABLE_FPS | AVFMT_NOTIMESTAMPS,
+ .priv_class = &sdl_class,
+};
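
sdl_write_header() sizes the YUV overlay by fitting the stream's display aspect ratio into the window: fit to width when the video is proportionally wider than the window, otherwise fit to height, then center. A standalone sketch of that fitting step (fit_overlay() is an illustrative helper, not part of the patch):

    #include <stdio.h>

    /* Fit a display aspect ratio (dar_num:dar_den) into a window:
     * fit to width when the video is proportionally wider than the window,
     * otherwise fit to height.  Integer math only. */
    static void fit_overlay(int win_w, int win_h, int dar_num, int dar_den,
                            int *out_w, int *out_h)
    {
        if ((long long)dar_num * win_h > (long long)dar_den * win_w) {
            *out_w = win_w;                                  /* fit in width  */
            *out_h = (int)((long long)win_w * dar_den / dar_num);
        } else {
            *out_h = win_h;                                  /* fit in height */
            *out_w = (int)((long long)win_h * dar_num / dar_den);
        }
    }

    int main(void)
    {
        int w, h;
        fit_overlay(1280, 720, 4, 3, &w, &h); /* 4:3 video into a 16:9 window */
        printf("%dx%d\n", w, h);              /* prints 960x720 */
        return 0;
    }
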
diff --git a/libavdevice/sndio_common.c b/libavdevice/sndio_common.c
index 1bea6c5be1..19f39be780 100644
--- a/libavdevice/sndio_common.c
+++ b/libavdevice/sndio_common.c
@@ -2,27 +2,27 @@
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include <sndio.h>
-#include "libavformat/avformat.h"
+#include "avdevice.h"
#include "sndio_common.h"
diff --git a/libavdevice/sndio_common.h b/libavdevice/sndio_common.h
index 2f70213aed..74f41f59bf 100644
--- a/libavdevice/sndio_common.h
+++ b/libavdevice/sndio_common.h
@@ -2,20 +2,20 @@
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -25,8 +25,8 @@
#include <stdint.h>
#include <sndio.h>
-#include "libavformat/avformat.h"
#include "libavutil/log.h"
+#include "avdevice.h"
typedef struct SndioData {
AVClass *class;
diff --git a/libavdevice/sndio_dec.c b/libavdevice/sndio_dec.c
index 58caaa1f44..806f47867e 100644
--- a/libavdevice/sndio_dec.c
+++ b/libavdevice/sndio_dec.c
@@ -2,20 +2,20 @@
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavdevice/sndio_enc.c b/libavdevice/sndio_enc.c
index 6f69b9e501..84d070e89c 100644
--- a/libavdevice/sndio_enc.c
+++ b/libavdevice/sndio_enc.c
@@ -2,28 +2,27 @@
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include <sndio.h>
-#include "libavformat/avformat.h"
-
+#include "avdevice.h"
#include "sndio_common.h"
static av_cold int audio_write_header(AVFormatContext *s1)
diff --git a/libavdevice/timefilter.c b/libavdevice/timefilter.c
index 8b98d338f0..424e4929bd 100644
--- a/libavdevice/timefilter.c
+++ b/libavdevice/timefilter.c
@@ -5,20 +5,20 @@
* Author: Olivier Guilyardi <olivier samalyse com>
* Michael Niedermayer <michaelni gmx at>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -37,14 +37,21 @@ struct TimeFilter {
int count;
};
-TimeFilter *ff_timefilter_new(double clock_period,
- double feedback2_factor,
- double feedback3_factor)
+/* approximate 1 - exp(-x) using a third-order power series */
+static double qexpneg(double x)
+{
+ return 1 - 1 / (1 + x * (1 + x / 2 * (1 + x / 3)));
+}
+
+TimeFilter *ff_timefilter_new(double time_base,
+ double period,
+ double bandwidth)
{
TimeFilter *self = av_mallocz(sizeof(TimeFilter));
- self->clock_period = clock_period;
- self->feedback2_factor = feedback2_factor;
- self->feedback3_factor = feedback3_factor;
+ double o = 2 * M_PI * bandwidth * period * time_base;
+ self->clock_period = time_base;
+ self->feedback2_factor = qexpneg(M_SQRT2 * o);
+ self->feedback3_factor = qexpneg(o * o) / period;
return self;
}
@@ -69,11 +76,16 @@ double ff_timefilter_update(TimeFilter *self, double system_time, double period)
loop_error = system_time - self->cycle_time;
self->cycle_time += FFMAX(self->feedback2_factor, 1.0 / self->count) * loop_error;
- self->clock_period += self->feedback3_factor * loop_error / period;
+ self->clock_period += self->feedback3_factor * loop_error;
}
return self->cycle_time;
}
+double ff_timefilter_eval(TimeFilter *self, double delta)
+{
+ return self->cycle_time + self->clock_period * delta;
+}
+
#ifdef TEST
#include "libavutil/lfg.h"
#define LFG_MAX ((1LL << 32) - 1)
@@ -85,17 +97,21 @@ int main(void)
#define SAMPLES 1000
double ideal[SAMPLES];
double samples[SAMPLES];
+ double samplet[SAMPLES];
for (n0 = 0; n0 < 40; n0 = 2 * n0 + 1) {
for (n1 = 0; n1 < 10; n1 = 2 * n1 + 1) {
double best_error = 1000000000;
- double bestpar0 = 1;
- double bestpar1 = 0.001;
+ double bestpar0 = n0 ? 1 : 100000;
+ double bestpar1 = 1;
int better, i;
av_lfg_init(&prng, 123);
for (i = 0; i < SAMPLES; i++) {
- ideal[i] = 10 + i + n1 * i / (1000);
+ samplet[i] = 10 + i + (av_lfg_get(&prng) < LFG_MAX/2 ? 0 : 0.999);
+ ideal[i] = samplet[i] + n1 * i / (1000);
samples[i] = ideal[i] + n0 * (av_lfg_get(&prng) - LFG_MAX / 2) / (LFG_MAX * 10LL);
+ if(i && samples[i]<samples[i-1])
+ samples[i]=samples[i-1]+0.001;
}
do {
@@ -107,7 +123,9 @@ int main(void)
TimeFilter *tf = ff_timefilter_new(1, par0, par1);
for (i = 0; i < SAMPLES; i++) {
double filtered;
- filtered = ff_timefilter_update(tf, samples[i], 1);
+ filtered = ff_timefilter_update(tf, samples[i], i ? (samplet[i] - samplet[i-1]) : 1);
+ if(filtered < 0 || filtered > 1000000000)
+ printf("filter is unstable\n");
error += (filtered - ideal[i]) * (filtered - ideal[i]);
}
ff_timefilter_destroy(tf);
@@ -132,7 +150,7 @@ int main(void)
}
ff_timefilter_destroy(tf);
#else
- printf(" [%f %f %9f]", bestpar0, bestpar1, best_error);
+ printf(" [%12f %11f %9f]", bestpar0, bestpar1, best_error);
#endif
}
printf("\n");
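For reference, qexpneg() above stands in for 1 - exp(-x): exp(x) is replaced by its 3rd-order Taylor polynomial 1 + x*(1 + x/2*(1 + x/3)) in the denominator, which is accurate for the small loop gains ff_timefilter_new() feeds it. A minimal standalone check (illustrative sketch, not code from this patch) comparing the approximation with the exact value:

    #include <math.h>
    #include <stdio.h>

    /* same 3rd-order approximation of 1 - exp(-x) as qexpneg() in timefilter.c */
    static double qexpneg(double x)
    {
        return 1 - 1 / (1 + x * (1 + x / 2 * (1 + x / 3)));
    }

    int main(void)
    {
        double x;
        /* for o << 1, as used by ff_timefilter_new(), the error is negligible */
        for (x = 0.001; x < 1.0; x *= 10)
            printf("x=%g approx=%g exact=%g\n", x, qexpneg(x), 1 - exp(-x));
        return 0;
    }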
diff --git a/libavdevice/timefilter.h b/libavdevice/timefilter.h
index 8cadd8b066..66629591a2 100644
--- a/libavdevice/timefilter.h
+++ b/libavdevice/timefilter.h
@@ -5,20 +5,20 @@
* Author: Olivier Guilyardi <olivier samalyse com>
* Michael Niedermayer <michaelni gmx at>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -45,16 +45,18 @@ typedef struct TimeFilter TimeFilter;
*
* Unless you know what you are doing, you should set these as follows:
*
- * o = 2 * M_PI * bandwidth * period
- * feedback2_factor = sqrt(2 * o)
+ * o = 2 * M_PI * bandwidth * period_in_seconds
+ * feedback2_factor = sqrt(2) * o
* feedback3_factor = o * o
*
* Where bandwidth is up to you to choose. Smaller values will filter out more
* of the jitter, but also take a longer time for the loop to settle. A good
* starting point is something between 0.3 and 3 Hz.
*
- * @param clock_period period of the hardware clock in seconds
- * (for example 1.0/44100)
+ * @param time_base period of the hardware clock in seconds
+ * (for example 1.0/44100)
+ * @param period expected update interval, in input units
+ * @param bandwidth filtering bandwidth, in Hz
*
* For more details about these parameters and background concepts please see:
* http://www.kokkinizita.net/papers/usingdll.pdf
@@ -80,6 +82,15 @@ TimeFilter * ff_timefilter_new(double clock_period, double feedback2_factor, dou
double ff_timefilter_update(TimeFilter *self, double system_time, double period);
/**
+ * Evaluate the filter at a specified time
+ *
+ * @param delta difference between the requested time and the current time
+ * (last call to ff_timefilter_update).
+ * @return the filtered time
+ */
+double ff_timefilter_eval(TimeFilter *self, double delta);
+
+/**
* Reset the filter
*
* This function should mainly be called in case of XRUN.
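Putting the three calls together, a capture device creates one filter per stream and feeds it a (system time, elapsed device time) pair for every buffer it reads; ff_timefilter_eval() can then interpolate timestamps that fall between two updates, as v4l2.c does further down in this patch. A hypothetical audio-style usage following the parameter conventions documented above (sample_rate, nb_samples and the 1.5 Hz bandwidth are assumed values, and the header is only available inside the libavdevice tree):

    #include "libavutil/time.h"   /* av_gettime() */
    #include "timefilter.h"       /* libavdevice-internal header */

    /* one-time setup: time_base = seconds per device tick, period = expected
     * ticks per update, bandwidth in Hz (0.3-3 Hz suggested above) */
    static TimeFilter *capture_filter_new(int sample_rate, int period_samples)
    {
        return ff_timefilter_new(1.0 / sample_rate, period_samples, 1.5);
    }

    /* per buffer: smooth the jittery wall-clock timestamp; nb_samples is the
     * device time elapsed since the previous call, in the same tick unit */
    static int64_t capture_filter_pts(TimeFilter *tf, int nb_samples)
    {
        double t = ff_timefilter_update(tf, av_gettime() / 1000000.0, nb_samples);
        return (int64_t)(t * 1000000.0);   /* back to microseconds */
    }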
diff --git a/libavdevice/v4l.c b/libavdevice/v4l.c
new file mode 100644
index 0000000000..e2f37d6d95
--- /dev/null
+++ b/libavdevice/v4l.c
@@ -0,0 +1,363 @@
+/*
+ * Linux video grab interface
+ * Copyright (c) 2000,2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avdevice.h"
+
+#undef __STRICT_ANSI__ //workaround due to broken kernel headers
+#include "config.h"
+#include "libavutil/rational.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/log.h"
+#include "libavutil/opt.h"
+#include "libavformat/internal.h"
+#include "libavcodec/dsputil.h"
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#define _LINUX_TIME_H 1
+#include <linux/videodev.h>
+#include <time.h>
+#include "avdevice.h"
+
+typedef struct {
+ AVClass *class;
+ int fd;
+ int frame_format; /* see VIDEO_PALETTE_xxx */
+ int use_mmap;
+ AVRational time_base;
+ int64_t time_frame;
+ int frame_size;
+ struct video_capability video_cap;
+ struct video_audio audio_saved;
+ struct video_window video_win;
+ uint8_t *video_buf;
+ struct video_mbuf gb_buffers;
+ struct video_mmap gb_buf;
+ int gb_frame;
+ int standard;
+} VideoData;
+
+static const struct {
+ int palette;
+ int depth;
+ enum AVPixelFormat pix_fmt;
+} video_formats [] = {
+ {.palette = VIDEO_PALETTE_YUV420P, .depth = 12, .pix_fmt = AV_PIX_FMT_YUV420P },
+ {.palette = VIDEO_PALETTE_YUV422, .depth = 16, .pix_fmt = AV_PIX_FMT_YUYV422 },
+ {.palette = VIDEO_PALETTE_UYVY, .depth = 16, .pix_fmt = AV_PIX_FMT_UYVY422 },
+ {.palette = VIDEO_PALETTE_YUYV, .depth = 16, .pix_fmt = AV_PIX_FMT_YUYV422 },
+ /* NOTE: v4l uses BGR24, not RGB24 */
+ {.palette = VIDEO_PALETTE_RGB24, .depth = 24, .pix_fmt = AV_PIX_FMT_BGR24 },
+ {.palette = VIDEO_PALETTE_RGB565, .depth = 16, .pix_fmt = AV_PIX_FMT_BGR565 },
+ {.palette = VIDEO_PALETTE_GREY, .depth = 8, .pix_fmt = AV_PIX_FMT_GRAY8 },
+};
+
+
+static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+{
+ VideoData *s = s1->priv_data;
+ AVStream *st;
+ int video_fd;
+ int desired_palette, desired_depth;
+ struct video_tuner tuner;
+ struct video_audio audio;
+ struct video_picture pict;
+ int j;
+ int vformat_num = FF_ARRAY_ELEMS(video_formats);
+
+ av_log(s1, AV_LOG_WARNING, "V4L input device is deprecated and will be removed in the next release.\n");
+
+ if (ap->time_base.den <= 0) {
+ av_log(s1, AV_LOG_ERROR, "Wrong time base (%d)\n", ap->time_base.den);
+ return -1;
+ }
+ s->time_base = ap->time_base;
+
+ s->video_win.width = ap->width;
+ s->video_win.height = ap->height;
+
+ st = avformat_new_stream(s1, NULL);
+ if (!st)
+ return AVERROR(ENOMEM);
+ avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+
+ video_fd = open(s1->filename, O_RDWR);
+ if (video_fd < 0) {
+ av_log(s1, AV_LOG_ERROR, "%s: %s\n", s1->filename, strerror(errno));
+ goto fail;
+ }
+
+ if (ioctl(video_fd, VIDIOCGCAP, &s->video_cap) < 0) {
+ av_log(s1, AV_LOG_ERROR, "VIDIOCGCAP: %s\n", strerror(errno));
+ goto fail;
+ }
+
+ if (!(s->video_cap.type & VID_TYPE_CAPTURE)) {
+ av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not handle capture\n");
+ goto fail;
+ }
+
+ /* no values set, autodetect them */
+ if (s->video_win.width <= 0 || s->video_win.height <= 0) {
+ if (ioctl(video_fd, VIDIOCGWIN, &s->video_win, sizeof(s->video_win)) < 0) {
+ av_log(s1, AV_LOG_ERROR, "VIDIOCGWIN: %s\n", strerror(errno));
+ goto fail;
+ }
+ }
+
+ if(av_image_check_size(s->video_win.width, s->video_win.height, 0, s1) < 0)
+ return -1;
+
+ desired_palette = -1;
+ desired_depth = -1;
+ for (j = 0; j < vformat_num; j++) {
+ if (ap->pix_fmt == video_formats[j].pix_fmt) {
+ desired_palette = video_formats[j].palette;
+ desired_depth = video_formats[j].depth;
+ break;
+ }
+ }
+
+ /* set tv standard */
+ if (!ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
+ tuner.mode = s->standard;
+ ioctl(video_fd, VIDIOCSTUNER, &tuner);
+ }
+
+ /* unmute audio */
+ audio.audio = 0;
+ ioctl(video_fd, VIDIOCGAUDIO, &audio);
+ memcpy(&s->audio_saved, &audio, sizeof(audio));
+ audio.flags &= ~VIDEO_AUDIO_MUTE;
+ ioctl(video_fd, VIDIOCSAUDIO, &audio);
+
+ ioctl(video_fd, VIDIOCGPICT, &pict);
+ av_dlog(s1, "v4l: colour=%d hue=%d brightness=%d contrast=%d whiteness=%d\n",
+ pict.colour, pict.hue, pict.brightness, pict.contrast, pict.whiteness);
+ /* try to choose a suitable video format */
+ pict.palette = desired_palette;
+ pict.depth= desired_depth;
+ if (desired_palette == -1 || ioctl(video_fd, VIDIOCSPICT, &pict) < 0) {
+ for (j = 0; j < vformat_num; j++) {
+ pict.palette = video_formats[j].palette;
+ pict.depth = video_formats[j].depth;
+ if (-1 != ioctl(video_fd, VIDIOCSPICT, &pict))
+ break;
+ }
+ if (j >= vformat_num)
+ goto fail1;
+ }
+
+ if (ioctl(video_fd, VIDIOCGMBUF, &s->gb_buffers) < 0) {
+ /* try to use read based access */
+ int val;
+
+ s->video_win.x = 0;
+ s->video_win.y = 0;
+ s->video_win.chromakey = -1;
+ s->video_win.flags = 0;
+
+ if (ioctl(video_fd, VIDIOCSWIN, s->video_win) < 0) {
+ av_log(s1, AV_LOG_ERROR, "VIDIOCSWIN: %s\n", strerror(errno));
+ goto fail;
+ }
+
+ s->frame_format = pict.palette;
+
+ val = 1;
+ if (ioctl(video_fd, VIDIOCCAPTURE, &val) < 0) {
+ av_log(s1, AV_LOG_ERROR, "VIDIOCCAPTURE: %s\n", strerror(errno));
+ goto fail;
+ }
+
+ s->time_frame = av_gettime() * s->time_base.den / s->time_base.num;
+ s->use_mmap = 0;
+ } else {
+ s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ|PROT_WRITE, MAP_SHARED, video_fd, 0);
+ if ((unsigned char*)-1 == s->video_buf) {
+ s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ|PROT_WRITE, MAP_PRIVATE, video_fd, 0);
+ if ((unsigned char*)-1 == s->video_buf) {
+ av_log(s1, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
+ goto fail;
+ }
+ }
+ s->gb_frame = 0;
+ s->time_frame = av_gettime() * s->time_base.den / s->time_base.num;
+
+ /* start to grab the first frame */
+ s->gb_buf.frame = s->gb_frame % s->gb_buffers.frames;
+ s->gb_buf.height = s->video_win.height;
+ s->gb_buf.width = s->video_win.width;
+ s->gb_buf.format = pict.palette;
+
+ if (ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
+ if (errno != EAGAIN) {
+ fail1:
+ av_log(s1, AV_LOG_ERROR, "VIDIOCMCAPTURE: %s\n", strerror(errno));
+ } else {
+ av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not receive any video signal\n");
+ }
+ goto fail;
+ }
+ for (j = 1; j < s->gb_buffers.frames; j++) {
+ s->gb_buf.frame = j;
+ ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
+ }
+ s->frame_format = s->gb_buf.format;
+ s->use_mmap = 1;
+ }
+
+ for (j = 0; j < vformat_num; j++) {
+ if (s->frame_format == video_formats[j].palette) {
+ s->frame_size = s->video_win.width * s->video_win.height * video_formats[j].depth / 8;
+ st->codec->pix_fmt = video_formats[j].pix_fmt;
+ break;
+ }
+ }
+
+ if (j >= vformat_num)
+ goto fail;
+
+ s->fd = video_fd;
+
+ st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
+ st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
+ st->codec->width = s->video_win.width;
+ st->codec->height = s->video_win.height;
+ st->codec->time_base = s->time_base;
+ st->codec->bit_rate = s->frame_size * 1/av_q2d(st->codec->time_base) * 8;
+
+ return 0;
+ fail:
+ if (video_fd >= 0)
+ close(video_fd);
+ return AVERROR(EIO);
+}
+
+static int v4l_mm_read_picture(VideoData *s, uint8_t *buf)
+{
+ uint8_t *ptr;
+
+ while (ioctl(s->fd, VIDIOCSYNC, &s->gb_frame) < 0 &&
+ (errno == EAGAIN || errno == EINTR));
+
+ ptr = s->video_buf + s->gb_buffers.offsets[s->gb_frame];
+ memcpy(buf, ptr, s->frame_size);
+
+ /* Setup to capture the next frame */
+ s->gb_buf.frame = s->gb_frame;
+ if (ioctl(s->fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
+ if (errno == EAGAIN)
+ av_log(NULL, AV_LOG_ERROR, "Cannot Sync\n");
+ else
+ av_log(NULL, AV_LOG_ERROR, "VIDIOCMCAPTURE: %s\n", strerror(errno));
+ return AVERROR(EIO);
+ }
+
+ /* This is now the grabbing frame */
+ s->gb_frame = (s->gb_frame + 1) % s->gb_buffers.frames;
+
+ return s->frame_size;
+}
+
+static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ VideoData *s = s1->priv_data;
+ int64_t curtime, delay;
+ struct timespec ts;
+
+ /* Calculate the time of the next frame */
+ s->time_frame += INT64_C(1000000);
+
+ /* wait based on the frame rate */
+ for(;;) {
+ curtime = av_gettime();
+ delay = s->time_frame * s->time_base.num / s->time_base.den - curtime;
+ if (delay <= 0) {
+ if (delay < INT64_C(-1000000) * s->time_base.num / s->time_base.den) {
+ /* printf("grabbing is %d frames late (dropping)\n", (int) -(delay / 16666)); */
+ s->time_frame += INT64_C(1000000);
+ }
+ break;
+ }
+ ts.tv_sec = delay / 1000000;
+ ts.tv_nsec = (delay % 1000000) * 1000;
+ nanosleep(&ts, NULL);
+ }
+
+ if (av_new_packet(pkt, s->frame_size) < 0)
+ return AVERROR(EIO);
+
+ pkt->pts = curtime;
+
+ /* read one frame */
+ if (s->use_mmap) {
+ return v4l_mm_read_picture(s, pkt->data);
+ } else {
+ if (read(s->fd, pkt->data, pkt->size) != pkt->size)
+ return AVERROR(EIO);
+ return s->frame_size;
+ }
+}
+
+static int grab_read_close(AVFormatContext *s1)
+{
+ VideoData *s = s1->priv_data;
+
+ if (s->use_mmap)
+ munmap(s->video_buf, s->gb_buffers.size);
+
+ /* mute audio. we must force it because the BTTV driver does not
+ return its state correctly */
+ s->audio_saved.flags |= VIDEO_AUDIO_MUTE;
+ ioctl(s->fd, VIDIOCSAUDIO, &s->audio_saved);
+
+ close(s->fd);
+ return 0;
+}
+
+static const AVOption options[] = {
+ { "standard", "", offsetof(VideoData, standard), AV_OPT_TYPE_INT, {.i64 = VIDEO_MODE_NTSC}, VIDEO_MODE_PAL, VIDEO_MODE_NTSC, AV_OPT_FLAG_DECODING_PARAM, "standard" },
+ { "PAL", "", 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_MODE_PAL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
+ { "SECAM", "", 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_MODE_SECAM}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
+ { "NTSC", "", 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_MODE_NTSC}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
+ { NULL },
+};
+
+static const AVClass v4l_class = {
+ .class_name = "V4L indev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVInputFormat ff_v4l_demuxer = {
+ .name = "video4linux,v4l",
+ .long_name = NULL_IF_CONFIG_SMALL("Video4Linux device grab"),
+ .priv_data_size = sizeof(VideoData),
+ .read_header = grab_read_header,
+ .read_packet = grab_read_packet,
+ .read_close = grab_read_close,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &v4l_class,
+};
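The pacing loop in grab_read_packet() keeps time_frame in units of "microseconds scaled by den/num", so adding 1000000 per packet advances the target capture time by exactly one frame period. A small standalone sketch of that arithmetic (hypothetical 25 fps time base, wall clock held fixed for the demonstration, not code from the patch):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        int num = 1, den = 25;                    /* time_base = 1/25 s per frame */
        int64_t now_us = 5000000;                 /* pretend av_gettime() == 5 s  */
        int64_t time_frame = now_us * den / num;  /* scaled as in grab_read_header() */
        int frame;

        for (frame = 0; frame < 3; frame++) {
            int64_t target_us, delay_us;
            time_frame += INT64_C(1000000);       /* one frame period */
            target_us = time_frame * num / den;
            delay_us  = target_us - now_us;       /* what the nanosleep() loop waits */
            printf("frame %d: target=%" PRId64 " us, delay=%" PRId64 " us\n",
                   frame, target_us, delay_us);
        }
        return 0;
    }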
diff --git a/libavdevice/v4l2-common.c b/libavdevice/v4l2-common.c
new file mode 100644
index 0000000000..572f0ed36b
--- /dev/null
+++ b/libavdevice/v4l2-common.c
@@ -0,0 +1,96 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "v4l2-common.h"
+
+const struct fmt_map avpriv_fmt_conversion_table[] = {
+ //ff_fmt codec_id v4l2_fmt
+ { AV_PIX_FMT_YUV420P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV420 },
+ { AV_PIX_FMT_YUV420P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YVU420 },
+ { AV_PIX_FMT_YUV422P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV422P },
+ { AV_PIX_FMT_YUYV422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUYV },
+ { AV_PIX_FMT_UYVY422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_UYVY },
+ { AV_PIX_FMT_YUV411P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV411P },
+ { AV_PIX_FMT_YUV410P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV410 },
+ { AV_PIX_FMT_YUV410P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YVU410 },
+ { AV_PIX_FMT_RGB555LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB555 },
+ { AV_PIX_FMT_RGB555BE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB555X },
+ { AV_PIX_FMT_RGB565LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB565 },
+ { AV_PIX_FMT_RGB565BE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB565X },
+ { AV_PIX_FMT_BGR24, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR24 },
+ { AV_PIX_FMT_RGB24, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB24 },
+ { AV_PIX_FMT_BGR0, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR32 },
+ { AV_PIX_FMT_0RGB, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB32 },
+ { AV_PIX_FMT_GRAY8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_GREY },
+#ifdef V4L2_PIX_FMT_Y16
+ { AV_PIX_FMT_GRAY16LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_Y16 },
+#endif
+ { AV_PIX_FMT_NV12, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_NV12 },
+ { AV_PIX_FMT_NONE, AV_CODEC_ID_MJPEG, V4L2_PIX_FMT_MJPEG },
+ { AV_PIX_FMT_NONE, AV_CODEC_ID_MJPEG, V4L2_PIX_FMT_JPEG },
+#ifdef V4L2_PIX_FMT_H264
+ { AV_PIX_FMT_NONE, AV_CODEC_ID_H264, V4L2_PIX_FMT_H264 },
+#endif
+#ifdef V4L2_PIX_FMT_CPIA1
+ { AV_PIX_FMT_NONE, AV_CODEC_ID_CPIA, V4L2_PIX_FMT_CPIA1 },
+#endif
+ { AV_PIX_FMT_NONE, AV_CODEC_ID_NONE, 0 },
+};
+
+uint32_t avpriv_fmt_ff2v4l(enum AVPixelFormat pix_fmt, enum AVCodecID codec_id)
+{
+ int i;
+
+ for (i = 0; avpriv_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
+ if ((codec_id == AV_CODEC_ID_NONE ||
+ avpriv_fmt_conversion_table[i].codec_id == codec_id) &&
+ (pix_fmt == AV_PIX_FMT_NONE ||
+ avpriv_fmt_conversion_table[i].ff_fmt == pix_fmt)) {
+ return avpriv_fmt_conversion_table[i].v4l2_fmt;
+ }
+ }
+
+ return 0;
+}
+
+enum AVPixelFormat avpriv_fmt_v4l2ff(uint32_t v4l2_fmt, enum AVCodecID codec_id)
+{
+ int i;
+
+ for (i = 0; avpriv_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
+ if (avpriv_fmt_conversion_table[i].v4l2_fmt == v4l2_fmt &&
+ avpriv_fmt_conversion_table[i].codec_id == codec_id) {
+ return avpriv_fmt_conversion_table[i].ff_fmt;
+ }
+ }
+
+ return AV_PIX_FMT_NONE;
+}
+
+enum AVCodecID avpriv_fmt_v4l2codec(uint32_t v4l2_fmt)
+{
+ int i;
+
+ for (i = 0; avpriv_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
+ if (avpriv_fmt_conversion_table[i].v4l2_fmt == v4l2_fmt) {
+ return avpriv_fmt_conversion_table[i].codec_id;
+ }
+ }
+
+ return AV_CODEC_ID_NONE;
+}
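The table is walked in order, so the first matching row wins, and a format/codec pair can be looked up in either direction. A hypothetical fragment showing the intended use of the three helpers (compiles only inside libavdevice, where v4l2-common.h is available):

    #include "v4l2-common.h"

    static void fmt_lookup_example(void)
    {
        /* AVPixelFormat + codec -> V4L2 fourcc (0 if not in the table) */
        uint32_t fourcc = avpriv_fmt_ff2v4l(AV_PIX_FMT_YUYV422, AV_CODEC_ID_RAWVIDEO);

        /* V4L2 fourcc -> codec, then codec + fourcc -> AVPixelFormat */
        enum AVCodecID codec_id    = avpriv_fmt_v4l2codec(fourcc);
        enum AVPixelFormat pix_fmt = avpriv_fmt_v4l2ff(fourcc, codec_id);

        /* expected: codec_id == AV_CODEC_ID_RAWVIDEO, pix_fmt == AV_PIX_FMT_YUYV422 */
        (void)pix_fmt;
    }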
diff --git a/libavdevice/v4l2-common.h b/libavdevice/v4l2-common.h
new file mode 100644
index 0000000000..8aef2349ae
--- /dev/null
+++ b/libavdevice/v4l2-common.h
@@ -0,0 +1,62 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVDEVICE_V4L2_COMMON_H
+#define AVDEVICE_V4L2_COMMON_H
+
+#undef __STRICT_ANSI__ //workaround due to broken kernel headers
+#include "config.h"
+#include "libavformat/internal.h"
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#if HAVE_SYS_VIDEOIO_H
+#include <sys/videoio.h>
+#else
+#if HAVE_ASM_TYPES_H
+#include <asm/types.h>
+#endif
+#include <linux/videodev2.h>
+#endif
+#include "libavutil/atomic.h"
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/log.h"
+#include "libavutil/opt.h"
+#include "avdevice.h"
+#include "timefilter.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/time.h"
+#include "libavutil/avstring.h"
+
+struct fmt_map {
+ enum AVPixelFormat ff_fmt;
+ enum AVCodecID codec_id;
+ uint32_t v4l2_fmt;
+};
+
+extern av_export const struct fmt_map avpriv_fmt_conversion_table[];
+
+uint32_t avpriv_fmt_ff2v4l(enum AVPixelFormat pix_fmt, enum AVCodecID codec_id);
+enum AVPixelFormat avpriv_fmt_v4l2ff(uint32_t v4l2_fmt, enum AVCodecID codec_id);
+enum AVCodecID avpriv_fmt_v4l2codec(uint32_t v4l2_fmt);
+
+#endif /* AVDEVICE_V4L2_COMMON_H */
diff --git a/libavdevice/v4l2.c b/libavdevice/v4l2.c
index adb289d140..33668c152f 100644
--- a/libavdevice/v4l2.c
+++ b/libavdevice/v4l2.c
@@ -1,57 +1,40 @@
/*
- * Video4Linux2 grab interface
* Copyright (c) 2000,2001 Fabrice Bellard
* Copyright (c) 2006 Luca Abeni
*
- * Part of this file is based on the V4L2 video capture example
- * (http://v4l2spec.bytesex.org/v4l2spec/capture.c)
- *
- * Thanks to Michael Niedermayer for providing the mapping between
- * V4L2_PIX_FMT_* and AV_PIX_FMT_*
- *
- *
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#undef __STRICT_ANSI__ //workaround due to broken kernel headers
-#include "config.h"
-#include "libavformat/avformat.h"
-#include "libavformat/internal.h"
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/time.h>
-#include <poll.h>
-#if HAVE_SYS_VIDEOIO_H
-#include <sys/videoio.h>
-#else
-#include <linux/videodev2.h>
+/**
+ * @file
+ * Video4Linux2 grab interface
+ *
+ * Part of this file is based on the V4L2 video capture example
+ * (http://linuxtv.org/downloads/v4l-dvb-apis/capture-example.html)
+ *
+ * Thanks to Michael Niedermayer for providing the mapping between
+ * V4L2_PIX_FMT_* and AV_PIX_FMT_*
+ */
+
+#include "v4l2-common.h"
+
+#if CONFIG_LIBV4L2
+#include <libv4l2.h>
#endif
-#include "libavutil/atomic.h"
-#include "libavutil/avassert.h"
-#include "libavutil/imgutils.h"
-#include "libavutil/internal.h"
-#include "libavutil/log.h"
-#include "libavutil/opt.h"
-#include "libavutil/parseutils.h"
-#include "libavutil/pixdesc.h"
-#include "libavutil/avstring.h"
-#include "libavutil/mathematics.h"
static const int desired_video_buffers = 256;
@@ -59,113 +42,144 @@ static const int desired_video_buffers = 256;
#define V4L_RAWFORMATS 1
#define V4L_COMPFORMATS 2
+/**
+ * Return timestamps to the user exactly as returned by the kernel
+ */
+#define V4L_TS_DEFAULT 0
+/**
+ * Autodetect the kind of timestamps returned by the kernel and convert to
+ * absolute (wall clock) timestamps.
+ */
+#define V4L_TS_ABS 1
+/**
+ * Assume kernel timestamps are from the monotonic clock and convert to
+ * absolute timestamps.
+ */
+#define V4L_TS_MONO2ABS 2
+
+/**
+ * Once the kind of timestamps returned by the kernel has been detected,
+ * the value of the timefilter (NULL or not) determines whether a conversion
+ * takes place.
+ */
+#define V4L_TS_CONVERT_READY V4L_TS_DEFAULT
+
struct video_data {
AVClass *class;
int fd;
int frame_format; /* V4L2_PIX_FMT_* */
int width, height;
int frame_size;
- int timeout;
int interlaced;
int top_field_first;
+ int ts_mode;
+ TimeFilter *timefilter;
+ int64_t last_time_m;
int buffers;
volatile int buffers_queued;
void **buf_start;
unsigned int *buf_len;
char *standard;
+ v4l2_std_id std_id;
int channel;
- char *video_size; /**< String describing video size,
- set by a private option. */
char *pixel_format; /**< Set by a private option. */
int list_format; /**< Set by a private option. */
+ int list_standard; /**< Set by a private option. */
char *framerate; /**< Set by a private option. */
+
+ int use_libv4l2;
+ int (*open_f)(const char *file, int oflag, ...);
+ int (*close_f)(int fd);
+ int (*dup_f)(int fd);
+ int (*ioctl_f)(int fd, unsigned long int request, ...);
+ ssize_t (*read_f)(int fd, void *buffer, size_t n);
+ void *(*mmap_f)(void *start, size_t length, int prot, int flags, int fd, int64_t offset);
+ int (*munmap_f)(void *_start, size_t length);
};
struct buff_data {
struct video_data *s;
int index;
- int fd;
-};
-
-struct fmt_map {
- enum AVPixelFormat ff_fmt;
- enum AVCodecID codec_id;
- uint32_t v4l2_fmt;
-};
-
-static struct fmt_map fmt_conversion_table[] = {
- //ff_fmt codec_id v4l2_fmt
- { AV_PIX_FMT_YUV420P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV420 },
- { AV_PIX_FMT_YUV422P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV422P },
- { AV_PIX_FMT_YUYV422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUYV },
- { AV_PIX_FMT_UYVY422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_UYVY },
- { AV_PIX_FMT_YUV411P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV411P },
- { AV_PIX_FMT_YUV410P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV410 },
- { AV_PIX_FMT_RGB555, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB555 },
- { AV_PIX_FMT_RGB565, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB565 },
- { AV_PIX_FMT_BGR24, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR24 },
- { AV_PIX_FMT_RGB24, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB24 },
- { AV_PIX_FMT_BGRA, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR32 },
- { AV_PIX_FMT_GRAY8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_GREY },
- { AV_PIX_FMT_NV12, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_NV12 },
- { AV_PIX_FMT_NONE, AV_CODEC_ID_MJPEG, V4L2_PIX_FMT_MJPEG },
- { AV_PIX_FMT_NONE, AV_CODEC_ID_MJPEG, V4L2_PIX_FMT_JPEG },
};
static int device_open(AVFormatContext *ctx)
{
+ struct video_data *s = ctx->priv_data;
struct v4l2_capability cap;
int fd;
- int res, err;
+ int ret;
int flags = O_RDWR;
+#define SET_WRAPPERS(prefix) do { \
+ s->open_f = prefix ## open; \
+ s->close_f = prefix ## close; \
+ s->dup_f = prefix ## dup; \
+ s->ioctl_f = prefix ## ioctl; \
+ s->read_f = prefix ## read; \
+ s->mmap_f = prefix ## mmap; \
+ s->munmap_f = prefix ## munmap; \
+} while (0)
+
+ if (s->use_libv4l2) {
+#if CONFIG_LIBV4L2
+ SET_WRAPPERS(v4l2_);
+#else
+ av_log(ctx, AV_LOG_ERROR, "libavdevice is not built with libv4l2 support.\n");
+ return AVERROR(EINVAL);
+#endif
+ } else {
+ SET_WRAPPERS();
+ }
+
+#define v4l2_open s->open_f
+#define v4l2_close s->close_f
+#define v4l2_dup s->dup_f
+#define v4l2_ioctl s->ioctl_f
+#define v4l2_read s->read_f
+#define v4l2_mmap s->mmap_f
+#define v4l2_munmap s->munmap_f
+
if (ctx->flags & AVFMT_FLAG_NONBLOCK) {
flags |= O_NONBLOCK;
}
- fd = avpriv_open(ctx->filename, flags);
+ fd = v4l2_open(ctx->filename, flags, 0);
if (fd < 0) {
- err = errno;
-
- av_log(ctx, AV_LOG_ERROR, "Cannot open video device %s : %s\n",
- ctx->filename, strerror(err));
-
- return AVERROR(err);
+ ret = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "Cannot open video device %s: %s\n",
+ ctx->filename, av_err2str(ret));
+ return ret;
}
- res = ioctl(fd, VIDIOC_QUERYCAP, &cap);
- if (res < 0) {
- err = errno;
+ if (v4l2_ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
+ ret = AVERROR(errno);
av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYCAP): %s\n",
- strerror(err));
-
+ av_err2str(ret));
goto fail;
}
- av_log(ctx, AV_LOG_VERBOSE, "[%d]Capabilities: %x\n",
+ av_log(ctx, AV_LOG_VERBOSE, "fd:%d capabilities:%x\n",
fd, cap.capabilities);
if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
av_log(ctx, AV_LOG_ERROR, "Not a video capture device.\n");
- err = ENODEV;
-
+ ret = AVERROR(ENODEV);
goto fail;
}
if (!(cap.capabilities & V4L2_CAP_STREAMING)) {
av_log(ctx, AV_LOG_ERROR,
"The device does not support the streaming I/O method.\n");
- err = ENOSYS;
-
+ ret = AVERROR(ENOSYS);
goto fail;
}
return fd;
fail:
- close(fd);
- return AVERROR(err);
+ v4l2_close(fd);
+ return ret;
}
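SET_WRAPPERS() relies on token pasting: SET_WRAPPERS(v4l2_) installs the libv4l2 wrappers, while SET_WRAPPERS() with an empty argument installs the plain libc/kernel entry points, and the #define block that follows routes every later v4l2_* call in this file through the chosen function pointers. Spelled out, the two expansions amount to (illustrative fragment, not code from the patch):

    /* SET_WRAPPERS(v4l2_): libv4l2 build */
    s->open_f   = v4l2_open;   s->close_f  = v4l2_close;  s->dup_f    = v4l2_dup;
    s->ioctl_f  = v4l2_ioctl;  s->read_f   = v4l2_read;
    s->mmap_f   = v4l2_mmap;   s->munmap_f = v4l2_munmap;

    /* SET_WRAPPERS(): plain libc/kernel calls */
    s->open_f   = open;        s->close_f  = close;       s->dup_f    = dup;
    s->ioctl_f  = ioctl;       s->read_f   = read;
    s->mmap_f   = mmap;        s->munmap_f = munmap;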
static int device_init(AVFormatContext *ctx, int *width, int *height,
@@ -176,14 +190,15 @@ static int device_init(AVFormatContext *ctx, int *width, int *height,
struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
struct v4l2_pix_format *pix = &fmt.fmt.pix;
- int res;
+ int res = 0;
pix->width = *width;
pix->height = *height;
pix->pixelformat = pix_fmt;
pix->field = V4L2_FIELD_ANY;
- res = ioctl(fd, VIDIOC_S_FMT, &fmt);
+ if (v4l2_ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
+ res = AVERROR(errno);
if ((*width != fmt.fmt.pix.width) || (*height != fmt.fmt.pix.height)) {
av_log(ctx, AV_LOG_INFO,
@@ -198,23 +213,24 @@ static int device_init(AVFormatContext *ctx, int *width, int *height,
"The V4L2 driver changed the pixel format "
"from 0x%08X to 0x%08X\n",
pix_fmt, fmt.fmt.pix.pixelformat);
- res = -1;
+ res = AVERROR(EINVAL);
}
if (fmt.fmt.pix.field == V4L2_FIELD_INTERLACED) {
- av_log(ctx, AV_LOG_DEBUG, "The V4L2 driver using the interlaced mode");
+ av_log(ctx, AV_LOG_DEBUG,
+ "The V4L2 driver is using the interlaced mode\n");
s->interlaced = 1;
}
return res;
}
-static int first_field(int fd)
+static int first_field(const struct video_data *s, int fd)
{
int res;
v4l2_std_id std;
- res = ioctl(fd, VIDIOC_G_STD, &std);
+ res = v4l2_ioctl(fd, VIDIOC_G_STD, &std);
if (res < 0) {
return 0;
}
@@ -225,55 +241,13 @@ static int first_field(int fd)
return 1;
}
-static uint32_t fmt_ff2v4l(enum AVPixelFormat pix_fmt, enum AVCodecID codec_id)
-{
- int i;
-
- for (i = 0; i < FF_ARRAY_ELEMS(fmt_conversion_table); i++) {
- if ((codec_id == AV_CODEC_ID_NONE ||
- fmt_conversion_table[i].codec_id == codec_id) &&
- (pix_fmt == AV_PIX_FMT_NONE ||
- fmt_conversion_table[i].ff_fmt == pix_fmt)) {
- return fmt_conversion_table[i].v4l2_fmt;
- }
- }
-
- return 0;
-}
-
-static enum AVPixelFormat fmt_v4l2ff(uint32_t v4l2_fmt, enum AVCodecID codec_id)
-{
- int i;
-
- for (i = 0; i < FF_ARRAY_ELEMS(fmt_conversion_table); i++) {
- if (fmt_conversion_table[i].v4l2_fmt == v4l2_fmt &&
- fmt_conversion_table[i].codec_id == codec_id) {
- return fmt_conversion_table[i].ff_fmt;
- }
- }
-
- return AV_PIX_FMT_NONE;
-}
-
-static enum AVCodecID fmt_v4l2codec(uint32_t v4l2_fmt)
-{
- int i;
-
- for (i = 0; i < FF_ARRAY_ELEMS(fmt_conversion_table); i++) {
- if (fmt_conversion_table[i].v4l2_fmt == v4l2_fmt) {
- return fmt_conversion_table[i].codec_id;
- }
- }
-
- return AV_CODEC_ID_NONE;
-}
-
#if HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE
static void list_framesizes(AVFormatContext *ctx, int fd, uint32_t pixelformat)
{
+ const struct video_data *s = ctx->priv_data;
struct v4l2_frmsizeenum vfse = { .pixel_format = pixelformat };
- while(!ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &vfse)) {
+ while(!v4l2_ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &vfse)) {
switch (vfse.type) {
case V4L2_FRMSIZE_TYPE_DISCRETE:
av_log(ctx, AV_LOG_INFO, " %ux%u",
@@ -296,24 +270,25 @@ static void list_framesizes(AVFormatContext *ctx, int fd, uint32_t pixelformat)
static void list_formats(AVFormatContext *ctx, int fd, int type)
{
+ const struct video_data *s = ctx->priv_data;
struct v4l2_fmtdesc vfd = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
- while(!ioctl(fd, VIDIOC_ENUM_FMT, &vfd)) {
- enum AVCodecID codec_id = fmt_v4l2codec(vfd.pixelformat);
- enum AVPixelFormat pix_fmt = fmt_v4l2ff(vfd.pixelformat, codec_id);
+ while(!v4l2_ioctl(fd, VIDIOC_ENUM_FMT, &vfd)) {
+ enum AVCodecID codec_id = avpriv_fmt_v4l2codec(vfd.pixelformat);
+ enum AVPixelFormat pix_fmt = avpriv_fmt_v4l2ff(vfd.pixelformat, codec_id);
vfd.index++;
if (!(vfd.flags & V4L2_FMT_FLAG_COMPRESSED) &&
type & V4L_RAWFORMATS) {
const char *fmt_name = av_get_pix_fmt_name(pix_fmt);
- av_log(ctx, AV_LOG_INFO, "R : %9s : %20s :",
+ av_log(ctx, AV_LOG_INFO, "Raw : %9s : %20s :",
fmt_name ? fmt_name : "Unsupported",
vfd.description);
} else if (vfd.flags & V4L2_FMT_FLAG_COMPRESSED &&
type & V4L_COMPFORMATS) {
- AVCodec *codec = avcodec_find_encoder(codec_id);
- av_log(ctx, AV_LOG_INFO, "C : %9s : %20s :",
+ AVCodec *codec = avcodec_find_decoder(codec_id);
+ av_log(ctx, AV_LOG_INFO, "Compressed: %9s : %20s :",
codec ? codec->name : "Unsupported",
vfd.description);
} else {
@@ -321,10 +296,8 @@ static void list_formats(AVFormatContext *ctx, int fd, int type)
}
#ifdef V4L2_FMT_FLAG_EMULATED
- if (vfd.flags & V4L2_FMT_FLAG_EMULATED) {
- av_log(ctx, AV_LOG_WARNING, "%s", "Emulated");
- continue;
- }
+ if (vfd.flags & V4L2_FMT_FLAG_EMULATED)
+ av_log(ctx, AV_LOG_INFO, " Emulated :");
#endif
#if HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE
list_framesizes(ctx, fd, vfd.pixelformat);
@@ -333,6 +306,30 @@ static void list_formats(AVFormatContext *ctx, int fd, int type)
}
}
+static void list_standards(AVFormatContext *ctx)
+{
+ int ret;
+ struct video_data *s = ctx->priv_data;
+ struct v4l2_standard standard;
+
+ if (s->std_id == 0)
+ return;
+
+ for (standard.index = 0; ; standard.index++) {
+ if (v4l2_ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
+ ret = AVERROR(errno);
+ if (ret == AVERROR(EINVAL)) {
+ break;
+ } else {
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_ENUMSTD): %s\n", av_err2str(ret));
+ return;
+ }
+ }
+ av_log(ctx, AV_LOG_INFO, "%2d, %16llx, %s\n",
+ standard.index, standard.id, standard.name);
+ }
+}
+
static int mmap_init(AVFormatContext *ctx)
{
int i, res;
@@ -343,34 +340,26 @@ static int mmap_init(AVFormatContext *ctx)
.memory = V4L2_MEMORY_MMAP
};
- res = ioctl(s->fd, VIDIOC_REQBUFS, &req);
- if (res < 0) {
- if (errno == EINVAL) {
- av_log(ctx, AV_LOG_ERROR, "Device does not support mmap\n");
- } else {
- av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_REQBUFS)\n");
- }
-
- return AVERROR(errno);
+ if (v4l2_ioctl(s->fd, VIDIOC_REQBUFS, &req) < 0) {
+ res = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_REQBUFS): %s\n", av_err2str(res));
+ return res;
}
if (req.count < 2) {
av_log(ctx, AV_LOG_ERROR, "Insufficient buffer memory\n");
-
return AVERROR(ENOMEM);
}
s->buffers = req.count;
s->buf_start = av_malloc(sizeof(void *) * s->buffers);
if (s->buf_start == NULL) {
av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer pointers\n");
-
return AVERROR(ENOMEM);
}
s->buf_len = av_malloc(sizeof(unsigned int) * s->buffers);
if (s->buf_len == NULL) {
av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer sizes\n");
av_free(s->buf_start);
-
return AVERROR(ENOMEM);
}
@@ -380,30 +369,27 @@ static int mmap_init(AVFormatContext *ctx)
.index = i,
.memory = V4L2_MEMORY_MMAP
};
-
- res = ioctl(s->fd, VIDIOC_QUERYBUF, &buf);
- if (res < 0) {
- av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYBUF)\n");
-
- return AVERROR(errno);
+ if (v4l2_ioctl(s->fd, VIDIOC_QUERYBUF, &buf) < 0) {
+ res = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYBUF): %s\n", av_err2str(res));
+ return res;
}
s->buf_len[i] = buf.length;
if (s->frame_size > 0 && s->buf_len[i] < s->frame_size) {
av_log(ctx, AV_LOG_ERROR,
- "Buffer len [%d] = %d != %d\n",
+ "buf_len[%d] = %d < expected frame size %d\n",
i, s->buf_len[i], s->frame_size);
-
- return -1;
+ return AVERROR(ENOMEM);
}
- s->buf_start[i] = mmap(NULL, buf.length,
+ s->buf_start[i] = v4l2_mmap(NULL, buf.length,
PROT_READ | PROT_WRITE, MAP_SHARED,
s->fd, buf.m.offset);
if (s->buf_start[i] == MAP_FAILED) {
- av_log(ctx, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
-
- return AVERROR(errno);
+ res = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "mmap: %s\n", av_err2str(res));
+ return res;
}
}
@@ -420,23 +406,84 @@ static void dummy_release_buffer(AVPacket *pkt)
static void mmap_release_buffer(void *opaque, uint8_t *data)
{
struct v4l2_buffer buf = { 0 };
- int res, fd;
+ int res;
struct buff_data *buf_descriptor = opaque;
struct video_data *s = buf_descriptor->s;
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
buf.index = buf_descriptor->index;
- fd = buf_descriptor->fd;
av_free(buf_descriptor);
- res = ioctl(fd, VIDIOC_QBUF, &buf);
- if (res < 0)
+ if (v4l2_ioctl(s->fd, VIDIOC_QBUF, &buf) < 0) {
+ res = AVERROR(errno);
av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n",
- strerror(errno));
+ av_err2str(res));
+ }
+
avpriv_atomic_int_add_and_fetch(&s->buffers_queued, 1);
}
+#if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
+static int64_t av_gettime_monotonic(void)
+{
+ struct timespec tv;
+
+ clock_gettime(CLOCK_MONOTONIC, &tv);
+ return (int64_t)tv.tv_sec * 1000000 + tv.tv_nsec / 1000;
+}
+#endif
+
+static int init_convert_timestamp(AVFormatContext *ctx, int64_t ts)
+{
+ struct video_data *s = ctx->priv_data;
+ int64_t now;
+
+ now = av_gettime();
+ if (s->ts_mode == V4L_TS_ABS &&
+ ts <= now + 1 * AV_TIME_BASE && ts >= now - 10 * AV_TIME_BASE) {
+ av_log(ctx, AV_LOG_INFO, "Detected absolute timestamps\n");
+ s->ts_mode = V4L_TS_CONVERT_READY;
+ return 0;
+ }
+#if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
+ now = av_gettime_monotonic();
+ if (s->ts_mode == V4L_TS_MONO2ABS ||
+ (ts <= now + 1 * AV_TIME_BASE && ts >= now - 10 * AV_TIME_BASE)) {
+ AVRational tb = {AV_TIME_BASE, 1};
+ int64_t period = av_rescale_q(1, tb, ctx->streams[0]->avg_frame_rate);
+ av_log(ctx, AV_LOG_INFO, "Detected monotonic timestamps, converting\n");
+ /* microseconds instead of seconds, MHz instead of Hz */
+ s->timefilter = ff_timefilter_new(1, period, 1.0E-6);
+ s->ts_mode = V4L_TS_CONVERT_READY;
+ return 0;
+ }
+#endif
+ av_log(ctx, AV_LOG_ERROR, "Unknown timestamps\n");
+ return AVERROR(EIO);
+}
+
+static int convert_timestamp(AVFormatContext *ctx, int64_t *ts)
+{
+ struct video_data *s = ctx->priv_data;
+
+ if (s->ts_mode) {
+ int r = init_convert_timestamp(ctx, *ts);
+ if (r < 0)
+ return r;
+ }
+#if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
+ if (s->timefilter) {
+ int64_t nowa = av_gettime();
+ int64_t nowm = av_gettime_monotonic();
+ ff_timefilter_update(s->timefilter, nowa, nowm - s->last_time_m);
+ s->last_time_m = nowm;
+ *ts = ff_timefilter_eval(s->timefilter, *ts - nowm);
+ }
+#endif
+ return 0;
+}
+
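init_convert_timestamp() decides which clock the kernel timestamps come from by checking whether they fall inside a plausibility window around each candidate clock: at most 10 seconds in the past and 1 second in the future (the 1 * AV_TIME_BASE / 10 * AV_TIME_BASE bounds above, in microseconds). Once monotonic timestamps are detected, convert_timestamp() feeds (wall clock now, elapsed monotonic time) pairs into the timefilter and extrapolates each buffer timestamp with ff_timefilter_eval(). A tiny standalone sketch of just the window test (the clock readings are made-up values):

    #include <inttypes.h>
    #include <stdio.h>

    #define USEC INT64_C(1000000)   /* AV_TIME_BASE: one second in microseconds */

    /* same window as init_convert_timestamp(): ts belongs to clock 'now' if it
     * is no more than 10 s old and no more than 1 s in the future */
    static int ts_matches_clock(int64_t ts, int64_t now)
    {
        return ts <= now + 1 * USEC && ts >= now - 10 * USEC;
    }

    int main(void)
    {
        int64_t wall = INT64_C(1700000000) * USEC;  /* made-up wall-clock reading */
        int64_t mono = INT64_C(12345) * USEC;       /* made-up monotonic reading  */
        int64_t ts   = mono - 2 * USEC;             /* buffer stamped 2 s ago     */

        printf("matches wall clock:      %d\n", ts_matches_clock(ts, wall)); /* 0 */
        printf("matches monotonic clock: %d\n", ts_matches_clock(ts, mono)); /* 1 */
        return 0;
    }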
static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
{
struct video_data *s = ctx->priv_data;
@@ -444,28 +491,18 @@ static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
.memory = V4L2_MEMORY_MMAP
};
- struct pollfd p = { .fd = s->fd, .events = POLLIN };
int res;
- res = poll(&p, 1, s->timeout);
- if (res < 0)
- return AVERROR(errno);
-
- if (!(p.revents & (POLLIN | POLLERR | POLLHUP)))
- return AVERROR(EAGAIN);
-
/* FIXME: Some special treatment might be needed in case of loss of signal... */
- while ((res = ioctl(s->fd, VIDIOC_DQBUF, &buf)) < 0 && (errno == EINTR));
+ while ((res = v4l2_ioctl(s->fd, VIDIOC_DQBUF, &buf)) < 0 && (errno == EINTR));
if (res < 0) {
if (errno == EAGAIN) {
pkt->size = 0;
-
return AVERROR(EAGAIN);
}
- av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_DQBUF): %s\n",
- strerror(errno));
-
- return AVERROR(errno);
+ res = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_DQBUF): %s\n", av_err2str(res));
+ return res;
}
if (buf.index >= s->buffers) {
@@ -476,11 +513,16 @@ static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
// always keep at least one buffer queued
av_assert0(avpriv_atomic_int_get(&s->buffers_queued) >= 1);
+ /* CPIA is a compressed format and we don't know the exact number of bytes
+ * used by a frame, so set it here as the driver announces it.
+ */
+ if (ctx->video_codec_id == AV_CODEC_ID_CPIA)
+ s->frame_size = buf.bytesused;
+
if (s->frame_size > 0 && buf.bytesused != s->frame_size) {
av_log(ctx, AV_LOG_ERROR,
"The v4l2 frame is %d bytes, but %d bytes are expected\n",
buf.bytesused, s->frame_size);
-
return AVERROR_INVALIDDATA;
}
@@ -490,15 +532,17 @@ static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
res = av_new_packet(pkt, buf.bytesused);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "Error allocating a packet.\n");
+ if (v4l2_ioctl(s->fd, VIDIOC_QBUF, &buf) == 0)
+ avpriv_atomic_int_add_and_fetch(&s->buffers_queued, 1);
return res;
}
memcpy(pkt->data, s->buf_start[buf.index], buf.bytesused);
- res = ioctl(s->fd, VIDIOC_QBUF, &buf);
- if (res < 0) {
- av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF)\n");
+ if (v4l2_ioctl(s->fd, VIDIOC_QBUF, &buf) < 0) {
+ res = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n", av_err2str(res));
av_free_packet(pkt);
- return AVERROR(errno);
+ return res;
}
avpriv_atomic_int_add_and_fetch(&s->buffers_queued, 1);
} else {
@@ -518,22 +562,26 @@ FF_ENABLE_DEPRECATION_WARNINGS
* allocate a buffer for memcpying into it
*/
av_log(ctx, AV_LOG_ERROR, "Failed to allocate a buffer descriptor\n");
- res = ioctl(s->fd, VIDIOC_QBUF, &buf);
+ if (v4l2_ioctl(s->fd, VIDIOC_QBUF, &buf) == 0)
+ avpriv_atomic_int_add_and_fetch(&s->buffers_queued, 1);
return AVERROR(ENOMEM);
}
- buf_descriptor->fd = s->fd;
buf_descriptor->index = buf.index;
buf_descriptor->s = s;
pkt->buf = av_buffer_create(pkt->data, pkt->size, mmap_release_buffer,
buf_descriptor, 0);
if (!pkt->buf) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to create a buffer\n");
+ if (v4l2_ioctl(s->fd, VIDIOC_QBUF, &buf) == 0)
+ avpriv_atomic_int_add_and_fetch(&s->buffers_queued, 1);
av_freep(&buf_descriptor);
return AVERROR(ENOMEM);
}
}
pkt->pts = buf.timestamp.tv_sec * INT64_C(1000000) + buf.timestamp.tv_usec;
+ convert_timestamp(ctx, &pkt->pts);
return s->buf_len[buf.index];
}
@@ -551,23 +599,19 @@ static int mmap_start(AVFormatContext *ctx)
.memory = V4L2_MEMORY_MMAP
};
- res = ioctl(s->fd, VIDIOC_QBUF, &buf);
- if (res < 0) {
- av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n",
- strerror(errno));
-
- return AVERROR(errno);
+ if (v4l2_ioctl(s->fd, VIDIOC_QBUF, &buf) < 0) {
+ res = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n", av_err2str(res));
+ return res;
}
}
s->buffers_queued = s->buffers;
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- res = ioctl(s->fd, VIDIOC_STREAMON, &type);
- if (res < 0) {
- av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_STREAMON): %s\n",
- strerror(errno));
-
- return AVERROR(errno);
+ if (v4l2_ioctl(s->fd, VIDIOC_STREAMON, &type) < 0) {
+ res = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_STREAMON): %s\n", av_err2str(res));
+ return res;
}
return 0;
@@ -582,9 +626,9 @@ static void mmap_close(struct video_data *s)
/* We do not check for the result, because we could
* not do anything about it anyway...
*/
- ioctl(s->fd, VIDIOC_STREAMOFF, &type);
+ v4l2_ioctl(s->fd, VIDIOC_STREAMOFF, &type);
for (i = 0; i < s->buffers; i++) {
- munmap(s->buf_start[i], s->buf_len[i]);
+ v4l2_munmap(s->buf_start[i], s->buf_len[i]);
}
av_free(s->buf_start);
av_free(s->buf_len);
@@ -593,15 +637,12 @@ static void mmap_close(struct video_data *s)
static int v4l2_set_parameters(AVFormatContext *s1)
{
struct video_data *s = s1->priv_data;
- struct v4l2_input input = { 0 };
struct v4l2_standard standard = { 0 };
struct v4l2_streamparm streamparm = { 0 };
- struct v4l2_fract *tpf = &streamparm.parm.capture.timeperframe;
+ struct v4l2_fract *tpf;
AVRational framerate_q = { 0 };
int i, ret;
- streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
if (s->framerate &&
(ret = av_parse_video_rate(&framerate_q, s->framerate)) < 0) {
av_log(s1, AV_LOG_ERROR, "Could not parse framerate '%s'.\n",
@@ -609,120 +650,149 @@ static int v4l2_set_parameters(AVFormatContext *s1)
return ret;
}
- /* set tv video input */
- input.index = s->channel;
- if (ioctl(s->fd, VIDIOC_ENUMINPUT, &input) < 0) {
- av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl enum input failed:\n");
- return AVERROR(EIO);
- }
+ if (s->standard) {
+ if (s->std_id) {
+ ret = 0;
+ av_log(s1, AV_LOG_DEBUG, "Setting standard: %s\n", s->standard);
+ /* set tv standard */
+ for (i = 0; ; i++) {
+ standard.index = i;
+ if (v4l2_ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
+ ret = AVERROR(errno);
+ break;
+ }
+ if (!av_strcasecmp(standard.name, s->standard))
+ break;
+ }
+ if (ret < 0) {
+ av_log(s1, AV_LOG_ERROR, "Unknown or unsupported standard '%s'\n", s->standard);
+ return ret;
+ }
- av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set input_id: %d, input: %s\n",
- s->channel, input.name);
- if (ioctl(s->fd, VIDIOC_S_INPUT, &input.index) < 0) {
- av_log(s1, AV_LOG_ERROR,
- "The V4L2 driver ioctl set input(%d) failed\n",
- s->channel);
- return AVERROR(EIO);
+ if (v4l2_ioctl(s->fd, VIDIOC_S_STD, &standard.id) < 0) {
+ ret = AVERROR(errno);
+ av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_S_STD): %s\n", av_err2str(ret));
+ return ret;
+ }
+ } else {
+ av_log(s1, AV_LOG_WARNING,
+ "This device does not support any standard\n");
+ }
}
- if (s->standard) {
- av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set standard: %s\n",
- s->standard);
- /* set tv standard */
- for(i=0;;i++) {
+ /* get standard */
+ if (v4l2_ioctl(s->fd, VIDIOC_G_STD, &s->std_id) == 0) {
+ tpf = &standard.frameperiod;
+ for (i = 0; ; i++) {
standard.index = i;
- if (ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
- av_log(s1, AV_LOG_ERROR,
- "The V4L2 driver ioctl set standard(%s) failed\n",
- s->standard);
- return AVERROR(EIO);
+ if (v4l2_ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
+ ret = AVERROR(errno);
+ av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_ENUMSTD): %s\n", av_err2str(ret));
+ return ret;
}
-
- if (!av_strcasecmp(standard.name, s->standard)) {
+ if (standard.id == s->std_id) {
+ av_log(s1, AV_LOG_DEBUG,
+ "Current standard: %s, id: %"PRIu64", frameperiod: %d/%d\n",
+ standard.name, (uint64_t)standard.id, tpf->numerator, tpf->denominator);
break;
}
}
+ } else {
+ tpf = &streamparm.parm.capture.timeperframe;
+ }
- av_log(s1, AV_LOG_DEBUG,
- "The V4L2 driver set standard: %s, id: %"PRIu64"\n",
- s->standard, (uint64_t)standard.id);
- if (ioctl(s->fd, VIDIOC_S_STD, &standard.id) < 0) {
- av_log(s1, AV_LOG_ERROR,
- "The V4L2 driver ioctl set standard(%s) failed\n",
- s->standard);
- return AVERROR(EIO);
- }
+ streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (v4l2_ioctl(s->fd, VIDIOC_G_PARM, &streamparm) < 0) {
+ ret = AVERROR(errno);
+ av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_PARM): %s\n", av_err2str(ret));
+ return ret;
}
if (framerate_q.num && framerate_q.den) {
- av_log(s1, AV_LOG_DEBUG, "Setting time per frame to %d/%d\n",
- framerate_q.den, framerate_q.num);
- tpf->numerator = framerate_q.den;
- tpf->denominator = framerate_q.num;
-
- if (ioctl(s->fd, VIDIOC_S_PARM, &streamparm) != 0) {
- av_log(s1, AV_LOG_ERROR,
- "ioctl set time per frame(%d/%d) failed\n",
+ if (streamparm.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) {
+ tpf = &streamparm.parm.capture.timeperframe;
+
+ av_log(s1, AV_LOG_DEBUG, "Setting time per frame to %d/%d\n",
framerate_q.den, framerate_q.num);
- return AVERROR(EIO);
- }
+ tpf->numerator = framerate_q.den;
+ tpf->denominator = framerate_q.num;
- if (framerate_q.num != tpf->denominator ||
- framerate_q.den != tpf->numerator) {
- av_log(s1, AV_LOG_INFO,
- "The driver changed the time per frame from "
- "%d/%d to %d/%d\n",
- framerate_q.den, framerate_q.num,
- tpf->numerator, tpf->denominator);
- }
- } else {
- if (ioctl(s->fd, VIDIOC_G_PARM, &streamparm) != 0) {
- av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_PARM): %s\n",
- strerror(errno));
- return AVERROR(errno);
+ if (v4l2_ioctl(s->fd, VIDIOC_S_PARM, &streamparm) < 0) {
+ ret = AVERROR(errno);
+ av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_S_PARM): %s\n", av_err2str(ret));
+ return ret;
+ }
+
+ if (framerate_q.num != tpf->denominator ||
+ framerate_q.den != tpf->numerator) {
+ av_log(s1, AV_LOG_INFO,
+ "The driver changed the time per frame from "
+ "%d/%d to %d/%d\n",
+ framerate_q.den, framerate_q.num,
+ tpf->numerator, tpf->denominator);
+ }
+ } else {
+ av_log(s1, AV_LOG_WARNING,
+ "The driver does not allow to change time per frame\n");
}
}
s1->streams[0]->avg_frame_rate.num = tpf->denominator;
s1->streams[0]->avg_frame_rate.den = tpf->numerator;
-
- s->timeout = 100 +
- av_rescale_q(1, s1->streams[0]->avg_frame_rate,
- (AVRational){1, 1000});
+ s1->streams[0]->r_frame_rate = s1->streams[0]->avg_frame_rate;
return 0;
}
-static uint32_t device_try_init(AVFormatContext *s1,
- enum AVPixelFormat pix_fmt,
- int *width,
- int *height,
- enum AVCodecID *codec_id)
+static int device_try_init(AVFormatContext *s1,
+ enum AVPixelFormat pix_fmt,
+ int *width,
+ int *height,
+ uint32_t *desired_format,
+ enum AVCodecID *codec_id)
{
- uint32_t desired_format = fmt_ff2v4l(pix_fmt, s1->video_codec_id);
+ int ret, i;
- if (desired_format == 0 ||
- device_init(s1, width, height, desired_format) < 0) {
- int i;
+ *desired_format = avpriv_fmt_ff2v4l(pix_fmt, s1->video_codec_id);
- desired_format = 0;
- for (i = 0; i<FF_ARRAY_ELEMS(fmt_conversion_table); i++) {
+ if (*desired_format) {
+ ret = device_init(s1, width, height, *desired_format);
+ if (ret < 0) {
+ *desired_format = 0;
+ if (ret != AVERROR(EINVAL))
+ return ret;
+ }
+ }
+
+ if (!*desired_format) {
+ for (i = 0; avpriv_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
if (s1->video_codec_id == AV_CODEC_ID_NONE ||
- fmt_conversion_table[i].codec_id == s1->video_codec_id) {
- desired_format = fmt_conversion_table[i].v4l2_fmt;
- if (device_init(s1, width, height, desired_format) >= 0) {
+ avpriv_fmt_conversion_table[i].codec_id == s1->video_codec_id) {
+ av_log(s1, AV_LOG_DEBUG, "Trying to set codec:%s pix_fmt:%s\n",
+ avcodec_get_name(avpriv_fmt_conversion_table[i].codec_id),
+ (char *)av_x_if_null(av_get_pix_fmt_name(avpriv_fmt_conversion_table[i].ff_fmt), "none"));
+
+ *desired_format = avpriv_fmt_conversion_table[i].v4l2_fmt;
+ ret = device_init(s1, width, height, *desired_format);
+ if (ret >= 0)
break;
- }
- desired_format = 0;
+ else if (ret != AVERROR(EINVAL))
+ return ret;
+ *desired_format = 0;
}
}
- }
- if (desired_format != 0) {
- *codec_id = fmt_v4l2codec(desired_format);
- assert(*codec_id != AV_CODEC_ID_NONE);
+ if (*desired_format == 0) {
+ av_log(s1, AV_LOG_ERROR, "Cannot find a proper format for "
+ "codec '%s' (id %d), pixel format '%s' (id %d)\n",
+ avcodec_get_name(s1->video_codec_id), s1->video_codec_id,
+ (char *)av_x_if_null(av_get_pix_fmt_name(pix_fmt), "none"), pix_fmt);
+ ret = AVERROR(EINVAL);
+ }
}
- return desired_format;
+ *codec_id = avpriv_fmt_v4l2codec(*desired_format);
+ av_assert0(*codec_id != AV_CODEC_ID_NONE);
+ return ret;
}
static int v4l2_read_header(AVFormatContext *s1)
@@ -731,31 +801,65 @@ static int v4l2_read_header(AVFormatContext *s1)
AVStream *st;
int res = 0;
uint32_t desired_format;
- enum AVCodecID codec_id;
+ enum AVCodecID codec_id = AV_CODEC_ID_NONE;
enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;
+ struct v4l2_input input = { 0 };
st = avformat_new_stream(s1, NULL);
if (!st)
return AVERROR(ENOMEM);
+#if CONFIG_LIBV4L2
+ /* silence libv4l2 logging. if fopen() fails v4l2_log_file will be NULL
+ and errors will get sent to stderr */
+ if (s->use_libv4l2)
+ v4l2_log_file = fopen("/dev/null", "w");
+#endif
+
s->fd = device_open(s1);
if (s->fd < 0)
return s->fd;
+ if (s->channel != -1) {
+ /* set video input */
+ av_log(s1, AV_LOG_DEBUG, "Selecting input_channel: %d\n", s->channel);
+ if (v4l2_ioctl(s->fd, VIDIOC_S_INPUT, &s->channel) < 0) {
+ res = AVERROR(errno);
+ av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_S_INPUT): %s\n", av_err2str(res));
+ return res;
+ }
+ } else {
+ /* get current video input */
+ if (v4l2_ioctl(s->fd, VIDIOC_G_INPUT, &s->channel) < 0) {
+ res = AVERROR(errno);
+ av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_INPUT): %s\n", av_err2str(res));
+ return res;
+ }
+ }
+
+ /* enum input */
+ input.index = s->channel;
+ if (v4l2_ioctl(s->fd, VIDIOC_ENUMINPUT, &input) < 0) {
+ res = AVERROR(errno);
+ av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_ENUMINPUT): %s\n", av_err2str(res));
+ return res;
+ }
+ s->std_id = input.std;
+ av_log(s1, AV_LOG_DEBUG, "Current input_channel: %d, input_name: %s\n",
+ s->channel, input.name);
+
if (s->list_format) {
list_formats(s1, s->fd, s->list_format);
return AVERROR_EXIT;
}
- avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
-
- if (s->video_size &&
- (res = av_parse_video_size(&s->width, &s->height, s->video_size)) < 0) {
- av_log(s1, AV_LOG_ERROR, "Could not parse video size '%s'.\n",
- s->video_size);
- return res;
+ if (s->list_standard) {
+ list_standards(s1);
+ return AVERROR_EXIT;
}
+ avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+
if (s->pixel_format) {
AVCodec *codec = avcodec_find_decoder_by_name(s->pixel_format);
@@ -773,15 +877,14 @@ static int v4l2_read_header(AVFormatContext *s1)
}
if (!s->width && !s->height) {
- struct v4l2_format fmt;
+ struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
av_log(s1, AV_LOG_VERBOSE,
"Querying the device for the current frame size\n");
- fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- if (ioctl(s->fd, VIDIOC_G_FMT, &fmt) < 0) {
- av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_FMT): %s\n",
- strerror(errno));
- return AVERROR(errno);
+ if (v4l2_ioctl(s->fd, VIDIOC_G_FMT, &fmt) < 0) {
+ res = AVERROR(errno);
+ av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_FMT): %s\n", av_err2str(res));
+ return res;
}
s->width = fmt.fmt.pix.width;
@@ -790,41 +893,48 @@ static int v4l2_read_header(AVFormatContext *s1)
"Setting frame size to %dx%d\n", s->width, s->height);
}
- desired_format = device_try_init(s1, pix_fmt, &s->width, &s->height,
- &codec_id);
- if (desired_format == 0) {
- av_log(s1, AV_LOG_ERROR, "Cannot find a proper format for "
- "codec_id %d, pix_fmt %d.\n", s1->video_codec_id, pix_fmt);
- close(s->fd);
-
- return AVERROR(EIO);
+ res = device_try_init(s1, pix_fmt, &s->width, &s->height, &desired_format, &codec_id);
+ if (res < 0) {
+ v4l2_close(s->fd);
+ return res;
}
- if ((res = av_image_check_size(s->width, s->height, 0, s1) < 0))
+ /* If no pixel_format was specified, the codec_id was not known up
+ * until now. Set video_codec_id in the context, as codec_id will
+ * not be available outside this function
+ */
+ if (codec_id != AV_CODEC_ID_NONE && s1->video_codec_id == AV_CODEC_ID_NONE)
+ s1->video_codec_id = codec_id;
+
+ if ((res = av_image_check_size(s->width, s->height, 0, s1)) < 0)
return res;
s->frame_format = desired_format;
- if ((res = v4l2_set_parameters(s1) < 0))
+ if ((res = v4l2_set_parameters(s1)) < 0)
return res;
- st->codec->pix_fmt = fmt_v4l2ff(desired_format, codec_id);
+ st->codec->pix_fmt = avpriv_fmt_v4l2ff(desired_format, codec_id);
s->frame_size =
avpicture_get_size(st->codec->pix_fmt, s->width, s->height);
if ((res = mmap_init(s1)) ||
(res = mmap_start(s1)) < 0) {
- close(s->fd);
+ v4l2_close(s->fd);
return res;
}
- s->top_field_first = first_field(s->fd);
+ s->top_field_first = first_field(s, s->fd);
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = codec_id;
if (codec_id == AV_CODEC_ID_RAWVIDEO)
st->codec->codec_tag =
avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
+ if (desired_format == V4L2_PIX_FMT_YVU420)
+ st->codec->codec_tag = MKTAG('Y', 'V', '1', '2');
+ else if (desired_format == V4L2_PIX_FMT_YVU410)
+ st->codec->codec_tag = MKTAG('Y', 'V', 'U', '9');
st->codec->width = s->width;
st->codec->height = s->height;
st->codec->bit_rate = s->frame_size * av_q2d(st->avg_frame_rate) * 8;
@@ -861,23 +971,35 @@ static int v4l2_read_close(AVFormatContext *s1)
mmap_close(s);
- close(s->fd);
+ v4l2_close(s->fd);
return 0;
}
#define OFFSET(x) offsetof(struct video_data, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
+
static const AVOption options[] = {
- { "standard", "TV standard, used only by analog frame grabber", OFFSET(standard), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC },
- { "channel", "TV channel, used only by frame grabber", OFFSET(channel), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, DEC },
- { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
- { "pixel_format", "Preferred pixel format", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
- { "input_format", "Preferred pixel format (for raw video) or codec name", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
- { "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
- { "list_formats", "List available formats and exit", OFFSET(list_format), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, DEC, "list_formats" },
- { "all", "Show all available formats", OFFSET(list_format), AV_OPT_TYPE_CONST, {.i64 = V4L_ALLFORMATS }, 0, INT_MAX, DEC, "list_formats" },
- { "raw", "Show only non-compressed formats", OFFSET(list_format), AV_OPT_TYPE_CONST, {.i64 = V4L_RAWFORMATS }, 0, INT_MAX, DEC, "list_formats" },
- { "compressed", "Show only compressed formats", OFFSET(list_format), AV_OPT_TYPE_CONST, {.i64 = V4L_COMPFORMATS }, 0, INT_MAX, DEC, "list_formats" },
+ { "standard", "set TV standard, used only by analog frame grabber", OFFSET(standard), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC },
+ { "channel", "set TV channel, used only by frame grabber", OFFSET(channel), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, INT_MAX, DEC },
+ { "video_size", "set frame size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, DEC },
+ { "pixel_format", "set preferred pixel format", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+ { "input_format", "set preferred pixel format (for raw video) or codec name", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+ { "framerate", "set frame rate", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+
+ { "list_formats", "list available formats and exit", OFFSET(list_format), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, DEC, "list_formats" },
+ { "all", "show all available formats", OFFSET(list_format), AV_OPT_TYPE_CONST, {.i64 = V4L_ALLFORMATS }, 0, INT_MAX, DEC, "list_formats" },
+ { "raw", "show only non-compressed formats", OFFSET(list_format), AV_OPT_TYPE_CONST, {.i64 = V4L_RAWFORMATS }, 0, INT_MAX, DEC, "list_formats" },
+ { "compressed", "show only compressed formats", OFFSET(list_format), AV_OPT_TYPE_CONST, {.i64 = V4L_COMPFORMATS }, 0, INT_MAX, DEC, "list_formats" },
+
+ { "list_standards", "list supported standards and exit", OFFSET(list_standard), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, DEC, "list_standards" },
+ { "all", "show all supported standards", OFFSET(list_standard), AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, DEC, "list_standards" },
+
+ { "timestamps", "set type of timestamps for grabbed frames", OFFSET(ts_mode), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 2, DEC, "timestamps" },
+ { "ts", "set type of timestamps for grabbed frames", OFFSET(ts_mode), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 2, DEC, "timestamps" },
+ { "default", "use timestamps from the kernel", OFFSET(ts_mode), AV_OPT_TYPE_CONST, {.i64 = V4L_TS_DEFAULT }, 0, 2, DEC, "timestamps" },
+ { "abs", "use absolute timestamps (wall clock)", OFFSET(ts_mode), AV_OPT_TYPE_CONST, {.i64 = V4L_TS_ABS }, 0, 2, DEC, "timestamps" },
+ { "mono2abs", "force conversion from monotonic to absolute timestamps", OFFSET(ts_mode), AV_OPT_TYPE_CONST, {.i64 = V4L_TS_MONO2ABS }, 0, 2, DEC, "timestamps" },
+ { "use_libv4l2", "use libv4l2 (v4l-utils) convertion functions", OFFSET(use_libv4l2), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, DEC },
{ NULL },
};
@@ -889,7 +1011,7 @@ static const AVClass v4l2_class = {
};
AVInputFormat ff_v4l2_demuxer = {
- .name = "video4linux2",
+ .name = "video4linux2,v4l2",
.long_name = NULL_IF_CONFIG_SMALL("Video4Linux2 device grab"),
.priv_data_size = sizeof(struct video_data),
.read_header = v4l2_read_header,
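
Note on the option changes above: the demuxer now parses video_size and framerate through typed AVOptions and gains list_standards, timestamps and use_libv4l2. A minimal sketch, assuming only the standard libavformat/libavdevice API, of how a caller might pass these private options when opening the device; the helper name, "/dev/video0" path and option values are illustrative, not part of this patch:

#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

/* Sketch only: open the video4linux2 demuxer with some of the private
 * options added above; "/dev/video0" and the values are examples. */
static int open_v4l2_grab(AVFormatContext **ctx)
{
    AVDictionary *opts = NULL;
    AVInputFormat *ifmt;
    int ret;

    avdevice_register_all();
    ifmt = av_find_input_format("video4linux2"); /* registered as "video4linux2,v4l2" */
    av_dict_set(&opts, "video_size", "640x480", 0);  /* AV_OPT_TYPE_IMAGE_SIZE */
    av_dict_set(&opts, "framerate", "25", 0);
    av_dict_set(&opts, "timestamps", "mono2abs", 0); /* new timestamps option */
    ret = avformat_open_input(ctx, "/dev/video0", ifmt, &opts);
    av_dict_free(&opts);                             /* any rejected options end up here */
    return ret;
}
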
diff --git a/libavdevice/v4l2enc.c b/libavdevice/v4l2enc.c
new file mode 100644
index 0000000000..21f0ef6983
--- /dev/null
+++ b/libavdevice/v4l2enc.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2013 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "v4l2-common.h"
+#include "avdevice.h"
+
+typedef struct {
+ int fd;
+} V4L2Context;
+
+static av_cold int write_header(AVFormatContext *s1)
+{
+ int res = 0, flags = O_RDWR;
+ struct v4l2_format fmt = {
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT
+ };
+ V4L2Context *s = s1->priv_data;
+ AVCodecContext *enc_ctx;
+ uint32_t v4l2_pixfmt;
+
+ if (s1->flags & AVFMT_FLAG_NONBLOCK)
+ flags |= O_NONBLOCK;
+
+ s->fd = open(s1->filename, flags);
+ if (s->fd < 0) {
+ res = AVERROR(errno);
+ av_log(s1, AV_LOG_ERROR, "Unable to open V4L2 device '%s'\n", s1->filename);
+ return res;
+ }
+
+ if (s1->nb_streams != 1 ||
+ s1->streams[0]->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
+ s1->streams[0]->codec->codec_id != AV_CODEC_ID_RAWVIDEO) {
+ av_log(s1, AV_LOG_ERROR,
+ "V4L2 output device supports only a single raw video stream\n");
+ return AVERROR(EINVAL);
+ }
+
+ enc_ctx = s1->streams[0]->codec;
+
+ v4l2_pixfmt = avpriv_fmt_ff2v4l(enc_ctx->pix_fmt, AV_CODEC_ID_RAWVIDEO);
+ if (!v4l2_pixfmt) { // XXX: try to force them one by one?
+ av_log(s1, AV_LOG_ERROR, "Unknown V4L2 pixel format equivalent for %s\n",
+ av_get_pix_fmt_name(enc_ctx->pix_fmt));
+ return AVERROR(EINVAL);
+ }
+
+ if (ioctl(s->fd, VIDIOC_G_FMT, &fmt) < 0) {
+ res = AVERROR(errno);
+ av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_FMT): %s\n", av_err2str(res));
+ return res;
+ }
+
+ fmt.fmt.pix.width = enc_ctx->width;
+ fmt.fmt.pix.height = enc_ctx->height;
+ fmt.fmt.pix.pixelformat = v4l2_pixfmt;
+ fmt.fmt.pix.sizeimage = av_image_get_buffer_size(enc_ctx->pix_fmt, enc_ctx->width, enc_ctx->height, 1);
+
+ if (ioctl(s->fd, VIDIOC_S_FMT, &fmt) < 0) {
+ res = AVERROR(errno);
+ av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_S_FMT): %s\n", av_err2str(res));
+ return res;
+ }
+
+ return res;
+}
+
+static int write_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ const V4L2Context *s = s1->priv_data;
+ if (write(s->fd, pkt->data, pkt->size) == -1)
+ return AVERROR(errno);
+ return 0;
+}
+
+static int write_trailer(AVFormatContext *s1)
+{
+ const V4L2Context *s = s1->priv_data;
+ close(s->fd);
+ return 0;
+}
+
+AVOutputFormat ff_v4l2_muxer = {
+ .name = "v4l2",
+ .long_name = NULL_IF_CONFIG_SMALL("Video4Linux2 output device"),
+ .priv_data_size = sizeof(V4L2Context),
+ .audio_codec = AV_CODEC_ID_NONE,
+ .video_codec = AV_CODEC_ID_RAWVIDEO,
+ .write_header = write_header,
+ .write_packet = write_packet,
+ .write_trailer = write_trailer,
+ .flags = AVFMT_NOFILE,
+};
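
The new muxer above negotiates the format with VIDIOC_S_FMT in write_header() and then writes each raw picture straight to the device in write_packet(), so it only accepts a single rawvideo stream. A minimal sketch, assuming the usual libavformat muxing calls, of how it might be set up; the helper name, "/dev/video1" path and yuv420p choice are assumptions:

#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>

/* Sketch only: prepare the new "v4l2" output device for raw yuv420p frames;
 * "/dev/video1" stands in for whatever output/loopback node is available. */
static int open_v4l2_out(AVFormatContext **octx, int w, int h)
{
    AVStream *st;
    int ret;

    avdevice_register_all();
    ret = avformat_alloc_output_context2(octx, NULL, "v4l2", "/dev/video1");
    if (ret < 0)
        return ret;
    st = avformat_new_stream(*octx, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_RAWVIDEO;
    st->codec->pix_fmt    = AV_PIX_FMT_YUV420P;
    st->codec->width      = w;
    st->codec->height     = h;
    return avformat_write_header(*octx, NULL);  /* runs write_header() above */
}
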
diff --git a/libavdevice/version.h b/libavdevice/version.h
index 9731606adc..1e18f51d4a 100644
--- a/libavdevice/version.h
+++ b/libavdevice/version.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,9 +27,9 @@
#include "libavutil/avutil.h"
-#define LIBAVDEVICE_VERSION_MAJOR 54
-#define LIBAVDEVICE_VERSION_MINOR 0
-#define LIBAVDEVICE_VERSION_MICRO 0
+#define LIBAVDEVICE_VERSION_MAJOR 55
+#define LIBAVDEVICE_VERSION_MINOR 3
+#define LIBAVDEVICE_VERSION_MICRO 100
#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \
LIBAVDEVICE_VERSION_MINOR, \
@@ -39,6 +39,8 @@
LIBAVDEVICE_VERSION_MICRO)
#define LIBAVDEVICE_BUILD LIBAVDEVICE_VERSION_INT
+#define LIBAVDEVICE_IDENT "Lavd" AV_STRINGIFY(LIBAVDEVICE_VERSION)
+
/**
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
diff --git a/libavdevice/vfwcap.c b/libavdevice/vfwcap.c
index fea85fc5ae..014f18c27e 100644
--- a/libavdevice/vfwcap.c
+++ b/libavdevice/vfwcap.c
@@ -2,37 +2,35 @@
* VFW capture interface
* Copyright (c) 2006-2008 Ramiro Polla
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include <windows.h>
#include <vfw.h>
+#include "avdevice.h"
/* Defines for VFW missing from MinGW.
* Remove this when MinGW incorporates them. */
#define HWND_MESSAGE ((HWND)-3)
-#define BI_RGB 0
-
/* End of missing MinGW defines */
struct vfw_ctx {
@@ -157,7 +155,7 @@ static void dump_bih(AVFormatContext *s, BITMAPINFOHEADER *bih)
static int shall_we_drop(AVFormatContext *s)
{
struct vfw_ctx *ctx = s->priv_data;
- const uint8_t dropscore[] = {62, 75, 87, 100};
+ static const uint8_t dropscore[] = {62, 75, 87, 100};
const int ndropscores = FF_ARRAY_ELEMS(dropscore);
unsigned int buffer_fullness = (ctx->curbufsize*100)/s->max_picture_buffer;
@@ -245,7 +243,7 @@ static int vfw_read_header(AVFormatContext *s)
AVStream *st;
int devnum;
int bisize;
- BITMAPINFO *bi;
+ BITMAPINFO *bi = NULL;
CAPTUREPARMS cparms;
DWORD biCompression;
WORD biBitCount;
@@ -291,7 +289,7 @@ static int vfw_read_header(AVFormatContext *s)
(LPARAM) videostream_cb);
if(!ret) {
av_log(s, AV_LOG_ERROR, "Could not set video stream callback.\n");
- goto fail_io;
+ goto fail;
}
SetWindowLongPtr(ctx->hwnd, GWLP_USERDATA, (LONG_PTR) s);
@@ -305,7 +303,7 @@ static int vfw_read_header(AVFormatContext *s)
/* Set video format */
bisize = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, 0, 0);
if(!bisize)
- goto fail_io;
+ goto fail;
bi = av_malloc(bisize);
if(!bi) {
vfw_read_close(s);
@@ -313,16 +311,21 @@ static int vfw_read_header(AVFormatContext *s)
}
ret = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, bisize, (LPARAM) bi);
if(!ret)
- goto fail_bi;
+ goto fail;
dump_bih(s, &bi->bmiHeader);
+ ret = av_parse_video_rate(&framerate_q, ctx->framerate);
+ if (ret < 0) {
+ av_log(s, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", ctx->framerate);
+ goto fail;
+ }
if (ctx->video_size) {
ret = av_parse_video_size(&bi->bmiHeader.biWidth, &bi->bmiHeader.biHeight, ctx->video_size);
if (ret < 0) {
av_log(s, AV_LOG_ERROR, "Couldn't parse video size.\n");
- goto fail_bi;
+ goto fail;
}
}
@@ -341,19 +344,17 @@ static int vfw_read_header(AVFormatContext *s)
ret = SendMessage(ctx->hwnd, WM_CAP_SET_VIDEOFORMAT, bisize, (LPARAM) bi);
if(!ret) {
av_log(s, AV_LOG_ERROR, "Could not set Video Format.\n");
- goto fail_bi;
+ goto fail;
}
biCompression = bi->bmiHeader.biCompression;
biBitCount = bi->bmiHeader.biBitCount;
- av_free(bi);
-
/* Set sequence setup */
ret = SendMessage(ctx->hwnd, WM_CAP_GET_SEQUENCE_SETUP, sizeof(cparms),
(LPARAM) &cparms);
if(!ret)
- goto fail_io;
+ goto fail;
dump_captureparms(s, &cparms);
@@ -368,10 +369,10 @@ static int vfw_read_header(AVFormatContext *s)
ret = SendMessage(ctx->hwnd, WM_CAP_SET_SEQUENCE_SETUP, sizeof(cparms),
(LPARAM) &cparms);
if(!ret)
- goto fail_io;
+ goto fail;
codec = st->codec;
- codec->time_base = (AVRational){framerate_q.den, framerate_q.num};
+ codec->time_base = av_inv_q(framerate_q);
codec->codec_type = AVMEDIA_TYPE_VIDEO;
codec->width = bi->bmiHeader.biWidth;
codec->height = bi->bmiHeader.biHeight;
@@ -397,31 +398,31 @@ static int vfw_read_header(AVFormatContext *s)
}
}
+ av_freep(&bi);
+
avpriv_set_pts_info(st, 32, 1, 1000);
ctx->mutex = CreateMutex(NULL, 0, NULL);
if(!ctx->mutex) {
av_log(s, AV_LOG_ERROR, "Could not create Mutex.\n" );
- goto fail_io;
+ goto fail;
}
ctx->event = CreateEvent(NULL, 1, 0, NULL);
if(!ctx->event) {
av_log(s, AV_LOG_ERROR, "Could not create Event.\n" );
- goto fail_io;
+ goto fail;
}
ret = SendMessage(ctx->hwnd, WM_CAP_SEQUENCE_NOFILE, 0, 0);
if(!ret) {
av_log(s, AV_LOG_ERROR, "Could not start capture sequence.\n" );
- goto fail_io;
+ goto fail;
}
return 0;
-fail_bi:
- av_free(bi);
-
-fail_io:
+fail:
+ av_freep(&bi);
vfw_read_close(s);
return AVERROR(EIO);
}
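
The vfwcap hunks above collapse the separate fail_bi/fail_io labels into one fail: path and switch to av_freep(), so every error branch performs the same cleanup exactly once. The pattern, reduced to a self-contained sketch; the helper names below are made up for illustration:

#include <stdint.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

/* Hypothetical setup steps, stubbed so the sketch compiles on its own. */
static int step_one(void)          { return 0; }
static int step_two(uint8_t *buf)  { return buf ? 0 : -1; }

/* Single-exit cleanup: av_freep() frees and nulls the pointer, so the
 * cleanup is identical on every error branch and cannot double free. */
static int grab_setup(void)
{
    uint8_t *buf = av_malloc(4096);
    if (!buf)
        return AVERROR(ENOMEM);

    if (step_one() < 0)
        goto fail;
    if (step_two(buf) < 0)
        goto fail;

    av_freep(&buf);
    return 0;

fail:
    av_freep(&buf);
    return AVERROR(EIO);
}
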
diff --git a/libavdevice/x11grab.c b/libavdevice/x11grab.c
index 8edbf7bf2b..eb23ec332d 100644
--- a/libavdevice/x11grab.c
+++ b/libavdevice/x11grab.c
@@ -1,9 +1,9 @@
/*
* X11 video grab interface
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav integration:
+ * FFmpeg integration:
* Copyright (C) 2006 Clemens Fruhwirth <clemens@endorphin.org>
* Edouard Gomez <ed.gomez@free.fr>
*
@@ -14,18 +14,18 @@
* Copyright (C) 1997-1998 Rasca, Berlin
* 2003-2004 Karl H. Beckers, Frankfurt
*
- * Libav is free software; you can redistribute it and/or modify
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with Libav; if not, write to the Free Software
+ * along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -37,13 +37,13 @@
*/
#include "config.h"
-#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/time.h"
#include <time.h>
+#include <X11/cursorfont.h>
#include <X11/X.h>
#include <X11/Xlib.h>
#include <X11/Xlibint.h>
@@ -53,6 +53,7 @@
#include <X11/extensions/shape.h>
#include <X11/extensions/XShm.h>
#include <X11/extensions/Xfixes.h>
+#include "avdevice.h"
/**
* X11 Device Demuxer context
@@ -63,9 +64,8 @@ struct x11grab {
AVRational time_base; /**< Time base */
int64_t time_frame; /**< Current time */
- char *video_size; /**< String describing video size, set by a private option. */
- int height; /**< Height of the grab frame */
int width; /**< Width of the grab frame */
+ int height; /**< Height of the grab frame */
int x_off; /**< Horizontal top-left corner coordinate */
int y_off; /**< Vertical top-left corner coordinate */
@@ -76,8 +76,9 @@ struct x11grab {
int draw_mouse; /**< Set by a private option. */
int follow_mouse; /**< Set by a private option. */
int show_region; /**< set by a private option. */
- char *framerate; /**< Set by a private option. */
+ AVRational framerate; /**< Set by a private option. */
+ Cursor c;
Window region_win; /**< This is used by show_region option. */
};
@@ -164,33 +165,30 @@ x11grab_read_header(AVFormatContext *s1)
int y_off = 0;
int screen;
int use_shm;
- char *param, *offset;
+ char *dpyname, *offset;
int ret = 0;
- AVRational framerate;
- param = av_strdup(s1->filename);
- if (!param)
+ dpyname = av_strdup(s1->filename);
+ if (!dpyname)
goto out;
- offset = strchr(param, '+');
+ offset = strchr(dpyname, '+');
if (offset) {
sscanf(offset, "%d,%d", &x_off, &y_off);
- x11grab->draw_mouse = !strstr(offset, "nomouse");
+ if (strstr(offset, "nomouse")) {
+ av_log(s1, AV_LOG_WARNING,
+ "'nomouse' specification in argument is deprecated: "
+ "use 'draw_mouse' option with value 0 instead\n");
+ x11grab->draw_mouse = 0;
+ }
*offset= 0;
}
- if ((ret = av_parse_video_size(&x11grab->width, &x11grab->height, x11grab->video_size)) < 0) {
- av_log(s1, AV_LOG_ERROR, "Couldn't parse video size.\n");
- goto out;
- }
- if ((ret = av_parse_video_rate(&framerate, x11grab->framerate)) < 0) {
- av_log(s1, AV_LOG_ERROR, "Could not parse framerate: %s.\n", x11grab->framerate);
- goto out;
- }
av_log(s1, AV_LOG_INFO, "device: %s -> display: %s x: %d y: %d width: %d height: %d\n",
- s1->filename, param, x_off, y_off, x11grab->width, x11grab->height);
+ s1->filename, dpyname, x_off, y_off, x11grab->width, x11grab->height);
- dpy = XOpenDisplay(param);
+ dpy = XOpenDisplay(dpyname);
+ av_freep(&dpyname);
if(!dpy) {
av_log(s1, AV_LOG_ERROR, "Could not open X display.\n");
ret = AVERROR(EIO);
@@ -221,7 +219,7 @@ x11grab_read_header(AVFormatContext *s1)
}
use_shm = XShmQueryExtension(dpy);
- av_log(s1, AV_LOG_INFO, "shared memory extension %s found\n", use_shm ? "" : "not");
+ av_log(s1, AV_LOG_INFO, "shared memory extension%s found\n", use_shm ? "" : " not");
if(use_shm) {
int scr = XDefaultScreen(dpy);
@@ -296,7 +294,7 @@ x11grab_read_header(AVFormatContext *s1)
}
break;
case 32:
- input_pixfmt = AV_PIX_FMT_RGB32;
+ input_pixfmt = AV_PIX_FMT_0RGB32;
break;
default:
av_log(s1, AV_LOG_ERROR, "image depth %i not supported ... aborting\n", image->bits_per_pixel);
@@ -306,7 +304,7 @@ x11grab_read_header(AVFormatContext *s1)
x11grab->frame_size = x11grab->width * x11grab->height * image->bits_per_pixel/8;
x11grab->dpy = dpy;
- x11grab->time_base = (AVRational){framerate.den, framerate.num};
+ x11grab->time_base = av_inv_q(x11grab->framerate);
x11grab->time_frame = av_gettime() / av_q2d(x11grab->time_base);
x11grab->x_off = x_off;
x11grab->y_off = y_off;
@@ -322,7 +320,7 @@ x11grab_read_header(AVFormatContext *s1)
st->codec->bit_rate = x11grab->frame_size * 1/av_q2d(x11grab->time_base) * 8;
out:
- av_free(param);
+ av_free(dpyname);
return ret;
}
@@ -351,11 +349,19 @@ paint_mouse_pointer(XImage *image, struct x11grab *s)
* Anyone who performs further investigation of the xlib API likely risks
* permanent brain damage. */
uint8_t *pix = image->data;
+ Window w;
+ XSetWindowAttributes attr;
/* Code doesn't currently support 16-bit or PAL8 */
if (image->bits_per_pixel != 24 && image->bits_per_pixel != 32)
return;
+ if(!s->c)
+ s->c = XCreateFontCursor(dpy, XC_left_ptr);
+ w = DefaultRootWindow(dpy);
+ attr.cursor = s->c;
+ XChangeWindowAttributes(dpy, w, CWCursor, &attr);
+
xcim = XFixesGetCursorImage(dpy);
x = xcim->x - xcim->xhot;
@@ -585,13 +591,16 @@ x11grab_read_close(AVFormatContext *s1)
#define OFFSET(x) offsetof(struct x11grab, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
- { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), AV_OPT_TYPE_STRING, {.str = "vga"}, 0, 0, DEC },
- { "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC },
- { "draw_mouse", "Draw the mouse pointer.", OFFSET(draw_mouse), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, DEC },
- { "follow_mouse", "Move the grabbing region when the mouse pointer reaches within specified amount of pixels to the edge of region.",
- OFFSET(follow_mouse), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT_MAX, DEC, "follow_mouse" },
- { "centered", "Keep the mouse pointer at the center of grabbing region when following.", 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, INT_MIN, INT_MAX, DEC, "follow_mouse" },
- { "show_region", "Show the grabbing region.", OFFSET(show_region), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, DEC },
+ { "draw_mouse", "draw the mouse pointer", OFFSET(draw_mouse), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, DEC },
+
+ { "follow_mouse", "move the grabbing region when the mouse pointer reaches within specified amount of pixels to the edge of region",
+ OFFSET(follow_mouse), AV_OPT_TYPE_INT, {.i64 = 0}, -1, INT_MAX, DEC, "follow_mouse" },
+ { "centered", "keep the mouse pointer at the center of grabbing region when following",
+ 0, AV_OPT_TYPE_CONST, {.i64 = -1}, INT_MIN, INT_MAX, DEC, "follow_mouse" },
+
+ { "framerate", "set video frame rate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "ntsc"}, 0, 0, DEC },
+ { "show_region", "show the grabbing region", OFFSET(show_region), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, DEC },
+ { "video_size", "set video frame size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = "vga"}, 0, 0, DEC },
{ NULL },
};
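
With AV_OPT_TYPE_IMAGE_SIZE and AV_OPT_TYPE_VIDEO_RATE the option system parses video_size and framerate itself, writing into two consecutive ints and an AVRational, which is why width now precedes height in struct x11grab. A small self-contained sketch of that mechanism; the GrabOpts context and option table are illustrative, not part of the patch:

#include <stddef.h>
#include <stdio.h>
#include <libavutil/avutil.h>
#include <libavutil/log.h>
#include <libavutil/opt.h>

/* Hypothetical context mirroring the layout used by x11grab above. */
typedef struct {
    const AVClass *class;
    int width, height;        /* filled by AV_OPT_TYPE_IMAGE_SIZE */
    AVRational framerate;     /* filled by AV_OPT_TYPE_VIDEO_RATE */
} GrabOpts;

#define OFF(x) offsetof(GrabOpts, x)
static const AVOption grab_options[] = {
    { "video_size", "set frame size", OFF(width),     AV_OPT_TYPE_IMAGE_SIZE, {.str = "vga"},  0, 0, 0 },
    { "framerate",  "set frame rate", OFF(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "ntsc"}, 0, 0, 0 },
    { NULL }
};

static const AVClass grab_class = {
    .class_name = "grab_opts",
    .item_name  = av_default_item_name,
    .option     = grab_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

int main(void)
{
    GrabOpts o = { .class = &grab_class };
    av_opt_set_defaults(&o);                  /* "vga" -> 640x480, "ntsc" -> 30000/1001 */
    av_opt_set(&o, "video_size", "hd720", 0); /* now 1280x720 */
    printf("%dx%d @ %d/%d\n", o.width, o.height, o.framerate.num, o.framerate.den);
    av_opt_free(&o);
    return 0;
}
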
diff --git a/libavdevice/xv.c b/libavdevice/xv.c
new file mode 100644
index 0000000000..670c4dee8d
--- /dev/null
+++ b/libavdevice/xv.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2013 Jeff Moguillansky
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * XVideo output device
+ *
+ * TODO:
+ * - add support to more formats
+ * - add support to window id specification
+ */
+
+#include <X11/Xlib.h>
+#include <X11/extensions/Xv.h>
+#include <X11/extensions/Xvlib.h>
+#include <X11/extensions/XShm.h>
+#include <sys/shm.h>
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avdevice.h"
+
+typedef struct {
+ AVClass *class;
+ GC gc;
+
+ Window window;
+ char *window_title;
+ int window_width, window_height;
+ int window_x, window_y;
+
+ Display* display;
+ char *display_name;
+
+ XvImage* yuv_image;
+ int image_width, image_height;
+ XShmSegmentInfo yuv_shminfo;
+ int xv_port;
+} XVContext;
+
+static int xv_write_header(AVFormatContext *s)
+{
+ XVContext *xv = s->priv_data;
+ unsigned int num_adaptors;
+ XvAdaptorInfo *ai;
+ XvImageFormatValues *fv;
+ int num_formats = 0, j;
+ AVCodecContext *encctx = s->streams[0]->codec;
+
+ if ( s->nb_streams > 1
+ || encctx->codec_type != AVMEDIA_TYPE_VIDEO
+ || encctx->codec_id != AV_CODEC_ID_RAWVIDEO) {
+ av_log(s, AV_LOG_ERROR, "Only supports one rawvideo stream\n");
+ return AVERROR(EINVAL);
+ }
+
+ xv->display = XOpenDisplay(xv->display_name);
+ if (!xv->display) {
+ av_log(s, AV_LOG_ERROR, "Could not open the X11 display '%s'\n", xv->display_name);
+ return AVERROR(EINVAL);
+ }
+
+ xv->image_width = encctx->width;
+ xv->image_height = encctx->height;
+ if (!xv->window_width && !xv->window_height) {
+ xv->window_width = encctx->width;
+ xv->window_height = encctx->height;
+ }
+ xv->window = XCreateSimpleWindow(xv->display, DefaultRootWindow(xv->display),
+ xv->window_x, xv->window_y,
+ xv->window_width, xv->window_height,
+ 0, 0, 0);
+ if (!xv->window_title) {
+ if (!(xv->window_title = av_strdup(s->filename)))
+ return AVERROR(ENOMEM);
+ }
+ XStoreName(xv->display, xv->window, xv->window_title);
+ XMapWindow(xv->display, xv->window);
+
+ if (XvQueryAdaptors(xv->display, DefaultRootWindow(xv->display), &num_adaptors, &ai) != Success)
+ return AVERROR_EXTERNAL;
+ xv->xv_port = ai[0].base_id;
+
+ if (encctx->pix_fmt != AV_PIX_FMT_YUV420P) {
+ av_log(s, AV_LOG_ERROR,
+ "Unsupported pixel format '%s', only yuv420p is currently supported\n",
+ av_get_pix_fmt_name(encctx->pix_fmt));
+ return AVERROR_PATCHWELCOME;
+ }
+
+ fv = XvListImageFormats(xv->display, xv->xv_port, &num_formats);
+ if (!fv)
+ return AVERROR_EXTERNAL;
+ for (j = 0; j < num_formats; j++) {
+ if (fv[j].id == MKTAG('I','4','2','0')) {
+ break;
+ }
+ }
+ XFree(fv);
+
+ if (j >= num_formats) {
+ av_log(s, AV_LOG_ERROR,
+ "Device does not support pixel format yuv420p, aborting\n");
+ return AVERROR(EINVAL);
+ }
+
+ xv->gc = XCreateGC(xv->display, xv->window, 0, 0);
+ xv->image_width = encctx->width;
+ xv->image_height = encctx->height;
+ xv->yuv_image = XvShmCreateImage(xv->display, xv->xv_port,
+ MKTAG('I','4','2','0'), 0,
+ xv->image_width, xv->image_height, &xv->yuv_shminfo);
+ xv->yuv_shminfo.shmid = shmget(IPC_PRIVATE, xv->yuv_image->data_size,
+ IPC_CREAT | 0777);
+ xv->yuv_shminfo.shmaddr = (char *)shmat(xv->yuv_shminfo.shmid, 0, 0);
+ xv->yuv_image->data = xv->yuv_shminfo.shmaddr;
+ xv->yuv_shminfo.readOnly = False;
+
+ XShmAttach(xv->display, &xv->yuv_shminfo);
+ XSync(xv->display, False);
+ shmctl(xv->yuv_shminfo.shmid, IPC_RMID, 0);
+
+ return 0;
+}
+
+static int xv_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ XVContext *xv = s->priv_data;
+ XvImage *img = xv->yuv_image;
+ XWindowAttributes window_attrs;
+ AVPicture pict;
+ AVCodecContext *ctx = s->streams[0]->codec;
+ int y, h;
+
+ h = img->height / 2;
+
+ avpicture_fill(&pict, pkt->data, ctx->pix_fmt, ctx->width, ctx->height);
+ for (y = 0; y < img->height; y++) {
+ memcpy(&img->data[img->offsets[0] + (y * img->pitches[0])],
+ &pict.data[0][y * pict.linesize[0]], img->pitches[0]);
+ }
+
+ for (y = 0; y < h; ++y) {
+ memcpy(&img->data[img->offsets[1] + (y * img->pitches[1])],
+ &pict.data[1][y * pict.linesize[1]], img->pitches[1]);
+ memcpy(&img->data[img->offsets[2] + (y * img->pitches[2])],
+ &pict.data[2][y * pict.linesize[2]], img->pitches[2]);
+ }
+
+ XGetWindowAttributes(xv->display, xv->window, &window_attrs);
+ if (XvShmPutImage(xv->display, xv->xv_port, xv->window, xv->gc,
+ xv->yuv_image, 0, 0, xv->image_width, xv->image_height, 0, 0,
+ window_attrs.width, window_attrs.height, True) != Success) {
+ av_log(s, AV_LOG_ERROR, "Could not copy image to XV shared memory buffer\n");
+ return AVERROR_EXTERNAL;
+ }
+ return 0;
+}
+
+static int xv_write_trailer(AVFormatContext *s)
+{
+ XVContext *xv = s->priv_data;
+
+ XShmDetach(xv->display, &xv->yuv_shminfo);
+ shmdt(xv->yuv_image->data);
+ XFree(xv->yuv_image);
+ XCloseDisplay(xv->display);
+ return 0;
+}
+
+#define OFFSET(x) offsetof(XVContext, x)
+static const AVOption options[] = {
+ { "display_name", "set display name", OFFSET(display_name), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
+ { "window_size", "set window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
+ { "window_title", "set window title", OFFSET(window_title), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
+ { "window_x", "set window x offset", OFFSET(window_x), AV_OPT_TYPE_INT, {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
+ { "window_y", "set window y offset", OFFSET(window_y), AV_OPT_TYPE_INT, {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
+ { NULL }
+
+};
+
+static const AVClass xv_class = {
+ .class_name = "xvideo outdev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVOutputFormat ff_xv_muxer = {
+ .name = "xv",
+ .long_name = NULL_IF_CONFIG_SMALL("XV (XVideo) output device"),
+ .priv_data_size = sizeof(XVContext),
+ .audio_codec = AV_CODEC_ID_NONE,
+ .video_codec = AV_CODEC_ID_RAWVIDEO,
+ .write_header = xv_write_header,
+ .write_packet = xv_write_packet,
+ .write_trailer = xv_write_trailer,
+ .flags = AVFMT_NOFILE | AVFMT_VARIABLE_FPS | AVFMT_NOTIMESTAMPS,
+ .priv_class = &xv_class,
+};
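
For completeness, a sketch of driving the new XV device from code. It assumes the caller already has packed yuv420p pictures in memory; the helper name, window title and frame source are illustrative only:

#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>

/* Sketch only: show nframes yuv420p pictures with the "xv" output device.
 * Each packet carries one full picture in the layout avpicture_fill() uses,
 * which is what xv_write_packet() above expects. */
static int show_frames(uint8_t *yuv, int w, int h, int nframes)
{
    AVFormatContext *ctx = NULL;
    AVStream *st;
    AVPacket pkt;
    int i, ret;
    const int size = avpicture_get_size(AV_PIX_FMT_YUV420P, w, h);

    avdevice_register_all();
    if ((ret = avformat_alloc_output_context2(&ctx, NULL, "xv", "xv test")) < 0)
        return ret;
    st = avformat_new_stream(ctx, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_RAWVIDEO;
    st->codec->pix_fmt    = AV_PIX_FMT_YUV420P;
    st->codec->width      = w;
    st->codec->height     = h;
    if ((ret = avformat_write_header(ctx, NULL)) < 0)
        goto end;

    for (i = 0; i < nframes; i++) {
        av_init_packet(&pkt);
        pkt.data = yuv + i * size;   /* Y plane, then the U and V quarter planes */
        pkt.size = size;
        if ((ret = av_write_frame(ctx, &pkt)) < 0)
            break;
    }
    av_write_trailer(ctx);
end:
    avformat_free_context(ctx);
    return ret;
}
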