path: root/src/uscxml/plugins/invoker/ffmpeg/FFMPEGInvoker.cpp
Diffstat (limited to 'src/uscxml/plugins/invoker/ffmpeg/FFMPEGInvoker.cpp')
-rw-r--r--  src/uscxml/plugins/invoker/ffmpeg/FFMPEGInvoker.cpp | 706
1 file changed, 304 insertions(+), 402 deletions(-)
diff --git a/src/uscxml/plugins/invoker/ffmpeg/FFMPEGInvoker.cpp b/src/uscxml/plugins/invoker/ffmpeg/FFMPEGInvoker.cpp
index ccf65ce..e637f8b 100644
--- a/src/uscxml/plugins/invoker/ffmpeg/FFMPEGInvoker.cpp
+++ b/src/uscxml/plugins/invoker/ffmpeg/FFMPEGInvoker.cpp
@@ -1,15 +1,16 @@
#include "FFMPEGInvoker.h"
#include <glog/logging.h>
+#include <libavutil/imgutils.h>
+#include <libavcodec/avcodec.h>
+#include <fstream>
#ifdef BUILD_AS_PLUGINS
#include <Pluma/Connector.hpp>
#endif
-#define STREAM_DURATION 200.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
-#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
-#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
+#define BMP_FORMAT PIX_FMT_BGR24
namespace uscxml {
@@ -29,190 +30,363 @@ FFMPEGInvoker::~FFMPEGInvoker() {
boost::shared_ptr<InvokerImpl> FFMPEGInvoker::create(InterpreterImpl* interpreter) {
boost::shared_ptr<FFMPEGInvoker> invoker = boost::shared_ptr<FFMPEGInvoker>(new FFMPEGInvoker());
- invoker->_interpreter = interpreter;
+ // Register all formats and codecs - this ought to be done just once
+ av_register_all();
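// One way to honour the "just once" above (editorial sketch, not part of this commit):
//   static bool ffmpegInitialized = false;
//   if (!ffmpegInitialized) { av_register_all(); ffmpegInitialized = true; }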
return invoker;
}
Data FFMPEGInvoker::getDataModelVariables() {
Data data;
+
+ AVCodec* codec = NULL;
+ while((codec = av_codec_next(codec))) {
+ AVCodec* codecInst = avcodec_find_encoder(codec->id);
+ if (!codecInst)
+ continue;
+
+ switch (codec->type) {
+ case AVMEDIA_TYPE_VIDEO: {
+ Data codecData;
+ codecData.compound["name"] = Data(codec->name, Data::VERBATIM);
+ codecData.compound["longName"] = Data(codec->long_name, Data::VERBATIM);
+ data.compound["video"].compound[codec->name] = codecData;
+ break;
+ }
+ case AVMEDIA_TYPE_AUDIO: {
+ Data codecData;
+ codecData.compound["name"] = Data(codec->name, Data::VERBATIM);
+ codecData.compound["longName"] = Data(codec->long_name, Data::VERBATIM);
+ data.compound["audio"].compound[codec->name] = codecData;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
return data;
}
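// Shape of the returned variables (editorial sketch, not part of this commit):
// encoders are grouped by media type and keyed by their avcodec short name, e.g.
//   data.compound["video"].compound["mpeg2video"].compound["longName"]
//   data.compound["audio"].compound["ac3"].compound["longName"]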
+void FFMPEGInvoker::invoke(const InvokeRequest& req) {
+ int nrThreads = 1;
+ Event::getParam(req.params, "threads", nrThreads);
+
+ _isRunning = true;
+ for (int i = 0; i < nrThreads; i++) {
+ _threads.insert(new tthread::thread(FFMPEGInvoker::run, this));
+ }
+}
+
void FFMPEGInvoker::send(const SendRequest& req) {
- if (boost::iequals(req.name, "add")) {
+ SendRequest reqCopy = req;
+
+ if (boost::iequals(req.name, "render.start")) {
+ // create a new encoding context
+ int ret;
+ EncodingContext* ctx = new EncodingContext();
+ tthread::lock_guard<tthread::recursive_mutex> lock(ctx->mutex);
+
+ std::string context;
+ Event::getParam(req.params, "context", context);
+
+ ctx->extension = "mpeg";
+ Event::getParam(req.params, "format", ctx->extension);
+
+ Event::getParam(req.params, "width", ctx->width);
+ Event::getParam(req.params, "height", ctx->height);
+
+ if (!ctx->width || !ctx->height)
+ return;
+
+ ctx->filename = URL::getTmpFilename();
+
+ /* allocate the output media context */
+ avformat_alloc_output_context2(&ctx->formatCtx, NULL, ctx->extension.c_str(), ctx->filename.c_str());
+ if (!ctx->formatCtx) {
+ printf("Could not deduce output format from file extension: using MPEG.\n");
+ avformat_alloc_output_context2(&ctx->formatCtx, NULL, "mpeg", ctx->filename.c_str());
+ }
+ if (!ctx->formatCtx) {
+ return;
+ }
+ ctx->format = ctx->formatCtx->oformat;
+
+ /* Add the video stream using the default format codec
+ * and initialize the codec. */
+ ctx->videoStream = NULL;
+
+ if (ctx->format->video_codec != AV_CODEC_ID_NONE) {
+ ctx->videoStream = addStream(ctx, ctx->formatCtx, &ctx->videoCodec, ctx->format->video_codec);
+ }
+
+ /* Now that all the parameters are set, we can open the
+ * video codec and allocate the necessary encode buffers. */
+ if (ctx->videoStream)
+ openVideo(ctx, ctx->formatCtx, ctx->videoCodec, ctx->videoStream);
+
+ /* open the output file, if needed */
+ if (!(ctx->format->flags & AVFMT_NOFILE)) {
+ ret = avio_open(&ctx->formatCtx->pb, ctx->filename.c_str(), AVIO_FLAG_WRITE);
+ if (ret < 0) {
+ // fprintf(stderr, "Could not open '%s': %s\n", ctx->filename.c_str(),
+ // av_err2str(ret));
+ return;
+ }
+ }
+
+ /* Write the stream header, if any. */
+ ret = avformat_write_header(ctx->formatCtx, NULL);
+ if (ret < 0) {
+ // fprintf(stderr, "Error occurred when opening output file: %s\n",
+ // av_err2str(ret));
+ return;
+ }
+
+ if (ctx->frame)
+ ctx->frame->pts = 0;
+
+ _encoders[context] = ctx;
+ } else if(boost::iequals(req.name, "render.frame")) {
+ _workQueue.push(req);
+ } else if(boost::iequals(req.name, "render.end")) {
+ _workQueue.push(req);
+ }
+}
- } else if(boost::iequals(req.name, "render")) {
+void FFMPEGInvoker::cancel(const std::string sendId) {
+}
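// Editorial sketch of the event protocol implemented by send() above; the
// param names are taken from the handlers, everything else about how a
// state chart would emit these events lies outside this commit:
//
//   render.start  params: context, width, height, format   -> allocates an EncodingContext
//   render.frame  params: context, frame (BMP blob), format -> queued for the worker threads
//   render.end    params: context                           -> finalizes the file, replies with "render.done"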
+void FFMPEGInvoker::run(void* instance) {
+ FFMPEGInvoker* INSTANCE = (FFMPEGInvoker*)instance;
+ while(true) {
+ SendRequest req = INSTANCE->_workQueue.pop();
+ if (INSTANCE->_isRunning) {
+ INSTANCE->process(req);
+ } else {
+ return;
+ }
}
}
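// Editorial sketch (an assumption, not uscxml's actual concurrentQueue) of the
// blocking-queue semantics run() relies on: pop() blocks until a producer
// pushes a request, so idle worker threads simply sleep inside pop().
// Needs <deque> and TinyThread++ (tthread::mutex, tthread::condition_variable).
template <typename T>
class BlockingQueue {
public:
	void push(const T& item) {
		tthread::lock_guard<tthread::mutex> lock(_mutex);
		_queue.push_back(item);
		_cond.notify_one();                 // wake one waiting consumer
	}
	T pop() {
		tthread::lock_guard<tthread::mutex> lock(_mutex);
		while (_queue.empty())
			_cond.wait(_mutex);             // releases _mutex while sleeping
		T item = _queue.front();
		_queue.pop_front();
		return item;
	}
private:
	std::deque<T> _queue;
	tthread::mutex _mutex;
	tthread::condition_variable _cond;
};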
-void FFMPEGInvoker::cancel(const std::string sendId) {
+void FFMPEGInvoker::finish(EncodingContext* ctx, const SendRequest& req) {
+ av_write_trailer(ctx->formatCtx);
+
+ /* Close each codec. */
+ if (ctx->videoStream)
+ closeVideo(ctx, ctx->formatCtx, ctx->videoStream);
+
+ if (!(ctx->formatCtx->oformat->flags & AVFMT_NOFILE))
+ /* Close the output file. */
+ avio_close(ctx->formatCtx->pb);
+
+ /* free the stream */
+ avformat_free_context(ctx->formatCtx);
+
+ // read file
+ std::ifstream movieFile(ctx->filename.c_str());
+ movieFile.seekg(0, std::ios::end);
+ size_t length = movieFile.tellg();
+ movieFile.seekg(0, std::ios::beg);
+
+ char* movieBuffer = (char*)malloc(length);
+ movieFile.read(movieBuffer, length);
+
+ // move to desktop for checking
+// int err = rename(ctx->filename.c_str(), "/Users/sradomski/Desktop/foo.mpg");
+// if (err) {
+// printf("%s", strerror(errno));
+// }
+
+ std::string context;
+ Event::getParam(req.params, "context", context);
+
+ Event event;
+ event.name = "render.done";
+ event.data.compound["context"] = context;
+ event.data.compound["movie"] = Data(movieBuffer, length, true);
+ event.data.compound["mimetype"] = Data("video/mpeg", Data::VERBATIM);
+ event.data.compound["filename"] = Data(std::string("movie.") + ctx->extension, Data::VERBATIM);
+
+ returnEvent(event);
}
-static AVFrame *frame;
-static AVPicture src_picture, dst_picture;
-static int frame_count;
-static int sws_flags = SWS_BICUBIC;
+void FFMPEGInvoker::process(const SendRequest& req) {
-static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
- enum AVCodecID codec_id) {
+ std::string context;
+ Event::getParam(req.params, "context", context);
+ if (_encoders.find(context) == _encoders.end()) {
+ return;
+ }
+
+ EncodingContext* ctx = _encoders[context];
+ tthread::lock_guard<tthread::recursive_mutex> lock(ctx->mutex);
+
+ // finish encoding and return
+ if(boost::iequals(req.name, "render.end")) {
+ finish(ctx, req);
+ delete _encoders[context];
+ _encoders.erase(context);
+ _encoders.erase(context);
+ return;
+
+ Data image;
+ Event::getParam(req.params, "frame", image);
+ if (!image) {
+ return;
+ }
+
+ std::string format = "bmp";
+ Event::getParam(req.params, "format", format);
+
+ writeVideoFrame(ctx, ctx->formatCtx, ctx->videoStream, image.binary);
+ ctx->frame->pts += av_rescale_q(1, ctx->videoStream->codec->time_base, ctx->videoStream->time_base);
+
+}
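// Worked example (editorial sketch) for the pts advance above: with the
// 1/25 s codec time_base configured in addStream() and the 1/90000 stream
// time_base typical for MPEG containers (an assumption about the muxer),
// each frame moves the presentation timestamp forward by 90000 / 25 ticks.
AVRational codecTb  = { 1, 25 };     // c->time_base as configured in addStream()
AVRational streamTb = { 1, 90000 };  // assumed stream time_base after avformat_write_header()
int64_t ptsStep = av_rescale_q(1, codecTb, streamTb);  // == 3600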
+
+AVStream* FFMPEGInvoker::addStream(EncodingContext* ctx, AVFormatContext *oc, AVCodec **codec,
+ enum AVCodecID codec_id) {
AVCodecContext *c;
AVStream *st;
-
+
/* find the encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
fprintf(stderr, "Could not find encoder for '%s'\n",
- avcodec_get_name(codec_id));
- exit(1);
+ avcodec_get_name(codec_id));
+ return NULL;
}
-
+
st = avformat_new_stream(oc, *codec);
+ ctx->videoPixFmt = (*codec)->pix_fmts[0];
if (!st) {
fprintf(stderr, "Could not allocate stream\n");
- exit(1);
+ return NULL;
}
st->id = oc->nb_streams-1;
c = st->codec;
-
+
switch ((*codec)->type) {
- case AVMEDIA_TYPE_AUDIO:
- c->sample_fmt = AV_SAMPLE_FMT_FLTP;
- c->bit_rate = 64000;
- c->sample_rate = 44100;
- c->channels = 2;
- break;
-
- case AVMEDIA_TYPE_VIDEO:
- c->codec_id = codec_id;
-
- c->bit_rate = 400000;
- /* Resolution must be a multiple of two. */
- c->width = 352;
- c->height = 288;
- /* timebase: This is the fundamental unit of time (in seconds) in terms
- * of which frame timestamps are represented. For fixed-fps content,
- * timebase should be 1/framerate and timestamp increments should be
- * identical to 1. */
- c->time_base.den = STREAM_FRAME_RATE;
- c->time_base.num = 1;
- c->gop_size = 12; /* emit one intra frame every twelve frames at most */
- c->pix_fmt = STREAM_PIX_FMT;
- if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
- /* just for testing, we also add B frames */
- c->max_b_frames = 2;
- }
- if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
- /* Needed to avoid using macroblocks in which some coeffs overflow.
- * This does not happen with normal video, it just happens here as
- * the motion of the chroma plane does not match the luma plane. */
- c->mb_decision = 2;
- }
- break;
-
- default:
- break;
+ case AVMEDIA_TYPE_AUDIO:
+ c->sample_fmt = AV_SAMPLE_FMT_FLTP;
+ c->bit_rate = 64000;
+ c->sample_rate = 44100;
+ c->channels = 2;
+ break;
+
+ case AVMEDIA_TYPE_VIDEO:
+ c->codec_id = codec_id;
+
+ c->bit_rate = 800000;
+ /* Resolution must be a multiple of two. */
+ c->width = ctx->width;
+ c->height = ctx->height;
+ /* timebase: This is the fundamental unit of time (in seconds) in terms
+ * of which frame timestamps are represented. For fixed-fps content,
+ * timebase should be 1/framerate and timestamp increments should be
+ * identical to 1. */
+ c->time_base.den = STREAM_FRAME_RATE;
+ c->time_base.num = 1;
+ c->gop_size = 12; /* emit one intra frame every twelve frames at most */
+ c->pix_fmt = ctx->videoPixFmt;
+ if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
+ /* just for testing, we also add B frames */
+ c->max_b_frames = 2;
+ }
+ if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
+ /* Needed to avoid using macroblocks in which some coeffs overflow.
+ * This does not happen with normal video, it just happens here as
+ * the motion of the chroma plane does not match the luma plane. */
+ c->mb_decision = 2;
+ }
+ break;
+
+ default:
+ break;
}
-
+
/* Some formats want stream headers to be separate. */
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
-
+
return st;
}
-static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st) {
+void FFMPEGInvoker::openVideo(EncodingContext* ctx, AVFormatContext *oc, AVCodec *codec, AVStream *st) {
int ret;
AVCodecContext *c = st->codec;
/* open the codec */
ret = avcodec_open2(c, codec, NULL);
if (ret < 0) {
- fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
- exit(1);
+ // fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
+ return;
}
/* allocate and init a re-usable frame */
- frame = avcodec_alloc_frame();
- if (!frame) {
+ ctx->frame = avcodec_alloc_frame();
+ if (!ctx->frame) {
fprintf(stderr, "Could not allocate video frame\n");
- exit(1);
+ return;
}
/* Allocate the encoded raw picture. */
- ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
+ ret = avpicture_alloc(&ctx->dst_picture, c->pix_fmt, c->width, c->height);
if (ret < 0) {
- fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
- exit(1);
+// fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
+ return;
}
/* If the output format is not YUV420P, then a temporary YUV420P
* picture is needed too. It is then converted to the required
* output format. */
- if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
- ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
+ if (c->pix_fmt != BMP_FORMAT) {
+ ret = avpicture_alloc(&ctx->src_picture, BMP_FORMAT, c->width, c->height);
if (ret < 0) {
- fprintf(stderr, "Could not allocate temporary picture: %s\n",
- av_err2str(ret));
- exit(1);
+ // fprintf(stderr, "Could not allocate temporary picture: %s\n",
+ // av_err2str(ret));
+ return;
}
}
/* copy data and linesize picture pointers to frame */
- *((AVPicture *)frame) = dst_picture;
+ *((AVPicture *)ctx->frame) = ctx->dst_picture;
}
-
-/* Prepare a dummy image. */
-static void fill_yuv_image(AVPicture *pict, int frame_index,
- int width, int height) {
- int x, y, i;
-
- i = frame_index;
-
- /* Y */
- for (y = 0; y < height; y++)
- for (x = 0; x < width; x++)
- pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
-
- /* Cb and Cr */
- for (y = 0; y < height / 2; y++) {
- for (x = 0; x < width / 2; x++) {
- pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
- pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
- }
- }
-}
-
-static void write_video_frame(AVFormatContext *oc, AVStream *st) {
+
+void FFMPEGInvoker::writeVideoFrame(EncodingContext* ctx, AVFormatContext *oc, AVStream *st, boost::shared_ptr<Blob> image) {
int ret;
- static struct SwsContext *sws_ctx;
AVCodecContext *c = st->codec;
- if (frame_count >= STREAM_NB_FRAMES) {
- /* No more frames to compress. The codec has a latency of a few
- * frames if using B-frames, so we get the last frames by
- * passing the same picture again. */
- } else {
- if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
- /* as we only generate a YUV420P picture, we must convert it
- * to the codec pixel format if needed */
- if (!sws_ctx) {
- sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
- c->width, c->height, c->pix_fmt,
- sws_flags, NULL, NULL, NULL);
- if (!sws_ctx) {
- fprintf(stderr,
- "Could not initialize the conversion context\n");
- exit(1);
- }
+ if (c->pix_fmt != BMP_FORMAT) {
+ /* the incoming frame is a BGR24 bitmap, so we must convert it
+ * to the codec pixel format if needed */
+ if (!ctx->sws_ctx) {
+ ctx->sws_ctx = sws_getContext(c->width, c->height, BMP_FORMAT,
+ c->width, c->height, c->pix_fmt,
+ ctx->sws_flags, NULL, NULL, NULL);
+ if (!ctx->sws_ctx) {
+ fprintf(stderr,
+ "Could not initialize the conversion context\n");
+ return;
}
- fill_yuv_image(&src_picture, frame_count, c->width, c->height);
- sws_scale(sws_ctx,
- (const uint8_t * const *)src_picture.data, src_picture.linesize,
- 0, c->height, dst_picture.data, dst_picture.linesize);
- } else {
- fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
}
+
+ uint32_t headerOffset = 0;
+ headerOffset += image->_data[10] << 0;
+ headerOffset += image->_data[11] << 8;
+ headerOffset += image->_data[12] << 16;
+ headerOffset += image->_data[13] << 24;
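// (Editorial note) Bytes 10-13 of the BITMAPFILEHEADER hold the little-endian
// offset from the start of the file to the pixel array, so src_picture is
// filled below straight from the BGR24 pixel data behind the headers.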
+
+// std::cout << headerOffset + (c->width * c->height) << " / " << image->_size << std::endl;
+
+ ret = avpicture_fill(&ctx->src_picture, (uint8_t*)(image->_data + headerOffset), BMP_FORMAT, c->width, c->height);
+ if (ret < 0) {
+ fprintf(stderr,
+ "Could not fill image from given bitmap\n");
+ }
+ sws_scale(ctx->sws_ctx,
+ (const uint8_t * const *)ctx->src_picture.data, ctx->src_picture.linesize,
+ 0, c->height, ctx->dst_picture.data, ctx->dst_picture.linesize);
+ } else {
+ avpicture_fill(&ctx->dst_picture, (uint8_t*)image->_data, c->pix_fmt, c->width, c->height);
}
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
@@ -222,7 +396,7 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st) {
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = st->index;
- pkt.data = dst_picture.data[0];
+ pkt.data = ctx->dst_picture.data[0];
pkt.size = sizeof(AVPicture);
ret = av_interleaved_write_frame(oc, &pkt);
@@ -232,10 +406,10 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st) {
av_init_packet(&pkt);
/* encode the image */
- ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
+ ret = avcodec_encode_video2(c, &pkt, ctx->frame, &got_packet);
if (ret < 0) {
- fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
- exit(1);
+ // fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
+ return;
}
/* If size is zero, it means the image was buffered. */
@@ -243,297 +417,25 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st) {
pkt.stream_index = st->index;
/* Write the compressed frame to the media file. */
+// ret = av_write_frame(oc, &pkt);
ret = av_interleaved_write_frame(oc, &pkt);
} else {
ret = 0;
}
}
if (ret != 0) {
- fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
- exit(1);
- }
- frame_count++;
-}
-
-static void close_video(AVFormatContext *oc, AVStream *st) {
- avcodec_close(st->codec);
- av_free(src_picture.data[0]);
- av_free(dst_picture.data[0]);
- av_free(frame);
-}
-
-static float t, tincr, tincr2;
-
-static uint8_t **src_samples_data;
-static int src_samples_linesize;
-static int src_nb_samples;
-
-static int max_dst_nb_samples;
-uint8_t **dst_samples_data;
-int dst_samples_linesize;
-int dst_samples_size;
-
-struct SwrContext *swr_ctx = NULL;
-
-static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st) {
- AVCodecContext *c;
- int ret;
-
- c = st->codec;
-
- /* open it */
- ret = avcodec_open2(c, codec, NULL);
- if (ret < 0) {
- fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
- exit(1);
- }
-
- /* init signal generator */
- t = 0;
- tincr = 2 * M_PI * 110.0 / c->sample_rate;
- /* increment frequency by 110 Hz per second */
- tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
-
- src_nb_samples = c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ?
- 10000 : c->frame_size;
-
- ret = av_samples_alloc_array_and_samples(&src_samples_data, &src_samples_linesize, c->channels,
- src_nb_samples, c->sample_fmt, 0);
- if (ret < 0) {
- fprintf(stderr, "Could not allocate source samples\n");
- exit(1);
- }
-
- /* create resampler context */
- if (c->sample_fmt != AV_SAMPLE_FMT_S16) {
- swr_ctx = swr_alloc();
- if (!swr_ctx) {
- fprintf(stderr, "Could not allocate resampler context\n");
- exit(1);
- }
-
- /* set options */
- av_opt_set_int (swr_ctx, "in_channel_count", c->channels, 0);
- av_opt_set_int (swr_ctx, "in_sample_rate", c->sample_rate, 0);
- av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
- av_opt_set_int (swr_ctx, "out_channel_count", c->channels, 0);
- av_opt_set_int (swr_ctx, "out_sample_rate", c->sample_rate, 0);
- av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
-
- /* initialize the resampling context */
- if ((ret = swr_init(swr_ctx)) < 0) {
- fprintf(stderr, "Failed to initialize the resampling context\n");
- exit(1);
- }
- }
-
- /* compute the number of converted samples: buffering is avoided
- * ensuring that the output buffer will contain at least all the
- * converted input samples */
- max_dst_nb_samples = src_nb_samples;
- ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize, c->channels,
- max_dst_nb_samples, c->sample_fmt, 0);
- if (ret < 0) {
- fprintf(stderr, "Could not allocate destination samples\n");
- exit(1);
- }
- dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, max_dst_nb_samples,
- c->sample_fmt, 0);
-}
-
-/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
- * 'nb_channels' channels. */
-static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels) {
- int j, i, v;
- int16_t *q;
-
- q = samples;
- for (j = 0; j < frame_size; j++) {
- v = (int)(sin(t) * 10000);
- for (i = 0; i < nb_channels; i++)
- *q++ = v;
- t += tincr;
- tincr += tincr2;
- }
-}
-
-static void write_audio_frame(AVFormatContext *oc, AVStream *st) {
- AVCodecContext *c;
- AVPacket pkt = { 0 }; // data and size must be 0;
- AVFrame *frame = avcodec_alloc_frame();
- int got_packet, ret, dst_nb_samples;
-
- av_init_packet(&pkt);
- c = st->codec;
-
- get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);
-
- /* convert samples from native format to destination codec format, using the resampler */
- if (swr_ctx) {
- /* compute destination number of samples */
- dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
- c->sample_rate, c->sample_rate, AV_ROUND_UP);
- if (dst_nb_samples > max_dst_nb_samples) {
- av_free(dst_samples_data[0]);
- ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels,
- dst_nb_samples, c->sample_fmt, 0);
- if (ret < 0)
- exit(1);
- max_dst_nb_samples = dst_nb_samples;
- dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
- c->sample_fmt, 0);
- }
-
- /* convert to destination format */
- ret = swr_convert(swr_ctx,
- dst_samples_data, dst_nb_samples,
- (const uint8_t **)src_samples_data, src_nb_samples);
- if (ret < 0) {
- fprintf(stderr, "Error while converting\n");
- exit(1);
- }
- } else {
- dst_samples_data[0] = src_samples_data[0];
- dst_nb_samples = src_nb_samples;
- }
-
- frame->nb_samples = dst_nb_samples;
- avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
- dst_samples_data[0], dst_samples_size, 0);
-
- ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
- if (ret < 0) {
- fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
- exit(1);
- }
-
- if (!got_packet)
+// fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
return;
-
- pkt.stream_index = st->index;
-
- /* Write the compressed frame to the media file. */
- ret = av_interleaved_write_frame(oc, &pkt);
- if (ret != 0) {
- fprintf(stderr, "Error while writing audio frame: %s\n",
- av_err2str(ret));
- exit(1);
}
- avcodec_free_frame(&frame);
+ ctx->frame_count++;
}
-static void close_audio(AVFormatContext *oc, AVStream *st) {
+void FFMPEGInvoker::closeVideo(EncodingContext* ctx, AVFormatContext *oc, AVStream *st) {
avcodec_close(st->codec);
- av_free(src_samples_data[0]);
- av_free(dst_samples_data[0]);
+// av_free(ctx->src_picture.data[0]);
+ av_free(ctx->dst_picture.data[0]);
+ av_free(ctx->frame);
}
-void FFMPEGInvoker::invoke(const InvokeRequest& req) {
-
-#if 0
- const char *filename;
- AVOutputFormat *fmt;
- AVFormatContext *oc;
- AVStream *audio_st, *video_st;
- AVCodec *audio_codec, *video_codec;
- double audio_time, video_time;
- int ret;
-
- filename = "foo.avi";
- // Register all formats and codecs
- av_register_all();
-
- /* allocate the output media context */
- avformat_alloc_output_context2(&oc, NULL, NULL, filename);
- if (!oc) {
- printf("Could not deduce output format from file extension: using MPEG.\n");
- avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
- }
- if (!oc) {
- return 1;
- }
- fmt = oc->oformat;
-
- /* Add the audio and video streams using the default format codecs
- * and initialize the codecs. */
- video_st = NULL;
- audio_st = NULL;
-
- if (fmt->video_codec != AV_CODEC_ID_NONE) {
- video_st = add_stream(oc, &video_codec, fmt->video_codec);
- }
- if (fmt->audio_codec != AV_CODEC_ID_NONE) {
- audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
- }
-
- /* Now that all the parameters are set, we can open the audio and
- * video codecs and allocate the necessary encode buffers. */
- if (video_st)
- open_video(oc, video_codec, video_st);
- if (audio_st)
- open_audio(oc, audio_codec, audio_st);
-
- av_dump_format(oc, 0, filename, 1);
-
- /* open the output file, if needed */
- if (!(fmt->flags & AVFMT_NOFILE)) {
- ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
- if (ret < 0) {
- fprintf(stderr, "Could not open '%s': %s\n", filename,
- av_err2str(ret));
- return 1;
- }
- }
-
- /* Write the stream header, if any. */
- ret = avformat_write_header(oc, NULL);
- if (ret < 0) {
- fprintf(stderr, "Error occurred when opening output file: %s\n",
- av_err2str(ret));
- return 1;
- }
-
- if (frame)
- frame->pts = 0;
- for (;;) {
- /* Compute current audio and video time. */
- audio_time = audio_st ? audio_st->pts.val * av_q2d(audio_st->time_base) : 0.0;
- video_time = video_st ? video_st->pts.val * av_q2d(video_st->time_base) : 0.0;
-
- if ((!audio_st || audio_time >= STREAM_DURATION) &&
- (!video_st || video_time >= STREAM_DURATION))
- break;
-
- /* write interleaved audio and video frames */
- if (!video_st || (video_st && audio_st && audio_time < video_time)) {
- write_audio_frame(oc, audio_st);
- } else {
- write_video_frame(oc, video_st);
- frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
- }
- }
-
- /* Write the trailer, if any. The trailer must be written before you
- * close the CodecContexts open when you wrote the header; otherwise
- * av_write_trailer() may try to use memory that was freed on
- * av_codec_close(). */
- av_write_trailer(oc);
-
- /* Close each codec. */
- if (video_st)
- close_video(oc, video_st);
- if (audio_st)
- close_audio(oc, audio_st);
-
- if (!(fmt->flags & AVFMT_NOFILE))
- /* Close the output file. */
- avio_close(oc->pb);
-
- /* free the stream */
- avformat_free_context(oc);
-
- return 0;
-#endif
-}
}
\ No newline at end of file