static AVFrame *current_frame = 0;
static struct SwsContext *img_convert_ctx = 0;
-static uint8_t *video_buffer = 0;
-static int video_buffersize = 0;
-
static uint8_t *audio_input_buffer = 0;
static uint8_t *audio_deinterleave_buffer = 0;
static int audio_input_samples = 0;
/* Write a frame to the output file */
static int write_video_frame(RenderData *rd, int cfra, AVFrame *frame, ReportList *reports)
{
- int outsize = 0;
+ int got_output;
int ret, success = 1;
AVCodecContext *c = video_stream->codec;
+ AVPacket packet = { 0 };
+
+ av_init_packet(&packet);
frame->pts = cfra;
frame->top_field_first = ((rd->mode & R_ODDFIELD) != 0);
}
- outsize = avcodec_encode_video(c, video_buffer, video_buffersize, frame);
-
- if (outsize > 0) {
- AVPacket packet;
- av_init_packet(&packet);
+ ret = avcodec_encode_video2(c, &packet, frame, &got_output);
- if (c->coded_frame->pts != AV_NOPTS_VALUE) {
- packet.pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_stream->time_base);
+ if (ret >= 0 && got_output) {
+ if (packet.pts != AV_NOPTS_VALUE) {
+ packet.pts = av_rescale_q(packet.pts, c->time_base, video_stream->time_base);
PRINT("Video Frame PTS: %d\n", (int)packet.pts);
}
else {
PRINT("Video Frame PTS: not set\n");
}
- if (c->coded_frame->key_frame)
- packet.flags |= AV_PKT_FLAG_KEY;
+ if (packet.dts != AV_NOPTS_VALUE) {
+ packet.dts = av_rescale_q(packet.dts, c->time_base, video_stream->time_base);
+ PRINT("Video Frame DTS: %d\n", (int)packet.dts);
+ } else {
+ PRINT("Video Frame DTS: not set\n");
+ }
+
packet.stream_index = video_stream->index;
- packet.data = video_buffer;
- packet.size = outsize;
ret = av_interleaved_write_frame(outfile, &packet);
success = (ret == 0);
}
- else if (outsize < 0) {
+ else if (ret < 0) {
success = 0;
}
 return success;
}
- if (codec_id == AV_CODEC_ID_QTRLE) {
- /* normally it should be enough to have buffer with actual image size,
- * but some codecs like QTRLE might store extra information in this buffer,
- * so it should be a way larger */
-
- /* maximum video buffer size is 6-bytes per pixel, plus DPX header size (1664)
- * (from FFmpeg sources) */
- int size = c->width * c->height;
- video_buffersize = 7 * size + 10000;
- }
- else
- video_buffersize = avpicture_get_size(c->pix_fmt, c->width, c->height);
-
- video_buffer = (uint8_t *)MEM_mallocN(video_buffersize * sizeof(uint8_t), "FFMPEG video buffer");
-
current_frame = alloc_picture(c->pix_fmt, c->width, c->height);
img_convert_ctx = sws_getContext(c->width, c->height, PIX_FMT_BGR32, c->width, c->height, c->pix_fmt, SWS_BICUBIC,
*/
static void flush_ffmpeg(void)
{
- int outsize = 0;
int ret = 0;
AVCodecContext *c = video_stream->codec;
/* get the delayed frames */
while (1) {
- AVPacket packet;
+ int got_output;
+ AVPacket packet = { 0 };
av_init_packet(&packet);
- outsize = avcodec_encode_video(c, video_buffer, video_buffersize, NULL);
- if (outsize < 0) {
- fprintf(stderr, "Error encoding delayed frame %d\n", outsize);
+ ret = avcodec_encode_video2(c, &packet, NULL, &got_output);
+ if (ret < 0) {
+ fprintf(stderr, "Error encoding delayed frame %d\n", ret);
break;
}
- if (outsize == 0) {
+ if (!got_output) {
break;
}
- if (c->coded_frame->pts != AV_NOPTS_VALUE) {
- packet.pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_stream->time_base);
+ if (packet.pts != AV_NOPTS_VALUE) {
+ packet.pts = av_rescale_q(packet.pts, c->time_base, video_stream->time_base);
PRINT("Video Frame PTS: %d\n", (int) packet.pts);
}
else {
PRINT("Video Frame PTS: not set\n");
}
- if (c->coded_frame->key_frame) {
- packet.flags |= AV_PKT_FLAG_KEY;
+ if (packet.dts != AV_NOPTS_VALUE) {
+ packet.dts = av_rescale_q(packet.dts, c->time_base, video_stream->time_base);
+ PRINT("Video Frame DTS: %d\n", (int) packet.dts);
+ } else {
+ PRINT("Video Frame DTS: not set\n");
}
+
packet.stream_index = video_stream->index;
- packet.data = video_buffer;
- packet.size = outsize;
ret = av_interleaved_write_frame(outfile, &packet);
if (ret != 0) {
fprintf(stderr, "Error writing delayed frame %d\n", ret);
av_free(outfile);
outfile = 0;
}
- if (video_buffer) {
- MEM_freeN(video_buffer);
- video_buffer = 0;
- }
if (audio_input_buffer) {
av_free(audio_input_buffer);
audio_input_buffer = 0;
AVCodec *codec;
struct SwsContext *sws_ctx;
AVFrame *frame;
- uint8_t *video_buffer;
- int video_buffersize;
int cfra;
int proxy_size;
int orig_height;
avcodec_open2(rv->c, rv->codec, NULL);
- rv->video_buffersize = 2000000;
- rv->video_buffer = (uint8_t *)MEM_mallocN(
- rv->video_buffersize, "FFMPEG video buffer");
-
rv->orig_height = av_get_cropped_height_from_codec(st->codec);
if (st->codec->width != width || st->codec->height != height ||
static int add_to_proxy_output_ffmpeg(
struct proxy_output_ctx *ctx, AVFrame *frame)
{
- int outsize = 0;
+ AVPacket packet = { 0 };
+ int ret, got_output;
+
+ av_init_packet(&packet);
if (!ctx) {
return 0;
frame->pts = ctx->cfra++;
}
- outsize = avcodec_encode_video(
- ctx->c, ctx->video_buffer, ctx->video_buffersize,
- frame);
-
- if (outsize < 0) {
+ ret = avcodec_encode_video2(ctx->c, &packet, frame, &got_output);
+ if (ret < 0) {
fprintf(stderr, "Error encoding proxy frame %d for '%s'\n",
ctx->cfra - 1, ctx->of->filename);
return 0;
}
- if (outsize != 0) {
- AVPacket packet;
- av_init_packet(&packet);
-
- if (ctx->c->coded_frame->pts != AV_NOPTS_VALUE) {
- packet.pts = av_rescale_q(ctx->c->coded_frame->pts,
+ if (got_output) {
+ if (packet.pts != AV_NOPTS_VALUE) {
+ packet.pts = av_rescale_q(packet.pts,
+ ctx->c->time_base,
+ ctx->st->time_base);
+ }
+ if (packet.dts != AV_NOPTS_VALUE) {
+ packet.dts = av_rescale_q(packet.dts,
ctx->c->time_base,
ctx->st->time_base);
}
- if (ctx->c->coded_frame->key_frame)
- packet.flags |= AV_PKT_FLAG_KEY;
packet.stream_index = ctx->st->index;
- packet.data = ctx->video_buffer;
- packet.size = outsize;
if (av_interleaved_write_frame(ctx->of, &packet) != 0) {
fprintf(stderr, "Error writing proxy frame %d "
}
avformat_free_context(ctx->of);
- MEM_freeN(ctx->video_buffer);
-
if (ctx->sws_ctx) {
sws_freeContext(ctx->sws_ctx);