Make Blender compilable with FFmpeg-3.0
author    Sergey Sharybin <sergey.vfx@gmail.com>
Tue, 16 Feb 2016 11:32:42 +0000 (12:32 +0100)
committer Sergey Sharybin <sergey.vfx@gmail.com>
Tue, 16 Feb 2016 11:34:15 +0000 (12:34 +0100)
While it's not something we'll be using for the official release,
it's nice to support new libraries at least at the "it compiles" level,
so there aren't as many frustrated developers around.

Nexyon, please have a look at the Audaspace changes :)

intern/audaspace/ffmpeg/AUD_FFMPEGReader.cpp
intern/audaspace/ffmpeg/AUD_FFMPEGWriter.cpp
intern/ffmpeg/ffmpeg_compat.h
source/blender/blenkernel/intern/writeffmpeg.c
source/blender/imbuf/intern/anim_movie.c
source/blender/imbuf/intern/indexer.c

intern/audaspace/ffmpeg/AUD_FFMPEGReader.cpp
index ff2c526bf499e83a1b83b35d95ea10bc3ef8ec33..e9eea195208202e20d787dc1f9c595bf8d3c6648 100644 (file)
@@ -58,9 +58,9 @@ int AUD_FFMPEGReader::decode(AVPacket& packet, AUD_Buffer& buffer)
                got_frame = 0;
 
                if(!frame)
-                       frame = avcodec_alloc_frame();
+                       frame = av_frame_alloc();
                else
-                       avcodec_get_frame_defaults(frame);
+                       av_frame_unref(frame);
 
                read_length = avcodec_decode_audio4(m_codecCtx, frame, &got_frame, &packet);
                if(read_length < 0)
intern/audaspace/ffmpeg/AUD_FFMPEGWriter.cpp
index 4ee99c723aa844058fbfde2f7308e19420801898..3f95ac7a4da6c66808b7c17dddada38d7246c11b 100644 (file)
@@ -202,7 +202,7 @@ AUD_FFMPEGWriter::AUD_FFMPEGWriter(std::string filename, AUD_DeviceSpecs specs,
                        m_frame = av_frame_alloc();
                        if (!m_frame)
                                AUD_THROW(AUD_ERROR_FFMPEG, codec_error);
-                       avcodec_get_frame_defaults(m_frame);
+                       av_frame_unref(m_frame);
                        m_frame->linesize[0]    = m_input_size * samplesize;
                        m_frame->format         = m_codecCtx->sample_fmt;
                        m_frame->nb_samples     = m_input_size;
@@ -224,7 +224,9 @@ AUD_FFMPEGWriter::AUD_FFMPEGWriter(std::string filename, AUD_DeviceSpecs specs,
                                if(avio_open(&m_formatCtx->pb, filename.c_str(), AVIO_FLAG_WRITE))
                                        AUD_THROW(AUD_ERROR_FILE, file_error);
 
-                               avformat_write_header(m_formatCtx, NULL);
+                               if(avformat_write_header(m_formatCtx, NULL) < 0) {
+                                       throw;
+                               }
                        }
                        catch(AUD_Exception&)
                        {
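The writer now checks the return value of avformat_write_header() instead of discarding it. As a hedged sketch of how such a failure can be surfaced as readable text (function and variable names here are illustrative, not Audaspace's API):

/* Sketch only: report an avformat_write_header() failure as text.
 * `fmt_ctx` is assumed to be a fully configured AVFormatContext with pb opened. */
#include <libavformat/avformat.h>
#include <libavutil/error.h>
#include <stdio.h>

static int write_header_checked(AVFormatContext *fmt_ctx)
{
    int err = avformat_write_header(fmt_ctx, NULL);
    if (err < 0) {
        char msg[AV_ERROR_MAX_STRING_SIZE];
        av_strerror(err, msg, sizeof(msg));
        fprintf(stderr, "avformat_write_header failed: %s\n", msg);
    }
    return err;
}

av_strerror() translates the negative AVERROR code into a message; AV_ERROR_MAX_STRING_SIZE is the buffer size libavutil suggests for this.
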
intern/ffmpeg/ffmpeg_compat.h
index ac4da5b61333e1dd6af57165e177ed9405c16130..b591032cc7afb67a4cf58df919c7cee9df0f5875 100644 (file)
@@ -446,4 +446,205 @@ AVRational av_get_r_frame_rate_compat(const AVStream *stream)
 #  define FFMPEG_HAVE_DEPRECATED_FLAGS2
 #endif
 
+/* Since FFmpeg-1.1 these constants have the AV_ prefix. */
+#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(52, 13, 100)
+#  define AV_PIX_FMT_BGR32 PIX_FMT_BGR32
+#  define AV_PIX_FMT_YUV422P PIX_FMT_YUV422P
+#  define AV_PIX_FMT_BGRA PIX_FMT_BGRA
+#  define AV_PIX_FMT_ARGB PIX_FMT_ARGB
+#  define AV_PIX_FMT_RGBA PIX_FMT_RGBA
+#endif
+
+/* New API from FFmpeg-2.0 which soon became the recommended one. */
+#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(52, 38, 100)
+#  define av_frame_alloc avcodec_alloc_frame
+#  define av_frame_free avcodec_free_frame
+#  define av_frame_unref avcodec_get_frame_defaults
+#endif
+
+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 24, 102)
+
+/* NOTE: The code in this block is from FFmpeg 2.6.4, which is licensed under the LGPL. */
+
+#define MAX_NEG_CROP 1024
+
+extern const uint8_t ff_crop_tab[256 + 2 * MAX_NEG_CROP];
+
+/* filter parameters: [-1 4 2 4 -1] // 8 */
+FFMPEG_INLINE
+void deinterlace_line(uint8_t *dst,
+                      const uint8_t *lum_m4, const uint8_t *lum_m3,
+                      const uint8_t *lum_m2, const uint8_t *lum_m1,
+                      const uint8_t *lum,
+                      int size)
+{
+       const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
+       int sum;
+
+       for(;size > 0;size--) {
+               sum = -lum_m4[0];
+               sum += lum_m3[0] << 2;
+               sum += lum_m2[0] << 1;
+               sum += lum_m1[0] << 2;
+               sum += -lum[0];
+               dst[0] = cm[(sum + 4) >> 3];
+               lum_m4++;
+               lum_m3++;
+               lum_m2++;
+               lum_m1++;
+               lum++;
+               dst++;
+       }
+}
+
+FFMPEG_INLINE
+void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3,
+                              uint8_t *lum_m2, uint8_t *lum_m1,
+                              uint8_t *lum, int size)
+{
+       const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
+       int sum;
+
+       for(;size > 0;size--) {
+               sum = -lum_m4[0];
+               sum += lum_m3[0] << 2;
+               sum += lum_m2[0] << 1;
+               lum_m4[0]=lum_m2[0];
+               sum += lum_m1[0] << 2;
+               sum += -lum[0];
+               lum_m2[0] = cm[(sum + 4) >> 3];
+               lum_m4++;
+               lum_m3++;
+               lum_m2++;
+               lum_m1++;
+               lum++;
+       }
+}
+
+/* deinterlacing : 2 temporal taps, 3 spatial taps linear filter. The
+   top field is copied as is, but the bottom field is deinterlaced
+   against the top field. */
+FFMPEG_INLINE
+void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
+                              const uint8_t *src1, int src_wrap,
+                              int width, int height)
+{
+       const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
+       int y;
+
+       src_m2 = src1;
+       src_m1 = src1;
+       src_0=&src_m1[src_wrap];
+       src_p1=&src_0[src_wrap];
+       src_p2=&src_p1[src_wrap];
+       for(y=0;y<(height-2);y+=2) {
+               memcpy(dst,src_m1,width);
+               dst += dst_wrap;
+               deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
+               src_m2 = src_0;
+               src_m1 = src_p1;
+               src_0 = src_p2;
+               src_p1 += 2*src_wrap;
+               src_p2 += 2*src_wrap;
+               dst += dst_wrap;
+       }
+       memcpy(dst,src_m1,width);
+       dst += dst_wrap;
+       /* do last line */
+       deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
+}
+
+FFMPEG_INLINE
+int deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
+                                     int width, int height)
+{
+       uint8_t *src_m1, *src_0, *src_p1, *src_p2;
+       int y;
+       uint8_t *buf = (uint8_t *)av_malloc(width);
+       if (!buf)
+               return AVERROR(ENOMEM);
+
+       src_m1 = src1;
+       memcpy(buf,src_m1,width);
+       src_0=&src_m1[src_wrap];
+       src_p1=&src_0[src_wrap];
+       src_p2=&src_p1[src_wrap];
+       for(y=0;y<(height-2);y+=2) {
+               deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
+               src_m1 = src_p1;
+               src_0 = src_p2;
+               src_p1 += 2*src_wrap;
+               src_p2 += 2*src_wrap;
+       }
+       /* do last line */
+       deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
+       av_free(buf);
+       return 0;
+}
+
+#ifdef __GNUC__
+#  pragma GCC diagnostic push
+#  pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
+
+FFMPEG_INLINE
+int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
+                          enum AVPixelFormat pix_fmt, int width, int height)
+{
+       int i, ret;
+
+       if (pix_fmt != AV_PIX_FMT_YUV420P &&
+           pix_fmt != AV_PIX_FMT_YUVJ420P &&
+           pix_fmt != AV_PIX_FMT_YUV422P &&
+           pix_fmt != AV_PIX_FMT_YUVJ422P &&
+           pix_fmt != AV_PIX_FMT_YUV444P &&
+           pix_fmt != AV_PIX_FMT_YUV411P &&
+           pix_fmt != AV_PIX_FMT_GRAY8)
+               return -1;
+       if ((width & 3) != 0 || (height & 3) != 0)
+               return -1;
+
+       for(i=0;i<3;i++) {
+               if (i == 1) {
+                       switch(pix_fmt) {
+                       case AV_PIX_FMT_YUVJ420P:
+                       case AV_PIX_FMT_YUV420P:
+                               width >>= 1;
+                               height >>= 1;
+                               break;
+                       case AV_PIX_FMT_YUV422P:
+                       case AV_PIX_FMT_YUVJ422P:
+                               width >>= 1;
+                               break;
+                       case AV_PIX_FMT_YUV411P:
+                               width >>= 2;
+                               break;
+                       default:
+                               break;
+                       }
+                       if (pix_fmt == AV_PIX_FMT_GRAY8) {
+                               break;
+                       }
+               }
+               if (src == dst) {
+                       ret = deinterlace_bottom_field_inplace(dst->data[i],
+                                                              dst->linesize[i],
+                                                              width, height);
+                       if (ret < 0)
+                               return ret;
+               } else {
+                       deinterlace_bottom_field(dst->data[i],dst->linesize[i],
+                                                src->data[i], src->linesize[i],
+                                                width, height);
+               }
+       }
+       return 0;
+}
+
+#ifdef __GNUC__
+#  pragma GCC diagnostic pop
+#endif
+
+#endif
+
 #endif
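The new compat block does two things: for libraries older than the version thresholds it maps the modern spellings (AV_PIX_FMT_*, av_frame_alloc()/av_frame_free()/av_frame_unref()) back to their legacy names, and for FFmpeg 3.0 (libavcodec >= 57.24.102), where avpicture_deinterlace() is no longer provided, it carries an LGPL copy taken from FFmpeg 2.6.4. The copied filter is the [-1 4 2 4 -1] / 8 vertical tap with rounding: five adjacent luma values 10, 20, 30, 40, 50, for instance, give (-10 + 80 + 60 + 160 - 50 + 4) >> 3 = 30. The fragment below is a hypothetical caller (invented names, not Blender code) showing how the rest of the tree is meant to consume the header so that it compiles unchanged against old and new FFmpeg:

/* Sketch only: caller written against the modern names; ffmpeg_compat.h supplies
 * the legacy fallbacks on old FFmpeg and its own avpicture_deinterlace() on 3.0+. */
#include "ffmpeg_compat.h"

static AVFrame *deinterlaced_copy(const AVPicture *src, int width, int height)
{
    AVFrame *frame = av_frame_alloc();            /* pre-2.0: avcodec_alloc_frame() */
    if (!frame)
        return NULL;

    if (avpicture_alloc((AVPicture *)frame, AV_PIX_FMT_YUV420P, width, height) < 0) {
        av_frame_free(&frame);                    /* pre-2.0: avcodec_free_frame() */
        return NULL;
    }

    if (avpicture_deinterlace((AVPicture *)frame, src,
                              AV_PIX_FMT_YUV420P, width, height) < 0) {
        avpicture_free((AVPicture *)frame);
        av_frame_free(&frame);
        return NULL;
    }

    /* On success the caller releases the buffers with avpicture_free() and the
     * frame itself with av_frame_free(). */
    return frame;
}
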
source/blender/blenkernel/intern/writeffmpeg.c
index 35fd94d0747248b1471ee8ffc572ce53ebdd2b3d..edeccf472c833c864483eb8f17331d8c74e6a1b0 100644 (file)
@@ -138,8 +138,8 @@ static int write_audio_frame(FFMpegContext *context)
        context->audio_time += (double) context->audio_input_samples / (double) c->sample_rate;
 
 #ifdef FFMPEG_HAVE_ENCODE_AUDIO2
-       frame = avcodec_alloc_frame();
-       avcodec_get_frame_defaults(frame);
+       frame = av_frame_alloc();
+       av_frame_unref(frame);
        frame->pts = context->audio_time / av_q2d(c->time_base);
        frame->nb_samples = context->audio_input_samples;
        frame->format = c->sample_fmt;
@@ -172,7 +172,7 @@ static int write_audio_frame(FFMpegContext *context)
        }
 
        if (!got_output) {
-               avcodec_free_frame(&frame);
+               av_frame_free(&frame);
                return 0;
        }
 #else
@@ -202,7 +202,7 @@ static int write_audio_frame(FFMpegContext *context)
                if (av_interleaved_write_frame(context->outfile, &pkt) != 0) {
                        fprintf(stderr, "Error writing audio packet!\n");
                        if (frame)
-                               avcodec_free_frame(&frame);
+                               av_frame_free(&frame);
                        return -1;
                }
 
@@ -210,7 +210,7 @@ static int write_audio_frame(FFMpegContext *context)
        }
 
        if (frame)
-               avcodec_free_frame(&frame);
+               av_frame_free(&frame);
 
        return 0;
 }
@@ -224,7 +224,7 @@ static AVFrame *alloc_picture(int pix_fmt, int width, int height)
        int size;
        
        /* allocate space for the struct */
-       f = avcodec_alloc_frame();
+       f = av_frame_alloc();
        if (!f) return NULL;
        size = avpicture_get_size(pix_fmt, width, height);
        /* allocate the actual picture buffer */
@@ -363,8 +363,8 @@ static AVFrame *generate_video_frame(FFMpegContext *context, uint8_t *pixels, Re
        int height = c->height;
        AVFrame *rgb_frame;
 
-       if (c->pix_fmt != PIX_FMT_BGR32) {
-               rgb_frame = alloc_picture(PIX_FMT_BGR32, width, height);
+       if (c->pix_fmt != AV_PIX_FMT_BGR32) {
+               rgb_frame = alloc_picture(AV_PIX_FMT_BGR32, width, height);
                if (!rgb_frame) {
                        BKE_report(reports, RPT_ERROR, "Could not allocate temporary frame");
                        return NULL;
@@ -414,14 +414,14 @@ static AVFrame *generate_video_frame(FFMpegContext *context, uint8_t *pixels, Re
                }
        }
 
-       if (c->pix_fmt != PIX_FMT_BGR32) {
+       if (c->pix_fmt != AV_PIX_FMT_BGR32) {
                sws_scale(context->img_convert_ctx, (const uint8_t *const *) rgb_frame->data,
                          rgb_frame->linesize, 0, c->height,
                          context->current_frame->data, context->current_frame->linesize);
                delete_picture(rgb_frame);
        }
 
-       context->current_frame->format = PIX_FMT_BGR32;
+       context->current_frame->format = AV_PIX_FMT_BGR32;
        context->current_frame->width = width;
        context->current_frame->height = height;
 
@@ -586,12 +586,12 @@ static AVStream *alloc_video_stream(FFMpegContext *context, RenderData *rd, int
        }
        else {
                /* makes HuffYUV happy ... */
-               c->pix_fmt = PIX_FMT_YUV422P;
+               c->pix_fmt = AV_PIX_FMT_YUV422P;
        }
 
        if (context->ffmpeg_type == FFMPEG_XVID) {
                /* arghhhh ... */
-               c->pix_fmt = PIX_FMT_YUV420P;
+               c->pix_fmt = AV_PIX_FMT_YUV420P;
                c->codec_tag = (('D' << 24) + ('I' << 16) + ('V' << 8) + 'X');
        }
 
@@ -604,26 +604,26 @@ static AVStream *alloc_video_stream(FFMpegContext *context, RenderData *rd, int
        /* Keep lossless encodes in the RGB domain. */
        if (codec_id == AV_CODEC_ID_HUFFYUV) {
                if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
-                       c->pix_fmt = PIX_FMT_BGRA;
+                       c->pix_fmt = AV_PIX_FMT_BGRA;
                }
                else {
-                       c->pix_fmt = PIX_FMT_RGB32;
+                       c->pix_fmt = AV_PIX_FMT_RGB32;
                }
        }
 
        if (codec_id == AV_CODEC_ID_FFV1) {
-               c->pix_fmt = PIX_FMT_RGB32;
+               c->pix_fmt = AV_PIX_FMT_RGB32;
        }
 
        if (codec_id == AV_CODEC_ID_QTRLE) {
                if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
-                       c->pix_fmt = PIX_FMT_ARGB;
+                       c->pix_fmt = AV_PIX_FMT_ARGB;
                }
        }
 
        if (codec_id == AV_CODEC_ID_PNG) {
                if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
-                       c->pix_fmt = PIX_FMT_RGBA;
+                       c->pix_fmt = AV_PIX_FMT_RGBA;
                }
        }
 
@@ -661,8 +661,8 @@ static AVStream *alloc_video_stream(FFMpegContext *context, RenderData *rd, int
 
        context->current_frame = alloc_picture(c->pix_fmt, c->width, c->height);
 
-       context->img_convert_ctx = sws_getContext(c->width, c->height, PIX_FMT_BGR32, c->width, c->height, c->pix_fmt, SWS_BICUBIC,
-                                        NULL, NULL, NULL);
+       context->img_convert_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_BGR32, c->width, c->height, c->pix_fmt, SWS_BICUBIC,
+                                                 NULL, NULL, NULL);
        return st;
 }
 
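In writeffmpeg.c the changes are mostly the mechanical PIX_FMT_* → AV_PIX_FMT_* rename plus the frame helpers; the pipeline shape is unchanged: render output in BGR32 is converted to the stream's pixel format through swscale. Below is a minimal, hedged sketch of that conversion step with invented names, mirroring the sws_getContext()/sws_scale() pair above rather than reproducing the exact Blender code:

/* Sketch only: convert one BGR32 frame to the encoder's pixel format.
 * `rgb` and `out` are assumed to be AVFrames whose buffers are already allocated. */
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>
#include <libswscale/swscale.h>

static int convert_to_codec_fmt(AVFrame *out, const AVFrame *rgb,
                                int width, int height, enum AVPixelFormat dst_fmt)
{
    struct SwsContext *sws = sws_getContext(width, height, AV_PIX_FMT_BGR32,
                                            width, height, dst_fmt,
                                            SWS_BICUBIC, NULL, NULL, NULL);
    if (!sws)
        return -1;

    sws_scale(sws, (const uint8_t *const *)rgb->data, rgb->linesize,
              0, height, out->data, out->linesize);

    sws_freeContext(sws);
    return 0;
}

Blender keeps a single SwsContext per stream for the whole render (img_convert_ctx, created in alloc_video_stream above) rather than recreating it per frame as this standalone sketch does.
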
source/blender/imbuf/intern/anim_movie.c
index e2da2e464783e3d7c13d5ba899cd77088e782dda..a32cf401f70b68264fb32725dab6f34e6f735b6b 100644 (file)
@@ -556,12 +556,12 @@ static int startffmpeg(struct anim *anim)
        anim->next_pts = -1;
        anim->next_packet.stream_index = -1;
 
-       anim->pFrame = avcodec_alloc_frame();
+       anim->pFrame = av_frame_alloc();
        anim->pFrameComplete = false;
-       anim->pFrameDeinterlaced = avcodec_alloc_frame();
-       anim->pFrameRGB = avcodec_alloc_frame();
+       anim->pFrameDeinterlaced = av_frame_alloc();
+       anim->pFrameRGB = av_frame_alloc();
 
-       if (avpicture_get_size(PIX_FMT_RGBA, anim->x, anim->y) !=
+       if (avpicture_get_size(AV_PIX_FMT_RGBA, anim->x, anim->y) !=
            anim->x * anim->y * 4)
        {
                fprintf(stderr,
@@ -600,7 +600,7 @@ static int startffmpeg(struct anim *anim)
                anim->pCodecCtx->pix_fmt,
                anim->x,
                anim->y,
-               PIX_FMT_RGBA,
+               AV_PIX_FMT_RGBA,
                SWS_FAST_BILINEAR | SWS_PRINT_INFO | SWS_FULL_CHR_H_INT,
                NULL, NULL, NULL);
                
@@ -689,7 +689,7 @@ static void ffmpeg_postprocess(struct anim *anim)
        
        avpicture_fill((AVPicture *) anim->pFrameRGB,
                       (unsigned char *) ibuf->rect,
-                      PIX_FMT_RGBA, anim->x, anim->y);
+                      AV_PIX_FMT_RGBA, anim->x, anim->y);
 
        if (ENDIAN_ORDER == B_ENDIAN) {
                int *dstStride   = anim->pFrameRGB->linesize;
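anim_movie.c likewise only needs the renamed constants; it still binds the ImBuf pixel memory to pFrameRGB via avpicture_fill() and relies on avpicture_get_size(AV_PIX_FMT_RGBA, x, y) being exactly x * y * 4. Later FFmpeg releases deprecate the avpicture_* helpers in favour of the libavutil image utilities; purely as a forward-looking sketch (not part of this commit, names invented), the same binding could look like this:

/* Sketch only: bind an existing RGBA pixel buffer to an AVFrame without avpicture_fill().
 * `rect` is assumed to point at width * height * 4 bytes of RGBA pixels. */
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>

static int bind_rgba_buffer(AVFrame *frame, unsigned char *rect, int width, int height)
{
    /* Same sanity check as the avpicture_get_size() comparison above. */
    if (av_image_get_buffer_size(AV_PIX_FMT_RGBA, width, height, 1) != width * height * 4)
        return -1;

    return av_image_fill_arrays(frame->data, frame->linesize, rect,
                                AV_PIX_FMT_RGBA, width, height, 1);
}
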
source/blender/imbuf/intern/indexer.c
index ac57b095800f7089abfb9e7612259d537c553f6d..a6012b1e88da72725c71ae5393e99a79b3c240cc 100644 (file)
@@ -519,7 +519,7 @@ static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
                rv->c->pix_fmt = rv->codec->pix_fmts[0];
        }
        else {
-               rv->c->pix_fmt = PIX_FMT_YUVJ420P;
+               rv->c->pix_fmt = AV_PIX_FMT_YUVJ420P;
        }
 
        rv->c->sample_aspect_ratio =
@@ -554,7 +554,7 @@ static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
        if (st->codec->width != width || st->codec->height != height ||
            st->codec->pix_fmt != rv->c->pix_fmt)
        {
-               rv->frame = avcodec_alloc_frame();
+               rv->frame = av_frame_alloc();
                avpicture_fill((AVPicture *) rv->frame,
                               MEM_mallocN(avpicture_get_size(
                                               rv->c->pix_fmt,
@@ -905,7 +905,7 @@ static int index_rebuild_ffmpeg(FFmpegIndexBuilderContext *context,
 
        memset(&next_packet, 0, sizeof(AVPacket));
 
-       in_frame = avcodec_alloc_frame();
+       in_frame = av_frame_alloc();
 
        stream_size = avio_size(context->iFormatCtx->pb);
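The indexer changes follow the same pattern once more: avcodec_alloc_frame() becomes av_frame_alloc() (with ffmpeg_compat.h providing the fallback on pre-2.0 libraries) and PIX_FMT_YUVJ420P becomes AV_PIX_FMT_YUVJ420P. Since av_frame_alloc() can return NULL, a caller that previously ignored the result may want a check; a small hedged sketch (made-up name, not the indexer's actual error path):

/* Sketch only: allocate the indexer's decode frame with a NULL check. */
#include <libavutil/frame.h>
#include <stdio.h>

static AVFrame *alloc_index_frame(void)
{
    AVFrame *in_frame = av_frame_alloc();
    if (!in_frame)
        fprintf(stderr, "indexer: could not allocate AVFrame\n");
    return in_frame;
}
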