Reduce amount of deprecated symbols used from FFmpeg
[blender.git] / source / gameengine / VideoTexture / VideoFFmpeg.cpp
/*
-----------------------------------------------------------------------------
This source file is part of VideoTexture library

Copyright (c) 2007 The Zdeno Ash Miklas

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place - Suite 330, Boston, MA 02111-1307, USA, or go to
http://www.gnu.org/copyleft/lesser.txt.
-----------------------------------------------------------------------------
*/

/** \file gameengine/VideoTexture/VideoFFmpeg.cpp
 *  \ingroup bgevideotex
 */

#ifdef WITH_FFMPEG

// INT64_C fix for some linux machines (C99ism)
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#include <stdint.h>

#include "MEM_guardedalloc.h"
#include "PIL_time.h"

#include <string>

#include "VideoFFmpeg.h"
#include "Exception.h"

// default framerate
const double defFrameRate = 25.0;
// time scale constant
const long timeScale = 1000;

// macro for exception handling and logging
#define CATCH_EXCP catch (Exception & exp) \
{ exp.report(); m_status = SourceError; }

extern "C" void do_init_ffmpeg();

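// do_init_ffmpeg() is implemented elsewhere in Blender's FFmpeg glue code;
// presumably it performs the one-time global FFmpeg setup (format/codec
// registration) before any demuxer or decoder is opened here.
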
// class VideoFFmpeg

// constructor
VideoFFmpeg::VideoFFmpeg (HRESULT * hRslt) : VideoBase(),
m_codec(NULL), m_formatCtx(NULL), m_codecCtx(NULL),
m_frame(NULL), m_frameDeinterlaced(NULL), m_frameRGB(NULL), m_imgConvertCtx(NULL),
m_deinterlace(false), m_preseek(0), m_videoStream(-1), m_baseFrameRate(25.0),
m_lastFrame(-1), m_eof(false), m_externTime(false), m_curPosition(-1), m_startTime(0),
m_captWidth(0), m_captHeight(0), m_captRate(0.f), m_isImage(false),
m_isThreaded(false), m_isStreaming(false), m_stopThread(false), m_cacheStarted(false)
{
    // set video format
    m_format = RGB24;
    // force flip because ffmpeg always returns the image in the wrong orientation for textures
    setFlip(true);
    // construction is OK
    *hRslt = S_OK;
    m_thread.first = m_thread.last = NULL;
    pthread_mutex_init(&m_cacheMutex, NULL);
    m_frameCacheFree.first = m_frameCacheFree.last = NULL;
    m_frameCacheBase.first = m_frameCacheBase.last = NULL;
    m_packetCacheFree.first = m_packetCacheFree.last = NULL;
    m_packetCacheBase.first = m_packetCacheBase.last = NULL;
}

// destructor
VideoFFmpeg::~VideoFFmpeg ()
{
}

// release components
bool VideoFFmpeg::release()
{
    // release
    stopCache();
    if (m_codecCtx)
    {
        avcodec_close(m_codecCtx);
        m_codecCtx = NULL;
    }
    if (m_formatCtx)
    {
        av_close_input_file(m_formatCtx);
        m_formatCtx = NULL;
    }
    if (m_frame)
    {
        av_free(m_frame);
        m_frame = NULL;
    }
    if (m_frameDeinterlaced)
    {
        MEM_freeN(m_frameDeinterlaced->data[0]);
        av_free(m_frameDeinterlaced);
        m_frameDeinterlaced = NULL;
    }
    if (m_frameRGB)
    {
        MEM_freeN(m_frameRGB->data[0]);
        av_free(m_frameRGB);
        m_frameRGB = NULL;
    }
    if (m_imgConvertCtx)
    {
        sws_freeContext(m_imgConvertCtx);
        m_imgConvertCtx = NULL;
    }
    m_codec = NULL;
    m_status = SourceStopped;
    m_lastFrame = -1;
    return true;
}

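// Allocate an AVFrame backed by a MEM_callocN buffer sized for the requested
// pixel format: avpicture_get_size() returns the byte size needed for the
// format/dimensions, and avpicture_fill() wires the buffer into the frame's
// data[]/linesize[] arrays.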
AVFrame *VideoFFmpeg::allocFrameRGB()
{
    AVFrame *frame;
    frame = avcodec_alloc_frame();
    if (m_format == RGBA32)
    {
        avpicture_fill((AVPicture*)frame,
            (uint8_t*)MEM_callocN(avpicture_get_size(
                PIX_FMT_RGBA,
                m_codecCtx->width, m_codecCtx->height),
                "ffmpeg rgba"),
            PIX_FMT_RGBA, m_codecCtx->width, m_codecCtx->height);
    } else
    {
        avpicture_fill((AVPicture*)frame,
            (uint8_t*)MEM_callocN(avpicture_get_size(
                PIX_FMT_RGB24,
                m_codecCtx->width, m_codecCtx->height),
                "ffmpeg rgb"),
            PIX_FMT_RGB24, m_codecCtx->width, m_codecCtx->height);
    }
    return frame;
}

// set initial parameters
void VideoFFmpeg::initParams (short width, short height, float rate, bool image)
{
    m_captWidth = width;
    m_captHeight = height;
    m_captRate = rate;
    m_isImage = image;
}

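// Open the container, locate the first video stream, open its decoder and
// allocate the deinterlace buffer and RGB conversion context.
// Returns 0 on success, -1 on failure (anything acquired so far is released).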
int VideoFFmpeg::openStream(const char *filename, AVInputFormat *inputFormat, AVDictionary **formatParams)
{
    AVFormatContext *formatCtx = NULL;
    int             i, videoStream;
    AVCodec         *codec;
    AVCodecContext  *codecCtx;

    if (avformat_open_input(&formatCtx, filename, inputFormat, formatParams) != 0)
        return -1;

    if (av_find_stream_info(formatCtx) < 0)
    {
        av_close_input_file(formatCtx);
        return -1;
    }

    /* Find the first video stream */
    videoStream = -1;
    for (i = 0; i < formatCtx->nb_streams; i++)
    {
        if (formatCtx->streams[i] &&
            get_codec_from_stream(formatCtx->streams[i]) &&
            (get_codec_from_stream(formatCtx->streams[i])->codec_type == AVMEDIA_TYPE_VIDEO))
        {
            videoStream = i;
            break;
        }
    }

    if (videoStream == -1)
    {
        av_close_input_file(formatCtx);
        return -1;
    }

    codecCtx = get_codec_from_stream(formatCtx->streams[videoStream]);

    /* Find the decoder for the video stream */
    codec = avcodec_find_decoder(codecCtx->codec_id);
    if (codec == NULL)
    {
        av_close_input_file(formatCtx);
        return -1;
    }
    codecCtx->workaround_bugs = 1;
    if (avcodec_open(codecCtx, codec) < 0)
    {
        av_close_input_file(formatCtx);
        return -1;
    }

#ifdef FFMPEG_OLD_FRAME_RATE
    if (codecCtx->frame_rate > 1000 && codecCtx->frame_rate_base == 1)
        codecCtx->frame_rate_base = 1000;
    m_baseFrameRate = (double)codecCtx->frame_rate / (double)codecCtx->frame_rate_base;
#else
    m_baseFrameRate = av_q2d(formatCtx->streams[videoStream]->r_frame_rate);
#endif
    if (m_baseFrameRate <= 0.0)
        m_baseFrameRate = defFrameRate;

    m_codec = codec;
    m_codecCtx = codecCtx;
    m_formatCtx = formatCtx;
    m_videoStream = videoStream;
    m_frame = avcodec_alloc_frame();
    m_frameDeinterlaced = avcodec_alloc_frame();

    // allocate buffer if deinterlacing is required
    avpicture_fill((AVPicture*)m_frameDeinterlaced,
        (uint8_t*)MEM_callocN(avpicture_get_size(
        m_codecCtx->pix_fmt,
        m_codecCtx->width, m_codecCtx->height),
        "ffmpeg deinterlace"),
        m_codecCtx->pix_fmt, m_codecCtx->width, m_codecCtx->height);

    // check if the pixel format supports Alpha
    if (m_codecCtx->pix_fmt == PIX_FMT_RGB32 ||
        m_codecCtx->pix_fmt == PIX_FMT_BGR32 ||
        m_codecCtx->pix_fmt == PIX_FMT_RGB32_1 ||
        m_codecCtx->pix_fmt == PIX_FMT_BGR32_1)
    {
        // allocate buffer to store final decoded frame
        m_format = RGBA32;
        // allocate sws context
        m_imgConvertCtx = sws_getContext(
            m_codecCtx->width,
            m_codecCtx->height,
            m_codecCtx->pix_fmt,
            m_codecCtx->width,
            m_codecCtx->height,
            PIX_FMT_RGBA,
            SWS_FAST_BILINEAR,
            NULL, NULL, NULL);
    } else
    {
        // allocate buffer to store final decoded frame
        m_format = RGB24;
        // allocate sws context
        m_imgConvertCtx = sws_getContext(
            m_codecCtx->width,
            m_codecCtx->height,
            m_codecCtx->pix_fmt,
            m_codecCtx->width,
            m_codecCtx->height,
            PIX_FMT_RGB24,
            SWS_FAST_BILINEAR,
            NULL, NULL, NULL);
    }
    m_frameRGB = allocFrameRGB();

    if (!m_imgConvertCtx) {
        avcodec_close(m_codecCtx);
        m_codecCtx = NULL;
        av_close_input_file(m_formatCtx);
        m_formatCtx = NULL;
        av_free(m_frame);
        m_frame = NULL;
        MEM_freeN(m_frameDeinterlaced->data[0]);
        av_free(m_frameDeinterlaced);
        m_frameDeinterlaced = NULL;
        MEM_freeN(m_frameRGB->data[0]);
        av_free(m_frameRGB);
        m_frameRGB = NULL;
        return -1;
    }
    return 0;
}

/*
 * This thread loads video frames asynchronously.
 * It provides a frame caching service.
 * The main thread is responsible for positioning the frame pointer in the
 * file correctly before calling startCache(), which starts this thread.
 * The cache is organized in two layers: 1) a cache of 20-30 undecoded packets, to keep
 * memory and CPU load low; 2) a cache of 5 decoded frames.
 * If the main thread does not find the frame it needs in the cache (because the video
 * has restarted or because the GE is lagging), it stops the cache with stopCache()
 * (a synchronous function: it signals the cache thread to stop and waits for
 * confirmation), then changes the position in the stream and restarts the cache thread.
 */
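// Note: the cache sizes come from the CACHE_FRAME_SIZE and CACHE_PACKET_SIZE
// constants used in startCache() below, presumably defined in VideoFFmpeg.h.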
void *VideoFFmpeg::cacheThread(void *data)
{
    VideoFFmpeg* video = (VideoFFmpeg*)data;
    // holds the frame that is being decoded
    CacheFrame *currentFrame = NULL;
    CachePacket *cachePacket;
    bool endOfFile = false;
    int frameFinished = 0;
    double timeBase = av_q2d(video->m_formatCtx->streams[video->m_videoStream]->time_base);
    int64_t startTs = video->m_formatCtx->streams[video->m_videoStream]->start_time;

    if (startTs == AV_NOPTS_VALUE)
        startTs = 0;

    while (!video->m_stopThread)
    {
        // packet cache is used solely by this thread, no need to lock
        // In case the file contains streams other than the one we are looking for,
        // allow a bit of cycling to get rid of those packets quickly
        frameFinished = 0;
        while (    !endOfFile
                && (cachePacket = (CachePacket *)video->m_packetCacheFree.first) != NULL
                && frameFinished < 25)
        {
            // free packet => packet cache is not full yet, just read more
            if (av_read_frame(video->m_formatCtx, &cachePacket->packet) >= 0)
            {
                if (cachePacket->packet.stream_index == video->m_videoStream)
                {
                    // make sure fresh memory is allocated for the packet and move it to the queue
                    av_dup_packet(&cachePacket->packet);
                    BLI_remlink(&video->m_packetCacheFree, cachePacket);
                    BLI_addtail(&video->m_packetCacheBase, cachePacket);
                    break;
                } else {
                    // this is not a packet we want, just leave it on the free queue
                    // Note: here we could handle sound packets
                    av_free_packet(&cachePacket->packet);
                    frameFinished++;
                }
            } else {
                if (video->m_isFile)
                    // this marks the end of the file
                    endOfFile = true;
                // if we cannot read a packet, no need to continue
                break;
            }
        }
        // frame cache is also used by the main thread, lock
        if (currentFrame == NULL)
        {
            // no current frame being decoded, take a free one
            pthread_mutex_lock(&video->m_cacheMutex);
            if ((currentFrame = (CacheFrame *)video->m_frameCacheFree.first) != NULL)
                BLI_remlink(&video->m_frameCacheFree, currentFrame);
            pthread_mutex_unlock(&video->m_cacheMutex);
        }
        if (currentFrame != NULL)
        {
            // this frame is out of the free and busy queues, we can manipulate it without locking
            frameFinished = 0;
            while (!frameFinished && (cachePacket = (CachePacket *)video->m_packetCacheBase.first) != NULL)
            {
                BLI_remlink(&video->m_packetCacheBase, cachePacket);
                // use m_frame because while caching, it is not used by the main thread
                // we can't use currentFrame directly because we need to convert to RGB first
                avcodec_decode_video2(video->m_codecCtx,
                    video->m_frame, &frameFinished,
                    &cachePacket->packet);
                if (frameFinished)
                {
                    AVFrame *input = video->m_frame;

                    /* This means the data wasn't read properly, this check prevents a crash */
                    if (   input->data[0] != 0 || input->data[1] != 0
                        || input->data[2] != 0 || input->data[3] != 0)
                    {
                        if (video->m_deinterlace)
                        {
                            if (avpicture_deinterlace(
                                (AVPicture*) video->m_frameDeinterlaced,
                                (const AVPicture*) video->m_frame,
                                video->m_codecCtx->pix_fmt,
                                video->m_codecCtx->width,
                                video->m_codecCtx->height) >= 0)
                            {
                                input = video->m_frameDeinterlaced;
                            }
                        }
                        // convert to RGB24
                        sws_scale(video->m_imgConvertCtx,
                            input->data,
                            input->linesize,
                            0,
                            video->m_codecCtx->height,
                            currentFrame->frame->data,
                            currentFrame->frame->linesize);
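                        // The packet dts is expressed in stream time_base units:
                        // multiplying by timeBase converts it to seconds and by
                        // m_baseFrameRate to a frame index; +0.5 rounds to nearest.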
                        // move the frame to the queue, this frame is necessarily the next one
                        video->m_curPosition = (long)((cachePacket->packet.dts - startTs) * (video->m_baseFrameRate * timeBase) + 0.5);
                        currentFrame->framePosition = video->m_curPosition;
                        pthread_mutex_lock(&video->m_cacheMutex);
                        BLI_addtail(&video->m_frameCacheBase, currentFrame);
                        pthread_mutex_unlock(&video->m_cacheMutex);
                        currentFrame = NULL;
                    }
                }
                av_free_packet(&cachePacket->packet);
                BLI_addtail(&video->m_packetCacheFree, cachePacket);
            }
            if (currentFrame && endOfFile)
            {
                // no more packets and end of file => queue a special frame that indicates this
                currentFrame->framePosition = -1;
                pthread_mutex_lock(&video->m_cacheMutex);
                BLI_addtail(&video->m_frameCacheBase, currentFrame);
                pthread_mutex_unlock(&video->m_cacheMutex);
                currentFrame = NULL;
                // no need to stay any longer in this thread
                break;
            }
        }
        // small sleep to avoid unnecessary looping
        PIL_sleep_ms(10);
    }
    // before quitting, put the current frame back on the free queue to allow freeing
    if (currentFrame)
    {
        pthread_mutex_lock(&video->m_cacheMutex);
        BLI_addtail(&video->m_frameCacheFree, currentFrame);
        pthread_mutex_unlock(&video->m_cacheMutex);
    }
    return 0;
}

// start the thread that caches video frames from a file/capture/stream
// this function should be called only when the position in the stream is set for the
// first frame to cache
bool VideoFFmpeg::startCache()
{
    if (!m_cacheStarted && m_isThreaded)
    {
        m_stopThread = false;
        for (int i = 0; i < CACHE_FRAME_SIZE; i++)
        {
            CacheFrame *frame = new CacheFrame();
            frame->frame = allocFrameRGB();
            BLI_addtail(&m_frameCacheFree, frame);
        }
        for (int i = 0; i < CACHE_PACKET_SIZE; i++)
        {
            CachePacket *packet = new CachePacket();
            BLI_addtail(&m_packetCacheFree, packet);
        }
        BLI_init_threads(&m_thread, cacheThread, 1);
        BLI_insert_thread(&m_thread, this);
        m_cacheStarted = true;
    }
    return m_cacheStarted;
}

void VideoFFmpeg::stopCache()
{
    if (m_cacheStarted)
    {
        m_stopThread = true;
        BLI_end_threads(&m_thread);
        // now delete the cache
        CacheFrame *frame;
        CachePacket *packet;
        while ((frame = (CacheFrame *)m_frameCacheBase.first) != NULL)
        {
            BLI_remlink(&m_frameCacheBase, frame);
            MEM_freeN(frame->frame->data[0]);
            av_free(frame->frame);
            delete frame;
        }
        while ((frame = (CacheFrame *)m_frameCacheFree.first) != NULL)
        {
            BLI_remlink(&m_frameCacheFree, frame);
            MEM_freeN(frame->frame->data[0]);
            av_free(frame->frame);
            delete frame;
        }
        while ((packet = (CachePacket *)m_packetCacheBase.first) != NULL)
        {
            BLI_remlink(&m_packetCacheBase, packet);
            av_free_packet(&packet->packet);
            delete packet;
        }
        while ((packet = (CachePacket *)m_packetCacheFree.first) != NULL)
        {
            BLI_remlink(&m_packetCacheFree, packet);
            delete packet;
        }
        m_cacheStarted = false;
    }
}

void VideoFFmpeg::releaseFrame(AVFrame* frame)
{
    if (frame == m_frameRGB)
    {
        // this is not a frame from the cache, ignore
        return;
    }
    // this frame MUST be the first one of the queue
    pthread_mutex_lock(&m_cacheMutex);
    CacheFrame *cacheFrame = (CacheFrame *)m_frameCacheBase.first;
    assert (cacheFrame != NULL && cacheFrame->frame == frame);
    BLI_remlink(&m_frameCacheBase, cacheFrame);
    BLI_addtail(&m_frameCacheFree, cacheFrame);
    pthread_mutex_unlock(&m_cacheMutex);
}

// open video file
void VideoFFmpeg::openFile (char * filename)
{
    do_init_ffmpeg();

    if (openStream(filename, NULL, NULL) != 0)
        return;

    if (m_codecCtx->gop_size)
        m_preseek = (m_codecCtx->gop_size < 25) ? m_codecCtx->gop_size + 1 : 25;
    else if (m_codecCtx->has_b_frames)
        m_preseek = 25; // should determine the gop size
    else
        m_preseek = 0;

    // get video time range
    m_range[0] = 0.0;
    m_range[1] = (double)m_formatCtx->duration / AV_TIME_BASE;
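    // Note: m_formatCtx->duration is expressed in AV_TIME_BASE units
    // (microseconds), so the division above yields the duration in seconds.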

    // open base class
    VideoBase::openFile(filename);

    if (
        // ffmpeg reports http sources as regular (non-streaming) files,
        // but it is really not desirable to seek on an http file, so force streaming.
        // It would be good to get this information from the context, but there is no simple indication
        !strncmp(filename, "http://", 7) ||
        (m_formatCtx->pb && !m_formatCtx->pb->seekable)
        )
    {
        // the file is in fact a streaming source, treat it as a camera to prevent seeking
        m_isFile = false;
        // but it's not handled exactly like a camera.
        m_isStreaming = true;
        // for streaming it is important to do non-blocking reads
        m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
    }

    if (m_isImage)
    {
        // the file is to be treated as an image, i.e. load the first frame only
        m_isFile = false;
        // in case of reload, the filename is taken from m_imageName, no need to change it
        if (m_imageName.Ptr() != filename)
            m_imageName = filename;
        m_preseek = 0;
        m_avail = false;
        play();
    }
    // check if we should do multi-threading
    if (!m_isImage && BLI_system_thread_count() > 1)
    {
        // never thread an image: there are no frames to read ahead
        // no need to thread if the system has a single core
        m_isThreaded = true;
    }
}


// open video capture device
void VideoFFmpeg::openCam (char * file, short camIdx)
{
    // open camera source
    AVInputFormat       *inputFormat;
    AVDictionary        *formatParams = NULL;
    char                filename[28], rateStr[20];
    char                *p;

    do_init_ffmpeg();

#ifdef WIN32
    // video capture on windows is only possible through the Video For Windows driver
    inputFormat = av_find_input_format("vfwcap");
    if (!inputFormat)
        // Video For Windows not supported??
        return;
    sprintf(filename, "%d", camIdx);
#else
    // On Linux we support two types of devices: VideoForLinux and DV1394.
    // The user specifies it with the filename:
    // [<device_type>][:<standard>]
    // <device_type> : 'v4l' for VideoForLinux, 'dv1394' for DV1394. By default 'v4l'
    // <standard>    : 'pal', 'secam' or 'ntsc'. By default 'ntsc'
    // The driver name is constructed automatically from the device type:
    // v4l   : /dev/video<camIdx>
    // dv1394: /dev/dv1394/<camIdx>
    // If you have a different driver name, you can specify the driver name explicitly
    // instead of the device type. Examples of valid filenames:
    //    /dev/v4l/video0:pal
    //    /dev/ieee1394/1:ntsc
    //    dv1394:secam
    //    v4l:pal
    if (file && strstr(file, "1394") != NULL)
    {
        // the user specified a driver, check if it is v4l or dv1394
        inputFormat = av_find_input_format("dv1394");
        sprintf(filename, "/dev/dv1394/%d", camIdx);
    } else
    {
        const char *formats[] = {"video4linux2,v4l2", "video4linux2", "video4linux"};
        int i, formatsCount = sizeof(formats) / sizeof(char*);
        for (i = 0; i < formatsCount; i++) {
            inputFormat = av_find_input_format(formats[i]);
            if (inputFormat)
                break;
        }
        sprintf(filename, "/dev/video%d", camIdx);
    }
    if (!inputFormat)
        // these formats should be supported, check the ffmpeg build
        return;
    if (file && strncmp(file, "/dev", 4) == 0)
    {
        // the user specified a device path rather than a device type
        strncpy(filename, file, sizeof(filename));
        filename[sizeof(filename)-1] = 0;
        if ((p = strchr(filename, ':')) != 0)
            *p = 0;
    }
    if (file && (p = strchr(file, ':')) != NULL) {
        av_dict_set(&formatParams, "standard", p+1, 0);
    }
#endif
    // frame rate
    if (m_captRate <= 0.f)
        m_captRate = defFrameRate;
    sprintf(rateStr, "%f", m_captRate);

    av_dict_set(&formatParams, "framerate", rateStr, 0);

    if (m_captWidth > 0 && m_captHeight > 0) {
        char video_size[64];
        BLI_snprintf(video_size, sizeof(video_size), "%dx%d", m_captWidth, m_captHeight);
        av_dict_set(&formatParams, "video_size", video_size, 0);
    }
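    // Capture options ("standard", "framerate", "video_size") are passed to the
    // input format as string entries in an AVDictionary; avformat_open_input()
    // consumes the entries it understands and leaves any unknown ones in place.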

    if (openStream(filename, inputFormat, &formatParams) != 0)
        return;

    // for video capture it is important to do non-blocking reads
    m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
    // open base class
    VideoBase::openCam(file, camIdx);
    // check if we should do multi-threading
    if (BLI_system_thread_count() > 1)
    {
        // no need to thread if the system has a single core
        m_isThreaded = true;
    }

    av_dict_free(&formatParams);
}

// play video
bool VideoFFmpeg::play (void)
{
    try
    {
        // if the object is able to play
        if (VideoBase::play())
        {
            // set video position
            setPositions();
            // return success
            return true;
        }
    }
    CATCH_EXCP;
    return false;
}


// pause video
bool VideoFFmpeg::pause (void)
{
    try
    {
        if (VideoBase::pause())
        {
            return true;
        }
    }
    CATCH_EXCP;
    return false;
}

// stop video
bool VideoFFmpeg::stop (void)
{
    try
    {
        VideoBase::stop();
        // force restart on next play
        m_lastFrame = -1;
        return true;
    }
    CATCH_EXCP;
    return false;
}


// set video range
void VideoFFmpeg::setRange (double start, double stop)
{
    try
    {
        // set range
        if (m_isFile)
        {
            VideoBase::setRange(start, stop);
            // set range for video
            setPositions();
        }
    }
    CATCH_EXCP;
}

// set framerate
void VideoFFmpeg::setFrameRate (float rate)
{
    VideoBase::setFrameRate(rate);
}


// image calculation
// load frame from video
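// calcImage() computes the target frame number either from the caller-supplied
// timestamp ts (files, where audio actuators may drive the clock) or from the
// wall-clock time elapsed since m_startTime (cameras and streams), handles
// looping through m_repeat, and only decodes when the frame number changes.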
void VideoFFmpeg::calcImage (unsigned int texId, double ts)
{
    if (m_status == SourcePlaying)
    {
        // get actual time
        double startTime = PIL_check_seconds_timer();
        double actTime;
        // timestamps passed from audio actuators can sometimes be slightly negative
        if (m_isFile && ts >= -0.5)
        {
            // allow setting the timestamp only when not streaming
            actTime = ts;
            if (actTime * actFrameRate() < m_lastFrame)
            {
                // the user is asking to rewind, force a cache clear to make sure we will do a seek
                // note that this does not decrement m_repeat if ts didn't reach m_range[1]
                stopCache();
            }
        }
        else
        {
            if (m_lastFrame == -1 && !m_isFile)
                m_startTime = startTime;
            actTime = startTime - m_startTime;
        }
        // if the video has ended
        if (m_isFile && actTime * m_frameRate >= m_range[1])
        {
            // in any case, this resets the cache
            stopCache();
            // if repeats are set, decrease them
            if (m_repeat > 0)
                --m_repeat;
            // if the video has to be replayed
            if (m_repeat != 0)
            {
                // reset its position
                actTime -= (m_range[1] - m_range[0]) / m_frameRate;
                m_startTime += (m_range[1] - m_range[0]) / m_frameRate;
            }
            // if the video has to be stopped, stop it
            else
            {
                m_status = SourceStopped;
                return;
            }
        }
        // actual frame
        long actFrame = (m_isImage) ? m_lastFrame + 1 : long(actTime * actFrameRate());
        // if the actual frame differs from the last frame
        if (actFrame != m_lastFrame)
        {
            AVFrame* frame;
            // get image
            if ((frame = grabFrame(actFrame)) != NULL)
            {
                if (!m_isFile && !m_cacheStarted)
                {
                    // streaming without cache: detect synchronization problems
                    double execTime = PIL_check_seconds_timer() - startTime;
                    if (execTime > 0.005)
                    {
                        // exec time is too long, it means that the function was blocking
                        // resynchronize the stream from this time
                        m_startTime += execTime;
                    }
                }
                // save actual frame
                m_lastFrame = actFrame;
                // init image, if needed
                init(short(m_codecCtx->width), short(m_codecCtx->height));
                // process image
                process((BYTE*)(frame->data[0]));
                // finished with the frame, release it so that the cache can reuse it
                releaseFrame(frame);
                // in case it is an image, automatically stop reading it
                if (m_isImage)
                {
                    m_status = SourceStopped;
                    // close the file as we don't need it anymore
                    release();
                }
            } else if (m_isStreaming)
            {
                // we didn't get a frame and we are streaming, this may be due to
                // a delay in the network or because we are getting the frames too fast.
                // In the latter case, shift time by a small amount to compensate for the drift
                m_startTime += 0.001;
            }
        }
    }
}


// set actual position
void VideoFFmpeg::setPositions (void)
{
    // set video start time
    m_startTime = PIL_check_seconds_timer();
    // if the file is playing and the actual position is before the end position
    if (!m_eof && m_lastFrame >= 0 && (!m_isFile || m_lastFrame < m_range[1] * actFrameRate()))
        // continue from the actual position
        m_startTime -= double(m_lastFrame) / actFrameRate();
    else {
        m_startTime -= m_range[0];
        // start from the beginning, stop cache just in case
        stopCache();
    }
}

// position the pointer in the file; position is a frame index
AVFrame *VideoFFmpeg::grabFrame(long position)
{
    AVPacket packet;
    int frameFinished;
    int posFound = 1;
    bool frameLoaded = false;
    int64_t targetTs = 0;
    CacheFrame *frame;
    int64_t dts = 0;

    if (m_cacheStarted)
    {
        // when the cache is active, we must not read the file directly
        do {
            pthread_mutex_lock(&m_cacheMutex);
            frame = (CacheFrame *)m_frameCacheBase.first;
            pthread_mutex_unlock(&m_cacheMutex);
            // no need to remove the frame from the queue: the cache thread does not touch the head, only the tail
            if (frame == NULL)
            {
                // no frame in cache; for a file this is an abnormal situation
                if (m_isFile)
                {
                    // fall back to non-threaded reading
                    stopCache();
                    break;
                }
                return NULL;
            }
            if (frame->framePosition == -1)
            {
                // this frame marks the end of the file (only used for files)
                // leave it in the cache to make sure we don't miss it
                m_eof = true;
                return NULL;
            }
            // for streaming, always return the next frame,
            // that's what grabFrame does in non-cache mode anyway.
            if (m_isStreaming || frame->framePosition == position)
            {
                return frame->frame;
            }
            // for a camera, skip old frames to keep the image realtime.
            // There should be no risk of clock drift since it all happens on the same CPU
            if (frame->framePosition > position)
            {
                // this can happen after a rewind if the seek didn't find the first frame
                // the frame in the buffer is ahead of time, just leave it there
                return NULL;
            }
            // this frame is not useful, release it
            pthread_mutex_lock(&m_cacheMutex);
            BLI_remlink(&m_frameCacheBase, frame);
            BLI_addtail(&m_frameCacheFree, frame);
            pthread_mutex_unlock(&m_cacheMutex);
        } while (true);
    }
    double timeBase = av_q2d(m_formatCtx->streams[m_videoStream]->time_base);
    int64_t startTs = m_formatCtx->streams[m_videoStream]->start_time;
    if (startTs == AV_NOPTS_VALUE)
        startTs = 0;

    // come here when there is no cache or the cache has been stopped
    // locate the frame, by seeking if necessary (seeking is only possible for files)
    if (m_isFile)
    {
        // first check if the position we are looking for is in the preseek range
        // if so, just read frames until we get there
        if (position > m_curPosition + 1
            && m_preseek
            && position - (m_curPosition + 1) < m_preseek)
        {
            while (av_read_frame(m_formatCtx, &packet) >= 0)
            {
                if (packet.stream_index == m_videoStream)
                {
                    avcodec_decode_video2(
                        m_codecCtx,
                        m_frame, &frameFinished,
                        &packet);
                    if (frameFinished)
                    {
                        m_curPosition = (long)((packet.dts - startTs) * (m_baseFrameRate * timeBase) + 0.5);
                    }
                }
                av_free_packet(&packet);
                if (position == m_curPosition + 1)
                    break;
            }
        }
        // if the position is not in the preseek range, do a direct jump
        if (position != m_curPosition + 1)
        {
            int64_t pos = (int64_t)((position - m_preseek) / (m_baseFrameRate * timeBase));

            if (pos < 0)
                pos = 0;

            pos += startTs;

            if (position <= m_curPosition || !m_eof)
            {
#if 0
                // Tried to make this work but couldn't: seeking on bytes is ignored by the
                // format plugin and it will generally continue to read from the last timestamp.
                // Too bad, because frame seek is not always able to get the first frame
                // of the file.
                if (position <= m_preseek)
                {
                    // we can safely go to the beginning of the file
                    if (av_seek_frame(m_formatCtx, m_videoStream, 0, AVSEEK_FLAG_BYTE) >= 0)
                    {
                        // binary seek does not reset the timestamp, must do it now
                        av_update_cur_dts(m_formatCtx, m_formatCtx->streams[m_videoStream], startTs);
                        m_curPosition = 0;
                    }
                }
                else
#endif
                {
                    if (av_seek_frame(m_formatCtx, m_videoStream, pos, AVSEEK_FLAG_BACKWARD) >= 0)
                    {
                        // the current position is now lost, guess a value.
                        // It's not important because it will be set at the end of this function
                        m_curPosition = position - m_preseek - 1;
                    }
                }
            }
            // this is the timestamp of the frame we're looking for
            targetTs = (int64_t)(position / (m_baseFrameRate * timeBase)) + startTs;

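            // targetTs inverts the dts-to-frame mapping used above
            // (frame = (dts - startTs) * fps * timeBase), giving the stream
            // timestamp of the wanted frame. The decoder buffers must be
            // flushed after a seek so that stale reference frames are dropped.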
            posFound = 0;
            avcodec_flush_buffers(m_codecCtx);
        }
    } else if (m_isThreaded)
    {
        // the cache is not started but threading is possible
        // better not to read the stream here => it may take some time, better to start caching
        if (startCache())
            return NULL;
        // Abnormal!!! could not start the cache, fall back on direct read
        m_isThreaded = false;
    }

    // find the correct frame; in case of streaming and no cache, this means just
    // returning the next frame. This is not quite correct, may need more work
    while (av_read_frame(m_formatCtx, &packet) >= 0)
    {
        if (packet.stream_index == m_videoStream)
        {
            avcodec_decode_video2(m_codecCtx,
                m_frame, &frameFinished,
                &packet);
            // remember dts to compute the exact frame number
            dts = packet.dts;
            if (frameFinished && !posFound)
            {
                if (dts >= targetTs)
                {
                    posFound = 1;
                }
            }

            if (frameFinished && posFound == 1)
            {
                AVFrame *input = m_frame;

                /* This means the data wasn't read properly,
                   this check prevents a crash */
                if (   input->data[0] == 0 && input->data[1] == 0
                    && input->data[2] == 0 && input->data[3] == 0)
                {
                    av_free_packet(&packet);
                    break;
                }

                if (m_deinterlace)
                {
                    if (avpicture_deinterlace(
                        (AVPicture*) m_frameDeinterlaced,
                        (const AVPicture*) m_frame,
                        m_codecCtx->pix_fmt,
                        m_codecCtx->width,
                        m_codecCtx->height) >= 0)
                    {
                        input = m_frameDeinterlaced;
                    }
                }
                // convert to RGB24
                sws_scale(m_imgConvertCtx,
                    input->data,
                    input->linesize,
                    0,
                    m_codecCtx->height,
                    m_frameRGB->data,
                    m_frameRGB->linesize);
                av_free_packet(&packet);
                frameLoaded = true;
                break;
            }
        }
        av_free_packet(&packet);
    }
    m_eof = m_isFile && !frameLoaded;
    if (frameLoaded)
    {
        m_curPosition = (long)((dts - startTs) * (m_baseFrameRate * timeBase) + 0.5);
        if (m_isThreaded)
        {
            // normal case for a file: first locate, then start the cache
            if (!startCache())
            {
                // Abnormal!! could not start the cache, return to non-cache mode
                m_isThreaded = false;
            }
        }
        return m_frameRGB;
    }
    return NULL;
}


// python methods


// cast Image pointer to VideoFFmpeg
inline VideoFFmpeg * getVideoFFmpeg (PyImage * self)
{ return static_cast<VideoFFmpeg*>(self->m_image); }

// object initialization
static int VideoFFmpeg_init (PyObject * pySelf, PyObject * args, PyObject * kwds)
{
    PyImage * self = reinterpret_cast<PyImage*>(pySelf);
    // parameters - video source
    // file name or format type for capture (only for Linux: video4linux or dv1394)
    char * file = NULL;
    // capture device number
    short capt = -1;
    // capture width, only if capt is >= 0
    short width = 0;
    // capture height, only if capt is >= 0
    short height = 0;
    // capture rate, only if capt is >= 0
    float rate = 25.f;

    static const char *kwlist[] = {"file", "capture", "rate", "width", "height", NULL};

    // get parameters
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|hfhh",
        const_cast<char**>(kwlist), &file, &capt, &rate, &width, &height))
        return -1;

    try
    {
        // create video object
        Video_init<VideoFFmpeg>(self);

        // set capture parameters
        getVideoFFmpeg(self)->initParams(width, height, rate);

        // open video source
        Video_open(getVideo(self), file, capt);
    }
    catch (Exception & exp)
    {
        exp.report();
        return -1;
    }
    // initialization succeeded
    return 0;
}

// get preseek
PyObject * VideoFFmpeg_getPreseek (PyImage *self, void * closure)
{
    return Py_BuildValue("h", getFFmpeg(self)->getPreseek());
}

// set preseek
int VideoFFmpeg_setPreseek (PyImage * self, PyObject * value, void * closure)
{
    // check validity of parameter
    if (value == NULL || !PyLong_Check(value))
    {
        PyErr_SetString(PyExc_TypeError, "The value must be an integer");
        return -1;
    }
    // set preseek
    getFFmpeg(self)->setPreseek(PyLong_AsSsize_t(value));
    // success
    return 0;
}

// get deinterlace
PyObject * VideoFFmpeg_getDeinterlace (PyImage * self, void * closure)
{
    if (getFFmpeg(self)->getDeinterlace())
        Py_RETURN_TRUE;
    else
        Py_RETURN_FALSE;
}

// set deinterlace
int VideoFFmpeg_setDeinterlace (PyImage * self, PyObject * value, void * closure)
{
    // check parameter, report failure
    if (value == NULL || !PyBool_Check(value))
    {
        PyErr_SetString(PyExc_TypeError, "The value must be a bool");
        return -1;
    }
    // set deinterlace
    getFFmpeg(self)->setDeinterlace(value == Py_True);
    // success
    return 0;
}

// methods structure
static PyMethodDef videoMethods[] =
{ // methods from VideoBase class
    {"play", (PyCFunction)Video_play, METH_NOARGS, "Play (restart) video"},
    {"pause", (PyCFunction)Video_pause, METH_NOARGS, "pause video"},
    {"stop", (PyCFunction)Video_stop, METH_NOARGS, "stop video (play will replay it from start)"},
    {"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh video - get its status"},
    {NULL}
};
// attributes structure
static PyGetSetDef videoGetSets[] =
{ // methods from VideoBase class
    {(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
    {(char*)"range", (getter)Video_getRange, (setter)Video_setRange, (char*)"replay range", NULL},
    {(char*)"repeat", (getter)Video_getRepeat, (setter)Video_setRepeat, (char*)"repeat count, -1 for infinite repeat", NULL},
    {(char*)"framerate", (getter)Video_getFrameRate, (setter)Video_setFrameRate, (char*)"frame rate", NULL},
    // attributes from ImageBase class
    {(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
    {(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
    {(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
    {(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbor)", NULL},
    {(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
    {(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
    {(char*)"preseek", (getter)VideoFFmpeg_getPreseek, (setter)VideoFFmpeg_setPreseek, (char*)"number of frames of preseek", NULL},
    {(char*)"deinterlace", (getter)VideoFFmpeg_getDeinterlace, (setter)VideoFFmpeg_setDeinterlace, (char*)"deinterlace image", NULL},
    {NULL}
};

// python type declaration
PyTypeObject VideoFFmpegType =
{
    PyVarObject_HEAD_INIT(NULL, 0)
    "VideoTexture.VideoFFmpeg",   /*tp_name*/
    sizeof(PyImage),           /*tp_basicsize*/
    0,                         /*tp_itemsize*/
    (destructor)Image_dealloc, /*tp_dealloc*/
    0,                         /*tp_print*/
    0,                         /*tp_getattr*/
    0,                         /*tp_setattr*/
    0,                         /*tp_compare*/
    0,                         /*tp_repr*/
    0,                         /*tp_as_number*/
    0,                         /*tp_as_sequence*/
    0,                         /*tp_as_mapping*/
    0,                         /*tp_hash*/
    0,                         /*tp_call*/
    0,                         /*tp_str*/
    0,                         /*tp_getattro*/
    0,                         /*tp_setattro*/
    &imageBufferProcs,         /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT,        /*tp_flags*/
    "FFmpeg video source",     /* tp_doc */
    0,                         /* tp_traverse */
    0,                         /* tp_clear */
    0,                         /* tp_richcompare */
    0,                         /* tp_weaklistoffset */
    0,                         /* tp_iter */
    0,                         /* tp_iternext */
    videoMethods,              /* tp_methods */
    0,                         /* tp_members */
    videoGetSets,              /* tp_getset */
    0,                         /* tp_base */
    0,                         /* tp_dict */
    0,                         /* tp_descr_get */
    0,                         /* tp_descr_set */
    0,                         /* tp_dictoffset */
    (initproc)VideoFFmpeg_init, /* tp_init */
    0,                         /* tp_alloc */
    Image_allocNew,            /* tp_new */
};
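
// A minimal usage sketch from the BGE Python side (the file name 'movie.avi'
// is a placeholder):
//
//   import VideoTexture
//   video = VideoTexture.VideoFFmpeg('movie.avi')
//   video.repeat = -1   # loop forever
//   video.play()
//   # call video.refresh() every frame to update the texture source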

// object initialization
static int ImageFFmpeg_init (PyObject * pySelf, PyObject * args, PyObject * kwds)
{
    PyImage * self = reinterpret_cast<PyImage*>(pySelf);
    // parameters - image source
    // file name of the image
    char * file = NULL;

    // get parameters
    if (!PyArg_ParseTuple(args, "s:ImageFFmpeg", &file))
        return -1;

    try
    {
        // create video object
        Video_init<VideoFFmpeg>(self);

        getVideoFFmpeg(self)->initParams(0, 0, 1.0, true);

        // open video source
        Video_open(getVideo(self), file, -1);
    }
    catch (Exception & exp)
    {
        exp.report();
        return -1;
    }
    // initialization succeeded
    return 0;
}

PyObject * Image_reload (PyImage * self, PyObject *args)
{
    char * newname = NULL;
    if (!PyArg_ParseTuple(args, "|s:reload", &newname))
        return NULL;
    if (self->m_image != NULL)
    {
        VideoFFmpeg* video = getFFmpeg(self);
        // if no new name is given, take the name of the current image
        if (!newname)
            newname = video->getImageName();
        if (!newname) {
            // if not set, report an error
            PyErr_SetString(PyExc_RuntimeError, "No image file name given");
            return NULL;
        }
        // make sure the previous file is cleared
        video->release();
        // open the new file
        video->openFile(newname);
    }
    Py_RETURN_NONE;
}

// methods structure
static PyMethodDef imageMethods[] =
{ // methods from VideoBase class
    {"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh image, i.e. load it"},
    {"reload", (PyCFunction)Image_reload, METH_VARARGS, "Reload image, i.e. reopen it"},
    {NULL}
};
// attributes structure
static PyGetSetDef imageGetSets[] =
{ // methods from VideoBase class
    {(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
    // attributes from ImageBase class
    {(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
    {(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
    {(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
    {(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbor)", NULL},
    {(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
    {(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
    {NULL}
};

// python type declaration
PyTypeObject ImageFFmpegType =
{
    PyVarObject_HEAD_INIT(NULL, 0)
    "VideoTexture.ImageFFmpeg",   /*tp_name*/
    sizeof(PyImage),           /*tp_basicsize*/
    0,                         /*tp_itemsize*/
    (destructor)Image_dealloc, /*tp_dealloc*/
    0,                         /*tp_print*/
    0,                         /*tp_getattr*/
    0,                         /*tp_setattr*/
    0,                         /*tp_compare*/
    0,                         /*tp_repr*/
    0,                         /*tp_as_number*/
    0,                         /*tp_as_sequence*/
    0,                         /*tp_as_mapping*/
    0,                         /*tp_hash*/
    0,                         /*tp_call*/
    0,                         /*tp_str*/
    0,                         /*tp_getattro*/
    0,                         /*tp_setattro*/
    &imageBufferProcs,         /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT,        /*tp_flags*/
    "FFmpeg image source",     /* tp_doc */
    0,                         /* tp_traverse */
    0,                         /* tp_clear */
    0,                         /* tp_richcompare */
    0,                         /* tp_weaklistoffset */
    0,                         /* tp_iter */
    0,                         /* tp_iternext */
    imageMethods,              /* tp_methods */
    0,                         /* tp_members */
    imageGetSets,              /* tp_getset */
    0,                         /* tp_base */
    0,                         /* tp_dict */
    0,                         /* tp_descr_get */
    0,                         /* tp_descr_set */
    0,                         /* tp_dictoffset */
    (initproc)ImageFFmpeg_init, /* tp_init */
    0,                         /* tp_alloc */
    Image_allocNew,            /* tp_new */
};
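
// A minimal usage sketch from the BGE Python side (the file names are
// placeholders):
//
//   import VideoTexture
//   image = VideoTexture.ImageFFmpeg('logo.png')
//   image.refresh()            # load the image
//   image.reload('other.png')  # reopen with a different file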

#endif  // WITH_FFMPEG