/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (c) 2007 The Zdeno Ash Miklas
 *
 * This source file is part of VideoTexture library
 *
 * Contributor(s):
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file gameengine/VideoTexture/VideoFFmpeg.cpp
 *  \ingroup bgevideotex
 */


#ifdef WITH_FFMPEG

// INT64_C fix for some linux machines (C99ism)
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#include <stdint.h>


#include "MEM_guardedalloc.h"
#include "PIL_time.h"

#include <string>

#include "VideoFFmpeg.h"
#include "Exception.h"


// default framerate
const double defFrameRate = 25.0;
// time scale constant
const long timeScale = 1000;

// macro for exception handling and logging
#define CATCH_EXCP catch (Exception & exp) \
{ exp.report(); m_status = SourceError; }
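// CATCH_EXCP is used as: try { ... } CATCH_EXCP;
// it reports the exception and flags the source as being in error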

// class RenderVideo

// constructor
VideoFFmpeg::VideoFFmpeg (HRESULT * hRslt) : VideoBase(),
m_codec(NULL), m_formatCtx(NULL), m_codecCtx(NULL),
m_frame(NULL), m_frameDeinterlaced(NULL), m_frameRGB(NULL), m_imgConvertCtx(NULL),
m_deinterlace(false), m_preseek(0), m_videoStream(-1), m_baseFrameRate(25.0),
m_lastFrame(-1), m_eof(false), m_externTime(false), m_curPosition(-1), m_startTime(0),
m_captWidth(0), m_captHeight(0), m_captRate(0.f), m_isImage(false),
m_isThreaded(false), m_isStreaming(false), m_stopThread(false), m_cacheStarted(false)
{
    // set video format
    m_format = RGB24;
    // force flip because ffmpeg always returns the image upside down for use as a texture
    setFlip(true);
    // construction is OK
    *hRslt = S_OK;
    m_thread.first = m_thread.last = NULL;
    pthread_mutex_init(&m_cacheMutex, NULL);
    m_frameCacheFree.first = m_frameCacheFree.last = NULL;
    m_frameCacheBase.first = m_frameCacheBase.last = NULL;
    m_packetCacheFree.first = m_packetCacheFree.last = NULL;
    m_packetCacheBase.first = m_packetCacheBase.last = NULL;
}

// destructor
VideoFFmpeg::~VideoFFmpeg ()
{
}


// release components
bool VideoFFmpeg::release()
{
    // release
    stopCache();
    if (m_codecCtx)
    {
        avcodec_close(m_codecCtx);
        m_codecCtx = NULL;
    }
    if (m_formatCtx)
    {
        av_close_input_file(m_formatCtx);
        m_formatCtx = NULL;
    }
    if (m_frame)
    {
        av_free(m_frame);
        m_frame = NULL;
    }
    if (m_frameDeinterlaced)
    {
        MEM_freeN(m_frameDeinterlaced->data[0]);
        av_free(m_frameDeinterlaced);
        m_frameDeinterlaced = NULL;
    }
    if (m_frameRGB)
    {
        MEM_freeN(m_frameRGB->data[0]);
        av_free(m_frameRGB);
        m_frameRGB = NULL;
    }
    if (m_imgConvertCtx)
    {
        sws_freeContext(m_imgConvertCtx);
        m_imgConvertCtx = NULL;
    }
    m_codec = NULL;
    m_status = SourceStopped;
    m_lastFrame = -1;
    return true;
}

AVFrame *VideoFFmpeg::allocFrameRGB()
{
    AVFrame *frame;
    frame = avcodec_alloc_frame();
    if (m_format == RGBA32)
    {
        avpicture_fill((AVPicture*)frame,
            (uint8_t*)MEM_callocN(avpicture_get_size(
                PIX_FMT_RGBA,
                m_codecCtx->width, m_codecCtx->height),
                "ffmpeg rgba"),
            PIX_FMT_RGBA, m_codecCtx->width, m_codecCtx->height);
    } else
    {
        avpicture_fill((AVPicture*)frame,
            (uint8_t*)MEM_callocN(avpicture_get_size(
                PIX_FMT_RGB24,
                m_codecCtx->width, m_codecCtx->height),
                "ffmpeg rgb"),
            PIX_FMT_RGB24, m_codecCtx->width, m_codecCtx->height);
    }
    return frame;
}

// set initial parameters
void VideoFFmpeg::initParams (short width, short height, float rate, bool image)
{
    m_captWidth = width;
    m_captHeight = height;
    m_captRate = rate;
    m_isImage = image;
}


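// open a video stream from a file or a capture device;
// returns 0 on success, -1 on any failure (the stream is then left closed)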
int VideoFFmpeg::openStream(const char *filename, AVInputFormat *inputFormat, AVDictionary **formatParams)
{
    AVFormatContext *formatCtx = NULL;
    int             i, videoStream;
    AVCodec         *codec;
    AVCodecContext  *codecCtx;

    if (avformat_open_input(&formatCtx, filename, inputFormat, formatParams)!=0)
        return -1;

    if (avformat_find_stream_info(formatCtx, NULL) < 0)
    {
        av_close_input_file(formatCtx);
        return -1;
    }

    /* Find the first video stream */
    videoStream=-1;
    for (i=0; i<formatCtx->nb_streams; i++)
    {
        if (formatCtx->streams[i] &&
            get_codec_from_stream(formatCtx->streams[i]) &&
            (get_codec_from_stream(formatCtx->streams[i])->codec_type==AVMEDIA_TYPE_VIDEO))
        {
            videoStream=i;
            break;
        }
    }

    if (videoStream==-1)
    {
        av_close_input_file(formatCtx);
        return -1;
    }

    codecCtx = get_codec_from_stream(formatCtx->streams[videoStream]);

    /* Find the decoder for the video stream */
    codec=avcodec_find_decoder(codecCtx->codec_id);
    if (codec==NULL)
    {
        av_close_input_file(formatCtx);
        return -1;
    }
    codecCtx->workaround_bugs = 1;
    if (avcodec_open2(codecCtx, codec, NULL) < 0)
    {
        av_close_input_file(formatCtx);
        return -1;
    }

#ifdef FFMPEG_OLD_FRAME_RATE
    if (codecCtx->frame_rate>1000 && codecCtx->frame_rate_base==1)
        codecCtx->frame_rate_base=1000;
    m_baseFrameRate = (double)codecCtx->frame_rate / (double)codecCtx->frame_rate_base;
#else
    m_baseFrameRate = av_q2d(formatCtx->streams[videoStream]->r_frame_rate);
#endif
    if (m_baseFrameRate <= 0.0)
        m_baseFrameRate = defFrameRate;

    m_codec = codec;
    m_codecCtx = codecCtx;
    m_formatCtx = formatCtx;
    m_videoStream = videoStream;
    m_frame = avcodec_alloc_frame();
    m_frameDeinterlaced = avcodec_alloc_frame();

    // allocate buffer if deinterlacing is required
    avpicture_fill((AVPicture*)m_frameDeinterlaced,
        (uint8_t*)MEM_callocN(avpicture_get_size(
        m_codecCtx->pix_fmt,
        m_codecCtx->width, m_codecCtx->height),
        "ffmpeg deinterlace"),
        m_codecCtx->pix_fmt, m_codecCtx->width, m_codecCtx->height);

    // check if the pixel format supports Alpha
    if (m_codecCtx->pix_fmt == PIX_FMT_RGB32 ||
        m_codecCtx->pix_fmt == PIX_FMT_BGR32 ||
        m_codecCtx->pix_fmt == PIX_FMT_RGB32_1 ||
        m_codecCtx->pix_fmt == PIX_FMT_BGR32_1)
    {
        // allocate buffer to store final decoded frame
        m_format = RGBA32;
        // allocate sws context
        m_imgConvertCtx = sws_getContext(
            m_codecCtx->width,
            m_codecCtx->height,
            m_codecCtx->pix_fmt,
            m_codecCtx->width,
            m_codecCtx->height,
            PIX_FMT_RGBA,
            SWS_FAST_BILINEAR,
            NULL, NULL, NULL);
    } else
    {
        // allocate buffer to store final decoded frame
        m_format = RGB24;
        // allocate sws context
        m_imgConvertCtx = sws_getContext(
            m_codecCtx->width,
            m_codecCtx->height,
            m_codecCtx->pix_fmt,
            m_codecCtx->width,
            m_codecCtx->height,
            PIX_FMT_RGB24,
            SWS_FAST_BILINEAR,
            NULL, NULL, NULL);
    }
    m_frameRGB = allocFrameRGB();

    if (!m_imgConvertCtx) {
        avcodec_close(m_codecCtx);
        m_codecCtx = NULL;
        av_close_input_file(m_formatCtx);
        m_formatCtx = NULL;
        av_free(m_frame);
        m_frame = NULL;
        MEM_freeN(m_frameDeinterlaced->data[0]);
        av_free(m_frameDeinterlaced);
        m_frameDeinterlaced = NULL;
        MEM_freeN(m_frameRGB->data[0]);
        av_free(m_frameRGB);
        m_frameRGB = NULL;
        return -1;
    }
    return 0;
}

/*
 * This thread loads video frames asynchronously.
 * It provides a frame caching service.
 * The main thread is responsible for positioning the frame pointer in the
 * file correctly before calling startCache(), which starts this thread.
 * The cache is organized in two layers: 1) a cache of 20-30 undecoded packets
 * to keep memory and CPU usage low, 2) a cache of 5 decoded frames.
 * If the main thread does not find the frame in the cache (because the video
 * has restarted or because the GE is lagging), it stops the cache with
 * stopCache() (a synchronous function: it signals the cache thread to stop
 * and waits for confirmation), then changes the position in the stream and
 * restarts the cache thread.
 */
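// Sketch of the main-thread side of that protocol (illustrative, not literal
// code; see grabFrame() for the actual implementation):
//
//   if (wanted frame is not at the head of m_frameCacheBase) {
//       stopCache();          // synchronous: stops the thread, frees both caches
//       av_seek_frame(...);   // reposition the stream on the wanted frame
//       startCache();         // restart the thread from the new position
//   }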
void *VideoFFmpeg::cacheThread(void *data)
{
    VideoFFmpeg* video = (VideoFFmpeg*)data;
    // holds the frame that is being decoded
    CacheFrame *currentFrame = NULL;
    CachePacket *cachePacket;
    bool endOfFile = false;
    int frameFinished = 0;
    double timeBase = av_q2d(video->m_formatCtx->streams[video->m_videoStream]->time_base);
    int64_t startTs = video->m_formatCtx->streams[video->m_videoStream]->start_time;

    if (startTs == AV_NOPTS_VALUE)
        startTs = 0;

    while (!video->m_stopThread)
    {
        // packet cache is used solely by this thread, no need to lock
        // In case the stream/file contains streams other than the one we are looking for,
        // allow a bit of cycling to quickly get rid of those packets
        frameFinished = 0;
        while (    !endOfFile
                && (cachePacket = (CachePacket *)video->m_packetCacheFree.first) != NULL
                && frameFinished < 25)
        {
            // free packet => packet cache is not full yet, just read more
            if (av_read_frame(video->m_formatCtx, &cachePacket->packet)>=0)
            {
                if (cachePacket->packet.stream_index == video->m_videoStream)
                {
                    // make sure fresh memory is allocated for the packet and move it to the queue
                    av_dup_packet(&cachePacket->packet);
                    BLI_remlink(&video->m_packetCacheFree, cachePacket);
                    BLI_addtail(&video->m_packetCacheBase, cachePacket);
                    break;
                } else {
                    // this is not a good packet for us, just leave it in the free queue
                    // Note: here we could handle sound packets
                    av_free_packet(&cachePacket->packet);
                    frameFinished++;
                }

            } else {
                if (video->m_isFile)
                    // this marks the end of the file
                    endOfFile = true;
                // if we cannot read a packet, no need to continue
                break;
            }
        }
        // frame cache is also used by the main thread, lock
        if (currentFrame == NULL)
        {
            // no current frame being decoded, take a free one
            pthread_mutex_lock(&video->m_cacheMutex);
            if ((currentFrame = (CacheFrame *)video->m_frameCacheFree.first) != NULL)
                BLI_remlink(&video->m_frameCacheFree, currentFrame);
            pthread_mutex_unlock(&video->m_cacheMutex);
        }
        if (currentFrame != NULL)
        {
            // this frame is out of the free and busy queues, we can manipulate it without locking
            frameFinished = 0;
            while (!frameFinished && (cachePacket = (CachePacket *)video->m_packetCacheBase.first) != NULL)
            {
                BLI_remlink(&video->m_packetCacheBase, cachePacket);
                // use m_frame because while caching, it is not used in the main thread
                // we can't use currentFrame directly because we need to convert to RGB first
                avcodec_decode_video2(video->m_codecCtx,
                    video->m_frame, &frameFinished,
                    &cachePacket->packet);
                if (frameFinished)
                {
                    AVFrame * input = video->m_frame;

                    /* If all data pointers are NULL, the frame wasn't read
                     * properly; this check prevents a crash */
                    if (   input->data[0]!=0 || input->data[1]!=0
                        || input->data[2]!=0 || input->data[3]!=0)
                    {
                        if (video->m_deinterlace)
                        {
                            if (avpicture_deinterlace(
                                (AVPicture*) video->m_frameDeinterlaced,
                                (const AVPicture*) video->m_frame,
                                video->m_codecCtx->pix_fmt,
                                video->m_codecCtx->width,
                                video->m_codecCtx->height) >= 0)
                            {
                                input = video->m_frameDeinterlaced;
                            }
                        }
                        // convert to RGB24
                        sws_scale(video->m_imgConvertCtx,
                            input->data,
                            input->linesize,
                            0,
                            video->m_codecCtx->height,
                            currentFrame->frame->data,
                            currentFrame->frame->linesize);
                        // move the frame to the queue, this frame is necessarily the next one
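                        // Convert the packet timestamp to a frame index:
                        // (dts - startTs) is in stream time_base units; multiplying
                        // by timeBase gives seconds and by the base frame rate gives
                        // frames; +0.5 rounds to the nearest frame number.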
                        video->m_curPosition = (long)((cachePacket->packet.dts-startTs) * (video->m_baseFrameRate*timeBase) + 0.5);
                        currentFrame->framePosition = video->m_curPosition;
                        pthread_mutex_lock(&video->m_cacheMutex);
                        BLI_addtail(&video->m_frameCacheBase, currentFrame);
                        pthread_mutex_unlock(&video->m_cacheMutex);
                        currentFrame = NULL;
                    }
                }
                av_free_packet(&cachePacket->packet);
                BLI_addtail(&video->m_packetCacheFree, cachePacket);
            }
            if (currentFrame && endOfFile)
            {
                // no more packets and end of file => queue a special frame that indicates EOF
                currentFrame->framePosition = -1;
                pthread_mutex_lock(&video->m_cacheMutex);
                BLI_addtail(&video->m_frameCacheBase, currentFrame);
                pthread_mutex_unlock(&video->m_cacheMutex);
                currentFrame = NULL;
                // no need to stay any longer in this thread
                break;
            }
        }
        // small sleep to avoid unnecessary looping
        PIL_sleep_ms(10);
    }
    // before quitting, put the current frame back in the queue to allow freeing
    if (currentFrame)
    {
        pthread_mutex_lock(&video->m_cacheMutex);
        BLI_addtail(&video->m_frameCacheFree, currentFrame);
        pthread_mutex_unlock(&video->m_cacheMutex);
    }
    return 0;
}

// start a thread to cache video frames from a file/capture/stream
// this function should be called only when the position in the stream is set for the
// first frame to cache
bool VideoFFmpeg::startCache()
{
    if (!m_cacheStarted && m_isThreaded)
    {
        m_stopThread = false;
        for (int i=0; i<CACHE_FRAME_SIZE; i++)
        {
            CacheFrame *frame = new CacheFrame();
            frame->frame = allocFrameRGB();
            BLI_addtail(&m_frameCacheFree, frame);
        }
        for (int i=0; i<CACHE_PACKET_SIZE; i++)
        {
            CachePacket *packet = new CachePacket();
            BLI_addtail(&m_packetCacheFree, packet);
        }
        BLI_init_threads(&m_thread, cacheThread, 1);
        BLI_insert_thread(&m_thread, this);
        m_cacheStarted = true;
    }
    return m_cacheStarted;
}

void VideoFFmpeg::stopCache()
{
    if (m_cacheStarted)
    {
        m_stopThread = true;
        BLI_end_threads(&m_thread);
        // now delete the cache
        CacheFrame *frame;
        CachePacket *packet;
        while ((frame = (CacheFrame *)m_frameCacheBase.first) != NULL)
        {
            BLI_remlink(&m_frameCacheBase, frame);
            MEM_freeN(frame->frame->data[0]);
            av_free(frame->frame);
            delete frame;
        }
        while ((frame = (CacheFrame *)m_frameCacheFree.first) != NULL)
        {
            BLI_remlink(&m_frameCacheFree, frame);
            MEM_freeN(frame->frame->data[0]);
            av_free(frame->frame);
            delete frame;
        }
        while ((packet = (CachePacket *)m_packetCacheBase.first) != NULL)
        {
            BLI_remlink(&m_packetCacheBase, packet);
            av_free_packet(&packet->packet);
            delete packet;
        }
        while ((packet = (CachePacket *)m_packetCacheFree.first) != NULL)
        {
            BLI_remlink(&m_packetCacheFree, packet);
            delete packet;
        }
        m_cacheStarted = false;
    }
}

void VideoFFmpeg::releaseFrame(AVFrame* frame)
{
    if (frame == m_frameRGB)
    {
        // this is not a frame from the cache, ignore
        return;
    }
    // this frame MUST be the first one in the queue
    pthread_mutex_lock(&m_cacheMutex);
    CacheFrame *cacheFrame = (CacheFrame *)m_frameCacheBase.first;
    assert (cacheFrame != NULL && cacheFrame->frame == frame);
    BLI_remlink(&m_frameCacheBase, cacheFrame);
    BLI_addtail(&m_frameCacheFree, cacheFrame);
    pthread_mutex_unlock(&m_cacheMutex);
}

// open video file
void VideoFFmpeg::openFile (char * filename)
{
    if (openStream(filename, NULL, NULL) != 0)
        return;

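    // Estimate how many frames may need to be decoded and discarded after a
    // seek ("preseek"): seeks land on a keyframe, so up to about one GOP of
    // frames can separate the seek point from the exact target frame.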
    if (m_codecCtx->gop_size)
        m_preseek = (m_codecCtx->gop_size < 25) ? m_codecCtx->gop_size+1 : 25;
    else if (m_codecCtx->has_b_frames)
        m_preseek = 25; // should determine the GOP size
    else
        m_preseek = 0;

    // get video time range
    m_range[0] = 0.0;
    m_range[1] = (double)m_formatCtx->duration / AV_TIME_BASE;

    // open base class
    VideoBase::openFile(filename);

    if (
        // ffmpeg reports that http sources are not streams,
        // but seeking on an http file is really not desirable, so force streaming.
        // It would be good to get this information from the context, but there is no simple indication
        !strncmp(filename, "http://", 7) ||
        (m_formatCtx->pb && !m_formatCtx->pb->seekable)
        )
    {
        // the file is in fact a streaming source, treat it as a cam to prevent seeking
        m_isFile = false;
        // but it's not handled exactly like a camera.
        m_isStreaming = true;
        // for streaming it is important to do non-blocking reads
        m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
    }

    if (m_isImage)
    {
        // the file is to be treated as an image, i.e. load the first frame only
        m_isFile = false;
        // in case of reload, the filename is taken from m_imageName, no need to change it
        if (m_imageName.Ptr() != filename)
            m_imageName = filename;
        m_preseek = 0;
        m_avail = false;
        play();
    }
    // check if we should do multi-threading
    if (!m_isImage && BLI_system_thread_count() > 1)
    {
        // never thread an image: there are no frames to read ahead
        // no need to thread if the system has a single core
        m_isThreaded = true;
    }
}


// open video capture device
void VideoFFmpeg::openCam (char * file, short camIdx)
{
    // open camera source
    AVInputFormat   *inputFormat;
    AVDictionary    *formatParams = NULL;
    char            filename[28], rateStr[20];
    char            *p;

#ifdef WIN32
    // video capture on Windows goes only through the Video For Windows driver
    inputFormat = av_find_input_format("vfwcap");
    if (!inputFormat)
        // Video For Windows not supported?
        return;
    sprintf(filename, "%d", camIdx);
#else
    // In Linux we support two types of devices: VideoForLinux and DV1394.
    // the user specifies it with the filename:
    // [<device_type>][:<standard>]
    // <device_type> : 'v4l' for VideoForLinux, 'dv1394' for DV1394. By default 'v4l'
    // <standard>    : 'pal', 'secam' or 'ntsc'. By default 'ntsc'
    // The driver name is constructed automatically from the device type:
    // v4l   : /dev/video<camIdx>
    // dv1394: /dev/dv1394/<camIdx>
    // If you have a different driver name, you can specify it explicitly
    // instead of the device type. Examples of valid filenames:
    //    /dev/v4l/video0:pal
    //    /dev/ieee1394/1:ntsc
    //    dv1394:secam
    //    v4l:pal
    if (file && strstr(file, "1394") != NULL)
    {
        // the user specified a driver, check if it is v4l or dv1394
        inputFormat = av_find_input_format("dv1394");
        sprintf(filename, "/dev/dv1394/%d", camIdx);
    } else
    {
        const char *formats[] = {"video4linux2,v4l2", "video4linux2", "video4linux"};
        int i, formatsCount = sizeof(formats) / sizeof(char*);
        for (i = 0; i < formatsCount; i++) {
            inputFormat = av_find_input_format(formats[i]);
            if (inputFormat)
                break;
        }
        sprintf(filename, "/dev/video%d", camIdx);
    }
    if (!inputFormat)
        // these formats should be supported, check the ffmpeg build
        return;
    if (file && strncmp(file, "/dev", 4) == 0)
    {
        // the user specified an explicit device path, use it as the driver name
        strncpy(filename, file, sizeof(filename));
        filename[sizeof(filename)-1] = 0;
        if ((p = strchr(filename, ':')) != 0)
            *p = 0;
    }
    if (file && (p = strchr(file, ':')) != NULL) {
        av_dict_set(&formatParams, "standard", p+1, 0);
    }
#endif
    //frame rate
    if (m_captRate <= 0.f)
        m_captRate = defFrameRate;
    sprintf(rateStr, "%f", m_captRate);

    av_dict_set(&formatParams, "framerate", rateStr, 0);

    if (m_captWidth > 0 && m_captHeight > 0) {
        char video_size[64];
        BLI_snprintf(video_size, sizeof(video_size), "%dx%d", m_captWidth, m_captHeight);
        av_dict_set(&formatParams, "video_size", video_size, 0);
    }

    if (openStream(filename, inputFormat, &formatParams) != 0)
        return;

    // for video capture it is important to do non-blocking reads
    m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
    // open base class
    VideoBase::openCam(file, camIdx);
    // check if we should do multi-threading
    if (BLI_system_thread_count() > 1)
    {
        // no need to thread if the system has a single core
        m_isThreaded = true;
    }

    av_dict_free(&formatParams);
}

// play video
bool VideoFFmpeg::play (void)
{
    try
    {
        // if object is able to play
        if (VideoBase::play())
        {
            // set video position
            setPositions();
            // return success
            return true;
        }
    }
    CATCH_EXCP;
    return false;
}


// pause video
bool VideoFFmpeg::pause (void)
{
    try
    {
        if (VideoBase::pause())
        {
            return true;
        }
    }
    CATCH_EXCP;
    return false;
}

// stop video
bool VideoFFmpeg::stop (void)
{
    try
    {
        VideoBase::stop();
        // force restart when play
        m_lastFrame = -1;
        return true;
    }
    CATCH_EXCP;
    return false;
}


// set video range
void VideoFFmpeg::setRange (double start, double stop)
{
    try
    {
        // set range
        if (m_isFile)
        {
            VideoBase::setRange(start, stop);
            // set range for video
            setPositions();
        }
    }
    CATCH_EXCP;
}

// set framerate
void VideoFFmpeg::setFrameRate (float rate)
{
    VideoBase::setFrameRate(rate);
}


// image calculation
// load frame from video
void VideoFFmpeg::calcImage (unsigned int texId, double ts)
{
    if (m_status == SourcePlaying)
    {
        // get actual time
        double startTime = PIL_check_seconds_timer();
        double actTime;
        // timestamps passed from audio actuators can sometimes be slightly negative
        if (m_isFile && ts >= -0.5)
        {
            // allow setting the timestamp only when not streaming
            actTime = ts;
            if (actTime * actFrameRate() < m_lastFrame)
            {
                // user is asking to rewind, force a cache clear to make sure we will do a seek
                // note that this does not decrement m_repeat if ts didn't reach m_range[1]
                stopCache();
            }
        }
        else
        {
            if (m_lastFrame == -1 && !m_isFile)
                m_startTime = startTime;
            actTime = startTime - m_startTime;
        }
        // if video has ended
        if (m_isFile && actTime * m_frameRate >= m_range[1])
        {
            // in any case, this resets the cache
            stopCache();
            // if repeats are set, decrease them
            if (m_repeat > 0)
                --m_repeat;
            // if video has to be replayed
            if (m_repeat != 0)
            {
                // reset its position
                actTime -= (m_range[1] - m_range[0]) / m_frameRate;
                m_startTime += (m_range[1] - m_range[0]) / m_frameRate;
            }
            // if video has to be stopped, stop it
            else
            {
                m_status = SourceStopped;
                return;
            }
        }
        // actual frame
        long actFrame = (m_isImage) ? m_lastFrame+1 : long(actTime * actFrameRate());
        // if actual frame differs from last frame
        if (actFrame != m_lastFrame)
        {
            AVFrame* frame;
            // get image
            if ((frame = grabFrame(actFrame)) != NULL)
            {
                if (!m_isFile && !m_cacheStarted)
                {
                    // streaming without cache: detect synchronization problems
                    double execTime = PIL_check_seconds_timer() - startTime;
                    if (execTime > 0.005)
                    {
                        // exec time is too long, it means that the function was blocking
                        // resynchronize the stream from this time
                        m_startTime += execTime;
                    }
                }
                // save actual frame
                m_lastFrame = actFrame;
                // init image, if needed
                init(short(m_codecCtx->width), short(m_codecCtx->height));
                // process image
                process((BYTE*)(frame->data[0]));
                // finished with the frame, release it so that the cache can reuse it
                releaseFrame(frame);
                // in case it is an image, automatically stop reading it
                if (m_isImage)
                {
                    m_status = SourceStopped;
                    // close the file as we don't need it anymore
                    release();
                }
            } else if (m_isStreaming)
            {
                // we didn't get a frame and we are streaming, this may be due to
                // a delay in the network or because we are getting the frame too fast.
                // In the latter case, shift time by a small amount to compensate for the drift
                m_startTime += 0.001;
            }
        }
    }
}


// set actual position
void VideoFFmpeg::setPositions (void)
{
    // set video start time
    m_startTime = PIL_check_seconds_timer();
    // if a file is played and the actual position is before the end position
    if (!m_eof && m_lastFrame >= 0 && (!m_isFile || m_lastFrame < m_range[1] * actFrameRate()))
        // continue from the actual position
        m_startTime -= double(m_lastFrame) / actFrameRate();
    else {
        m_startTime -= m_range[0];
        // start from the beginning, stop the cache just in case
        stopCache();
    }
}

// position the pointer in the file, position expressed in frames
AVFrame *VideoFFmpeg::grabFrame(long position)
{
    AVPacket packet;
    int frameFinished;
    int posFound = 1;
    bool frameLoaded = false;
    int64_t targetTs = 0;
    CacheFrame *frame;
    int64_t dts = 0;

    if (m_cacheStarted)
    {
        // when the cache is active, we must not read the file directly
        do {
            pthread_mutex_lock(&m_cacheMutex);
            frame = (CacheFrame *)m_frameCacheBase.first;
            pthread_mutex_unlock(&m_cacheMutex);
            // no need to remove the frame from the queue: the cache thread does not touch the head, only the tail
            if (frame == NULL)
            {
                // no frame in cache, in case of a file this is an abnormal situation
                if (m_isFile)
                {
                    // go back to non-threaded reading
                    stopCache();
                    break;
                }
                return NULL;
            }
            if (frame->framePosition == -1)
            {
                // this frame marks the end of the file (only used for files)
                // leave it in the cache to make sure we don't miss it
                m_eof = true;
                return NULL;
            }
            // for streaming, always return the next frame,
            // that's what grabFrame does in non-cache mode anyway.
            if (m_isStreaming || frame->framePosition == position)
            {
                return frame->frame;
            }
            // for a cam, skip old frames to keep the image realtime.
            // There should be no risk of clock drift since it all happens on the same CPU
            if (frame->framePosition > position)
            {
                // this can happen after rewind if the seek didn't find the first frame
                // the frame in the buffer is ahead of time, just leave it there
                return NULL;
            }
            // this frame is not useful, release it
            pthread_mutex_lock(&m_cacheMutex);
            BLI_remlink(&m_frameCacheBase, frame);
            BLI_addtail(&m_frameCacheFree, frame);
            pthread_mutex_unlock(&m_cacheMutex);
        } while (true);
    }
    double timeBase = av_q2d(m_formatCtx->streams[m_videoStream]->time_base);
    int64_t startTs = m_formatCtx->streams[m_videoStream]->start_time;
    if (startTs == AV_NOPTS_VALUE)
        startTs = 0;

    // come here when there is no cache or the cache has been stopped
    // locate the frame, by seeking if necessary (seeking is only possible for files)
    if (m_isFile)
    {
        // first check if the position that we are looking for is in the preseek range
        // if so, just read frames until we get there
        if (position > m_curPosition + 1
            && m_preseek
            && position - (m_curPosition + 1) < m_preseek)
        {
            while (av_read_frame(m_formatCtx, &packet)>=0)
            {
                if (packet.stream_index == m_videoStream)
                {
                    avcodec_decode_video2(
                        m_codecCtx,
                        m_frame, &frameFinished,
                        &packet);
                    if (frameFinished)
                    {
                        m_curPosition = (long)((packet.dts-startTs) * (m_baseFrameRate*timeBase) + 0.5);
                    }
                }
                av_free_packet(&packet);
                if (position == m_curPosition+1)
                    break;
            }
        }
        // if the position is not in the preseek range, do a direct jump
        if (position != m_curPosition + 1)
        {
            int64_t pos = (int64_t)((position - m_preseek) / (m_baseFrameRate*timeBase));

            if (pos < 0)
                pos = 0;

            pos += startTs;

            if (position <= m_curPosition || !m_eof)
            {
#if 0
                // Tried to make this work but couldn't: seeking by byte is ignored by the
                // format plugin and it will generally continue to read from the last timestamp.
                // Too bad because frame seek is not always able to get the first frame
                // of the file.
                if (position <= m_preseek)
                {
                    // we can safely go to the beginning of the file
                    if (av_seek_frame(m_formatCtx, m_videoStream, 0, AVSEEK_FLAG_BYTE) >= 0)
                    {
                        // binary seek does not reset the timestamp, must do it now
                        av_update_cur_dts(m_formatCtx, m_formatCtx->streams[m_videoStream], startTs);
                        m_curPosition = 0;
                    }
                }
                else
#endif
                {
                    if (av_seek_frame(m_formatCtx, m_videoStream, pos, AVSEEK_FLAG_BACKWARD) >= 0)
                    {
                        // current position is now lost, guess a value.
                        // It's not important because it will be set at the end of this function
                        m_curPosition = position - m_preseek - 1;
                    }
                }
            }
            // this is the timestamp of the frame we're looking for
            targetTs = (int64_t)(position / (m_baseFrameRate * timeBase)) + startTs;

            posFound = 0;
            avcodec_flush_buffers(m_codecCtx);
        }
    } else if (m_isThreaded)
    {
        // cache is not started but threading is possible
        // better not to read the stream directly => it may take some time, better to start caching
        if (startCache())
            return NULL;
        // Abnormal!!! could not start the cache, fall back on direct read
        m_isThreaded = false;
    }

    // find the correct frame, in case of streaming and no cache, it means just
    // return the next frame. This is not quite correct, may need more work
    while (av_read_frame(m_formatCtx, &packet) >= 0)
    {
        if (packet.stream_index == m_videoStream)
        {
            avcodec_decode_video2(m_codecCtx,
                m_frame, &frameFinished,
                &packet);
            // remember dts to compute the exact frame number
            dts = packet.dts;
            if (frameFinished && !posFound)
            {
                if (dts >= targetTs)
                {
                    posFound = 1;
                }
            }

            if (frameFinished && posFound == 1)
            {
                AVFrame * input = m_frame;

                /* If all data pointers are NULL, the frame wasn't read
                 * properly; this check prevents a crash */
                if (   input->data[0]==0 && input->data[1]==0
                    && input->data[2]==0 && input->data[3]==0)
                {
                    av_free_packet(&packet);
                    break;
                }

                if (m_deinterlace)
                {
                    if (avpicture_deinterlace(
                        (AVPicture*) m_frameDeinterlaced,
                        (const AVPicture*) m_frame,
                        m_codecCtx->pix_fmt,
                        m_codecCtx->width,
                        m_codecCtx->height) >= 0)
                    {
                        input = m_frameDeinterlaced;
                    }
                }
                // convert to RGB24
                sws_scale(m_imgConvertCtx,
                    input->data,
                    input->linesize,
                    0,
                    m_codecCtx->height,
                    m_frameRGB->data,
                    m_frameRGB->linesize);
                av_free_packet(&packet);
                frameLoaded = true;
                break;
            }
        }
        av_free_packet(&packet);
    }
    m_eof = m_isFile && !frameLoaded;
    if (frameLoaded)
    {
        m_curPosition = (long)((dts-startTs) * (m_baseFrameRate*timeBase) + 0.5);
        if (m_isThreaded)
        {
            // normal case for files: first locate, then start the cache
            if (!startCache())
            {
                // Abnormal!! could not start the cache, return to non-cache mode
                m_isThreaded = false;
            }
        }
        return m_frameRGB;
    }
    return NULL;
}


// python methods


// cast Image pointer to VideoFFmpeg
inline VideoFFmpeg * getVideoFFmpeg (PyImage *self)
{ return static_cast<VideoFFmpeg*>(self->m_image); }


// object initialization
static int VideoFFmpeg_init (PyObject *pySelf, PyObject *args, PyObject *kwds)
{
    PyImage *self = reinterpret_cast<PyImage*>(pySelf);
    // parameters - video source
    // file name or format type for capture (only for Linux: video4linux or dv1394)
    char * file = NULL;
    // capture device number
    short capt = -1;
    // capture width, only if capt is >= 0
    short width = 0;
    // capture height, only if capt is >= 0
    short height = 0;
    // capture rate, only if capt is >= 0
    float rate = 25.f;

    static const char *kwlist[] = {"file", "capture", "rate", "width", "height", NULL};

    // get parameters
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|hfhh",
        const_cast<char**>(kwlist), &file, &capt, &rate, &width, &height))
        return -1;

    try
    {
        // create video object
        Video_init<VideoFFmpeg>(self);

        // set thread usage
        getVideoFFmpeg(self)->initParams(width, height, rate);

        // open video source
        Video_open(getVideo(self), file, capt);
    }
    catch (Exception & exp)
    {
        exp.report();
        return -1;
    }
    // initialization succeeded
    return 0;
}
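
/* Typical usage from a Python game controller (a sketch; the file name is
 * hypothetical and the Texture plumbing is omitted):
 *
 *   import VideoTexture
 *   video = VideoTexture.VideoFFmpeg("//movie.avi")  # or (file, capture, rate, width, height)
 *   video.repeat = -1   # loop forever
 *   video.play()
 *   # call video.refresh() every frame through a Texture object
 */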

static PyObject *VideoFFmpeg_getPreseek(PyImage *self, void *closure)
{
    return Py_BuildValue("h", getFFmpeg(self)->getPreseek());
}

// set preseek
static int VideoFFmpeg_setPreseek(PyImage *self, PyObject *value, void *closure)
{
    // check validity of parameter
    if (value == NULL || !PyLong_Check(value))
    {
        PyErr_SetString(PyExc_TypeError, "The value must be an integer");
        return -1;
    }
    // set preseek
    getFFmpeg(self)->setPreseek(PyLong_AsLong(value));
    // success
    return 0;
}

// get deinterlace
static PyObject *VideoFFmpeg_getDeinterlace(PyImage *self, void *closure)
{
    if (getFFmpeg(self)->getDeinterlace())
        Py_RETURN_TRUE;
    else
        Py_RETURN_FALSE;
}

// set deinterlace
static int VideoFFmpeg_setDeinterlace(PyImage *self, PyObject *value, void *closure)
{
    // check parameter, report failure
    if (value == NULL || !PyBool_Check(value))
    {
        PyErr_SetString(PyExc_TypeError, "The value must be a bool");
        return -1;
    }
    // set deinterlace
    getFFmpeg(self)->setDeinterlace(value == Py_True);
    // success
    return 0;
}

// methods structure
static PyMethodDef videoMethods[] =
{ // methods from VideoBase class
    {"play", (PyCFunction)Video_play, METH_NOARGS, "Play (restart) video"},
    {"pause", (PyCFunction)Video_pause, METH_NOARGS, "pause video"},
    {"stop", (PyCFunction)Video_stop, METH_NOARGS, "stop video (play will replay it from start)"},
    {"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh video - get its status"},
    {NULL}
};
// attributes structure
static PyGetSetDef videoGetSets[] =
{ // methods from VideoBase class
    {(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
    {(char*)"range", (getter)Video_getRange, (setter)Video_setRange, (char*)"replay range", NULL},
    {(char*)"repeat", (getter)Video_getRepeat, (setter)Video_setRepeat, (char*)"repeat count, -1 for infinite repeat", NULL},
    {(char*)"framerate", (getter)Video_getFrameRate, (setter)Video_setFrameRate, (char*)"frame rate", NULL},
    // attributes from ImageBase class
    {(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
    {(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
    {(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
    {(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbor)", NULL},
    {(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
    {(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
    {(char*)"preseek", (getter)VideoFFmpeg_getPreseek, (setter)VideoFFmpeg_setPreseek, (char*)"nb of frames of preseek", NULL},
    {(char*)"deinterlace", (getter)VideoFFmpeg_getDeinterlace, (setter)VideoFFmpeg_setDeinterlace, (char*)"deinterlace image", NULL},
    {NULL}
};

// python type declaration
PyTypeObject VideoFFmpegType =
{
    PyVarObject_HEAD_INIT(NULL, 0)
    "VideoTexture.VideoFFmpeg",   /*tp_name*/
    sizeof(PyImage),          /*tp_basicsize*/
    0,                         /*tp_itemsize*/
    (destructor)Image_dealloc, /*tp_dealloc*/
    0,                         /*tp_print*/
    0,                         /*tp_getattr*/
    0,                         /*tp_setattr*/
    0,                         /*tp_compare*/
    0,                         /*tp_repr*/
    0,                         /*tp_as_number*/
    0,                         /*tp_as_sequence*/
    0,                         /*tp_as_mapping*/
    0,                         /*tp_hash */
    0,                         /*tp_call*/
    0,                         /*tp_str*/
    0,                         /*tp_getattro*/
    0,                         /*tp_setattro*/
    &imageBufferProcs,         /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT,        /*tp_flags*/
    "FFmpeg video source",     /* tp_doc */
    0,                         /* tp_traverse */
    0,                         /* tp_clear */
    0,                         /* tp_richcompare */
    0,                         /* tp_weaklistoffset */
    0,                         /* tp_iter */
    0,                         /* tp_iternext */
    videoMethods,              /* tp_methods */
    0,                         /* tp_members */
    videoGetSets,              /* tp_getset */
    0,                         /* tp_base */
    0,                         /* tp_dict */
    0,                         /* tp_descr_get */
    0,                         /* tp_descr_set */
    0,                         /* tp_dictoffset */
    (initproc)VideoFFmpeg_init,     /* tp_init */
    0,                         /* tp_alloc */
    Image_allocNew,            /* tp_new */
};

// object initialization
static int ImageFFmpeg_init (PyObject *pySelf, PyObject *args, PyObject *kwds)
{
    PyImage *self = reinterpret_cast<PyImage*>(pySelf);
    // parameters - video source
    // file name or format type for capture (only for Linux: video4linux or dv1394)
    char * file = NULL;

    // get parameters
    if (!PyArg_ParseTuple(args, "s:ImageFFmpeg", &file))
        return -1;

    try
    {
        // create video object
        Video_init<VideoFFmpeg>(self);

        getVideoFFmpeg(self)->initParams(0, 0, 1.0, true);

        // open video source
        Video_open(getVideo(self), file, -1);
    }
    catch (Exception & exp)
    {
        exp.report();
        return -1;
    }
    // initialization succeeded
    return 0;
}
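
/* Sketch of intended use from Python (the file name is hypothetical):
 *
 *   import VideoTexture
 *   img = VideoTexture.ImageFFmpeg("//picture.png")
 *   img.refresh()   # loads the single frame, then the source stops itself
 */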

static PyObject *Image_reload(PyImage *self, PyObject *args)
{
    char * newname = NULL;
    if (!PyArg_ParseTuple(args, "|s:reload", &newname))
        return NULL;
    if (self->m_image != NULL)
    {
        VideoFFmpeg* video = getFFmpeg(self);
        // check type of object
        if (!newname)
            newname = video->getImageName();
        if (!newname) {
            // if not set, report an error
            PyErr_SetString(PyExc_RuntimeError, "No image file name given");
            return NULL;
        }
        // make sure the previous file is cleared
        video->release();
        // open the new file
        video->openFile(newname);
    }
    Py_RETURN_NONE;
}

// methods structure
static PyMethodDef imageMethods[] =
{ // methods from VideoBase class
    {"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh image, i.e. load it"},
    {"reload", (PyCFunction)Image_reload, METH_VARARGS, "Reload image, i.e. reopen it"},
    {NULL}
};
// attributes structure
static PyGetSetDef imageGetSets[] =
{ // methods from VideoBase class
    {(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
    // attributes from ImageBase class
    {(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
    {(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
    {(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
    {(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbor)", NULL},
    {(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
    {(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
    {NULL}
};

// python type declaration
PyTypeObject ImageFFmpegType =
{
    PyVarObject_HEAD_INIT(NULL, 0)
    "VideoTexture.ImageFFmpeg",   /*tp_name*/
    sizeof(PyImage),          /*tp_basicsize*/
    0,                         /*tp_itemsize*/
    (destructor)Image_dealloc, /*tp_dealloc*/
    0,                         /*tp_print*/
    0,                         /*tp_getattr*/
    0,                         /*tp_setattr*/
    0,                         /*tp_compare*/
    0,                         /*tp_repr*/
    0,                         /*tp_as_number*/
    0,                         /*tp_as_sequence*/
    0,                         /*tp_as_mapping*/
    0,                         /*tp_hash */
    0,                         /*tp_call*/
    0,                         /*tp_str*/
    0,                         /*tp_getattro*/
    0,                         /*tp_setattro*/
    &imageBufferProcs,         /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT,        /*tp_flags*/
    "FFmpeg image source",     /* tp_doc */
    0,                         /* tp_traverse */
    0,                         /* tp_clear */
    0,                         /* tp_richcompare */
    0,                         /* tp_weaklistoffset */
    0,                         /* tp_iter */
    0,                         /* tp_iternext */
    imageMethods,              /* tp_methods */
    0,                         /* tp_members */
    imageGetSets,              /* tp_getset */
    0,                         /* tp_base */
    0,                         /* tp_dict */
    0,                         /* tp_descr_get */
    0,                         /* tp_descr_set */
    0,                         /* tp_dictoffset */
    (initproc)ImageFFmpeg_init,     /* tp_init */
    0,                         /* tp_alloc */
    Image_allocNew,            /* tp_new */
};

#endif  //WITH_FFMPEG