/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (c) 2007 The Zdeno Ash Miklas
 *
 * This source file is part of VideoTexture library
 *
 * Contributor(s):
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file gameengine/VideoTexture/VideoFFmpeg.cpp
 *  \ingroup bgevideotex
 */

#ifdef WITH_FFMPEG

// INT64_C fix for some linux machines (C99ism)
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#ifdef  __STDC_CONSTANT_MACROS  /* quiet warning */
#endif
#endif

#include <stdint.h>


#include "MEM_guardedalloc.h"
#include "PIL_time.h"

#include <string>

#include "VideoFFmpeg.h"
#include "Exception.h"


// default framerate
const double defFrameRate = 25.0;

// macro for exception handling and logging
#define CATCH_EXCP catch (Exception & exp) \
{ exp.report(); m_status = SourceError; }

// class VideoFFmpeg

// constructor
VideoFFmpeg::VideoFFmpeg (HRESULT * hRslt) : VideoBase(),
        m_codec(NULL), m_formatCtx(NULL), m_codecCtx(NULL),
        m_frame(NULL), m_frameDeinterlaced(NULL), m_frameRGB(NULL), m_imgConvertCtx(NULL),
        m_deinterlace(false), m_preseek(0), m_videoStream(-1), m_baseFrameRate(25.0),
        m_lastFrame(-1), m_eof(false), m_externTime(false), m_curPosition(-1), m_startTime(0),
        m_captWidth(0), m_captHeight(0), m_captRate(0.f), m_isImage(false),
        m_isThreaded(false), m_isStreaming(false), m_stopThread(false), m_cacheStarted(false)
{
        // set video format
        m_format = RGB24;
        // force flip because ffmpeg always returns the image in the wrong orientation for textures
        setFlip(true);
        // construction is OK
        *hRslt = S_OK;
        BLI_listbase_clear(&m_thread);
        pthread_mutex_init(&m_cacheMutex, NULL);
        BLI_listbase_clear(&m_frameCacheFree);
        BLI_listbase_clear(&m_frameCacheBase);
        BLI_listbase_clear(&m_packetCacheFree);
        BLI_listbase_clear(&m_packetCacheBase);
}

// destructor
VideoFFmpeg::~VideoFFmpeg ()
{
}

void VideoFFmpeg::refresh(void)
{
        // a fixed image will not refresh because it is loaded only once at creation
        if (m_isImage)
                return;
        m_avail = false;
}

// release components
bool VideoFFmpeg::release()
{
        // release
        stopCache();
        if (m_codecCtx)
        {
                avcodec_close(m_codecCtx);
                m_codecCtx = NULL;
        }
        if (m_formatCtx)
        {
                avformat_close_input(&m_formatCtx);
                m_formatCtx = NULL;
        }
        if (m_frame)
        {
                av_free(m_frame);
                m_frame = NULL;
        }
        if (m_frameDeinterlaced)
        {
                MEM_freeN(m_frameDeinterlaced->data[0]);
                av_free(m_frameDeinterlaced);
                m_frameDeinterlaced = NULL;
        }
        if (m_frameRGB)
        {
                MEM_freeN(m_frameRGB->data[0]);
                av_free(m_frameRGB);
                m_frameRGB = NULL;
        }
        if (m_imgConvertCtx)
        {
                sws_freeContext(m_imgConvertCtx);
                m_imgConvertCtx = NULL;
        }
        m_codec = NULL;
        m_status = SourceStopped;
        m_lastFrame = -1;
        return true;
}

AVFrame *VideoFFmpeg::allocFrameRGB()
{
        AVFrame *frame;
        frame = avcodec_alloc_frame();
        if (m_format == RGBA32)
        {
                avpicture_fill((AVPicture*)frame,
                        (uint8_t*)MEM_callocN(avpicture_get_size(
                                PIX_FMT_RGBA,
                                m_codecCtx->width, m_codecCtx->height),
                                "ffmpeg rgba"),
                        PIX_FMT_RGBA, m_codecCtx->width, m_codecCtx->height);
        } else
        {
                avpicture_fill((AVPicture*)frame,
                        (uint8_t*)MEM_callocN(avpicture_get_size(
                                PIX_FMT_RGB24,
                                m_codecCtx->width, m_codecCtx->height),
                                "ffmpeg rgb"),
                        PIX_FMT_RGB24, m_codecCtx->width, m_codecCtx->height);
        }
        return frame;
}

// set initial parameters
void VideoFFmpeg::initParams (short width, short height, float rate, bool image)
{
        m_captWidth = width;
        m_captHeight = height;
        m_captRate = rate;
        m_isImage = image;
}


int VideoFFmpeg::openStream(const char *filename, AVInputFormat *inputFormat, AVDictionary **formatParams)
{
        AVFormatContext *formatCtx = NULL;
        int             i, videoStream;
        AVCodec         *codec;
        AVCodecContext  *codecCtx;

        if (avformat_open_input(&formatCtx, filename, inputFormat, formatParams) != 0)
                return -1;

        if (avformat_find_stream_info(formatCtx, NULL) < 0)
        {
                avformat_close_input(&formatCtx);
                return -1;
        }

        /* Find the first video stream */
        videoStream = -1;
        for (i = 0; i < formatCtx->nb_streams; i++)
        {
                if (formatCtx->streams[i] &&
                        get_codec_from_stream(formatCtx->streams[i]) &&
                        (get_codec_from_stream(formatCtx->streams[i])->codec_type == AVMEDIA_TYPE_VIDEO))
                {
                        videoStream = i;
                        break;
                }
        }

        if (videoStream == -1)
        {
                avformat_close_input(&formatCtx);
                return -1;
        }

        codecCtx = get_codec_from_stream(formatCtx->streams[videoStream]);

        /* Find the decoder for the video stream */
        codec = avcodec_find_decoder(codecCtx->codec_id);
        if (codec == NULL)
        {
                avformat_close_input(&formatCtx);
                return -1;
        }
        codecCtx->workaround_bugs = 1;
        if (avcodec_open2(codecCtx, codec, NULL) < 0)
        {
                avformat_close_input(&formatCtx);
                return -1;
        }

#ifdef FFMPEG_OLD_FRAME_RATE
        if (codecCtx->frame_rate > 1000 && codecCtx->frame_rate_base == 1)
                codecCtx->frame_rate_base = 1000;
        m_baseFrameRate = (double)codecCtx->frame_rate / (double)codecCtx->frame_rate_base;
#else
        m_baseFrameRate = av_q2d(av_get_r_frame_rate_compat(formatCtx->streams[videoStream]));
#endif
        if (m_baseFrameRate <= 0.0)
                m_baseFrameRate = defFrameRate;

        m_codec = codec;
        m_codecCtx = codecCtx;
        m_formatCtx = formatCtx;
        m_videoStream = videoStream;
        m_frame = avcodec_alloc_frame();
        m_frameDeinterlaced = avcodec_alloc_frame();

        // allocate buffer if deinterlacing is required
        avpicture_fill((AVPicture*)m_frameDeinterlaced,
                (uint8_t*)MEM_callocN(avpicture_get_size(
                m_codecCtx->pix_fmt,
                m_codecCtx->width, m_codecCtx->height),
                "ffmpeg deinterlace"),
                m_codecCtx->pix_fmt, m_codecCtx->width, m_codecCtx->height);

        // check if the pixel format supports Alpha
        if (m_codecCtx->pix_fmt == PIX_FMT_RGB32 ||
                m_codecCtx->pix_fmt == PIX_FMT_BGR32 ||
                m_codecCtx->pix_fmt == PIX_FMT_RGB32_1 ||
                m_codecCtx->pix_fmt == PIX_FMT_BGR32_1)
        {
                // allocate buffer to store final decoded frame
                m_format = RGBA32;
                // allocate sws context
                m_imgConvertCtx = sws_getContext(
                        m_codecCtx->width,
                        m_codecCtx->height,
                        m_codecCtx->pix_fmt,
                        m_codecCtx->width,
                        m_codecCtx->height,
                        PIX_FMT_RGBA,
                        SWS_FAST_BILINEAR,
                        NULL, NULL, NULL);
        } else
        {
                // allocate buffer to store final decoded frame
                m_format = RGB24;
                // allocate sws context
                m_imgConvertCtx = sws_getContext(
                        m_codecCtx->width,
                        m_codecCtx->height,
                        m_codecCtx->pix_fmt,
                        m_codecCtx->width,
                        m_codecCtx->height,
                        PIX_FMT_RGB24,
                        SWS_FAST_BILINEAR,
                        NULL, NULL, NULL);
        }
        m_frameRGB = allocFrameRGB();

        if (!m_imgConvertCtx) {
                avcodec_close(m_codecCtx);
                m_codecCtx = NULL;
                avformat_close_input(&m_formatCtx);
                m_formatCtx = NULL;
                av_free(m_frame);
                m_frame = NULL;
                MEM_freeN(m_frameDeinterlaced->data[0]);
                av_free(m_frameDeinterlaced);
                m_frameDeinterlaced = NULL;
                MEM_freeN(m_frameRGB->data[0]);
                av_free(m_frameRGB);
                m_frameRGB = NULL;
                return -1;
        }
        return 0;
}

/*
 * This thread is used to load video frames asynchronously.
 * It provides a frame caching service.
 * The main thread is responsible for positioning the frame pointer in the
 * file correctly before calling startCache() which starts this thread.
 * The cache is organized in two layers: 1) a cache of 20-30 undecoded packets to keep
 * memory and CPU usage low, 2) a cache of 5 decoded frames.
 * If the main thread does not find the frame in the cache (because the video has restarted
 * or because the GE is lagging), it stops the cache with stopCache() (this is a synchronous
 * function: it sends a signal to stop the cache thread and waits for confirmation), then
 * changes the position in the stream and restarts the cache thread.
 */
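/* A sketch of the queue flow implemented below (derived from the code, not an
 * official diagram):
 *   m_packetCacheFree --av_read_frame()--> m_packetCacheBase   (packets, this thread only)
 *   m_frameCacheFree --decode+sws_scale()--> m_frameCacheBase  (frames, shared, mutex-protected)
 * The main thread consumes from the head of m_frameCacheBase and returns frames
 * to m_frameCacheFree through releaseFrame().
 */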
void *VideoFFmpeg::cacheThread(void *data)
{
        VideoFFmpeg* video = (VideoFFmpeg*)data;
        // holds the frame that is being decoded
        CacheFrame *currentFrame = NULL;
        CachePacket *cachePacket;
        bool endOfFile = false;
        int frameFinished = 0;
        double timeBase = av_q2d(video->m_formatCtx->streams[video->m_videoStream]->time_base);
        int64_t startTs = video->m_formatCtx->streams[video->m_videoStream]->start_time;

        if (startTs == AV_NOPTS_VALUE)
                startTs = 0;

        while (!video->m_stopThread)
        {
                // packet cache is used solely by this thread, no need to lock
                // In case the stream/file contains streams other than the one we are looking for,
                // allow a bit of cycling to get rid of those frames quickly
                frameFinished = 0;
                while (    !endOfFile
                                && (cachePacket = (CachePacket *)video->m_packetCacheFree.first) != NULL
                                && frameFinished < 25)
                {
                        // free packet => packet cache is not full yet, just read more
                        if (av_read_frame(video->m_formatCtx, &cachePacket->packet) >= 0)
                        {
                                if (cachePacket->packet.stream_index == video->m_videoStream)
                                {
                                        // make sure fresh memory is allocated for the packet and move it to queue
                                        av_dup_packet(&cachePacket->packet);
                                        BLI_remlink(&video->m_packetCacheFree, cachePacket);
                                        BLI_addtail(&video->m_packetCacheBase, cachePacket);
                                        break;
                                } else {
                                        // this is not a good packet for us, just leave it on the free queue
                                        // Note: here we could handle sound packets
                                        av_free_packet(&cachePacket->packet);
                                        frameFinished++;
                                }

                        } else {
                                if (video->m_isFile)
                                        // this marks the end of the file
                                        endOfFile = true;
                                // if we cannot read a packet, no need to continue
                                break;
                        }
                }
                // frame cache is also used by the main thread, lock
                if (currentFrame == NULL)
                {
                        // no current frame being decoded, take a free one
                        pthread_mutex_lock(&video->m_cacheMutex);
                        if ((currentFrame = (CacheFrame *)video->m_frameCacheFree.first) != NULL)
                                BLI_remlink(&video->m_frameCacheFree, currentFrame);
                        pthread_mutex_unlock(&video->m_cacheMutex);
                }
                if (currentFrame != NULL)
                {
                        // this frame is out of the free and busy queues, we can manipulate it without locking
                        frameFinished = 0;
                        while (!frameFinished && (cachePacket = (CachePacket *)video->m_packetCacheBase.first) != NULL)
                        {
                                BLI_remlink(&video->m_packetCacheBase, cachePacket);
                                // use m_frame because when caching, it is not used in the main thread
                                // we can't use currentFrame directly because we need to convert to RGB first
                                avcodec_decode_video2(video->m_codecCtx,
                                        video->m_frame, &frameFinished,
                                        &cachePacket->packet);
                                if (frameFinished)
                                {
                                        AVFrame *input = video->m_frame;

                                        /* If all data pointers are NULL the frame wasn't decoded
                                         * properly; this check prevents a crash */
                                        if (   input->data[0] != 0 || input->data[1] != 0
                                                || input->data[2] != 0 || input->data[3] != 0)
                                        {
                                                if (video->m_deinterlace)
                                                {
                                                        if (avpicture_deinterlace(
                                                                (AVPicture*) video->m_frameDeinterlaced,
                                                                (const AVPicture*) video->m_frame,
                                                                video->m_codecCtx->pix_fmt,
                                                                video->m_codecCtx->width,
                                                                video->m_codecCtx->height) >= 0)
                                                        {
                                                                input = video->m_frameDeinterlaced;
                                                        }
                                                }
                                                // convert to RGB24
                                                sws_scale(video->m_imgConvertCtx,
                                                        input->data,
                                                        input->linesize,
                                                        0,
                                                        video->m_codecCtx->height,
                                                        currentFrame->frame->data,
                                                        currentFrame->frame->linesize);
                                                // move frame to queue, this frame is necessarily the next one
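                                                // dts is expressed in stream time_base units:
                                                // dts*timeBase gives seconds, multiplying by the frame
                                                // rate gives a frame index, +0.5 rounds to the nearest frame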
                                                video->m_curPosition = (long)((cachePacket->packet.dts - startTs) * (video->m_baseFrameRate*timeBase) + 0.5);
                                                currentFrame->framePosition = video->m_curPosition;
                                                pthread_mutex_lock(&video->m_cacheMutex);
                                                BLI_addtail(&video->m_frameCacheBase, currentFrame);
                                                pthread_mutex_unlock(&video->m_cacheMutex);
                                                currentFrame = NULL;
                                        }
                                }
                                av_free_packet(&cachePacket->packet);
                                BLI_addtail(&video->m_packetCacheFree, cachePacket);
                        }
                        if (currentFrame && endOfFile)
                        {
                                // no more packets and end of file => queue a special frame that indicates it
                                currentFrame->framePosition = -1;
                                pthread_mutex_lock(&video->m_cacheMutex);
                                BLI_addtail(&video->m_frameCacheBase, currentFrame);
                                pthread_mutex_unlock(&video->m_cacheMutex);
                                currentFrame = NULL;
                                // no need to stay any longer in this thread
                                break;
                        }
                }
                // small sleep to avoid unnecessary looping
                PIL_sleep_ms(10);
        }
        // before quitting, put back the current frame to the queue to allow freeing
        if (currentFrame)
        {
                pthread_mutex_lock(&video->m_cacheMutex);
                BLI_addtail(&video->m_frameCacheFree, currentFrame);
                pthread_mutex_unlock(&video->m_cacheMutex);
        }
        return 0;
}

// start thread to cache video frames from file/capture/stream
// this function should be called only when the position in the stream is set for the
// first frame to cache
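// Note: CACHE_FRAME_SIZE and CACHE_PACKET_SIZE are presumably defined in
// VideoFFmpeg.h; the comment above cacheThread() suggests values around
// 5 frames and 20-30 packets.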
bool VideoFFmpeg::startCache()
{
        if (!m_cacheStarted && m_isThreaded)
        {
                m_stopThread = false;
                for (int i = 0; i < CACHE_FRAME_SIZE; i++)
                {
                        CacheFrame *frame = new CacheFrame();
                        frame->frame = allocFrameRGB();
                        BLI_addtail(&m_frameCacheFree, frame);
                }
                for (int i = 0; i < CACHE_PACKET_SIZE; i++)
                {
                        CachePacket *packet = new CachePacket();
                        BLI_addtail(&m_packetCacheFree, packet);
                }
                BLI_init_threads(&m_thread, cacheThread, 1);
                BLI_insert_thread(&m_thread, this);
                m_cacheStarted = true;
        }
        return m_cacheStarted;
}

void VideoFFmpeg::stopCache()
{
        if (m_cacheStarted)
        {
                m_stopThread = true;
                BLI_end_threads(&m_thread);
                // now delete the cache
                CacheFrame *frame;
                CachePacket *packet;
                while ((frame = (CacheFrame *)m_frameCacheBase.first) != NULL)
                {
                        BLI_remlink(&m_frameCacheBase, frame);
                        MEM_freeN(frame->frame->data[0]);
                        av_free(frame->frame);
                        delete frame;
                }
                while ((frame = (CacheFrame *)m_frameCacheFree.first) != NULL)
                {
                        BLI_remlink(&m_frameCacheFree, frame);
                        MEM_freeN(frame->frame->data[0]);
                        av_free(frame->frame);
                        delete frame;
                }
                while ((packet = (CachePacket *)m_packetCacheBase.first) != NULL)
                {
                        BLI_remlink(&m_packetCacheBase, packet);
                        av_free_packet(&packet->packet);
                        delete packet;
                }
                while ((packet = (CachePacket *)m_packetCacheFree.first) != NULL)
                {
                        BLI_remlink(&m_packetCacheFree, packet);
                        delete packet;
                }
                m_cacheStarted = false;
        }
}

void VideoFFmpeg::releaseFrame(AVFrame *frame)
{
        if (frame == m_frameRGB)
        {
                // this is not a frame from the cache, ignore
                return;
        }
        // this frame MUST be the first one of the queue
        pthread_mutex_lock(&m_cacheMutex);
        CacheFrame *cacheFrame = (CacheFrame *)m_frameCacheBase.first;
        assert (cacheFrame != NULL && cacheFrame->frame == frame);
        BLI_remlink(&m_frameCacheBase, cacheFrame);
        BLI_addtail(&m_frameCacheFree, cacheFrame);
        pthread_mutex_unlock(&m_cacheMutex);
}

// open video file
void VideoFFmpeg::openFile (char *filename)
{
        if (openStream(filename, NULL, NULL) != 0)
                return;

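        // Preseek is a guess of how many frames must be decoded after a seek before the
        // target frame is reached: av_seek_frame() with AVSEEK_FLAG_BACKWARD (see grabFrame)
        // lands on the previous keyframe, so up to one GOP of frames may have to be skipped.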
        if (m_codecCtx->gop_size)
                m_preseek = (m_codecCtx->gop_size < 25) ? m_codecCtx->gop_size + 1 : 25;
        else if (m_codecCtx->has_b_frames)
                m_preseek = 25; // should determine the GOP size
        else
                m_preseek = 0;

        // get video time range
        m_range[0] = 0.0;
        m_range[1] = (double)m_formatCtx->duration / AV_TIME_BASE;

        // open base class
        VideoBase::openFile(filename);

        if (
                // ffmpeg reports that http sources are actually not streams,
                // but seeking on an http file is really not desirable, so force streaming.
                // It would be good to get this information from the context but there is no simple indication
                !strncmp(filename, "http://", 7) ||
                !strncmp(filename, "rtsp://", 7) ||
                (m_formatCtx->pb && !m_formatCtx->pb->seekable)
                )
        {
                // the file is in fact a streaming source, treat it as a cam to prevent seeking
                m_isFile = false;
                // but it's not handled exactly like a camera.
                m_isStreaming = true;
                // for streaming it is important to do non-blocking reads
                m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
        }

        if (m_isImage)
        {
                // the file is to be treated as an image, i.e. load the first frame only
                m_isFile = false;
                // in case of reload, the filename is taken from m_imageName, no need to change it
                if (m_imageName.Ptr() != filename)
                        m_imageName = filename;
                m_preseek = 0;
                m_avail = false;
                play();
        }
        // check if we should do multi-threading
        if (!m_isImage && BLI_system_thread_count() > 1)
        {
                // never thread an image: there are no frames to read ahead
                // no need to thread if the system has a single core
                m_isThreaded = true;
        }
}


// open video capture device
void VideoFFmpeg::openCam (char *file, short camIdx)
{
        // open camera source
        AVInputFormat   *inputFormat;
        AVDictionary    *formatParams = NULL;
        char            filename[28], rateStr[20];

#ifdef WIN32
        // video capture on Windows only through the Video For Windows driver
        inputFormat = av_find_input_format("vfwcap");
        if (!inputFormat)
                // Video For Windows not supported??
                return;
        sprintf(filename, "%d", camIdx);
#else
        // On Linux we support two types of devices: VideoForLinux and DV1394.
        // The user specifies it with the filename:
        // [<device_type>][:<standard>]
        // <device_type> : 'v4l' for VideoForLinux, 'dv1394' for DV1394. By default 'v4l'
        // <standard>    : 'pal', 'secam' or 'ntsc'. By default 'ntsc'
        // The driver name is constructed automatically from the device type:
        // v4l   : /dev/video<camIdx>
        // dv1394: /dev/dv1394/<camIdx>
        // If you have a different driver name, you can specify the driver name explicitly
        // instead of the device type. Examples of valid filenames:
        //    /dev/v4l/video0:pal
        //    /dev/ieee1394/1:ntsc
        //    dv1394:secam
        //    v4l:pal
        char *p;

        if (file && strstr(file, "1394") != NULL)
        {
                // the user specified a driver, check if it is v4l or dv1394
                inputFormat = av_find_input_format("dv1394");
                sprintf(filename, "/dev/dv1394/%d", camIdx);
        } else
        {
                const char *formats[] = {"video4linux2,v4l2", "video4linux2", "video4linux"};
                int i, formatsCount = sizeof(formats) / sizeof(char*);
                for (i = 0; i < formatsCount; i++) {
                        inputFormat = av_find_input_format(formats[i]);
                        if (inputFormat)
                                break;
                }
                sprintf(filename, "/dev/video%d", camIdx);
        }
        if (!inputFormat)
                // these formats should be supported, check the ffmpeg compilation
                return;
        if (file && strncmp(file, "/dev", 4) == 0)
        {
                // the user specified a device path directly, not a driver type
                strncpy(filename, file, sizeof(filename));
                filename[sizeof(filename) - 1] = 0;
                if ((p = strchr(filename, ':')) != 0)
                        *p = 0;
        }
        if (file && (p = strchr(file, ':')) != NULL) {
                av_dict_set(&formatParams, "standard", p + 1, 0);
        }
#endif
        // frame rate
        if (m_captRate <= 0.f)
                m_captRate = defFrameRate;
        sprintf(rateStr, "%f", m_captRate);

        av_dict_set(&formatParams, "framerate", rateStr, 0);

        if (m_captWidth > 0 && m_captHeight > 0) {
                char video_size[64];
                BLI_snprintf(video_size, sizeof(video_size), "%dx%d", m_captWidth, m_captHeight);
                av_dict_set(&formatParams, "video_size", video_size, 0);
        }

        if (openStream(filename, inputFormat, &formatParams) != 0)
                return;

        // for video capture it is important to do non-blocking reads
        m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
        // open base class
        VideoBase::openCam(file, camIdx);
        // check if we should do multi-threading
        if (BLI_system_thread_count() > 1)
        {
                // no need to thread if the system has a single core
                m_isThreaded = true;
        }

        av_dict_free(&formatParams);
}

// play video
bool VideoFFmpeg::play (void)
{
        try
        {
                // if object is able to play
                if (VideoBase::play())
                {
                        // set video position
                        setPositions();

                        if (m_isStreaming)
                        {
                                av_read_play(m_formatCtx);
                        }

                        // return success
                        return true;
                }
        }
        CATCH_EXCP;
        return false;
}


// pause video
bool VideoFFmpeg::pause (void)
{
        try
        {
                if (VideoBase::pause())
                {
                        if (m_isStreaming)
                        {
                                av_read_pause(m_formatCtx);
                        }
                        return true;
                }
        }
        CATCH_EXCP;
        return false;
}

// stop video
bool VideoFFmpeg::stop (void)
{
        try
        {
                VideoBase::stop();
                // force restart when play is called next
                m_lastFrame = -1;
                return true;
        }
        CATCH_EXCP;
        return false;
}


// set video range
void VideoFFmpeg::setRange (double start, double stop)
{
        try
        {
                // set range
                if (m_isFile)
                {
                        VideoBase::setRange(start, stop);
                        // set range for video
                        setPositions();
                }
        }
        CATCH_EXCP;
}

// set framerate
void VideoFFmpeg::setFrameRate (float rate)
{
        VideoBase::setFrameRate(rate);
}


// image calculation
// load frame from video
void VideoFFmpeg::calcImage (unsigned int texId, double ts)
{
        if (m_status == SourcePlaying)
        {
                // get actual time
                double startTime = PIL_check_seconds_timer();
                double actTime;
                // timestamp passed from audio actuators can sometimes be slightly negative
                if (m_isFile && ts >= -0.5)
                {
                        // allow setting timestamp only when not streaming
                        actTime = ts;
                        if (actTime * actFrameRate() < m_lastFrame)
                        {
                                // user is asking to rewind, force a cache clear to make sure we will do a seek
                                // note that this does not decrement m_repeat if ts didn't reach m_range[1]
                                stopCache();
                        }
                }
                else
                {
                        if (m_lastFrame == -1 && !m_isFile)
                                m_startTime = startTime;
                        actTime = startTime - m_startTime;
                }
                // if video has ended
                if (m_isFile && actTime * m_frameRate >= m_range[1])
                {
                        // in any case, this resets the cache
                        stopCache();
                        // if repeats are set, decrease them
                        if (m_repeat > 0)
                                --m_repeat;
                        // if video has to be replayed
                        if (m_repeat != 0)
                        {
                                // reset its position
                                actTime -= (m_range[1] - m_range[0]) / m_frameRate;
                                m_startTime += (m_range[1] - m_range[0]) / m_frameRate;
                        }
                        // if video has to be stopped, stop it
                        else
                        {
                                m_status = SourceStopped;
                                return;
                        }
                }
                // actual frame
                long actFrame = (m_isImage) ? m_lastFrame + 1 : long(actTime * actFrameRate());
                // if actual frame differs from last frame
                if (actFrame != m_lastFrame)
                {
                        AVFrame *frame;
                        // get image
                        if ((frame = grabFrame(actFrame)) != NULL)
                        {
                                if (!m_isFile && !m_cacheStarted)
                                {
                                        // streaming without cache: detect synchronization problem
                                        double execTime = PIL_check_seconds_timer() - startTime;
                                        if (execTime > 0.005)
                                        {
                                                // exec time is too long, it means that the function was blocking
                                                // resynchronize the stream from this time
                                                m_startTime += execTime;
                                        }
                                }
                                // save actual frame
                                m_lastFrame = actFrame;
                                // init image, if needed
                                init(short(m_codecCtx->width), short(m_codecCtx->height));
                                // process image
                                process((BYTE*)(frame->data[0]));
                                // finished with the frame, release it so that the cache can reuse it
                                releaseFrame(frame);
                                // in case it is an image, automatically stop reading it
                                if (m_isImage)
                                {
                                        m_status = SourceStopped;
                                        // close the file as we don't need it anymore
                                        release();
                                }
                        } else if (m_isStreaming)
                        {
                                // we didn't get a frame and we are streaming, this may be due to
                                // a delay in the network or because we are getting the frame too fast.
                                // In the latter case, shift time by a small amount to compensate for a drift
                                m_startTime += 0.001;
                        }
                }
        }
}


// set actual position
void VideoFFmpeg::setPositions (void)
{
        // set video start time
        m_startTime = PIL_check_seconds_timer();
        // if file is played and actual position is before end position
        if (!m_eof && m_lastFrame >= 0 && (!m_isFile || m_lastFrame < m_range[1] * actFrameRate()))
                // continue from actual position
                m_startTime -= double(m_lastFrame) / actFrameRate();
        else {
                m_startTime -= m_range[0];
                // start from beginning, stop cache just in case
                stopCache();
        }
}

// position the pointer in the file; position is expressed in frames
AVFrame *VideoFFmpeg::grabFrame(long position)
{
        AVPacket packet;
        int frameFinished;
        int posFound = 1;
        bool frameLoaded = false;
        int64_t targetTs = 0;
        CacheFrame *frame;
        int64_t dts = 0;

        if (m_cacheStarted)
        {
                // when the cache is active, we must not read the file directly
                do {
                        pthread_mutex_lock(&m_cacheMutex);
                        frame = (CacheFrame *)m_frameCacheBase.first;
                        pthread_mutex_unlock(&m_cacheMutex);
                        // no need to remove the frame from the queue: the cache thread does not touch the head, only the tail
                        if (frame == NULL)
                        {
                                // no frame in cache, in case of a file this is an abnormal situation
                                if (m_isFile)
                                {
                                        // go back to non-threaded reading
                                        stopCache();
                                        break;
                                }
                                return NULL;
                        }
                        if (frame->framePosition == -1)
                        {
                                // this frame marks the end of the file (only used for files)
                                // leave it in the cache to make sure we don't miss it
                                m_eof = true;
                                return NULL;
                        }
                        // for streaming, always return the next frame,
                        // that's what grabFrame does in non-cache mode anyway.
                        if (m_isStreaming || frame->framePosition == position)
                        {
                                return frame->frame;
                        }
                        // for cam, skip old frames to keep the image realtime.
                        // There should be no risk of clock drift since it all happens on the same CPU
                        if (frame->framePosition > position)
                        {
                                // this can happen after rewind if the seek didn't find the first frame
                                // the frame in the buffer is ahead of time, just leave it there
                                return NULL;
                        }
                        // this frame is not useful, release it
                        pthread_mutex_lock(&m_cacheMutex);
                        BLI_remlink(&m_frameCacheBase, frame);
                        BLI_addtail(&m_frameCacheFree, frame);
                        pthread_mutex_unlock(&m_cacheMutex);
                } while (true);
        }
        double timeBase = av_q2d(m_formatCtx->streams[m_videoStream]->time_base);
        int64_t startTs = m_formatCtx->streams[m_videoStream]->start_time;
        if (startTs == AV_NOPTS_VALUE)
                startTs = 0;

        // come here when there is no cache or the cache has been stopped
        // locate the frame, by seeking if necessary (seeking is only possible for files)
        if (m_isFile)
        {
                // first check if the position that we are looking for is in the preseek range
                // if so, just read frames until we get there
                if (position > m_curPosition + 1
                        && m_preseek
                        && position - (m_curPosition + 1) < m_preseek)
                {
                        while (av_read_frame(m_formatCtx, &packet) >= 0)
                        {
                                if (packet.stream_index == m_videoStream)
                                {
                                        avcodec_decode_video2(
                                                m_codecCtx,
                                                m_frame, &frameFinished,
                                                &packet);
                                        if (frameFinished)
                                        {
                                                m_curPosition = (long)((packet.dts - startTs) * (m_baseFrameRate*timeBase) + 0.5);
                                        }
                                }
                                av_free_packet(&packet);
                                if (position == m_curPosition + 1)
                                        break;
                        }
                }
                // if the position is not in preseek, do a direct jump
                if (position != m_curPosition + 1)
                {
                        int64_t pos = (int64_t)((position - m_preseek) / (m_baseFrameRate*timeBase));

                        if (pos < 0)
                                pos = 0;

                        pos += startTs;

                        if (position <= m_curPosition || !m_eof)
                        {
#if 0
                                // Tried to make this work but couldn't: seeking on byte is ignored by the
                                // format plugin and it will generally continue to read from the last timestamp.
                                // Too bad because frame seek is not always able to get the first frame
                                // of the file.
                                if (position <= m_preseek)
                                {
                                        // we can safely go to the beginning of the file
                                        if (av_seek_frame(m_formatCtx, m_videoStream, 0, AVSEEK_FLAG_BYTE) >= 0)
                                        {
                                                // binary seek does not reset the timestamp, must do it now
                                                av_update_cur_dts(m_formatCtx, m_formatCtx->streams[m_videoStream], startTs);
                                                m_curPosition = 0;
                                        }
                                }
                                else
#endif
                                {
                                        if (av_seek_frame(m_formatCtx, m_videoStream, pos, AVSEEK_FLAG_BACKWARD) >= 0)
                                        {
                                                // current position is now lost, guess a value.
                                                // It's not important because it will be set at the end of this function
                                                m_curPosition = position - m_preseek - 1;
                                        }
                                }
                        }
                        // this is the timestamp of the frame we're looking for
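                        // inverse of the dts-to-frame conversion used elsewhere in this file:
                        // position/frameRate = seconds, seconds/timeBase = stream timestamp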
                        targetTs = (int64_t)(position / (m_baseFrameRate * timeBase)) + startTs;

                        posFound = 0;
                        avcodec_flush_buffers(m_codecCtx);
                }
        } else if (m_isThreaded)
        {
                // cache is not started but threading is possible
                // better not to read the stream directly as it may take some time, better to start caching
                if (startCache())
                        return NULL;
                // Abnormal!!! could not start cache, fall back on direct read
                m_isThreaded = false;
        }

        // find the correct frame; in case of streaming with no cache, it means just
        // returning the next frame. This is not quite correct, may need more work
        while (av_read_frame(m_formatCtx, &packet) >= 0)
        {
                if (packet.stream_index == m_videoStream)
                {
                        AVFrame *input = m_frame;
                        short counter = 0;

                        /* If m_isImage, loop while the data is not read properly (png, tiff, etc. formats
                         * may need several passes); otherwise the while loop is not needed */
                        do {
                                avcodec_decode_video2(m_codecCtx, m_frame, &frameFinished, &packet);
                                counter++;
                        } while ((input->data[0] == 0 && input->data[1] == 0 && input->data[2] == 0 && input->data[3] == 0) && counter < 10 && m_isImage);

                        // remember dts to compute the exact frame number
                        dts = packet.dts;
                        if (frameFinished && !posFound)
                        {
                                if (dts >= targetTs)
                                {
                                        posFound = 1;
                                }
                        }

                        if (frameFinished && posFound == 1)
                        {
                                AVFrame *input = m_frame;

                                /* If all data pointers are NULL the frame wasn't decoded
                                 * properly; this check prevents a crash */
                                if (   input->data[0] == 0 && input->data[1] == 0
                                        && input->data[2] == 0 && input->data[3] == 0)
                                {
                                        av_free_packet(&packet);
                                        break;
                                }

                                if (m_deinterlace)
                                {
                                        if (avpicture_deinterlace(
                                                (AVPicture*) m_frameDeinterlaced,
                                                (const AVPicture*) m_frame,
                                                m_codecCtx->pix_fmt,
                                                m_codecCtx->width,
                                                m_codecCtx->height) >= 0)
                                        {
                                                input = m_frameDeinterlaced;
                                        }
                                }
                                // convert to RGB24
                                sws_scale(m_imgConvertCtx,
                                        input->data,
                                        input->linesize,
                                        0,
                                        m_codecCtx->height,
                                        m_frameRGB->data,
                                        m_frameRGB->linesize);
                                av_free_packet(&packet);
                                frameLoaded = true;
                                break;
                        }
                }
                av_free_packet(&packet);
        }
        m_eof = m_isFile && !frameLoaded;
        if (frameLoaded)
        {
                m_curPosition = (long)((dts - startTs) * (m_baseFrameRate*timeBase) + 0.5);
                if (m_isThreaded)
                {
                        // normal case for file: first locate, then start cache
                        if (!startCache())
                        {
                                // Abnormal!! could not start cache, return to non-cache mode
                                m_isThreaded = false;
                        }
                }
                return m_frameRGB;
        }
        return NULL;
}


// python methods


// cast Image pointer to VideoFFmpeg
inline VideoFFmpeg * getVideoFFmpeg (PyImage *self)
{ return static_cast<VideoFFmpeg*>(self->m_image); }

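// Illustrative usage of this class from a game Python script (a sketch only:
// the module name comes from tp_name below, while the file name and the
// per-frame texture upload are assumptions about the surrounding game setup):
//   import VideoTexture
//   video = VideoTexture.VideoFFmpeg('//movie.avi')
//   video.repeat = -1   # loop forever
//   video.play()
//   # then call video.refresh() every frame and upload video.image to a texture
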
// object initialization
static int VideoFFmpeg_init(PyObject *pySelf, PyObject *args, PyObject *kwds)
{
        PyImage *self = reinterpret_cast<PyImage*>(pySelf);
        // parameters - video source
        // file name or format type for capture (only for Linux: video4linux or dv1394)
        char *file = NULL;
        // capture device number
        short capt = -1;
        // capture width, only if capt is >= 0
        short width = 0;
        // capture height, only if capt is >= 0
        short height = 0;
        // capture rate, only if capt is >= 0
        float rate = 25.f;

        static const char *kwlist[] = {"file", "capture", "rate", "width", "height", NULL};

        // get parameters
        if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|hfhh",
                const_cast<char**>(kwlist), &file, &capt, &rate, &width, &height))
                return -1;

        try
        {
                // create video object
                Video_init<VideoFFmpeg>(self);

                // set capture parameters
                getVideoFFmpeg(self)->initParams(width, height, rate);

                // open video source
                Video_open(getVideo(self), file, capt);
        }
        catch (Exception & exp)
        {
                exp.report();
                return -1;
        }
        // initialization succeeded
        return 0;
}

static PyObject *VideoFFmpeg_getPreseek(PyImage *self, void *closure)
{
        return Py_BuildValue("h", getFFmpeg(self)->getPreseek());
}

// set preseek
static int VideoFFmpeg_setPreseek(PyImage *self, PyObject *value, void *closure)
{
        // check validity of parameter
        if (value == NULL || !PyLong_Check(value))
        {
                PyErr_SetString(PyExc_TypeError, "The value must be an integer");
                return -1;
        }
        // set preseek
        getFFmpeg(self)->setPreseek(PyLong_AsLong(value));
        // success
        return 0;
}

// get deinterlace
static PyObject *VideoFFmpeg_getDeinterlace(PyImage *self, void *closure)
{
        if (getFFmpeg(self)->getDeinterlace())
                Py_RETURN_TRUE;
        else
                Py_RETURN_FALSE;
}

// set deinterlace
static int VideoFFmpeg_setDeinterlace(PyImage *self, PyObject *value, void *closure)
{
        // check parameter, report failure
        if (value == NULL || !PyBool_Check(value))
        {
                PyErr_SetString(PyExc_TypeError, "The value must be a bool");
                return -1;
        }
        // set deinterlace
        getFFmpeg(self)->setDeinterlace(value == Py_True);
        // success
        return 0;
}

// methods structure
static PyMethodDef videoMethods[] =
{ // methods from VideoBase class
        {"play", (PyCFunction)Video_play, METH_NOARGS, "Play (restart) video"},
        {"pause", (PyCFunction)Video_pause, METH_NOARGS, "Pause video"},
        {"stop", (PyCFunction)Video_stop, METH_NOARGS, "Stop video (play will replay it from start)"},
        {"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh video - get its status"},
        {NULL}
};
// attributes structure
static PyGetSetDef videoGetSets[] =
{ // methods from VideoBase class
        {(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
        {(char*)"range", (getter)Video_getRange, (setter)Video_setRange, (char*)"replay range", NULL},
        {(char*)"repeat", (getter)Video_getRepeat, (setter)Video_setRepeat, (char*)"repeat count, -1 for infinite repeat", NULL},
        {(char*)"framerate", (getter)Video_getFrameRate, (setter)Video_setFrameRate, (char*)"frame rate", NULL},
        // attributes from ImageBase class
        {(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
        {(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
        {(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
        {(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbor)", NULL},
        {(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
        {(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
        {(char*)"preseek", (getter)VideoFFmpeg_getPreseek, (setter)VideoFFmpeg_setPreseek, (char*)"number of frames of preseek", NULL},
        {(char*)"deinterlace", (getter)VideoFFmpeg_getDeinterlace, (setter)VideoFFmpeg_setDeinterlace, (char*)"deinterlace image", NULL},
        {NULL}
};

// python type declaration
PyTypeObject VideoFFmpegType =
{
        PyVarObject_HEAD_INIT(NULL, 0)
        "VideoTexture.VideoFFmpeg",   /*tp_name*/
        sizeof(PyImage),          /*tp_basicsize*/
        0,                         /*tp_itemsize*/
        (destructor)Image_dealloc, /*tp_dealloc*/
        0,                         /*tp_print*/
        0,                         /*tp_getattr*/
        0,                         /*tp_setattr*/
        0,                         /*tp_compare*/
        0,                         /*tp_repr*/
        0,                         /*tp_as_number*/
        0,                         /*tp_as_sequence*/
        0,                         /*tp_as_mapping*/
        0,                         /*tp_hash */
        0,                         /*tp_call*/
        0,                         /*tp_str*/
        0,                         /*tp_getattro*/
        0,                         /*tp_setattro*/
        &imageBufferProcs,         /*tp_as_buffer*/
        Py_TPFLAGS_DEFAULT,        /*tp_flags*/
        "FFmpeg video source",     /* tp_doc */
        0,                         /* tp_traverse */
        0,                         /* tp_clear */
        0,                         /* tp_richcompare */
        0,                         /* tp_weaklistoffset */
        0,                         /* tp_iter */
        0,                         /* tp_iternext */
        videoMethods,              /* tp_methods */
        0,                         /* tp_members */
        videoGetSets,              /* tp_getset */
        0,                         /* tp_base */
        0,                         /* tp_dict */
        0,                         /* tp_descr_get */
        0,                         /* tp_descr_set */
        0,                         /* tp_dictoffset */
        (initproc)VideoFFmpeg_init, /* tp_init */
        0,                         /* tp_alloc */
        Image_allocNew,            /* tp_new */
};

// object initialization
static int ImageFFmpeg_init(PyObject *pySelf, PyObject *args, PyObject *kwds)
{
        PyImage *self = reinterpret_cast<PyImage*>(pySelf);
        // parameters - video source
        // file name or format type for capture (only for Linux: video4linux or dv1394)
        char *file = NULL;

        // get parameters
        if (!PyArg_ParseTuple(args, "s:ImageFFmpeg", &file))
                return -1;

        try
        {
                // create video object
                Video_init<VideoFFmpeg>(self);

                getVideoFFmpeg(self)->initParams(0, 0, 1.0, true);

                // open video source
                Video_open(getVideo(self), file, -1);
        }
        catch (Exception & exp)
        {
                exp.report();
                return -1;
        }
        // initialization succeeded
        return 0;
}

static PyObject *Image_reload(PyImage *self, PyObject *args)
{
        char *newname = NULL;
        if (!PyArg_ParseTuple(args, "|s:reload", &newname))
                return NULL;
        if (self->m_image != NULL)
        {
                VideoFFmpeg *video = getFFmpeg(self);
                // check type of object
                if (!newname)
                        newname = video->getImageName();
                if (!newname) {
                        // if not set, report an error
                        PyErr_SetString(PyExc_RuntimeError, "No image file name given");
                        return NULL;
                }
                // make sure the previous file is cleared
                video->release();
                // open the new file
                video->openFile(newname);
        }
        Py_RETURN_NONE;
}

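// Illustrative usage (a sketch; the file names are placeholders):
//   import VideoTexture
//   img = VideoTexture.ImageFFmpeg('//picture.png')
//   img.refresh()               # load the image
//   img.reload('//other.png')   # switch to another file
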
// methods structure
static PyMethodDef imageMethods[] =
{ // methods from VideoBase class
        {"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh image, i.e. load it"},
        {"reload", (PyCFunction)Image_reload, METH_VARARGS, "Reload image, i.e. reopen it"},
        {NULL}
};
// attributes structure
static PyGetSetDef imageGetSets[] =
{ // methods from VideoBase class
        {(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
        // attributes from ImageBase class
        {(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
        {(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
        {(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
        {(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbor)", NULL},
        {(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
        {(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
        {NULL}
};

// python type declaration
PyTypeObject ImageFFmpegType =
{
        PyVarObject_HEAD_INIT(NULL, 0)
        "VideoTexture.ImageFFmpeg",   /*tp_name*/
        sizeof(PyImage),          /*tp_basicsize*/
        0,                         /*tp_itemsize*/
        (destructor)Image_dealloc, /*tp_dealloc*/
        0,                         /*tp_print*/
        0,                         /*tp_getattr*/
        0,                         /*tp_setattr*/
        0,                         /*tp_compare*/
        0,                         /*tp_repr*/
        0,                         /*tp_as_number*/
        0,                         /*tp_as_sequence*/
        0,                         /*tp_as_mapping*/
        0,                         /*tp_hash */
        0,                         /*tp_call*/
        0,                         /*tp_str*/
        0,                         /*tp_getattro*/
        0,                         /*tp_setattro*/
        &imageBufferProcs,         /*tp_as_buffer*/
        Py_TPFLAGS_DEFAULT,        /*tp_flags*/
        "FFmpeg image source",     /* tp_doc */
        0,                         /* tp_traverse */
        0,                         /* tp_clear */
        0,                         /* tp_richcompare */
        0,                         /* tp_weaklistoffset */
        0,                         /* tp_iter */
        0,                         /* tp_iternext */
        imageMethods,              /* tp_methods */
        0,                         /* tp_members */
        imageGetSets,              /* tp_getset */
        0,                         /* tp_base */
        0,                         /* tp_dict */
        0,                         /* tp_descr_get */
        0,                         /* tp_descr_set */
        0,                         /* tp_dictoffset */
        (initproc)ImageFFmpeg_init, /* tp_init */
        0,                         /* tp_alloc */
        Image_allocNew,            /* tp_new */
};

#endif  // WITH_FFMPEG