ffe06cff1005a6ebe873b25c10ac32a04158d70f
[blender-staging.git] / source / gameengine / VideoTexture / VideoFFmpeg.cpp
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software  Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * Copyright (c) 2007 The Zdeno Ash Miklas
19  *
20  * This source file is part of VideoTexture library
21  *
22  * Contributor(s):
23  *
24  * ***** END GPL LICENSE BLOCK *****
25  */
26
27 /** \file gameengine/VideoTexture/VideoFFmpeg.cpp
28  *  \ingroup bgevideotex
29  */
30
31
32 #ifdef WITH_FFMPEG
33
34 // INT64_C fix for some linux machines (C99ism)
35 #ifndef __STDC_CONSTANT_MACROS
36 #define __STDC_CONSTANT_MACROS
37 #endif
38 #include <stdint.h>
39
40
41 #include "MEM_guardedalloc.h"
42 #include "PIL_time.h"
43
44 #include <string>
45
46 #include "VideoFFmpeg.h"
47 #include "Exception.h"
48
49
// default framerate used when the stream reports an invalid (<= 0) rate
const double defFrameRate = 25.0;
// time scale constant (milliseconds per second)
const long timeScale = 1000;

// macro for exception handling and logging: reports the exception and
// flags the source as being in error so callers can detect the failure
#define CATCH_EXCP catch (Exception & exp) \
{ exp.report(); m_status = SourceError; }
58
// class RenderVideo

// constructor: initializes all FFmpeg handles to NULL and all state to
// "nothing opened yet"; actual stream setup happens in openFile()/openCam()
VideoFFmpeg::VideoFFmpeg (HRESULT * hRslt) : VideoBase(),
m_codec(NULL), m_formatCtx(NULL), m_codecCtx(NULL),
m_frame(NULL), m_frameDeinterlaced(NULL), m_frameRGB(NULL), m_imgConvertCtx(NULL),
m_deinterlace(false), m_preseek(0), m_videoStream(-1), m_baseFrameRate(25.0),
m_lastFrame(-1),  m_eof(false), m_externTime(false), m_curPosition(-1), m_startTime(0),
m_captWidth(0), m_captHeight(0), m_captRate(0.f), m_isImage(false),
m_isThreaded(false), m_isStreaming(false), m_stopThread(false), m_cacheStarted(false)
{
	// set video format
	m_format = RGB24;
	// force flip because ffmpeg always return the image in the wrong orientation for texture
	setFlip(true);
	// construction is OK
	*hRslt = S_OK;
	// zero the thread handle and the four cache queues (frame free/busy,
	// packet free/busy) used by the asynchronous cache thread
	BLI_listbase_clear(&m_thread);
	pthread_mutex_init(&m_cacheMutex, NULL);
	BLI_listbase_clear(&m_frameCacheFree);
	BLI_listbase_clear(&m_frameCacheBase);
	BLI_listbase_clear(&m_packetCacheFree);
	BLI_listbase_clear(&m_packetCacheBase);
}
83
// destructor
VideoFFmpeg::~VideoFFmpeg ()
{
	// NOTE(review): intentionally empty — FFmpeg resources are freed in
	// release(); presumably the owner calls release() before destruction,
	// confirm against the base class / caller.
}
88
89 void VideoFFmpeg::refresh(void)
90 {
91     // a fixed image will not refresh because it is loaded only once at creation
92     if (m_isImage)
93         return;
94     m_avail = false;
95 }
96
// release components: stop the cache thread, close the decoder and the
// container, and free every frame/conversion buffer. Safe to call multiple
// times — every pointer is NULL-checked and reset to NULL after freeing.
bool VideoFFmpeg::release()
{
	// release
	stopCache();
	if (m_codecCtx)
	{
		avcodec_close(m_codecCtx);
		m_codecCtx = NULL;
	}
	if (m_formatCtx)
	{
		avformat_close_input(&m_formatCtx);
		m_formatCtx = NULL;
	}
	if (m_frame)
	{
		av_free(m_frame);
		m_frame = NULL;
	}
	// m_frameDeinterlaced/m_frameRGB own a pixel buffer allocated with
	// MEM_callocN (see openStream/allocFrameRGB), so the buffer is freed
	// with MEM_freeN before the AVFrame itself
	if (m_frameDeinterlaced)
	{
		MEM_freeN(m_frameDeinterlaced->data[0]);
		av_free(m_frameDeinterlaced);
		m_frameDeinterlaced = NULL;
	}
	if (m_frameRGB)
	{
		MEM_freeN(m_frameRGB->data[0]);
		av_free(m_frameRGB);
		m_frameRGB = NULL;
	}
	if (m_imgConvertCtx)
	{
		sws_freeContext(m_imgConvertCtx);
		m_imgConvertCtx = NULL;
	}
	m_codec = NULL;
	// mark the source stopped and force a fresh start on the next play()
	m_status = SourceStopped;
	m_lastFrame = -1;
	return true;
}
139
140 AVFrame *VideoFFmpeg::allocFrameRGB()
141 {
142         AVFrame *frame;
143         frame = avcodec_alloc_frame();
144         if (m_format == RGBA32)
145         {
146                 avpicture_fill((AVPicture*)frame, 
147                         (uint8_t*)MEM_callocN(avpicture_get_size(
148                                 PIX_FMT_RGBA,
149                                 m_codecCtx->width, m_codecCtx->height),
150                                 "ffmpeg rgba"),
151                         PIX_FMT_RGBA, m_codecCtx->width, m_codecCtx->height);
152         } else 
153         {
154                 avpicture_fill((AVPicture*)frame, 
155                         (uint8_t*)MEM_callocN(avpicture_get_size(
156                                 PIX_FMT_RGB24,
157                                 m_codecCtx->width, m_codecCtx->height),
158                                 "ffmpeg rgb"),
159                         PIX_FMT_RGB24, m_codecCtx->width, m_codecCtx->height);
160         }
161         return frame;
162 }
163
164 // set initial parameters
165 void VideoFFmpeg::initParams (short width, short height, float rate, bool image)
166 {
167         m_captWidth = width;
168         m_captHeight = height;
169         m_captRate = rate;
170         m_isImage = image;
171 }
172
173
174 int VideoFFmpeg::openStream(const char *filename, AVInputFormat *inputFormat, AVDictionary **formatParams)
175 {
176         AVFormatContext *formatCtx = NULL;
177         int                             i, videoStream;
178         AVCodec                 *codec;
179         AVCodecContext  *codecCtx;
180
181         if (avformat_open_input(&formatCtx, filename, inputFormat, formatParams)!=0)
182                 return -1;
183
184         if (avformat_find_stream_info(formatCtx, NULL) < 0)
185         {
186                 avformat_close_input(&formatCtx);
187                 return -1;
188         }
189
190         /* Find the first video stream */
191         videoStream=-1;
192         for (i=0; i<formatCtx->nb_streams; i++)
193         {
194                 if (formatCtx->streams[i] &&
195                         get_codec_from_stream(formatCtx->streams[i]) && 
196                         (get_codec_from_stream(formatCtx->streams[i])->codec_type==AVMEDIA_TYPE_VIDEO))
197                 {
198                         videoStream=i;
199                         break;
200                 }
201         }
202
203         if (videoStream==-1) 
204         {
205                 avformat_close_input(&formatCtx);
206                 return -1;
207         }
208
209         codecCtx = get_codec_from_stream(formatCtx->streams[videoStream]);
210
211         /* Find the decoder for the video stream */
212         codec=avcodec_find_decoder(codecCtx->codec_id);
213         if (codec==NULL) 
214         {
215                 avformat_close_input(&formatCtx);
216                 return -1;
217         }
218         codecCtx->workaround_bugs = 1;
219         if (avcodec_open2(codecCtx, codec, NULL) < 0)
220         {
221                 avformat_close_input(&formatCtx);
222                 return -1;
223         }
224
225 #ifdef FFMPEG_OLD_FRAME_RATE
226         if (codecCtx->frame_rate>1000 && codecCtx->frame_rate_base==1)
227                 codecCtx->frame_rate_base=1000;
228         m_baseFrameRate = (double)codecCtx->frame_rate / (double)codecCtx->frame_rate_base;
229 #else
230         m_baseFrameRate = av_q2d(av_get_r_frame_rate_compat(formatCtx->streams[videoStream]));
231 #endif
232         if (m_baseFrameRate <= 0.0) 
233                 m_baseFrameRate = defFrameRate;
234
235         m_codec = codec;
236         m_codecCtx = codecCtx;
237         m_formatCtx = formatCtx;
238         m_videoStream = videoStream;
239         m_frame = avcodec_alloc_frame();
240         m_frameDeinterlaced = avcodec_alloc_frame();
241
242         // allocate buffer if deinterlacing is required
243         avpicture_fill((AVPicture*)m_frameDeinterlaced, 
244                 (uint8_t*)MEM_callocN(avpicture_get_size(
245                 m_codecCtx->pix_fmt,
246                 m_codecCtx->width, m_codecCtx->height), 
247                 "ffmpeg deinterlace"), 
248                 m_codecCtx->pix_fmt, m_codecCtx->width, m_codecCtx->height);
249
250         // check if the pixel format supports Alpha
251         if (m_codecCtx->pix_fmt == PIX_FMT_RGB32 ||
252                 m_codecCtx->pix_fmt == PIX_FMT_BGR32 ||
253                 m_codecCtx->pix_fmt == PIX_FMT_RGB32_1 ||
254                 m_codecCtx->pix_fmt == PIX_FMT_BGR32_1) 
255         {
256                 // allocate buffer to store final decoded frame
257                 m_format = RGBA32;
258                 // allocate sws context
259                 m_imgConvertCtx = sws_getContext(
260                         m_codecCtx->width,
261                         m_codecCtx->height,
262                         m_codecCtx->pix_fmt,
263                         m_codecCtx->width,
264                         m_codecCtx->height,
265                         PIX_FMT_RGBA,
266                         SWS_FAST_BILINEAR,
267                         NULL, NULL, NULL);
268         } else
269         {
270                 // allocate buffer to store final decoded frame
271                 m_format = RGB24;
272                 // allocate sws context
273                 m_imgConvertCtx = sws_getContext(
274                         m_codecCtx->width,
275                         m_codecCtx->height,
276                         m_codecCtx->pix_fmt,
277                         m_codecCtx->width,
278                         m_codecCtx->height,
279                         PIX_FMT_RGB24,
280                         SWS_FAST_BILINEAR,
281                         NULL, NULL, NULL);
282         }
283         m_frameRGB = allocFrameRGB();
284
285         if (!m_imgConvertCtx) {
286                 avcodec_close(m_codecCtx);
287                 m_codecCtx = NULL;
288                 avformat_close_input(&m_formatCtx);
289                 m_formatCtx = NULL;
290                 av_free(m_frame);
291                 m_frame = NULL;
292                 MEM_freeN(m_frameDeinterlaced->data[0]);
293                 av_free(m_frameDeinterlaced);
294                 m_frameDeinterlaced = NULL;
295                 MEM_freeN(m_frameRGB->data[0]);
296                 av_free(m_frameRGB);
297                 m_frameRGB = NULL;
298                 return -1;
299         }
300         return 0;
301 }
302
/*
 * This thread is used to load video frame asynchronously.
 * It provides a frame caching service.
 * The main thread is responsible for positioning the frame pointer in the
 * file correctly before calling startCache() which starts this thread.
 * The cache is organized in two layers: 1) a cache of 20-30 undecoded packets to keep
 * memory and CPU low 2) a cache of 5 decoded frames.
 * If the main thread does not find the frame in the cache (because the video has restarted
 * or because the GE is lagging), it stops the cache with StopCache() (this is a synchronous
 * function: it sends a signal to stop the cache thread and wait for confirmation), then
 * change the position in the stream and restarts the cache thread.
 *
 * Locking policy: the packet queues are touched only by this thread and
 * need no lock; the frame queues are shared with the main thread and every
 * access is protected by m_cacheMutex.
 */
void *VideoFFmpeg::cacheThread(void *data)
{
	VideoFFmpeg* video = (VideoFFmpeg*)data;
	// holds the frame that is being decoded
	CacheFrame *currentFrame = NULL;
	CachePacket *cachePacket;
	bool endOfFile = false;
	int frameFinished = 0;
	// seconds per stream time unit; combined with m_baseFrameRate below to
	// convert a packet dts into a frame index
	double timeBase = av_q2d(video->m_formatCtx->streams[video->m_videoStream]->time_base);
	int64_t startTs = video->m_formatCtx->streams[video->m_videoStream]->start_time;

	// streams that do not report a start time are treated as starting at 0
	if (startTs == AV_NOPTS_VALUE)
		startTs = 0;

	while (!video->m_stopThread)
	{
		// packet cache is used solely by this thread, no need to lock
		// In case the stream/file contains other stream than the one we are looking for,
		// allow a bit of cycling to get rid quickly of those frames
		frameFinished = 0;
		while (    !endOfFile 
				&& (cachePacket = (CachePacket *)video->m_packetCacheFree.first) != NULL 
				&& frameFinished < 25)
		{
			// free packet => packet cache is not full yet, just read more
			if (av_read_frame(video->m_formatCtx, &cachePacket->packet)>=0) 
			{
				if (cachePacket->packet.stream_index == video->m_videoStream)
				{
					// make sure fresh memory is allocated for the packet and move it to queue
					av_dup_packet(&cachePacket->packet);
					BLI_remlink(&video->m_packetCacheFree, cachePacket);
					BLI_addtail(&video->m_packetCacheBase, cachePacket);
					break;
				} else {
					// this is not a good packet for us, just leave it on free queue
					// Note: here we could handle sound packet
					av_free_packet(&cachePacket->packet);
					frameFinished++;
				}
				
			} else {
				if (video->m_isFile)
					// this mark the end of the file
					endOfFile = true;
				// if we cannot read a packet, no need to continue
				break;
			}
		}
		// frame cache is also used by main thread, lock
		if (currentFrame == NULL) 
		{
			// no current frame being decoded, take free one
			pthread_mutex_lock(&video->m_cacheMutex);
			if ((currentFrame = (CacheFrame *)video->m_frameCacheFree.first) != NULL)
				BLI_remlink(&video->m_frameCacheFree, currentFrame);
			pthread_mutex_unlock(&video->m_cacheMutex);
		}
		if (currentFrame != NULL)
		{
			// this frame is out of free and busy queue, we can manipulate it without locking
			frameFinished = 0;
			while (!frameFinished && (cachePacket = (CachePacket *)video->m_packetCacheBase.first) != NULL)
			{
				BLI_remlink(&video->m_packetCacheBase, cachePacket);
				// use m_frame because when caching, it is not used in main thread
				// we can't use currentFrame directly because we need to convert to RGB first
				avcodec_decode_video2(video->m_codecCtx, 
					video->m_frame, &frameFinished, 
					&cachePacket->packet);
				if (frameFinished) 
				{
					AVFrame * input = video->m_frame;

					/* This means the data wasn't read properly, this check stops crashing */
					if (   input->data[0]!=0 || input->data[1]!=0 
						|| input->data[2]!=0 || input->data[3]!=0)
					{
						if (video->m_deinterlace) 
						{
							// deinterlace in place; on failure fall back to
							// the raw decoded frame
							if (avpicture_deinterlace(
								(AVPicture*) video->m_frameDeinterlaced,
								(const AVPicture*) video->m_frame,
								video->m_codecCtx->pix_fmt,
								video->m_codecCtx->width,
								video->m_codecCtx->height) >= 0)
							{
								input = video->m_frameDeinterlaced;
							}
						}
						// convert to RGB24
						sws_scale(video->m_imgConvertCtx,
							input->data,
							input->linesize,
							0,
							video->m_codecCtx->height,
							currentFrame->frame->data,
							currentFrame->frame->linesize);
						// move frame to queue, this frame is necessarily the next one
						video->m_curPosition = (long)((cachePacket->packet.dts-startTs) * (video->m_baseFrameRate*timeBase) + 0.5);
						currentFrame->framePosition = video->m_curPosition;
						pthread_mutex_lock(&video->m_cacheMutex);
						BLI_addtail(&video->m_frameCacheBase, currentFrame);
						pthread_mutex_unlock(&video->m_cacheMutex);
						currentFrame = NULL;
					}
				}
				av_free_packet(&cachePacket->packet);
				BLI_addtail(&video->m_packetCacheFree, cachePacket);
			} 
			if (currentFrame && endOfFile) 
			{
				// no more packet and end of file => put a special frame that indicates that
				currentFrame->framePosition = -1;
				pthread_mutex_lock(&video->m_cacheMutex);
				BLI_addtail(&video->m_frameCacheBase, currentFrame);
				pthread_mutex_unlock(&video->m_cacheMutex);
				currentFrame = NULL;
				// no need to stay any longer in this thread
				break;
			}
		}
		// small sleep to avoid unnecessary looping
		PIL_sleep_ms(10);
	}
	// before quitting, put back the current frame to queue to allow freeing
	if (currentFrame)
	{
		pthread_mutex_lock(&video->m_cacheMutex);
		BLI_addtail(&video->m_frameCacheFree, currentFrame);
		pthread_mutex_unlock(&video->m_cacheMutex);
	}
	return 0;
}
449
450 // start thread to cache video frame from file/capture/stream
451 // this function should be called only when the position in the stream is set for the
452 // first frame to cache
453 bool VideoFFmpeg::startCache()
454 {
455         if (!m_cacheStarted && m_isThreaded)
456         {
457                 m_stopThread = false;
458                 for (int i=0; i<CACHE_FRAME_SIZE; i++)
459                 {
460                         CacheFrame *frame = new CacheFrame();
461                         frame->frame = allocFrameRGB();
462                         BLI_addtail(&m_frameCacheFree, frame);
463                 }
464                 for (int i=0; i<CACHE_PACKET_SIZE; i++) 
465                 {
466                         CachePacket *packet = new CachePacket();
467                         BLI_addtail(&m_packetCacheFree, packet);
468                 }
469                 BLI_init_threads(&m_thread, cacheThread, 1);
470                 BLI_insert_thread(&m_thread, this);
471                 m_cacheStarted = true;
472         }
473         return m_cacheStarted;
474 }
475
476 void VideoFFmpeg::stopCache()
477 {
478         if (m_cacheStarted)
479         {
480                 m_stopThread = true;
481                 BLI_end_threads(&m_thread);
482                 // now delete the cache
483                 CacheFrame *frame;
484                 CachePacket *packet;
485                 while ((frame = (CacheFrame *)m_frameCacheBase.first) != NULL)
486                 {
487                         BLI_remlink(&m_frameCacheBase, frame);
488                         MEM_freeN(frame->frame->data[0]);
489                         av_free(frame->frame);
490                         delete frame;
491                 }
492                 while ((frame = (CacheFrame *)m_frameCacheFree.first) != NULL)
493                 {
494                         BLI_remlink(&m_frameCacheFree, frame);
495                         MEM_freeN(frame->frame->data[0]);
496                         av_free(frame->frame);
497                         delete frame;
498                 }
499                 while ((packet = (CachePacket *)m_packetCacheBase.first) != NULL)
500                 {
501                         BLI_remlink(&m_packetCacheBase, packet);
502                         av_free_packet(&packet->packet);
503                         delete packet;
504                 }
505                 while ((packet = (CachePacket *)m_packetCacheFree.first) != NULL)
506                 {
507                         BLI_remlink(&m_packetCacheFree, packet);
508                         delete packet;
509                 }
510                 m_cacheStarted = false;
511         }
512 }
513
514 void VideoFFmpeg::releaseFrame(AVFrame *frame)
515 {
516         if (frame == m_frameRGB)
517         {
518                 // this is not a frame from the cache, ignore
519                 return;
520         }
521         // this frame MUST be the first one of the queue
522         pthread_mutex_lock(&m_cacheMutex);
523         CacheFrame *cacheFrame = (CacheFrame *)m_frameCacheBase.first;
524         assert (cacheFrame != NULL && cacheFrame->frame == frame);
525         BLI_remlink(&m_frameCacheBase, cacheFrame);
526         BLI_addtail(&m_frameCacheFree, cacheFrame);
527         pthread_mutex_unlock(&m_cacheMutex);
528 }
529
530 // open video file
531 void VideoFFmpeg::openFile (char *filename)
532 {
533         if (openStream(filename, NULL, NULL) != 0)
534                 return;
535
536         if (m_codecCtx->gop_size)
537                 m_preseek = (m_codecCtx->gop_size < 25) ? m_codecCtx->gop_size+1 : 25;
538         else if (m_codecCtx->has_b_frames)
539                 m_preseek = 25; // should determine gopsize
540         else
541                 m_preseek = 0;
542
543         // get video time range
544         m_range[0] = 0.0;
545         m_range[1] = (double)m_formatCtx->duration / AV_TIME_BASE;
546
547         // open base class
548         VideoBase::openFile(filename);
549
550         if (
551                 // ffmpeg reports that http source are actually non stream
552                 // but it is really not desirable to seek on http file, so force streaming.
553                 // It would be good to find this information from the context but there are no simple indication
554                 !strncmp(filename, "http://", 7) ||
555                 !strncmp(filename, "rtsp://", 7) ||
556                 (m_formatCtx->pb && !m_formatCtx->pb->seekable)
557                 )
558         {
559                 // the file is in fact a streaming source, treat as cam to prevent seeking
560                 m_isFile = false;
561                 // but it's not handled exactly like a camera.
562                 m_isStreaming = true;
563                 // for streaming it is important to do non blocking read
564                 m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
565         }
566
567         if (m_isImage) 
568         {
569                 // the file is to be treated as an image, i.e. load the first frame only
570                 m_isFile = false;
571                 // in case of reload, the filename is taken from m_imageName, no need to change it
572                 if (m_imageName.Ptr() != filename)
573                         m_imageName = filename;
574                 m_preseek = 0;
575                 m_avail = false;
576                 play();
577         }
578         // check if we should do multi-threading?
579         if (!m_isImage && BLI_system_thread_count() > 1)
580         {
581                 // never thread image: there are no frame to read ahead
582                 // no need to thread if the system has a single core
583                 m_isThreaded =  true;
584         }
585 }
586
587
// open video capture device: resolves the platform input format (VFW on
// Windows, v4l/v4l2/dv1394 on Linux), builds the device path from camIdx
// and the optional user-provided file string, passes capture rate/size as
// demuxer options and finally opens the stream.
void VideoFFmpeg::openCam (char *file, short camIdx)
{
	// open camera source
	AVInputFormat		*inputFormat;
	AVDictionary		*formatParams = NULL;
	char				filename[28], rateStr[20];

#ifdef WIN32
	// video capture on windows only through Video For Windows driver
	inputFormat = av_find_input_format("vfwcap");
	if (!inputFormat)
		// Video For Windows not supported??
		return;
	sprintf(filename, "%d", camIdx);
#else
	// In Linux we support two types of devices: VideoForLinux and DV1394. 
	// the user specify it with the filename:
	// [<device_type>][:<standard>]
	// <device_type> : 'v4l' for VideoForLinux, 'dv1394' for DV1394. By default 'v4l'
	// <standard>    : 'pal', 'secam' or 'ntsc'. By default 'ntsc'
	// The driver name is constructed automatically from the device type:
	// v4l   : /dev/video<camIdx>
	// dv1394: /dev/dv1394/<camIdx>
	// If you have different driver name, you can specify the driver name explicitly
	// instead of device type. Examples of valid filename:
	//    /dev/v4l/video0:pal
	//    /dev/ieee1394/1:ntsc
	//    dv1394:secam
	//    v4l:pal
	char *p;

	if (file && strstr(file, "1394") != NULL) 
	{
		// the user specifies a driver, check if it is v4l or d41394
		inputFormat = av_find_input_format("dv1394");
		sprintf(filename, "/dev/dv1394/%d", camIdx);
	} else 
	{
		// try the v4l2/v4l input format names in order of preference until
		// one is available in this FFmpeg build
		const char *formats[] = {"video4linux2,v4l2", "video4linux2", "video4linux"};
		int i, formatsCount = sizeof(formats) / sizeof(char*);
		for (i = 0; i < formatsCount; i++) {
			inputFormat = av_find_input_format(formats[i]);
			if (inputFormat)
				break;
		}
		sprintf(filename, "/dev/video%d", camIdx);
	}
	if (!inputFormat)
		// these format should be supported, check ffmpeg compilation
		return;
	if (file && strncmp(file, "/dev", 4) == 0) 
	{
		// user does not specify a driver
		// use the explicit device path, stripping any ':<standard>' suffix
		strncpy(filename, file, sizeof(filename));
		filename[sizeof(filename)-1] = 0;
		if ((p = strchr(filename, ':')) != 0)
			*p = 0;
	}
	if (file && (p = strchr(file, ':')) != NULL) {
		// text after ':' selects the video standard (pal/secam/ntsc)
		av_dict_set(&formatParams, "standard", p+1, 0);
	}
#endif
	//frame rate
	if (m_captRate <= 0.f)
		m_captRate = defFrameRate;
	sprintf(rateStr, "%f", m_captRate);

	av_dict_set(&formatParams, "framerate", rateStr, 0);

	// request an explicit capture size only when one was set via initParams
	if (m_captWidth > 0 && m_captHeight > 0) {
		char video_size[64];
		BLI_snprintf(video_size, sizeof(video_size), "%dx%d", m_captWidth, m_captHeight);
		av_dict_set(&formatParams, "video_size", video_size, 0);
	}

	if (openStream(filename, inputFormat, &formatParams) != 0)
		return;

	// for video capture it is important to do non blocking read
	m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
	// open base class
	VideoBase::openCam(file, camIdx);
	// check if we should do multi-threading?
	if (BLI_system_thread_count() > 1)
	{
		// no need to thread if the system has a single core
		m_isThreaded =  true;
	}

	av_dict_free(&formatParams);
}
680
681 // play video
682 bool VideoFFmpeg::play (void)
683 {
684         try
685         {
686                 // if object is able to play
687                 if (VideoBase::play())
688                 {
689                         // set video position
690                         setPositions();
691
692                         if (m_isStreaming)
693                         {
694                                 av_read_play(m_formatCtx);
695                         }
696
697                         // return success
698                         return true;
699                 }
700         }
701         CATCH_EXCP;
702         return false;
703 }
704
705
706 // pause video
707 bool VideoFFmpeg::pause (void)
708 {
709         try
710         {
711                 if (VideoBase::pause())
712                 {
713                         if (m_isStreaming)
714                         {
715                                 av_read_pause(m_formatCtx);
716                         }
717                         return true;
718                 }
719         }
720         CATCH_EXCP;
721         return false;
722 }
723
724 // stop video
725 bool VideoFFmpeg::stop (void)
726 {
727         try
728         {
729                 VideoBase::stop();
730                 // force restart when play
731                 m_lastFrame = -1;
732                 return true;
733         }
734         CATCH_EXCP;
735         return false;
736 }
737
738
739 // set video range
740 void VideoFFmpeg::setRange (double start, double stop)
741 {
742         try
743         {
744                 // set range
745                 if (m_isFile)
746                 {
747                         VideoBase::setRange(start, stop);
748                         // set range for video
749                         setPositions();
750                 }
751         }
752         CATCH_EXCP;
753 }
754
// set framerate
// Thin wrapper: no FFmpeg-specific work is needed, the base class
// stores the rate.
void VideoFFmpeg::setFrameRate (float rate)
{
	VideoBase::setFrameRate(rate);
}
760
761
// image calculation
// load frame from video
// Decode and process the frame corresponding to the current playback time.
// texId is unused in this implementation; ts is a timestamp in seconds
// coming from audio actuators (used to keep video in sync with sound),
// or a negative value when no external timestamp is available.
void VideoFFmpeg::calcImage (unsigned int texId, double ts)
{
	if (m_status == SourcePlaying)
	{
		// get actual time
		double startTime = PIL_check_seconds_timer();
		double actTime;
		// timestamp passed from audio actuators can sometimes be slightly negative
		if (m_isFile && ts >= -0.5)
		{
			// allow setting timestamp only when not streaming
			actTime = ts;
			if (actTime * actFrameRate() < m_lastFrame) 
			{
				// user is asking to rewind, force a cache clear to make sure we will do a seek
				// note that this does not decrement m_repeat if ts didn't reach m_range[1]
				stopCache();
			}
		}
		else
		{
			// no usable timestamp: derive playback time from the wall clock.
			// For streams (not files), the very first frame anchors the clock.
			if (m_lastFrame == -1 && !m_isFile)
				m_startTime = startTime;
			actTime = startTime - m_startTime;
		}
		// if video has ended
		if (m_isFile && actTime * m_frameRate >= m_range[1])
		{
			// in any case, this resets the cache
			stopCache();
			// if repeats are set, decrease them
			if (m_repeat > 0) 
				--m_repeat;
			// if video has to be replayed (m_repeat < 0 means infinite repeat)
			if (m_repeat != 0)
			{
				// reset its position: shift both the playback time and the
				// clock anchor back by one full range duration
				actTime -= (m_range[1] - m_range[0]) / m_frameRate;
				m_startTime += (m_range[1] - m_range[0]) / m_frameRate;
			}
			// if video has to be stopped, stop it
			else 
			{
				m_status = SourceStopped;
				return;
			}
		}
		// actual frame: a still image simply advances one frame per call,
		// a video maps playback time to a frame number
		long actFrame = (m_isImage) ? m_lastFrame+1 : long(actTime * actFrameRate());
		// if actual frame differs from last frame
		if (actFrame != m_lastFrame)
		{
			AVFrame* frame;
			// get image
			if ((frame = grabFrame(actFrame)) != NULL)
			{
				if (!m_isFile && !m_cacheStarted) 
				{
					// streaming without cache: detect synchronization problem
					double execTime = PIL_check_seconds_timer() - startTime;
					// 5ms threshold: beyond that, grabFrame() is assumed to
					// have blocked on the network
					if (execTime > 0.005) 
					{
						// exec time is too long, it means that the function was blocking
						// resynchronize the stream from this time
						m_startTime += execTime;
					}
				}
				// save actual frame
				m_lastFrame = actFrame;
				// init image, if needed
				init(short(m_codecCtx->width), short(m_codecCtx->height));
				// process image
				process((BYTE*)(frame->data[0]));
				// finished with the frame, release it so that cache can reuse it
				releaseFrame(frame);
				// in case it is an image, automatically stop reading it
				if (m_isImage)
				{
					m_status = SourceStopped;
					// close the file as we don't need it anymore
					release();
				}
			} else if (m_isStreaming)
			{
				// we didn't get a frame and we are streaming, this may be due to
				// a delay in the network or because we are getting the frame too fast.
				// In the later case, shift time by a small amount to compensate for a drift
				m_startTime += 0.001;
			}
		}
	}
}
856
857
858 // set actual position
859 void VideoFFmpeg::setPositions (void)
860 {
861         // set video start time
862         m_startTime = PIL_check_seconds_timer();
863         // if file is played and actual position is before end position
864         if (!m_eof && m_lastFrame >= 0 && (!m_isFile || m_lastFrame < m_range[1] * actFrameRate()))
865                 // continue from actual position
866                 m_startTime -= double(m_lastFrame) / actFrameRate();
867         else {
868                 m_startTime -= m_range[0];
869                 // start from beginning, stop cache just in case
870                 stopCache();
871         }
872 }
873
// position pointer in file, position in second
// Return the decoded RGB frame for the requested frame position, or NULL
// if no frame is available. `position` is a frame number (it is converted
// to a timestamp via m_baseFrameRate and the stream time base below).
// When the cache thread is running, the frame comes from the cache queue
// (release it with releaseFrame()); otherwise the file/stream is read
// directly and m_frameRGB is returned. On direct read, a seek is issued
// when the requested position is not the next sequential frame.
AVFrame *VideoFFmpeg::grabFrame(long position)
{
	AVPacket packet;
	int frameFinished;
	int posFound = 1;
	bool frameLoaded = false;
	int64_t targetTs = 0;
	CacheFrame *frame;
	int64_t dts = 0;

	if (m_cacheStarted)
	{
		// when cache is active, we must not read the file directly
		do {
			// only peek at the head under the lock; the cache thread
			// appends at the tail
			pthread_mutex_lock(&m_cacheMutex);
			frame = (CacheFrame *)m_frameCacheBase.first;
			pthread_mutex_unlock(&m_cacheMutex);
			// no need to remove the frame from the queue: the cache thread does not touch the head, only the tail
			if (frame == NULL)
			{
				// no frame in cache, in case of file it is an abnormal situation
				if (m_isFile)
				{
					// go back to no threaded reading
					stopCache();
					break;
				}
				return NULL;
			}
			if (frame->framePosition == -1) 
			{
				// this frame marks the end of the file (only used for file)
				// leave it in cache to make sure we don't miss it
				m_eof = true;
				return NULL;
			}
			// for streaming, always return the next frame, 
			// that's what grabFrame does in non cache mode anyway.
			if (m_isStreaming || frame->framePosition == position)
			{
				return frame->frame;
			}
			// for cam, skip old frames to keep image realtime.
			// There should be no risk of clock drift since it all happens on the same CPU
			if (frame->framePosition > position) 
			{
				// this can happen after rewind if the seek didn't find the first frame
				// the frame in the buffer is ahead of time, just leave it there
				return NULL;
			}
			// this frame is not useful, release it back to the free list
			pthread_mutex_lock(&m_cacheMutex);
			BLI_remlink(&m_frameCacheBase, frame);
			BLI_addtail(&m_frameCacheFree, frame);
			pthread_mutex_unlock(&m_cacheMutex);
		} while (true);
	}
	// conversion factors between frame numbers and stream timestamps
	double timeBase = av_q2d(m_formatCtx->streams[m_videoStream]->time_base);
	int64_t startTs = m_formatCtx->streams[m_videoStream]->start_time;
	if (startTs == AV_NOPTS_VALUE)
		startTs = 0;

	// come here when there is no cache or cache has been stopped
	// locate the frame, by seeking if necessary (seeking is only possible for files)
	if (m_isFile)
	{
		// first check if the position that we are looking for is in the preseek range
		// if so, just read the frame until we get there
		if (position > m_curPosition + 1 
			&& m_preseek 
			&& position - (m_curPosition + 1) < m_preseek) 
		{
			while (av_read_frame(m_formatCtx, &packet)>=0)
			{
				if (packet.stream_index == m_videoStream) 
				{
					avcodec_decode_video2(
						m_codecCtx, 
						m_frame, &frameFinished, 
						&packet);
					if (frameFinished)
					{
						// convert the packet dts back to a frame number
						m_curPosition = (long)((packet.dts-startTs) * (m_baseFrameRate*timeBase) + 0.5);
					}
				}
				av_free_packet(&packet);
				if (position == m_curPosition+1)
					break;
			}
		}
		// if the position is not in preseek, do a direct jump
		if (position != m_curPosition + 1) 
		{ 
			// seek m_preseek frames before the target so the decoder can
			// resynchronize on a keyframe before the wanted frame
			int64_t pos = (int64_t)((position - m_preseek) / (m_baseFrameRate*timeBase));

			if (pos < 0)
				pos = 0;

			pos += startTs;

			if (position <= m_curPosition || !m_eof)
			{
#if 0
				// Tried to make this work but couldn't: seeking on byte is ignored by the
				// format plugin and it will generally continue to read from last timestamp.
				// Too bad because frame seek is not always able to get the first frame
				// of the file.
				if (position <= m_preseek)
				{
					// we can safely go the beginning of the file
					if (av_seek_frame(m_formatCtx, m_videoStream, 0, AVSEEK_FLAG_BYTE) >= 0)
					{
						// binary seek does not reset the timestamp, must do it now
						av_update_cur_dts(m_formatCtx, m_formatCtx->streams[m_videoStream], startTs);
						m_curPosition = 0;
					}
				}
				else
#endif
				{
					// current position is now lost, guess a value. 
					if (av_seek_frame(m_formatCtx, m_videoStream, pos, AVSEEK_FLAG_BACKWARD) >= 0)
					{
						// current position is now lost, guess a value. 
						// It's not important because it will be set at this end of this function
						m_curPosition = position - m_preseek - 1;
					}
				}
			}
			// this is the timestamp of the frame we're looking for
			targetTs = (int64_t)(position / (m_baseFrameRate * timeBase)) + startTs;

			// posFound == 0 means: decode and discard until dts reaches targetTs
			posFound = 0;
			avcodec_flush_buffers(m_codecCtx);
		}
	} else if (m_isThreaded)
	{
		// cache is not started but threading is possible
		// better not read the stream => make take some time, better start caching
		if (startCache())
			return NULL;
		// Abnormal!!! could not start cache, fall back on direct read
		m_isThreaded = false;
	}

	// find the correct frame, in case of streaming and no cache, it means just
	// return the next frame. This is not quite correct, may need more work
	while (av_read_frame(m_formatCtx, &packet) >= 0)
	{
		if (packet.stream_index == m_videoStream) 
		{
			AVFrame *input = m_frame;
			short counter = 0;

			/* While the data is not read properly (png, tiffs, etc formats may need several pass)*/
			// NOTE(review): if m_frame->data[] is already non-null from a
			// previous packet this loop is skipped and frameFinished keeps
			// its value from the previous iteration (uninitialized on the
			// very first one) — looks fragile, confirm before relying on it
			while ((input->data[0] == 0 && input->data[1] == 0 && input->data[2] == 0 && input->data[3] == 0) && counter < 10) {
				avcodec_decode_video2(m_codecCtx, m_frame, &frameFinished, &packet);
				counter++;
			}

			// remember dts to compute exact frame number
			dts = packet.dts;
			if (frameFinished && !posFound) 
			{
				if (dts >= targetTs)
				{
					posFound = 1;
				}
			} 

			if (frameFinished && posFound == 1) 
			{
				// NOTE(review): this declaration shadows the outer `input`
				// above; both start as m_frame so behavior is unchanged
				AVFrame * input = m_frame;

				/* This means the data wasnt read properly, 
				 * this check stops crashing */
				if (   input->data[0]==0 && input->data[1]==0 
					&& input->data[2]==0 && input->data[3]==0)
				{
					av_free_packet(&packet);
					break;
				}

				if (m_deinterlace) 
				{
					// feed the deinterlaced picture to sws_scale instead of
					// the raw frame; on failure, fall back to the raw frame
					if (avpicture_deinterlace(
						(AVPicture*) m_frameDeinterlaced,
						(const AVPicture*) m_frame,
						m_codecCtx->pix_fmt,
						m_codecCtx->width,
						m_codecCtx->height) >= 0)
					{
						input = m_frameDeinterlaced;
					}
				}
				// convert to RGB24
				sws_scale(m_imgConvertCtx,
					input->data,
					input->linesize,
					0,
					m_codecCtx->height,
					m_frameRGB->data,
					m_frameRGB->linesize);
				av_free_packet(&packet);
				frameLoaded = true;
				break;
			}
		}
		av_free_packet(&packet);
	}
	// for files, running out of packets without a frame means end of file
	m_eof = m_isFile && !frameLoaded;
	if (frameLoaded)
	{
		// update the current position from the dts of the loaded frame
		m_curPosition = (long)((dts-startTs) * (m_baseFrameRate*timeBase) + 0.5);
		if (m_isThreaded)
		{
			// normal case for file: first locate, then start cache
			if (!startCache())
			{
				// Abnormal!! could not start cache, return to non-cache mode
				m_isThreaded = false;
			}
		}
		return m_frameRGB;
	}
	return NULL;
}
1102
1103
1104 // python methods
1105
1106
// cast Image pointer to VideoFFmpeg
// Caller must guarantee that self->m_image actually holds a VideoFFmpeg
// instance; no runtime check is performed.
inline VideoFFmpeg * getVideoFFmpeg (PyImage *self)
{ return static_cast<VideoFFmpeg*>(self->m_image); }
1110
1111
1112 // object initialization
1113 static int VideoFFmpeg_init(PyObject *pySelf, PyObject *args, PyObject *kwds)
1114 {
1115         PyImage *self = reinterpret_cast<PyImage*>(pySelf);
1116         // parameters - video source
1117         // file name or format type for capture (only for Linux: video4linux or dv1394)
1118         char * file = NULL;
1119         // capture device number
1120         short capt = -1;
1121         // capture width, only if capt is >= 0
1122         short width = 0;
1123         // capture height, only if capt is >= 0
1124         short height = 0;
1125         // capture rate, only if capt is >= 0
1126         float rate = 25.f;
1127
1128         static const char *kwlist[] = {"file", "capture", "rate", "width", "height", NULL};
1129
1130         // get parameters
1131         if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|hfhh",
1132                 const_cast<char**>(kwlist), &file, &capt, &rate, &width, &height))
1133                 return -1; 
1134
1135         try
1136         {
1137                 // create video object
1138                 Video_init<VideoFFmpeg>(self);
1139
1140                 // set thread usage
1141                 getVideoFFmpeg(self)->initParams(width, height, rate);
1142
1143                 // open video source
1144                 Video_open(getVideo(self), file, capt);
1145         }
1146         catch (Exception & exp)
1147         {
1148                 exp.report();
1149                 return -1;
1150         }
1151         // initialization succeded
1152         return 0;
1153 }
1154
1155 static PyObject *VideoFFmpeg_getPreseek(PyImage *self, void *closure)
1156 {
1157         return Py_BuildValue("h", getFFmpeg(self)->getPreseek());
1158 }
1159
1160 // set range
1161 static int VideoFFmpeg_setPreseek(PyImage *self, PyObject *value, void *closure)
1162 {
1163         // check validity of parameter
1164         if (value == NULL || !PyLong_Check(value))
1165         {
1166                 PyErr_SetString(PyExc_TypeError, "The value must be an integer");
1167                 return -1;
1168         }
1169         // set preseek
1170         getFFmpeg(self)->setPreseek(PyLong_AsLong(value));
1171         // success
1172         return 0;
1173 }
1174
1175 // get deinterlace
1176 static PyObject *VideoFFmpeg_getDeinterlace(PyImage *self, void *closure)
1177 {
1178         if (getFFmpeg(self)->getDeinterlace())
1179                 Py_RETURN_TRUE;
1180         else
1181                 Py_RETURN_FALSE;
1182 }
1183
1184 // set flip
1185 static int VideoFFmpeg_setDeinterlace(PyImage *self, PyObject *value, void *closure)
1186 {
1187         // check parameter, report failure
1188         if (value == NULL || !PyBool_Check(value))
1189         {
1190                 PyErr_SetString(PyExc_TypeError, "The value must be a bool");
1191                 return -1;
1192         }
1193         // set deinterlace
1194         getFFmpeg(self)->setDeinterlace(value == Py_True);
1195         // success
1196         return 0;
1197 }
1198
// methods structure
// Python method table for VideoTexture.VideoFFmpeg; all entries are
// the generic VideoBase wrappers.
static PyMethodDef videoMethods[] =
{ // methods from VideoBase class
	{"play", (PyCFunction)Video_play, METH_NOARGS, "Play (restart) video"},
	{"pause", (PyCFunction)Video_pause, METH_NOARGS, "pause video"},
	{"stop", (PyCFunction)Video_stop, METH_NOARGS, "stop video (play will replay it from start)"},
	{"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh video - get its status"},
	{NULL}  // sentinel
};
// attributes structure
// Python getset table for VideoTexture.VideoFFmpeg: generic video
// attributes, generic image attributes, then the two FFmpeg-specific
// attributes (preseek, deinterlace).
static PyGetSetDef videoGetSets[] =
{ // methods from VideoBase class
	{(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
	{(char*)"range", (getter)Video_getRange, (setter)Video_setRange, (char*)"replay range", NULL},
	{(char*)"repeat", (getter)Video_getRepeat, (setter)Video_setRepeat, (char*)"repeat count, -1 for infinite repeat", NULL},
	{(char*)"framerate", (getter)Video_getFrameRate, (setter)Video_setFrameRate, (char*)"frame rate", NULL},
	// attributes from ImageBase class
	{(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
	{(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
	{(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
	{(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbor)", NULL},
	{(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
	{(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
	{(char*)"preseek", (getter)VideoFFmpeg_getPreseek, (setter)VideoFFmpeg_setPreseek, (char*)"nb of frames of preseek", NULL},
	{(char*)"deinterlace", (getter)VideoFFmpeg_getDeinterlace, (setter)VideoFFmpeg_setDeinterlace, (char*)"deinterlace image", NULL},
	{NULL}  // sentinel
};
1226
1227 // python type declaration
1228 PyTypeObject VideoFFmpegType =
1229
1230         PyVarObject_HEAD_INIT(NULL, 0)
1231         "VideoTexture.VideoFFmpeg",   /*tp_name*/
1232         sizeof(PyImage),          /*tp_basicsize*/
1233         0,                         /*tp_itemsize*/
1234         (destructor)Image_dealloc, /*tp_dealloc*/
1235         0,                         /*tp_print*/
1236         0,                         /*tp_getattr*/
1237         0,                         /*tp_setattr*/
1238         0,                         /*tp_compare*/
1239         0,                         /*tp_repr*/
1240         0,                         /*tp_as_number*/
1241         0,                         /*tp_as_sequence*/
1242         0,                         /*tp_as_mapping*/
1243         0,                         /*tp_hash */
1244         0,                         /*tp_call*/
1245         0,                         /*tp_str*/
1246         0,                         /*tp_getattro*/
1247         0,                         /*tp_setattro*/
1248         &imageBufferProcs,         /*tp_as_buffer*/
1249         Py_TPFLAGS_DEFAULT,        /*tp_flags*/
1250         "FFmpeg video source",       /* tp_doc */
1251         0,                             /* tp_traverse */
1252         0,                             /* tp_clear */
1253         0,                             /* tp_richcompare */
1254         0,                             /* tp_weaklistoffset */
1255         0,                             /* tp_iter */
1256         0,                             /* tp_iternext */
1257         videoMethods,    /* tp_methods */
1258         0,                   /* tp_members */
1259         videoGetSets,          /* tp_getset */
1260         0,                         /* tp_base */
1261         0,                         /* tp_dict */
1262         0,                         /* tp_descr_get */
1263         0,                         /* tp_descr_set */
1264         0,                         /* tp_dictoffset */
1265         (initproc)VideoFFmpeg_init,     /* tp_init */
1266         0,                         /* tp_alloc */
1267         Image_allocNew,           /* tp_new */
1268 };
1269
1270 // object initialization
1271 static int ImageFFmpeg_init(PyObject *pySelf, PyObject *args, PyObject *kwds)
1272 {
1273         PyImage *self = reinterpret_cast<PyImage*>(pySelf);
1274         // parameters - video source
1275         // file name or format type for capture (only for Linux: video4linux or dv1394)
1276         char * file = NULL;
1277
1278         // get parameters
1279         if (!PyArg_ParseTuple(args, "s:ImageFFmpeg", &file))
1280                 return -1; 
1281
1282         try
1283         {
1284                 // create video object
1285                 Video_init<VideoFFmpeg>(self);
1286
1287                 getVideoFFmpeg(self)->initParams(0, 0, 1.0, true);
1288
1289                 // open video source
1290                 Video_open(getVideo(self), file, -1);
1291         }
1292         catch (Exception & exp)
1293         {
1294                 exp.report();
1295                 return -1;
1296         }
1297         // initialization succeded
1298         return 0;
1299 }
1300
1301 static PyObject *Image_reload(PyImage *self, PyObject *args)
1302 {
1303         char * newname = NULL;
1304         if (!PyArg_ParseTuple(args, "|s:reload", &newname))
1305                 return NULL;
1306         if (self->m_image != NULL)
1307         {
1308                 VideoFFmpeg* video = getFFmpeg(self);
1309                 // check type of object
1310                 if (!newname)
1311                         newname = video->getImageName();
1312                 if (!newname) {
1313                         // if not set, retport error
1314                         PyErr_SetString(PyExc_RuntimeError, "No image file name given");
1315                         return NULL;
1316                 }
1317                 // make sure the previous file is cleared
1318                 video->release();
1319                 // open the new file
1320                 video->openFile(newname);
1321         }
1322         Py_RETURN_NONE;
1323 }
1324
// methods structure
// Python method table for VideoTexture.ImageFFmpeg.
static PyMethodDef imageMethods[] =
{ // methods from VideoBase class
	{"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh image, i.e. load it"},
	{"reload", (PyCFunction)Image_reload, METH_VARARGS, "Reload image, i.e. reopen it"},
	{NULL}  // sentinel
};
// attributes structure
// Python getset table for VideoTexture.ImageFFmpeg: video status plus
// the generic image attributes.
static PyGetSetDef imageGetSets[] =
{ // methods from VideoBase class
	{(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
	// attributes from ImageBase class
	{(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
	{(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
	{(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
	{(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbor)", NULL},
	{(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
	{(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
	{NULL}  // sentinel
};
1345
1346 // python type declaration
1347 PyTypeObject ImageFFmpegType =
1348
1349         PyVarObject_HEAD_INIT(NULL, 0)
1350         "VideoTexture.ImageFFmpeg",   /*tp_name*/
1351         sizeof(PyImage),          /*tp_basicsize*/
1352         0,                         /*tp_itemsize*/
1353         (destructor)Image_dealloc, /*tp_dealloc*/
1354         0,                         /*tp_print*/
1355         0,                         /*tp_getattr*/
1356         0,                         /*tp_setattr*/
1357         0,                         /*tp_compare*/
1358         0,                         /*tp_repr*/
1359         0,                         /*tp_as_number*/
1360         0,                         /*tp_as_sequence*/
1361         0,                         /*tp_as_mapping*/
1362         0,                         /*tp_hash */
1363         0,                         /*tp_call*/
1364         0,                         /*tp_str*/
1365         0,                         /*tp_getattro*/
1366         0,                         /*tp_setattro*/
1367         &imageBufferProcs,         /*tp_as_buffer*/
1368         Py_TPFLAGS_DEFAULT,        /*tp_flags*/
1369         "FFmpeg image source",       /* tp_doc */
1370         0,                             /* tp_traverse */
1371         0,                             /* tp_clear */
1372         0,                             /* tp_richcompare */
1373         0,                             /* tp_weaklistoffset */
1374         0,                             /* tp_iter */
1375         0,                             /* tp_iternext */
1376         imageMethods,    /* tp_methods */
1377         0,                   /* tp_members */
1378         imageGetSets,          /* tp_getset */
1379         0,                         /* tp_base */
1380         0,                         /* tp_dict */
1381         0,                         /* tp_descr_get */
1382         0,                         /* tp_descr_set */
1383         0,                         /* tp_dictoffset */
1384         (initproc)ImageFFmpeg_init,     /* tp_init */
1385         0,                         /* tp_alloc */
1386         Image_allocNew,           /* tp_new */
1387 };
1388
1389 #endif  //WITH_FFMPEG
1390
1391