3bf11fbdfd6847ecd2606e20ef5a75150b0ba4eb
[blender.git] / source / gameengine / VideoTexture / VideoFFmpeg.cpp
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software  Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * Copyright (c) 2007 The Zdeno Ash Miklas
19  *
20  * This source file is part of VideoTexture library
21  *
22  * Contributor(s):
23  *
24  * ***** END GPL LICENSE BLOCK *****
25  */
26
27 /** \file gameengine/VideoTexture/VideoFFmpeg.cpp
28  *  \ingroup bgevideotex
29  */
30
31
32 #ifdef WITH_FFMPEG
33
34 // INT64_C fix for some linux machines (C99ism)
35 #ifndef __STDC_CONSTANT_MACROS
36 #define __STDC_CONSTANT_MACROS
37 #endif
38 #include <stdint.h>
39
40
41 #include "MEM_guardedalloc.h"
42 #include "PIL_time.h"
43
44 #include <string>
45
46 #include "VideoFFmpeg.h"
47 #include "Exception.h"
48
49
50 // default framerate
51 const double defFrameRate = 25.0;
52
53 // macro for exception handling and logging
54 #define CATCH_EXCP catch (Exception & exp) \
55 { exp.report(); m_status = SourceError; }
56
57 // class RenderVideo
58
// constructor
// Builds the source in its unopened state: all FFmpeg handles NULL, caches
// empty. Actual stream/codec setup is deferred to openFile()/openCam(),
// which go through openStream().
// \param hRslt receives S_OK; construction itself cannot fail.
VideoFFmpeg::VideoFFmpeg (HRESULT * hRslt) : VideoBase(), 
m_codec(NULL), m_formatCtx(NULL), m_codecCtx(NULL), 
m_frame(NULL), m_frameDeinterlaced(NULL), m_frameRGB(NULL), m_imgConvertCtx(NULL),
m_deinterlace(false), m_preseek(0),	m_videoStream(-1), m_baseFrameRate(25.0),
m_lastFrame(-1),  m_eof(false), m_externTime(false), m_curPosition(-1), m_startTime(0), 
m_captWidth(0), m_captHeight(0), m_captRate(0.f), m_isImage(false),
m_isThreaded(false), m_isStreaming(false), m_stopThread(false), m_cacheStarted(false)
{
	// set video format
	m_format = RGB24;
	// force flip because ffmpeg always return the image in the wrong orientation for texture
	setFlip(true);
	// construction is OK
	*hRslt = S_OK;
	// cache-thread bookkeeping: empty thread list, and the mutex protecting
	// the decoded-frame queues shared with the cache thread
	BLI_listbase_clear(&m_thread);
	pthread_mutex_init(&m_cacheMutex, NULL);
	BLI_listbase_clear(&m_frameCacheFree);
	BLI_listbase_clear(&m_frameCacheBase);
	BLI_listbase_clear(&m_packetCacheFree);
	BLI_listbase_clear(&m_packetCacheBase);
}
81
// destructor
// Intentionally empty: all FFmpeg/cache resources are freed in release().
// NOTE(review): presumably the owner always calls release() before
// destruction — confirm, otherwise contexts/frames would leak here.
VideoFFmpeg::~VideoFFmpeg () 
{
}
86
87 void VideoFFmpeg::refresh(void)
88 {
89     // a fixed image will not refresh because it is loaded only once at creation
90     if (m_isImage)
91         return;
92     m_avail = false;
93 }
94
95 // release components
96 bool VideoFFmpeg::release()
97 {
98         // release
99         stopCache();
100         if (m_codecCtx)
101         {
102                 avcodec_close(m_codecCtx);
103                 m_codecCtx = NULL;
104         }
105         if (m_formatCtx)
106         {
107                 avformat_close_input(&m_formatCtx);
108                 m_formatCtx = NULL;
109         }
110         if (m_frame)
111         {
112                 av_free(m_frame);
113                 m_frame = NULL;
114         }
115         if (m_frameDeinterlaced)
116         {
117                 MEM_freeN(m_frameDeinterlaced->data[0]);
118                 av_free(m_frameDeinterlaced);
119                 m_frameDeinterlaced = NULL;
120         }
121         if (m_frameRGB)
122         {
123                 MEM_freeN(m_frameRGB->data[0]);
124                 av_free(m_frameRGB);
125                 m_frameRGB = NULL;
126         }
127         if (m_imgConvertCtx)
128         {
129                 sws_freeContext(m_imgConvertCtx);
130                 m_imgConvertCtx = NULL;
131         }
132         m_codec = NULL;
133         m_status = SourceStopped;
134         m_lastFrame = -1;
135         return true;
136 }
137
138 AVFrame *VideoFFmpeg::allocFrameRGB()
139 {
140         AVFrame *frame;
141         frame = avcodec_alloc_frame();
142         if (m_format == RGBA32)
143         {
144                 avpicture_fill((AVPicture*)frame, 
145                         (uint8_t*)MEM_callocN(avpicture_get_size(
146                                 PIX_FMT_RGBA,
147                                 m_codecCtx->width, m_codecCtx->height),
148                                 "ffmpeg rgba"),
149                         PIX_FMT_RGBA, m_codecCtx->width, m_codecCtx->height);
150         } else 
151         {
152                 avpicture_fill((AVPicture*)frame, 
153                         (uint8_t*)MEM_callocN(avpicture_get_size(
154                                 PIX_FMT_RGB24,
155                                 m_codecCtx->width, m_codecCtx->height),
156                                 "ffmpeg rgb"),
157                         PIX_FMT_RGB24, m_codecCtx->width, m_codecCtx->height);
158         }
159         return frame;
160 }
161
162 // set initial parameters
163 void VideoFFmpeg::initParams (short width, short height, float rate, bool image)
164 {
165         m_captWidth = width;
166         m_captHeight = height;
167         m_captRate = rate;
168         m_isImage = image;
169 }
170
171
172 int VideoFFmpeg::openStream(const char *filename, AVInputFormat *inputFormat, AVDictionary **formatParams)
173 {
174         AVFormatContext *formatCtx = NULL;
175         int                             i, videoStream;
176         AVCodec                 *codec;
177         AVCodecContext  *codecCtx;
178
179         if (avformat_open_input(&formatCtx, filename, inputFormat, formatParams)!=0)
180                 return -1;
181
182         if (avformat_find_stream_info(formatCtx, NULL) < 0)
183         {
184                 avformat_close_input(&formatCtx);
185                 return -1;
186         }
187
188         /* Find the first video stream */
189         videoStream=-1;
190         for (i=0; i<formatCtx->nb_streams; i++)
191         {
192                 if (formatCtx->streams[i] &&
193                         get_codec_from_stream(formatCtx->streams[i]) && 
194                         (get_codec_from_stream(formatCtx->streams[i])->codec_type==AVMEDIA_TYPE_VIDEO))
195                 {
196                         videoStream=i;
197                         break;
198                 }
199         }
200
201         if (videoStream==-1) 
202         {
203                 avformat_close_input(&formatCtx);
204                 return -1;
205         }
206
207         codecCtx = get_codec_from_stream(formatCtx->streams[videoStream]);
208
209         /* Find the decoder for the video stream */
210         codec=avcodec_find_decoder(codecCtx->codec_id);
211         if (codec==NULL) 
212         {
213                 avformat_close_input(&formatCtx);
214                 return -1;
215         }
216         codecCtx->workaround_bugs = 1;
217         if (avcodec_open2(codecCtx, codec, NULL) < 0)
218         {
219                 avformat_close_input(&formatCtx);
220                 return -1;
221         }
222
223 #ifdef FFMPEG_OLD_FRAME_RATE
224         if (codecCtx->frame_rate>1000 && codecCtx->frame_rate_base==1)
225                 codecCtx->frame_rate_base=1000;
226         m_baseFrameRate = (double)codecCtx->frame_rate / (double)codecCtx->frame_rate_base;
227 #else
228         m_baseFrameRate = av_q2d(av_get_r_frame_rate_compat(formatCtx->streams[videoStream]));
229 #endif
230         if (m_baseFrameRate <= 0.0) 
231                 m_baseFrameRate = defFrameRate;
232
233         m_codec = codec;
234         m_codecCtx = codecCtx;
235         m_formatCtx = formatCtx;
236         m_videoStream = videoStream;
237         m_frame = avcodec_alloc_frame();
238         m_frameDeinterlaced = avcodec_alloc_frame();
239
240         // allocate buffer if deinterlacing is required
241         avpicture_fill((AVPicture*)m_frameDeinterlaced, 
242                 (uint8_t*)MEM_callocN(avpicture_get_size(
243                 m_codecCtx->pix_fmt,
244                 m_codecCtx->width, m_codecCtx->height), 
245                 "ffmpeg deinterlace"), 
246                 m_codecCtx->pix_fmt, m_codecCtx->width, m_codecCtx->height);
247
248         // check if the pixel format supports Alpha
249         if (m_codecCtx->pix_fmt == PIX_FMT_RGB32 ||
250                 m_codecCtx->pix_fmt == PIX_FMT_BGR32 ||
251                 m_codecCtx->pix_fmt == PIX_FMT_RGB32_1 ||
252                 m_codecCtx->pix_fmt == PIX_FMT_BGR32_1) 
253         {
254                 // allocate buffer to store final decoded frame
255                 m_format = RGBA32;
256                 // allocate sws context
257                 m_imgConvertCtx = sws_getContext(
258                         m_codecCtx->width,
259                         m_codecCtx->height,
260                         m_codecCtx->pix_fmt,
261                         m_codecCtx->width,
262                         m_codecCtx->height,
263                         PIX_FMT_RGBA,
264                         SWS_FAST_BILINEAR,
265                         NULL, NULL, NULL);
266         } else
267         {
268                 // allocate buffer to store final decoded frame
269                 m_format = RGB24;
270                 // allocate sws context
271                 m_imgConvertCtx = sws_getContext(
272                         m_codecCtx->width,
273                         m_codecCtx->height,
274                         m_codecCtx->pix_fmt,
275                         m_codecCtx->width,
276                         m_codecCtx->height,
277                         PIX_FMT_RGB24,
278                         SWS_FAST_BILINEAR,
279                         NULL, NULL, NULL);
280         }
281         m_frameRGB = allocFrameRGB();
282
283         if (!m_imgConvertCtx) {
284                 avcodec_close(m_codecCtx);
285                 m_codecCtx = NULL;
286                 avformat_close_input(&m_formatCtx);
287                 m_formatCtx = NULL;
288                 av_free(m_frame);
289                 m_frame = NULL;
290                 MEM_freeN(m_frameDeinterlaced->data[0]);
291                 av_free(m_frameDeinterlaced);
292                 m_frameDeinterlaced = NULL;
293                 MEM_freeN(m_frameRGB->data[0]);
294                 av_free(m_frameRGB);
295                 m_frameRGB = NULL;
296                 return -1;
297         }
298         return 0;
299 }
300
/*
 * This thread is used to load video frames asynchronously.
 * It provides a frame caching service. 
 * The main thread is responsible for positioning the frame pointer in the
 * file correctly before calling startCache() which starts this thread.
 * The cache is organized in two layers: 1) a cache of 20-30 undecoded packets to keep
 * memory and CPU low 2) a cache of 5 decoded frames. 
 * If the main thread does not find the frame in the cache (because the video has restarted
 * or because the GE is lagging), it stops the cache with StopCache() (this is a synchronous
 * function: it sends a signal to stop the cache thread and waits for confirmation), then
 * changes the position in the stream and restarts the cache thread.
 *
 * Locking rules: the packet queues are touched only by this thread, so they
 * need no lock; the frame queues are shared with the main thread and every
 * access to them is guarded by m_cacheMutex.
 */
void *VideoFFmpeg::cacheThread(void *data)
{
	VideoFFmpeg* video = (VideoFFmpeg*)data;
	// holds the frame that is being decoded
	CacheFrame *currentFrame = NULL;
	CachePacket *cachePacket;
	bool endOfFile = false;
	int frameFinished = 0;
	// seconds per stream tick; used with m_baseFrameRate to turn a packet
	// dts into a frame index
	double timeBase = av_q2d(video->m_formatCtx->streams[video->m_videoStream]->time_base);
	int64_t startTs = video->m_formatCtx->streams[video->m_videoStream]->start_time;

	if (startTs == AV_NOPTS_VALUE)
		startTs = 0;

	while (!video->m_stopThread)
	{
		// packet cache is used solely by this thread, no need to lock
		// In case the stream/file contains other streams than the one we are looking for,
		// allow a bit of cycling to get rid quickly of those frames
		frameFinished = 0;
		while (    !endOfFile 
				&& (cachePacket = (CachePacket *)video->m_packetCacheFree.first) != NULL 
				&& frameFinished < 25)
		{
			// free packet => packet cache is not full yet, just read more
			if (av_read_frame(video->m_formatCtx, &cachePacket->packet)>=0) 
			{
				if (cachePacket->packet.stream_index == video->m_videoStream)
				{
					// make sure fresh memory is allocated for the packet and move it to queue
					av_dup_packet(&cachePacket->packet);
					BLI_remlink(&video->m_packetCacheFree, cachePacket);
					BLI_addtail(&video->m_packetCacheBase, cachePacket);
					break;
				} else {
					// this is not a good packet for us, just leave it on free queue
					// Note: here we could handle sound packets
					// frameFinished doubles as a "foreign packets skipped" counter here
					av_free_packet(&cachePacket->packet);
					frameFinished++;
				}
				
			} else {
				if (video->m_isFile)
					// this marks the end of the file
					endOfFile = true;
				// if we cannot read a packet, no need to continue
				break;
			}
		}
		// frame cache is also used by main thread, lock
		if (currentFrame == NULL) 
		{
			// no current frame being decoded, take free one
			pthread_mutex_lock(&video->m_cacheMutex);
			if ((currentFrame = (CacheFrame *)video->m_frameCacheFree.first) != NULL)
				BLI_remlink(&video->m_frameCacheFree, currentFrame);
			pthread_mutex_unlock(&video->m_cacheMutex);
		}
		if (currentFrame != NULL)
		{
			// this frame is out of free and busy queue, we can manipulate it without locking
			frameFinished = 0;
			while (!frameFinished && (cachePacket = (CachePacket *)video->m_packetCacheBase.first) != NULL)
			{
				BLI_remlink(&video->m_packetCacheBase, cachePacket);
				// use m_frame because when caching, it is not used in main thread
				// we can't use currentFrame directly because we need to convert to RGB first
				avcodec_decode_video2(video->m_codecCtx, 
					video->m_frame, &frameFinished, 
					&cachePacket->packet);
				if (frameFinished) 
				{
					AVFrame * input = video->m_frame;

					/* This means the data wasn't read properly, this check stops crashing */
					if (   input->data[0]!=0 || input->data[1]!=0 
						|| input->data[2]!=0 || input->data[3]!=0)
					{
						if (video->m_deinterlace) 
						{
							// on deinterlace failure, fall through and use the raw frame
							if (avpicture_deinterlace(
								(AVPicture*) video->m_frameDeinterlaced,
								(const AVPicture*) video->m_frame,
								video->m_codecCtx->pix_fmt,
								video->m_codecCtx->width,
								video->m_codecCtx->height) >= 0)
							{
								input = video->m_frameDeinterlaced;
							}
						}
						// convert to RGB24
						sws_scale(video->m_imgConvertCtx,
							input->data,
							input->linesize,
							0,
							video->m_codecCtx->height,
							currentFrame->frame->data,
							currentFrame->frame->linesize);
						// move frame to queue, this frame is necessarily the next one
						// frame index = (dts - stream start) * fps * timeBase, rounded
						video->m_curPosition = (long)((cachePacket->packet.dts-startTs) * (video->m_baseFrameRate*timeBase) + 0.5);
						currentFrame->framePosition = video->m_curPosition;
						pthread_mutex_lock(&video->m_cacheMutex);
						BLI_addtail(&video->m_frameCacheBase, currentFrame);
						pthread_mutex_unlock(&video->m_cacheMutex);
						currentFrame = NULL;
					}
				}
				av_free_packet(&cachePacket->packet);
				BLI_addtail(&video->m_packetCacheFree, cachePacket);
			} 
			if (currentFrame && endOfFile) 
			{
				// no more packets and end of file => put a special frame that indicates that
				currentFrame->framePosition = -1;
				pthread_mutex_lock(&video->m_cacheMutex);
				BLI_addtail(&video->m_frameCacheBase, currentFrame);
				pthread_mutex_unlock(&video->m_cacheMutex);
				currentFrame = NULL;
				// no need to stay any longer in this thread
				break;
			}
		}
		// small sleep to avoid unnecessary looping
		PIL_sleep_ms(10);
	}
	// before quitting, put back the current frame to queue to allow freeing
	if (currentFrame)
	{
		pthread_mutex_lock(&video->m_cacheMutex);
		BLI_addtail(&video->m_frameCacheFree, currentFrame);
		pthread_mutex_unlock(&video->m_cacheMutex);
	}
	return 0;
}
447
448 // start thread to cache video frame from file/capture/stream
449 // this function should be called only when the position in the stream is set for the
450 // first frame to cache
451 bool VideoFFmpeg::startCache()
452 {
453         if (!m_cacheStarted && m_isThreaded)
454         {
455                 m_stopThread = false;
456                 for (int i=0; i<CACHE_FRAME_SIZE; i++)
457                 {
458                         CacheFrame *frame = new CacheFrame();
459                         frame->frame = allocFrameRGB();
460                         BLI_addtail(&m_frameCacheFree, frame);
461                 }
462                 for (int i=0; i<CACHE_PACKET_SIZE; i++) 
463                 {
464                         CachePacket *packet = new CachePacket();
465                         BLI_addtail(&m_packetCacheFree, packet);
466                 }
467                 BLI_init_threads(&m_thread, cacheThread, 1);
468                 BLI_insert_thread(&m_thread, this);
469                 m_cacheStarted = true;
470         }
471         return m_cacheStarted;
472 }
473
// Synchronously stop the cache thread and free both cache layers.
// No-op when the cache is not running. Order matters: the thread must be
// signalled and joined before any queue is freed.
void VideoFFmpeg::stopCache()
{
	if (m_cacheStarted)
	{
		// signal the thread, then wait for it to terminate
		m_stopThread = true;
		BLI_end_threads(&m_thread);
		// now delete the cache
		CacheFrame *frame;
		CachePacket *packet;
		// decoded frames waiting to be consumed
		while ((frame = (CacheFrame *)m_frameCacheBase.first) != NULL)
		{
			BLI_remlink(&m_frameCacheBase, frame);
			// frames come from allocFrameRGB(): MEM buffer in data[0] + AVFrame
			MEM_freeN(frame->frame->data[0]);
			av_free(frame->frame);
			delete frame;
		}
		// unused decoded-frame slots
		while ((frame = (CacheFrame *)m_frameCacheFree.first) != NULL)
		{
			BLI_remlink(&m_frameCacheFree, frame);
			MEM_freeN(frame->frame->data[0]);
			av_free(frame->frame);
			delete frame;
		}
		// queued packets still own FFmpeg-allocated data, release it first
		while ((packet = (CachePacket *)m_packetCacheBase.first) != NULL)
		{
			BLI_remlink(&m_packetCacheBase, packet);
			av_free_packet(&packet->packet);
			delete packet;
		}
		// free slots carry no packet data
		while ((packet = (CachePacket *)m_packetCacheFree.first) != NULL)
		{
			BLI_remlink(&m_packetCacheFree, packet);
			delete packet;
		}
		m_cacheStarted = false;
	}
}
511
512 void VideoFFmpeg::releaseFrame(AVFrame *frame)
513 {
514         if (frame == m_frameRGB)
515         {
516                 // this is not a frame from the cache, ignore
517                 return;
518         }
519         // this frame MUST be the first one of the queue
520         pthread_mutex_lock(&m_cacheMutex);
521         CacheFrame *cacheFrame = (CacheFrame *)m_frameCacheBase.first;
522         assert (cacheFrame != NULL && cacheFrame->frame == frame);
523         BLI_remlink(&m_frameCacheBase, cacheFrame);
524         BLI_addtail(&m_frameCacheFree, cacheFrame);
525         pthread_mutex_unlock(&m_cacheMutex);
526 }
527
528 // open video file
529 void VideoFFmpeg::openFile (char *filename)
530 {
531         if (openStream(filename, NULL, NULL) != 0)
532                 return;
533
534         if (m_codecCtx->gop_size)
535                 m_preseek = (m_codecCtx->gop_size < 25) ? m_codecCtx->gop_size+1 : 25;
536         else if (m_codecCtx->has_b_frames)
537                 m_preseek = 25; // should determine gopsize
538         else
539                 m_preseek = 0;
540
541         // get video time range
542         m_range[0] = 0.0;
543         m_range[1] = (double)m_formatCtx->duration / AV_TIME_BASE;
544
545         // open base class
546         VideoBase::openFile(filename);
547
548         if (
549                 // ffmpeg reports that http source are actually non stream
550                 // but it is really not desirable to seek on http file, so force streaming.
551                 // It would be good to find this information from the context but there are no simple indication
552                 !strncmp(filename, "http://", 7) ||
553                 !strncmp(filename, "rtsp://", 7) ||
554                 (m_formatCtx->pb && !m_formatCtx->pb->seekable)
555                 )
556         {
557                 // the file is in fact a streaming source, treat as cam to prevent seeking
558                 m_isFile = false;
559                 // but it's not handled exactly like a camera.
560                 m_isStreaming = true;
561                 // for streaming it is important to do non blocking read
562                 m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
563         }
564
565         if (m_isImage) 
566         {
567                 // the file is to be treated as an image, i.e. load the first frame only
568                 m_isFile = false;
569                 // in case of reload, the filename is taken from m_imageName, no need to change it
570                 if (m_imageName.Ptr() != filename)
571                         m_imageName = filename;
572                 m_preseek = 0;
573                 m_avail = false;
574                 play();
575         }
576         // check if we should do multi-threading?
577         if (!m_isImage && BLI_system_thread_count() > 1)
578         {
579                 // never thread image: there are no frame to read ahead
580                 // no need to thread if the system has a single core
581                 m_isThreaded =  true;
582         }
583 }
584
585
// open video capture device
// Builds the platform-specific device name and demuxer options (framerate,
// optional video_size and TV standard), opens the stream, then enables
// non-blocking reads and optionally the cache thread.
// \param file    optional device/driver specification (see comments below).
// \param camIdx  camera index, used to build the default device name.
void VideoFFmpeg::openCam (char *file, short camIdx)
{
	// open camera source
	AVInputFormat		*inputFormat;
	AVDictionary		*formatParams = NULL;
	char				filename[28], rateStr[20];

#ifdef WIN32
	// video capture on windows only through Video For Windows driver
	inputFormat = av_find_input_format("vfwcap");
	if (!inputFormat)
		// Video For Windows not supported??
		return;
	sprintf(filename, "%d", camIdx);
#else
	// In Linux we support two types of devices: VideoForLinux and DV1394. 
	// the user specifies it with the filename:
	// [<device_type>][:<standard>]
	// <device_type> : 'v4l' for VideoForLinux, 'dv1394' for DV1394. By default 'v4l'
	// <standard>    : 'pal', 'secam' or 'ntsc'. By default 'ntsc'
	// The driver name is constructed automatically from the device type:
	// v4l   : /dev/video<camIdx>
	// dv1394: /dev/dv1394/<camIdx>
	// If you have a different driver name, you can specify the driver name explicitly
	// instead of device type. Examples of valid filename:
	//    /dev/v4l/video0:pal
	//    /dev/ieee1394/1:ntsc
	//    dv1394:secam
	//    v4l:pal
	char *p;

	if (file && strstr(file, "1394") != NULL) 
	{
		// the user specifies a driver, check if it is v4l or dv1394
		inputFormat = av_find_input_format("dv1394");
		sprintf(filename, "/dev/dv1394/%d", camIdx);
	} else 
	{
		// try the v4l2/v4l demuxer names in order of preference
		const char *formats[] = {"video4linux2,v4l2", "video4linux2", "video4linux"};
		int i, formatsCount = sizeof(formats) / sizeof(char*);
		for (i = 0; i < formatsCount; i++) {
			inputFormat = av_find_input_format(formats[i]);
			if (inputFormat)
				break;
		}
		sprintf(filename, "/dev/video%d", camIdx);
	}
	if (!inputFormat)
		// these formats should be supported, check ffmpeg compilation
		return;
	if (file && strncmp(file, "/dev", 4) == 0) 
	{
		// user does not specify a driver
		// copy the explicit device path, dropping any ':<standard>' suffix
		strncpy(filename, file, sizeof(filename));
		filename[sizeof(filename)-1] = 0;
		if ((p = strchr(filename, ':')) != 0)
			*p = 0;
	}
	// ':<standard>' suffix selects the TV standard (pal/secam/ntsc)
	if (file && (p = strchr(file, ':')) != NULL) {
		av_dict_set(&formatParams, "standard", p+1, 0);
	}
#endif
	//frame rate
	if (m_captRate <= 0.f)
		m_captRate = defFrameRate;
	sprintf(rateStr, "%f", m_captRate);

	av_dict_set(&formatParams, "framerate", rateStr, 0);

	// pass the requested capture size only when one was set via initParams()
	if (m_captWidth > 0 && m_captHeight > 0) {
		char video_size[64];
		BLI_snprintf(video_size, sizeof(video_size), "%dx%d", m_captWidth, m_captHeight);
		av_dict_set(&formatParams, "video_size", video_size, 0);
	}

	if (openStream(filename, inputFormat, &formatParams) != 0)
		return;

	// for video capture it is important to do non blocking read
	m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
	// open base class
	VideoBase::openCam(file, camIdx);
	// check if we should do multi-threading?
	if (BLI_system_thread_count() > 1)
	{
		// no need to thread if the system has a single core
		m_isThreaded =  true;
	}

	av_dict_free(&formatParams);
}
678
679 // play video
680 bool VideoFFmpeg::play (void)
681 {
682         try
683         {
684                 // if object is able to play
685                 if (VideoBase::play())
686                 {
687                         // set video position
688                         setPositions();
689
690                         if (m_isStreaming)
691                         {
692                                 av_read_play(m_formatCtx);
693                         }
694
695                         // return success
696                         return true;
697                 }
698         }
699         CATCH_EXCP;
700         return false;
701 }
702
703
704 // pause video
705 bool VideoFFmpeg::pause (void)
706 {
707         try
708         {
709                 if (VideoBase::pause())
710                 {
711                         if (m_isStreaming)
712                         {
713                                 av_read_pause(m_formatCtx);
714                         }
715                         return true;
716                 }
717         }
718         CATCH_EXCP;
719         return false;
720 }
721
722 // stop video
723 bool VideoFFmpeg::stop (void)
724 {
725         try
726         {
727                 VideoBase::stop();
728                 // force restart when play
729                 m_lastFrame = -1;
730                 return true;
731         }
732         CATCH_EXCP;
733         return false;
734 }
735
736
737 // set video range
738 void VideoFFmpeg::setRange (double start, double stop)
739 {
740         try
741         {
742                 // set range
743                 if (m_isFile)
744                 {
745                         VideoBase::setRange(start, stop);
746                         // set range for video
747                         setPositions();
748                 }
749         }
750         CATCH_EXCP;
751 }
752
753 // set framerate
754 void VideoFFmpeg::setFrameRate (float rate)
755 {
756         VideoBase::setFrameRate(rate);
757 }
758
759
// image calculation
// load frame from video
// texId is unused in this implementation; ts is a timestamp in seconds supplied
// by the caller (e.g. an audio actuator), or negative when no timestamp is given.
void VideoFFmpeg::calcImage (unsigned int texId, double ts)
{
	if (m_status == SourcePlaying)
	{
		// get actual time
		double startTime = PIL_check_seconds_timer();
		double actTime;
		// timestamp passed from audio actuators can sometimes be slightly negative
		if (m_isFile && ts >= -0.5)
		{
			// allow setting timestamp only when not streaming
			actTime = ts;
			if (actTime * actFrameRate() < m_lastFrame) 
			{
				// user is asking to rewind, force a cache clear to make sure we will do a seek
				// note that this does not decrement m_repeat if ts didn't reach m_range[1]
				stopCache();
			}
		}
		else
		{
			// first frame of a non-file source: anchor the playback clock on "now"
			if (m_lastFrame == -1 && !m_isFile)
				m_startTime = startTime;
			// elapsed wall-clock time since the (possibly shifted) start time
			actTime = startTime - m_startTime;
		}
		// if video has ended
		if (m_isFile && actTime * m_frameRate >= m_range[1])
		{
			// in any case, this resets the cache
			stopCache();
			// if repeats are set, decrease them
			if (m_repeat > 0) 
				--m_repeat;
			// if video has to be replayed (m_repeat < 0 means infinite repeat)
			if (m_repeat != 0)
			{
				// reset its position
				actTime -= (m_range[1] - m_range[0]) / m_frameRate;
				m_startTime += (m_range[1] - m_range[0]) / m_frameRate;
			}
			// if video has to be stopped, stop it
			else 
			{
				m_status = SourceStopped;
				return;
			}
		}
		// actual frame: for a still image just advance by one, otherwise derive from time
		long actFrame = (m_isImage) ? m_lastFrame+1 : long(actTime * actFrameRate());
		// if actual frame differs from last frame
		if (actFrame != m_lastFrame)
		{
			AVFrame* frame;
			// get image
			if ((frame = grabFrame(actFrame)) != NULL)
			{
				if (!m_isFile && !m_cacheStarted) 
				{
					// streaming without cache: detect synchronization problem
					double execTime = PIL_check_seconds_timer() - startTime;
					if (execTime > 0.005) 
					{
						// exec time is too long, it means that the function was blocking
						// resynchronize the stream from this time
						m_startTime += execTime;
					}
				}
				// save actual frame
				m_lastFrame = actFrame;
				// init image, if needed
				init(short(m_codecCtx->width), short(m_codecCtx->height));
				// process image
				process((BYTE*)(frame->data[0]));
				// finished with the frame, release it so that cache can reuse it
				releaseFrame(frame);
				// in case it is an image, automatically stop reading it
				if (m_isImage)
				{
					m_status = SourceStopped;
					// close the file as we don't need it anymore
					release();
				}
			} else if (m_isStreaming)
			{
				// we didn't get a frame and we are streaming, this may be due to
				// a delay in the network or because we are getting the frame too fast.
				// In the later case, shift time by a small amount to compensate for a drift
				m_startTime += 0.001;
			}
		}
	}
}
854
855
// set actual position
// Recomputes m_startTime so that (now - m_startTime) maps to the desired
// playback position: either the current frame (resume) or the range start.
void VideoFFmpeg::setPositions (void)
{
	// set video start time
	m_startTime = PIL_check_seconds_timer();
	// if file is played and actual position is before end position
	if (!m_eof && m_lastFrame >= 0 && (!m_isFile || m_lastFrame < m_range[1] * actFrameRate()))
		// continue from actual position
		m_startTime -= double(m_lastFrame) / actFrameRate();
	else {
		// shift the clock back so playback begins at the start of the range
		m_startTime -= m_range[0];
		// start from beginning, stop cache just in case
		stopCache();
	}
}
871
// position pointer in file, position in second
// Returns the decoded frame for the requested frame number, or NULL when no
// frame is available (cache empty, end of file, or frame still in the future).
// In cache mode the returned frame belongs to the cache and must be released
// with releaseFrame(); in direct mode m_frameRGB is returned.
AVFrame *VideoFFmpeg::grabFrame(long position)
{
	AVPacket packet;
	int frameFinished;
	// posFound == 1 means we are already at (or past) the wanted position;
	// it is cleared to 0 after a seek until the target timestamp is reached
	int posFound = 1;
	bool frameLoaded = false;
	int64_t targetTs = 0;
	CacheFrame *frame;
	int64_t dts = 0;

	if (m_cacheStarted)
	{
		// when cache is active, we must not read the file directly
		do {
			pthread_mutex_lock(&m_cacheMutex);
			frame = (CacheFrame *)m_frameCacheBase.first;
			pthread_mutex_unlock(&m_cacheMutex);
			// no need to remove the frame from the queue: the cache thread does not touch the head, only the tail
			if (frame == NULL)
			{
				// no frame in cache, in case of file it is an abnormal situation
				if (m_isFile)
				{
					// go back to no threaded reading
					stopCache();
					break;
				}
				return NULL;
			}
			if (frame->framePosition == -1) 
			{
				// this frame mark the end of the file (only used for file)
				// leave in cache to make sure we don't miss it
				m_eof = true;
				return NULL;
			}
			// for streaming, always return the next frame, 
			// that's what grabFrame does in non cache mode anyway.
			if (m_isStreaming || frame->framePosition == position)
			{
				return frame->frame;
			}
			// for cam, skip old frames to keep image realtime.
			// There should be no risk of clock drift since it all happens on the same CPU
			if (frame->framePosition > position) 
			{
				// this can happen after rewind if the seek didn't find the first frame
				// the frame in the buffer is ahead of time, just leave it there
				return NULL;
			}
			// this frame is not useful, release it
			pthread_mutex_lock(&m_cacheMutex);
			BLI_remlink(&m_frameCacheBase, frame);
			BLI_addtail(&m_frameCacheFree, frame);
			pthread_mutex_unlock(&m_cacheMutex);
		} while (true);
	}
	// seconds per timestamp unit of the video stream
	double timeBase = av_q2d(m_formatCtx->streams[m_videoStream]->time_base);
	int64_t startTs = m_formatCtx->streams[m_videoStream]->start_time;
	if (startTs == AV_NOPTS_VALUE)
		startTs = 0;

	// come here when there is no cache or cache has been stopped
	// locate the frame, by seeking if necessary (seeking is only possible for files)
	if (m_isFile)
	{
		// first check if the position that we are looking for is in the preseek range
		// if so, just read the frame until we get there
		if (position > m_curPosition + 1 
			&& m_preseek 
			&& position - (m_curPosition + 1) < m_preseek) 
		{
			while (av_read_frame(m_formatCtx, &packet)>=0)
			{
				if (packet.stream_index == m_videoStream) 
				{
					avcodec_decode_video2(
						m_codecCtx, 
						m_frame, &frameFinished, 
						&packet);
					if (frameFinished)
					{
						// derive the current frame number from the packet timestamp
						m_curPosition = (long)((packet.dts-startTs) * (m_baseFrameRate*timeBase) + 0.5);
					}
				}
				av_free_packet(&packet);
				if (position == m_curPosition+1)
					break;
			}
		}
		// if the position is not in preseek, do a direct jump
		if (position != m_curPosition + 1) 
		{ 
			// seek m_preseek frames early so decoding can settle before the target
			int64_t pos = (int64_t)((position - m_preseek) / (m_baseFrameRate*timeBase));

			if (pos < 0)
				pos = 0;

			pos += startTs;

			if (position <= m_curPosition || !m_eof)
			{
#if 0
				// Tried to make this work but couldn't: seeking on byte is ignored by the
				// format plugin and it will generally continue to read from last timestamp.
				// Too bad because frame seek is not always able to get the first frame
				// of the file.
				if (position <= m_preseek)
				{
					// we can safely go the beginning of the file
					if (av_seek_frame(m_formatCtx, m_videoStream, 0, AVSEEK_FLAG_BYTE) >= 0)
					{
						// binary seek does not reset the timestamp, must do it now
						av_update_cur_dts(m_formatCtx, m_formatCtx->streams[m_videoStream], startTs);
						m_curPosition = 0;
					}
				}
				else
#endif
				{
					// current position is now lost, guess a value. 
					if (av_seek_frame(m_formatCtx, m_videoStream, pos, AVSEEK_FLAG_BACKWARD) >= 0)
					{
						// current position is now lost, guess a value. 
						// It's not important because it will be set at this end of this function
						m_curPosition = position - m_preseek - 1;
					}
				}
			}
			// this is the timestamp of the frame we're looking for
			targetTs = (int64_t)(position / (m_baseFrameRate * timeBase)) + startTs;

			posFound = 0;
			avcodec_flush_buffers(m_codecCtx);
		}
	} else if (m_isThreaded)
	{
		// cache is not started but threading is possible
		// better not read the stream => make take some time, better start caching
		if (startCache())
			return NULL;
		// Abnormal!!! could not start cache, fall back on direct read
		m_isThreaded = false;
	}

	// find the correct frame, in case of streaming and no cache, it means just
	// return the next frame. This is not quite correct, may need more work
	while (av_read_frame(m_formatCtx, &packet) >= 0)
	{
		if (packet.stream_index == m_videoStream) 
		{
			AVFrame *input = m_frame;
			short counter = 0;

			/* If m_isImage, while the data is not read properly (png, tiffs, etc formats may need several pass), else don't need while loop*/
			do {
				avcodec_decode_video2(m_codecCtx, m_frame, &frameFinished, &packet);
				counter++;
			} while ((input->data[0] == 0 && input->data[1] == 0 && input->data[2] == 0 && input->data[3] == 0) && counter < 10 && m_isImage);

			// remember dts to compute exact frame number
			dts = packet.dts;
			if (frameFinished && !posFound) 
			{
				if (dts >= targetTs)
				{
					posFound = 1;
				}
			} 

			if (frameFinished && posFound == 1) 
			{
				// NOTE(review): this declaration shadows the outer 'input' above;
				// it re-binds to m_frame before the optional deinterlace step
				AVFrame * input = m_frame;

				/* This means the data wasnt read properly, 
				 * this check stops crashing */
				if (   input->data[0]==0 && input->data[1]==0 
					&& input->data[2]==0 && input->data[3]==0)
				{
					av_free_packet(&packet);
					break;
				}

				if (m_deinterlace) 
				{
					if (avpicture_deinterlace(
						(AVPicture*) m_frameDeinterlaced,
						(const AVPicture*) m_frame,
						m_codecCtx->pix_fmt,
						m_codecCtx->width,
						m_codecCtx->height) >= 0)
					{
						input = m_frameDeinterlaced;
					}
				}
				// convert to RGB24
				sws_scale(m_imgConvertCtx,
					input->data,
					input->linesize,
					0,
					m_codecCtx->height,
					m_frameRGB->data,
					m_frameRGB->linesize);
				av_free_packet(&packet);
				frameLoaded = true;
				break;
			}
		}
		av_free_packet(&packet);
	}
	// for files, failing to load a frame means we reached the end of the stream
	m_eof = m_isFile && !frameLoaded;
	if (frameLoaded)
	{
		m_curPosition = (long)((dts-startTs) * (m_baseFrameRate*timeBase) + 0.5);
		if (m_isThreaded)
		{
			// normal case for file: first locate, then start cache
			if (!startCache())
			{
				// Abnormal!! could not start cache, return to non-cache mode
				m_isThreaded = false;
			}
		}
		return m_frameRGB;
	}
	return NULL;
}
1100
1101
1102 // python methods
1103
1104
1105 // cast Image pointer to VideoFFmpeg
1106 inline VideoFFmpeg * getVideoFFmpeg (PyImage *self)
1107 { return static_cast<VideoFFmpeg*>(self->m_image); }
1108
1109
// object initialization
// tp_init for VideoTexture.VideoFFmpeg: parses (file[, capture, rate, width,
// height]), creates the C++ video object and opens the source.
// Returns 0 on success, -1 (with a Python exception set) on failure.
static int VideoFFmpeg_init(PyObject *pySelf, PyObject *args, PyObject *kwds)
{
	PyImage *self = reinterpret_cast<PyImage*>(pySelf);
	// parameters - video source
	// file name or format type for capture (only for Linux: video4linux or dv1394)
	char * file = NULL;
	// capture device number
	short capt = -1;
	// capture width, only if capt is >= 0
	short width = 0;
	// capture height, only if capt is >= 0
	short height = 0;
	// capture rate, only if capt is >= 0
	float rate = 25.f;

	static const char *kwlist[] = {"file", "capture", "rate", "width", "height", NULL};

	// get parameters
	if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|hfhh",
		const_cast<char**>(kwlist), &file, &capt, &rate, &width, &height))
		return -1; 

	try
	{
		// create video object
		Video_init<VideoFFmpeg>(self);

		// set thread usage
		getVideoFFmpeg(self)->initParams(width, height, rate);

		// open video source
		Video_open(getVideo(self), file, capt);
	}
	catch (Exception & exp)
	{
		// convert the C++ exception into a Python error
		exp.report();
		return -1;
	}
	// initialization succeeded
	return 0;
}
1152
1153 static PyObject *VideoFFmpeg_getPreseek(PyImage *self, void *closure)
1154 {
1155         return Py_BuildValue("h", getFFmpeg(self)->getPreseek());
1156 }
1157
1158 // set range
1159 static int VideoFFmpeg_setPreseek(PyImage *self, PyObject *value, void *closure)
1160 {
1161         // check validity of parameter
1162         if (value == NULL || !PyLong_Check(value))
1163         {
1164                 PyErr_SetString(PyExc_TypeError, "The value must be an integer");
1165                 return -1;
1166         }
1167         // set preseek
1168         getFFmpeg(self)->setPreseek(PyLong_AsLong(value));
1169         // success
1170         return 0;
1171 }
1172
1173 // get deinterlace
1174 static PyObject *VideoFFmpeg_getDeinterlace(PyImage *self, void *closure)
1175 {
1176         if (getFFmpeg(self)->getDeinterlace())
1177                 Py_RETURN_TRUE;
1178         else
1179                 Py_RETURN_FALSE;
1180 }
1181
1182 // set flip
1183 static int VideoFFmpeg_setDeinterlace(PyImage *self, PyObject *value, void *closure)
1184 {
1185         // check parameter, report failure
1186         if (value == NULL || !PyBool_Check(value))
1187         {
1188                 PyErr_SetString(PyExc_TypeError, "The value must be a bool");
1189                 return -1;
1190         }
1191         // set deinterlace
1192         getFFmpeg(self)->setDeinterlace(value == Py_True);
1193         // success
1194         return 0;
1195 }
1196
// methods structure
// python-visible methods of VideoTexture.VideoFFmpeg, all forwarded to VideoBase
static PyMethodDef videoMethods[] =
{ // methods from VideoBase class
	{"play", (PyCFunction)Video_play, METH_NOARGS, "Play (restart) video"},
	{"pause", (PyCFunction)Video_pause, METH_NOARGS, "pause video"},
	{"stop", (PyCFunction)Video_stop, METH_NOARGS, "stop video (play will replay it from start)"},
	{"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh video - get its status"},
	{NULL}
};
// attributes structure
// python-visible attributes of VideoTexture.VideoFFmpeg (getters/setters),
// combining VideoBase, ImageBase and the FFmpeg-specific preseek/deinterlace
static PyGetSetDef videoGetSets[] =
{ // methods from VideoBase class
	{(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
	{(char*)"range", (getter)Video_getRange, (setter)Video_setRange, (char*)"replay range", NULL},
	{(char*)"repeat", (getter)Video_getRepeat, (setter)Video_setRepeat, (char*)"repeat count, -1 for infinite repeat", NULL},
	{(char*)"framerate", (getter)Video_getFrameRate, (setter)Video_setFrameRate, (char*)"frame rate", NULL},
	// attributes from ImageBase class
	{(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
	{(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
	{(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
	{(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbor)", NULL},
	{(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
	{(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
	{(char*)"preseek", (getter)VideoFFmpeg_getPreseek, (setter)VideoFFmpeg_setPreseek, (char*)"nb of frames of preseek", NULL},
	{(char*)"deinterlace", (getter)VideoFFmpeg_getDeinterlace, (setter)VideoFFmpeg_setDeinterlace, (char*)"deinterlace image", NULL},
	{NULL}
};
1224
1225 // python type declaration
1226 PyTypeObject VideoFFmpegType =
1227
1228         PyVarObject_HEAD_INIT(NULL, 0)
1229         "VideoTexture.VideoFFmpeg",   /*tp_name*/
1230         sizeof(PyImage),          /*tp_basicsize*/
1231         0,                         /*tp_itemsize*/
1232         (destructor)Image_dealloc, /*tp_dealloc*/
1233         0,                         /*tp_print*/
1234         0,                         /*tp_getattr*/
1235         0,                         /*tp_setattr*/
1236         0,                         /*tp_compare*/
1237         0,                         /*tp_repr*/
1238         0,                         /*tp_as_number*/
1239         0,                         /*tp_as_sequence*/
1240         0,                         /*tp_as_mapping*/
1241         0,                         /*tp_hash */
1242         0,                         /*tp_call*/
1243         0,                         /*tp_str*/
1244         0,                         /*tp_getattro*/
1245         0,                         /*tp_setattro*/
1246         &imageBufferProcs,         /*tp_as_buffer*/
1247         Py_TPFLAGS_DEFAULT,        /*tp_flags*/
1248         "FFmpeg video source",       /* tp_doc */
1249         0,                             /* tp_traverse */
1250         0,                             /* tp_clear */
1251         0,                             /* tp_richcompare */
1252         0,                             /* tp_weaklistoffset */
1253         0,                             /* tp_iter */
1254         0,                             /* tp_iternext */
1255         videoMethods,    /* tp_methods */
1256         0,                   /* tp_members */
1257         videoGetSets,          /* tp_getset */
1258         0,                         /* tp_base */
1259         0,                         /* tp_dict */
1260         0,                         /* tp_descr_get */
1261         0,                         /* tp_descr_set */
1262         0,                         /* tp_dictoffset */
1263         (initproc)VideoFFmpeg_init,     /* tp_init */
1264         0,                         /* tp_alloc */
1265         Image_allocNew,           /* tp_new */
1266 };
1267
// object initialization
// tp_init for VideoTexture.ImageFFmpeg: parses a single file-name argument,
// creates the underlying VideoFFmpeg object in still-image mode and opens it.
// Returns 0 on success, -1 (with a Python exception set) on failure.
static int ImageFFmpeg_init(PyObject *pySelf, PyObject *args, PyObject *kwds)
{
	PyImage *self = reinterpret_cast<PyImage*>(pySelf);
	// parameters - video source
	// file name or format type for capture (only for Linux: video4linux or dv1394)
	char * file = NULL;

	// get parameters
	if (!PyArg_ParseTuple(args, "s:ImageFFmpeg", &file))
		return -1; 

	try
	{
		// create video object
		Video_init<VideoFFmpeg>(self);

		// width/height 0 and rate 1.0; the trailing true flags image (not video) mode
		getVideoFFmpeg(self)->initParams(0, 0, 1.0, true);

		// open video source
		Video_open(getVideo(self), file, -1);
	}
	catch (Exception & exp)
	{
		// convert the C++ exception into a Python error
		exp.report();
		return -1;
	}
	// initialization succeeded
	return 0;
}
1298
// reload image
// Python method: reload([newname]) - reopens the current image file, or opens
// a different file when a name is given. Returns None; raises RuntimeError
// when no file name is available.
static PyObject *Image_reload(PyImage *self, PyObject *args)
{
	char * newname = NULL;
	if (!PyArg_ParseTuple(args, "|s:reload", &newname))
		return NULL;
	if (self->m_image != NULL)
	{
		VideoFFmpeg* video = getFFmpeg(self);
		// check type of object
		if (!newname)
			newname = video->getImageName();
		if (!newname) {
			// if not set, report error
			PyErr_SetString(PyExc_RuntimeError, "No image file name given");
			return NULL;
		}
		// make sure the previous file is cleared
		video->release();
		// open the new file
		video->openFile(newname);
	}
	Py_RETURN_NONE;
}
1322
// methods structure
// python-visible methods of VideoTexture.ImageFFmpeg
static PyMethodDef imageMethods[] =
{ // methods from VideoBase class
	{"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh image, i.e. load it"},
	{"reload", (PyCFunction)Image_reload, METH_VARARGS, "Reload image, i.e. reopen it"},
	{NULL}
};
// attributes structure
// python-visible attributes of VideoTexture.ImageFFmpeg (read-only status
// plus the common ImageBase attributes)
static PyGetSetDef imageGetSets[] =
{ // methods from VideoBase class
	{(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
	// attributes from ImageBase class
	{(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
	{(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
	{(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
	{(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbor)", NULL},
	{(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
	{(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
	{NULL}
};
1343
1344 // python type declaration
1345 PyTypeObject ImageFFmpegType =
1346
1347         PyVarObject_HEAD_INIT(NULL, 0)
1348         "VideoTexture.ImageFFmpeg",   /*tp_name*/
1349         sizeof(PyImage),          /*tp_basicsize*/
1350         0,                         /*tp_itemsize*/
1351         (destructor)Image_dealloc, /*tp_dealloc*/
1352         0,                         /*tp_print*/
1353         0,                         /*tp_getattr*/
1354         0,                         /*tp_setattr*/
1355         0,                         /*tp_compare*/
1356         0,                         /*tp_repr*/
1357         0,                         /*tp_as_number*/
1358         0,                         /*tp_as_sequence*/
1359         0,                         /*tp_as_mapping*/
1360         0,                         /*tp_hash */
1361         0,                         /*tp_call*/
1362         0,                         /*tp_str*/
1363         0,                         /*tp_getattro*/
1364         0,                         /*tp_setattro*/
1365         &imageBufferProcs,         /*tp_as_buffer*/
1366         Py_TPFLAGS_DEFAULT,        /*tp_flags*/
1367         "FFmpeg image source",       /* tp_doc */
1368         0,                             /* tp_traverse */
1369         0,                             /* tp_clear */
1370         0,                             /* tp_richcompare */
1371         0,                             /* tp_weaklistoffset */
1372         0,                             /* tp_iter */
1373         0,                             /* tp_iternext */
1374         imageMethods,    /* tp_methods */
1375         0,                   /* tp_members */
1376         imageGetSets,          /* tp_getset */
1377         0,                         /* tp_base */
1378         0,                         /* tp_dict */
1379         0,                         /* tp_descr_get */
1380         0,                         /* tp_descr_set */
1381         0,                         /* tp_dictoffset */
1382         (initproc)ImageFFmpeg_init,     /* tp_init */
1383         0,                         /* tp_alloc */
1384         Image_allocNew,           /* tp_new */
1385 };
1386
1387 #endif  //WITH_FFMPEG
1388
1389