/*
-----------------------------------------------------------------------------
This source file is part of VideoTexture library

Copyright (c) 2007 The Zdeno Ash Miklas

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place - Suite 330, Boston, MA 02111-1307, USA, or go to
http://www.gnu.org/copyleft/lesser.txt.
-----------------------------------------------------------------------------
*/

/** \file gameengine/VideoTexture/VideoFFmpeg.cpp
 *  \ingroup bgevideotex
 */


#ifdef WITH_FFMPEG

// INT64_C fix for some linux machines (C99ism)
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#include <stdint.h>


#include "MEM_guardedalloc.h"
#include "PIL_time.h"

#include <string>

#include "VideoFFmpeg.h"
#include "Exception.h"


// default framerate
const double defFrameRate = 25.0;
// time scale constant
const long timeScale = 1000;

// macro for exception handling and logging
#define CATCH_EXCP catch (Exception & exp) \
{ exp.report(); m_status = SourceError; }
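
// CATCH_EXCP supplies the catch clause of a preceding try block, e.g.:
//   try { ... } CATCH_EXCP;
// which reports the exception and marks the source as being in error.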

// class RenderVideo

// constructor
VideoFFmpeg::VideoFFmpeg (HRESULT * hRslt) : VideoBase(),
m_codec(NULL), m_formatCtx(NULL), m_codecCtx(NULL),
m_frame(NULL), m_frameDeinterlaced(NULL), m_frameRGB(NULL), m_imgConvertCtx(NULL),
m_deinterlace(false), m_preseek(0), m_videoStream(-1), m_baseFrameRate(25.0),
m_lastFrame(-1), m_eof(false), m_externTime(false), m_curPosition(-1), m_startTime(0),
m_captWidth(0), m_captHeight(0), m_captRate(0.f), m_isImage(false),
m_isThreaded(false), m_isStreaming(false), m_stopThread(false), m_cacheStarted(false)
{
    // set video format
    m_format = RGB24;
    // force flip because ffmpeg always returns the image in the wrong orientation for textures
    setFlip(true);
    // construction is OK
    *hRslt = S_OK;
    m_thread.first = m_thread.last = NULL;
    pthread_mutex_init(&m_cacheMutex, NULL);
    m_frameCacheFree.first = m_frameCacheFree.last = NULL;
    m_frameCacheBase.first = m_frameCacheBase.last = NULL;
    m_packetCacheFree.first = m_packetCacheFree.last = NULL;
    m_packetCacheBase.first = m_packetCacheBase.last = NULL;
}

// destructor
VideoFFmpeg::~VideoFFmpeg ()
{
}


// release components
bool VideoFFmpeg::release()
{
    // release
    stopCache();
    if (m_codecCtx)
    {
        avcodec_close(m_codecCtx);
        m_codecCtx = NULL;
    }
    if (m_formatCtx)
    {
        av_close_input_file(m_formatCtx);
        m_formatCtx = NULL;
    }
    if (m_frame)
    {
        av_free(m_frame);
        m_frame = NULL;
    }
    if (m_frameDeinterlaced)
    {
        MEM_freeN(m_frameDeinterlaced->data[0]);
        av_free(m_frameDeinterlaced);
        m_frameDeinterlaced = NULL;
    }
    if (m_frameRGB)
    {
        MEM_freeN(m_frameRGB->data[0]);
        av_free(m_frameRGB);
        m_frameRGB = NULL;
    }
    if (m_imgConvertCtx)
    {
        sws_freeContext(m_imgConvertCtx);
        m_imgConvertCtx = NULL;
    }
    m_codec = NULL;
    m_status = SourceStopped;
    m_lastFrame = -1;
    return true;
}

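// helper: allocate an AVFrame whose pixel buffer is obtained from MEM_callocN;
// the caller owns both and must free them as release() does above:
// MEM_freeN(frame->data[0]) for the buffer, av_free(frame) for the frame.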
AVFrame *VideoFFmpeg::allocFrameRGB()
{
    AVFrame *frame;
    frame = avcodec_alloc_frame();
    if (m_format == RGBA32)
    {
        avpicture_fill((AVPicture*)frame,
            (uint8_t*)MEM_callocN(avpicture_get_size(
                PIX_FMT_RGBA,
                m_codecCtx->width, m_codecCtx->height),
                "ffmpeg rgba"),
            PIX_FMT_RGBA, m_codecCtx->width, m_codecCtx->height);
    } else
    {
        avpicture_fill((AVPicture*)frame,
            (uint8_t*)MEM_callocN(avpicture_get_size(
                PIX_FMT_RGB24,
                m_codecCtx->width, m_codecCtx->height),
                "ffmpeg rgb"),
            PIX_FMT_RGB24, m_codecCtx->width, m_codecCtx->height);
    }
    return frame;
}

// set initial parameters
void VideoFFmpeg::initParams (short width, short height, float rate, bool image)
{
    m_captWidth = width;
    m_captHeight = height;
    m_captRate = rate;
    m_isImage = image;
}


int VideoFFmpeg::openStream(const char *filename, AVInputFormat *inputFormat, AVDictionary **formatParams)
{
    AVFormatContext *formatCtx = NULL;
    int             i, videoStream;
    AVCodec         *codec;
    AVCodecContext  *codecCtx;

    if (avformat_open_input(&formatCtx, filename, inputFormat, formatParams) != 0)
        return -1;

    if (av_find_stream_info(formatCtx) < 0)
    {
        av_close_input_file(formatCtx);
        return -1;
    }

    /* Find the first video stream */
    videoStream = -1;
    for (i = 0; i < formatCtx->nb_streams; i++)
    {
        if (formatCtx->streams[i] &&
            get_codec_from_stream(formatCtx->streams[i]) &&
            (get_codec_from_stream(formatCtx->streams[i])->codec_type == AVMEDIA_TYPE_VIDEO))
        {
            videoStream = i;
            break;
        }
    }

    if (videoStream == -1)
    {
        av_close_input_file(formatCtx);
        return -1;
    }

    codecCtx = get_codec_from_stream(formatCtx->streams[videoStream]);

    /* Find the decoder for the video stream */
    codec = avcodec_find_decoder(codecCtx->codec_id);
    if (codec == NULL)
    {
        av_close_input_file(formatCtx);
        return -1;
    }
    codecCtx->workaround_bugs = 1;
    if (avcodec_open(codecCtx, codec) < 0)
    {
        av_close_input_file(formatCtx);
        return -1;
    }

#ifdef FFMPEG_OLD_FRAME_RATE
    if (codecCtx->frame_rate > 1000 && codecCtx->frame_rate_base == 1)
        codecCtx->frame_rate_base = 1000;
    m_baseFrameRate = (double)codecCtx->frame_rate / (double)codecCtx->frame_rate_base;
#else
    m_baseFrameRate = av_q2d(formatCtx->streams[videoStream]->r_frame_rate);
#endif
    if (m_baseFrameRate <= 0.0)
        m_baseFrameRate = defFrameRate;

    m_codec = codec;
    m_codecCtx = codecCtx;
    m_formatCtx = formatCtx;
    m_videoStream = videoStream;
    m_frame = avcodec_alloc_frame();
    m_frameDeinterlaced = avcodec_alloc_frame();

    // allocate buffer used for deinterlacing (always allocated, but only
    // used when deinterlacing is enabled)
    avpicture_fill((AVPicture*)m_frameDeinterlaced,
        (uint8_t*)MEM_callocN(avpicture_get_size(
        m_codecCtx->pix_fmt,
        m_codecCtx->width, m_codecCtx->height),
        "ffmpeg deinterlace"),
        m_codecCtx->pix_fmt, m_codecCtx->width, m_codecCtx->height);

    // check if the pixel format supports Alpha
    if (m_codecCtx->pix_fmt == PIX_FMT_RGB32 ||
        m_codecCtx->pix_fmt == PIX_FMT_BGR32 ||
        m_codecCtx->pix_fmt == PIX_FMT_RGB32_1 ||
        m_codecCtx->pix_fmt == PIX_FMT_BGR32_1)
    {
        // allocate buffer to store final decoded frame
        m_format = RGBA32;
        // allocate sws context
        m_imgConvertCtx = sws_getContext(
            m_codecCtx->width,
            m_codecCtx->height,
            m_codecCtx->pix_fmt,
            m_codecCtx->width,
            m_codecCtx->height,
            PIX_FMT_RGBA,
            SWS_FAST_BILINEAR,
            NULL, NULL, NULL);
    } else
    {
        // allocate buffer to store final decoded frame
        m_format = RGB24;
        // allocate sws context
        m_imgConvertCtx = sws_getContext(
            m_codecCtx->width,
            m_codecCtx->height,
            m_codecCtx->pix_fmt,
            m_codecCtx->width,
            m_codecCtx->height,
            PIX_FMT_RGB24,
            SWS_FAST_BILINEAR,
            NULL, NULL, NULL);
    }
    m_frameRGB = allocFrameRGB();

    if (!m_imgConvertCtx) {
        avcodec_close(m_codecCtx);
        m_codecCtx = NULL;
        av_close_input_file(m_formatCtx);
        m_formatCtx = NULL;
        av_free(m_frame);
        m_frame = NULL;
        MEM_freeN(m_frameDeinterlaced->data[0]);
        av_free(m_frameDeinterlaced);
        m_frameDeinterlaced = NULL;
        MEM_freeN(m_frameRGB->data[0]);
        av_free(m_frameRGB);
        m_frameRGB = NULL;
        return -1;
    }
    return 0;
}

/*
 * This thread is used to load video frames asynchronously.
 * It provides a frame caching service.
 * The main thread is responsible for positioning the frame pointer in the
 * file correctly before calling startCache() which starts this thread.
 * The cache is organized in two layers: 1) a cache of 20-30 undecoded packets to keep
 * memory and CPU load low; 2) a cache of 5 decoded frames.
 * If the main thread does not find the frame in the cache (because the video has restarted
 * or because the GE is lagging), it stops the cache with StopCache() (this is a synchronous
 * function: it sends a signal to stop the cache thread and waits for confirmation), then
 * changes the position in the stream and restarts the cache thread.
 */
void *VideoFFmpeg::cacheThread(void *data)
{
    VideoFFmpeg* video = (VideoFFmpeg*)data;
    // holds the frame that is being decoded
    CacheFrame *currentFrame = NULL;
    CachePacket *cachePacket;
    bool endOfFile = false;
    int frameFinished = 0;
    double timeBase = av_q2d(video->m_formatCtx->streams[video->m_videoStream]->time_base);
    int64_t startTs = video->m_formatCtx->streams[video->m_videoStream]->start_time;

    if (startTs == AV_NOPTS_VALUE)
        startTs = 0;

    while (!video->m_stopThread)
    {
        // packet cache is used solely by this thread, no need to lock
        // In case the stream/file contains streams other than the one we are looking for,
        // allow a bit of cycling to get rid of those frames quickly
        frameFinished = 0;
        while (!endOfFile
               && (cachePacket = (CachePacket *)video->m_packetCacheFree.first) != NULL
               && frameFinished < 25)
        {
            // free packet => packet cache is not full yet, just read more
            if (av_read_frame(video->m_formatCtx, &cachePacket->packet) >= 0)
            {
                if (cachePacket->packet.stream_index == video->m_videoStream)
                {
                    // make sure fresh memory is allocated for the packet and move it to the queue
                    av_dup_packet(&cachePacket->packet);
                    BLI_remlink(&video->m_packetCacheFree, cachePacket);
                    BLI_addtail(&video->m_packetCacheBase, cachePacket);
                    break;
                } else {
                    // this is not a good packet for us, just leave it on the free queue
                    // Note: here we could handle sound packets
                    av_free_packet(&cachePacket->packet);
                    frameFinished++;
                }

            } else {
                if (video->m_isFile)
                    // this marks the end of the file
                    endOfFile = true;
                // if we cannot read a packet, no need to continue
                break;
            }
        }
        // frame cache is also used by the main thread, lock
        if (currentFrame == NULL)
        {
            // no current frame being decoded, take a free one
            pthread_mutex_lock(&video->m_cacheMutex);
            if ((currentFrame = (CacheFrame *)video->m_frameCacheFree.first) != NULL)
                BLI_remlink(&video->m_frameCacheFree, currentFrame);
            pthread_mutex_unlock(&video->m_cacheMutex);
        }
        if (currentFrame != NULL)
        {
            // this frame is out of the free and busy queues, we can manipulate it without locking
            frameFinished = 0;
            while (!frameFinished && (cachePacket = (CachePacket *)video->m_packetCacheBase.first) != NULL)
            {
                BLI_remlink(&video->m_packetCacheBase, cachePacket);
                // use m_frame because when caching, it is not used in the main thread
                // we can't use currentFrame directly because we need to convert to RGB first
                avcodec_decode_video2(video->m_codecCtx,
                    video->m_frame, &frameFinished,
                    &cachePacket->packet);
                if (frameFinished)
                {
                    AVFrame * input = video->m_frame;

                    /* if all data pointers are NULL the frame wasn't read properly,
                     * this check stops crashing */
                    if (input->data[0] != 0 || input->data[1] != 0 ||
                        input->data[2] != 0 || input->data[3] != 0)
                    {
                        if (video->m_deinterlace)
                        {
                            if (avpicture_deinterlace(
                                (AVPicture*) video->m_frameDeinterlaced,
                                (const AVPicture*) video->m_frame,
                                video->m_codecCtx->pix_fmt,
                                video->m_codecCtx->width,
                                video->m_codecCtx->height) >= 0)
                            {
                                input = video->m_frameDeinterlaced;
                            }
                        }
                        // convert to RGB24
                        sws_scale(video->m_imgConvertCtx,
                            input->data,
                            input->linesize,
                            0,
                            video->m_codecCtx->height,
                            currentFrame->frame->data,
                            currentFrame->frame->linesize);
                        // move frame to the queue, this frame is necessarily the next one
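                        // frame index recovered from the packet timestamp:
                        //   frame = (dts - startTs) * fps * timeBase, rounded to nearest.
                        // For example (hypothetical values): with timeBase = 1/90000 s,
                        // fps = 25 and dts - startTs = 180000 ticks (2 s), this gives frame 50.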
                        video->m_curPosition = (long)((cachePacket->packet.dts - startTs) * (video->m_baseFrameRate*timeBase) + 0.5);
                        currentFrame->framePosition = video->m_curPosition;
                        pthread_mutex_lock(&video->m_cacheMutex);
                        BLI_addtail(&video->m_frameCacheBase, currentFrame);
                        pthread_mutex_unlock(&video->m_cacheMutex);
                        currentFrame = NULL;
                    }
                }
                av_free_packet(&cachePacket->packet);
                BLI_addtail(&video->m_packetCacheFree, cachePacket);
            }
            if (currentFrame && endOfFile)
            {
                // no more packets and end of file => queue a special frame that marks it
                currentFrame->framePosition = -1;
                pthread_mutex_lock(&video->m_cacheMutex);
                BLI_addtail(&video->m_frameCacheBase, currentFrame);
                pthread_mutex_unlock(&video->m_cacheMutex);
                currentFrame = NULL;
                // no need to stay any longer in this thread
                break;
            }
        }
        // small sleep to avoid unnecessary looping
        PIL_sleep_ms(10);
    }
    // before quitting, put the current frame back on the queue to allow freeing
    if (currentFrame)
    {
        pthread_mutex_lock(&video->m_cacheMutex);
        BLI_addtail(&video->m_frameCacheFree, currentFrame);
        pthread_mutex_unlock(&video->m_cacheMutex);
    }
    return 0;
}

// start a thread to cache video frames from a file/capture/stream
// this function should be called only when the position in the stream is set for the
// first frame to cache
bool VideoFFmpeg::startCache()
{
    if (!m_cacheStarted && m_isThreaded)
    {
        m_stopThread = false;
        for (int i = 0; i < CACHE_FRAME_SIZE; i++)
        {
            CacheFrame *frame = new CacheFrame();
            frame->frame = allocFrameRGB();
            BLI_addtail(&m_frameCacheFree, frame);
        }
        for (int i = 0; i < CACHE_PACKET_SIZE; i++)
        {
            CachePacket *packet = new CachePacket();
            BLI_addtail(&m_packetCacheFree, packet);
        }
        BLI_init_threads(&m_thread, cacheThread, 1);
        BLI_insert_thread(&m_thread, this);
        m_cacheStarted = true;
    }
    return m_cacheStarted;
}

void VideoFFmpeg::stopCache()
{
    if (m_cacheStarted)
    {
        m_stopThread = true;
        BLI_end_threads(&m_thread);
        // now delete the cache
        CacheFrame *frame;
        CachePacket *packet;
        while ((frame = (CacheFrame *)m_frameCacheBase.first) != NULL)
        {
            BLI_remlink(&m_frameCacheBase, frame);
            MEM_freeN(frame->frame->data[0]);
            av_free(frame->frame);
            delete frame;
        }
        while ((frame = (CacheFrame *)m_frameCacheFree.first) != NULL)
        {
            BLI_remlink(&m_frameCacheFree, frame);
            MEM_freeN(frame->frame->data[0]);
            av_free(frame->frame);
            delete frame;
        }
        while ((packet = (CachePacket *)m_packetCacheBase.first) != NULL)
        {
            BLI_remlink(&m_packetCacheBase, packet);
            av_free_packet(&packet->packet);
            delete packet;
        }
        while ((packet = (CachePacket *)m_packetCacheFree.first) != NULL)
        {
            BLI_remlink(&m_packetCacheFree, packet);
            delete packet;
        }
        m_cacheStarted = false;
    }
}

void VideoFFmpeg::releaseFrame(AVFrame* frame)
{
    if (frame == m_frameRGB)
    {
        // this is not a frame from the cache, ignore
        return;
    }
    // this frame MUST be the first one of the queue
    pthread_mutex_lock(&m_cacheMutex);
    CacheFrame *cacheFrame = (CacheFrame *)m_frameCacheBase.first;
    assert (cacheFrame != NULL && cacheFrame->frame == frame);
    BLI_remlink(&m_frameCacheBase, cacheFrame);
    BLI_addtail(&m_frameCacheFree, cacheFrame);
    pthread_mutex_unlock(&m_cacheMutex);
}

// open video file
void VideoFFmpeg::openFile (char * filename)
{
    if (openStream(filename, NULL, NULL) != 0)
        return;

    if (m_codecCtx->gop_size)
        m_preseek = (m_codecCtx->gop_size < 25) ? m_codecCtx->gop_size + 1 : 25;
    else if (m_codecCtx->has_b_frames)
        m_preseek = 25; // should determine gopsize
    else
        m_preseek = 0;
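
    // Note on m_preseek: container seeks land on a keyframe, so grabFrame()
    // deliberately seeks m_preseek frames before the target and decodes
    // forward from there; one GOP is the worst case distance between a
    // keyframe and the requested frame.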

    // get video time range
    m_range[0] = 0.0;
    m_range[1] = (double)m_formatCtx->duration / AV_TIME_BASE;

    // open base class
    VideoBase::openFile(filename);

    if (
        // ffmpeg reports that http sources are not streams,
        // but it is really not desirable to seek in an http file, so force streaming.
        // It would be good to get this information from the context but there is no simple indication
        !strncmp(filename, "http://", 7) ||
        (m_formatCtx->pb && !m_formatCtx->pb->seekable)
        )
    {
        // the file is in fact a streaming source, treat as cam to prevent seeking
        m_isFile = false;
        // but it's not handled exactly like a camera.
        m_isStreaming = true;
        // for streaming it is important to do non blocking reads
        m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
    }

    if (m_isImage)
    {
        // the file is to be treated as an image, i.e. load the first frame only
        m_isFile = false;
        // in case of reload, the filename is taken from m_imageName, no need to change it
        if (m_imageName.Ptr() != filename)
            m_imageName = filename;
        m_preseek = 0;
        m_avail = false;
        play();
    }
    // check if we should do multi-threading?
    if (!m_isImage && BLI_system_thread_count() > 1)
    {
        // never thread an image: there are no frames to read ahead
        // no need to thread if the system has a single core
        m_isThreaded = true;
    }
}


// open video capture device
void VideoFFmpeg::openCam (char * file, short camIdx)
{
    // open camera source
    AVInputFormat   *inputFormat;
    AVDictionary    *formatParams = NULL;
    char            filename[28], rateStr[20];
    char            *p;

#ifdef WIN32
    // video capture on windows only through the Video For Windows driver
    inputFormat = av_find_input_format("vfwcap");
    if (!inputFormat)
        // Video For Windows not supported??
        return;
    sprintf(filename, "%d", camIdx);
#else
    // In Linux we support two types of devices: VideoForLinux and DV1394.
    // the user specifies it with the filename:
    // [<device_type>][:<standard>]
    // <device_type> : 'v4l' for VideoForLinux, 'dv1394' for DV1394. By default 'v4l'
    // <standard>    : 'pal', 'secam' or 'ntsc'. By default 'ntsc'
    // The driver name is constructed automatically from the device type:
    // v4l   : /dev/video<camIdx>
    // dv1394: /dev/dv1394/<camIdx>
    // If you have a different driver name, you can specify the driver name explicitly
    // instead of the device type. Examples of valid filenames:
    //    /dev/v4l/video0:pal
    //    /dev/ieee1394/1:ntsc
    //    dv1394:secam
    //    v4l:pal
    if (file && strstr(file, "1394") != NULL)
    {
        // the user specified a driver, check if it is v4l or dv1394
        inputFormat = av_find_input_format("dv1394");
        sprintf(filename, "/dev/dv1394/%d", camIdx);
    } else
    {
        const char *formats[] = {"video4linux2,v4l2", "video4linux2", "video4linux"};
        int i, formatsCount = sizeof(formats) / sizeof(char*);
        for (i = 0; i < formatsCount; i++) {
            inputFormat = av_find_input_format(formats[i]);
            if (inputFormat)
                break;
        }
        sprintf(filename, "/dev/video%d", camIdx);
    }
    if (!inputFormat)
        // these formats should be supported, check the ffmpeg compilation
        return;
    if (file && strncmp(file, "/dev", 4) == 0)
    {
        // the user specified the device path explicitly, use it (strip any ':<standard>' suffix)
        strncpy(filename, file, sizeof(filename));
        filename[sizeof(filename) - 1] = 0;
        if ((p = strchr(filename, ':')) != 0)
            *p = 0;
    }
    if (file && (p = strchr(file, ':')) != NULL) {
        av_dict_set(&formatParams, "standard", p + 1, 0);
    }
#endif
    // frame rate
    if (m_captRate <= 0.f)
        m_captRate = defFrameRate;
    sprintf(rateStr, "%f", m_captRate);

    av_dict_set(&formatParams, "framerate", rateStr, 0);

    if (m_captWidth > 0 && m_captHeight > 0) {
        char video_size[64];
        BLI_snprintf(video_size, sizeof(video_size), "%dx%d", m_captWidth, m_captHeight);
        av_dict_set(&formatParams, "video_size", video_size, 0);
    }

    if (openStream(filename, inputFormat, &formatParams) != 0)
        return;

    // for video capture it is important to do non blocking reads
    m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
    // open base class
    VideoBase::openCam(file, camIdx);
    // check if we should do multi-threading?
    if (BLI_system_thread_count() > 1)
    {
        // no need to thread if the system has a single core
        m_isThreaded = true;
    }

    av_dict_free(&formatParams);
}

// play video
bool VideoFFmpeg::play (void)
{
    try
    {
        // if object is able to play
        if (VideoBase::play())
        {
            // set video position
            setPositions();
            // return success
            return true;
        }
    }
    CATCH_EXCP;
    return false;
}


// pause video
bool VideoFFmpeg::pause (void)
{
    try
    {
        if (VideoBase::pause())
        {
            return true;
        }
    }
    CATCH_EXCP;
    return false;
}

// stop video
bool VideoFFmpeg::stop (void)
{
    try
    {
        VideoBase::stop();
        // force restart on next play
        m_lastFrame = -1;
        return true;
    }
    CATCH_EXCP;
    return false;
}


// set video range
void VideoFFmpeg::setRange (double start, double stop)
{
    try
    {
        // set range
        if (m_isFile)
        {
            VideoBase::setRange(start, stop);
            // set range for video
            setPositions();
        }
    }
    CATCH_EXCP;
}

// set framerate
void VideoFFmpeg::setFrameRate (float rate)
{
    VideoBase::setFrameRate(rate);
}


// image calculation
// load frame from video
void VideoFFmpeg::calcImage (unsigned int texId, double ts)
{
    if (m_status == SourcePlaying)
    {
        // get actual time
        double startTime = PIL_check_seconds_timer();
        double actTime;
        // timestamps passed from audio actuators can sometimes be slightly negative
        if (m_isFile && ts >= -0.5)
        {
            // allow setting the timestamp only when not streaming
            actTime = ts;
            if (actTime * actFrameRate() < m_lastFrame)
            {
                // user is asking to rewind, force a cache clear to make sure we will do a seek
                // note that this does not decrement m_repeat if ts didn't reach m_range[1]
                stopCache();
            }
        }
        else
        {
            if (m_lastFrame == -1 && !m_isFile)
                m_startTime = startTime;
            actTime = startTime - m_startTime;
        }
        // if video has ended
        if (m_isFile && actTime * m_frameRate >= m_range[1])
        {
            // in any case, this resets the cache
            stopCache();
            // if repeats are set, decrease them
            if (m_repeat > 0)
                --m_repeat;
            // if video has to be replayed
            if (m_repeat != 0)
            {
                // reset its position
                actTime -= (m_range[1] - m_range[0]) / m_frameRate;
                m_startTime += (m_range[1] - m_range[0]) / m_frameRate;
            }
            // if video has to be stopped, stop it
            else
            {
                m_status = SourceStopped;
                return;
            }
        }
        // actual frame
        long actFrame = (m_isImage) ? m_lastFrame + 1 : long(actTime * actFrameRate());
        // if actual frame differs from last frame
        if (actFrame != m_lastFrame)
        {
            AVFrame* frame;
            // get image
            if ((frame = grabFrame(actFrame)) != NULL)
            {
                if (!m_isFile && !m_cacheStarted)
                {
                    // streaming without cache: detect synchronization problems
                    double execTime = PIL_check_seconds_timer() - startTime;
                    if (execTime > 0.005)
                    {
                        // exec time is too long, it means that the function was blocking
                        // resynchronize the stream from this time
                        m_startTime += execTime;
                    }
                }
                // save actual frame
                m_lastFrame = actFrame;
                // init image, if needed
                init(short(m_codecCtx->width), short(m_codecCtx->height));
                // process image
                process((BYTE*)(frame->data[0]));
                // finished with the frame, release it so that the cache can reuse it
                releaseFrame(frame);
                // in case it is an image, automatically stop reading it
                if (m_isImage)
                {
                    m_status = SourceStopped;
                    // close the file as we don't need it anymore
                    release();
                }
            } else if (m_isStreaming)
            {
                // we didn't get a frame and we are streaming, this may be due to
                // a delay in the network or because we are getting the frames too fast.
                // In the latter case, shift time by a small amount to compensate for a drift
                m_startTime += 0.001;
            }
        }
    }
}


// set actual position
void VideoFFmpeg::setPositions (void)
{
    // set video start time
    m_startTime = PIL_check_seconds_timer();
    // if file is played and actual position is before end position
    if (!m_eof && m_lastFrame >= 0 && (!m_isFile || m_lastFrame < m_range[1] * actFrameRate()))
        // continue from actual position
        m_startTime -= double(m_lastFrame) / actFrameRate();
    else {
        m_startTime -= m_range[0];
        // start from beginning, stop cache just in case
        stopCache();
    }
}

// position pointer in file, position in seconds
AVFrame *VideoFFmpeg::grabFrame(long position)
{
    AVPacket packet;
    int frameFinished;
    int posFound = 1;
    bool frameLoaded = false;
    int64_t targetTs = 0;
    CacheFrame *frame;
    int64_t dts = 0;

    if (m_cacheStarted)
    {
        // when the cache is active, we must not read the file directly
        do {
            pthread_mutex_lock(&m_cacheMutex);
            frame = (CacheFrame *)m_frameCacheBase.first;
            pthread_mutex_unlock(&m_cacheMutex);
            // no need to remove the frame from the queue: the cache thread does not touch the head, only the tail
            if (frame == NULL)
            {
                // no frame in cache, for a file this is an abnormal situation
                if (m_isFile)
                {
                    // go back to non threaded reading
                    stopCache();
                    break;
                }
                return NULL;
            }
            if (frame->framePosition == -1)
            {
                // this frame marks the end of the file (only used for files)
                // leave it in the cache to make sure we don't miss it
                m_eof = true;
                return NULL;
            }
            // for streaming, always return the next frame,
            // that's what grabFrame does in non cache mode anyway.
            if (m_isStreaming || frame->framePosition == position)
            {
                return frame->frame;
            }
            // for cam, skip old frames to keep the image realtime.
            // There should be no risk of clock drift since it all happens on the same CPU
            if (frame->framePosition > position)
            {
                // this can happen after a rewind if the seek didn't find the first frame
                // the frame in the buffer is ahead of time, just leave it there
                return NULL;
            }
            // this frame is not useful, release it
            pthread_mutex_lock(&m_cacheMutex);
            BLI_remlink(&m_frameCacheBase, frame);
            BLI_addtail(&m_frameCacheFree, frame);
            pthread_mutex_unlock(&m_cacheMutex);
        } while (true);
    }
    double timeBase = av_q2d(m_formatCtx->streams[m_videoStream]->time_base);
    int64_t startTs = m_formatCtx->streams[m_videoStream]->start_time;
    if (startTs == AV_NOPTS_VALUE)
        startTs = 0;

    // come here when there is no cache or the cache has been stopped
    // locate the frame, by seeking if necessary (seeking is only possible for files)
    if (m_isFile)
    {
        // first check if the position that we are looking for is in the preseek range
        // if so, just read frames until we get there
        if (position > m_curPosition + 1
            && m_preseek
            && position - (m_curPosition + 1) < m_preseek)
        {
            while (av_read_frame(m_formatCtx, &packet) >= 0)
            {
                if (packet.stream_index == m_videoStream)
                {
                    avcodec_decode_video2(
                        m_codecCtx,
                        m_frame, &frameFinished,
                        &packet);
                    if (frameFinished)
                    {
                        m_curPosition = (long)((packet.dts - startTs) * (m_baseFrameRate*timeBase) + 0.5);
                    }
                }
                av_free_packet(&packet);
                if (position == m_curPosition + 1)
                    break;
            }
        }
        // if the position is not in the preseek range, do a direct jump
        if (position != m_curPosition + 1)
        {
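            // map the target frame back to a stream timestamp (the inverse of
            // the dts -> frame formula used above), then seek a little before
            // it so that decoding can catch up from the preceding keyframe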
            int64_t pos = (int64_t)((position - m_preseek) / (m_baseFrameRate*timeBase));

            if (pos < 0)
                pos = 0;

            pos += startTs;

            if (position <= m_curPosition || !m_eof)
            {
#if 0
                // Tried to make this work but couldn't: seeking on bytes is ignored by the
                // format plugin and it will generally continue to read from the last timestamp.
                // Too bad because frame seek is not always able to get the first frame
                // of the file.
                if (position <= m_preseek)
                {
                    // we can safely go to the beginning of the file
                    if (av_seek_frame(m_formatCtx, m_videoStream, 0, AVSEEK_FLAG_BYTE) >= 0)
                    {
                        // binary seek does not reset the timestamp, must do it now
                        av_update_cur_dts(m_formatCtx, m_formatCtx->streams[m_videoStream], startTs);
                        m_curPosition = 0;
                    }
                }
                else
#endif
                {
                    if (av_seek_frame(m_formatCtx, m_videoStream, pos, AVSEEK_FLAG_BACKWARD) >= 0)
                    {
                        // the current position is now lost, guess a value.
                        // It's not important because it will be set at the end of this function
                        m_curPosition = position - m_preseek - 1;
                    }
                }
            }
            // this is the timestamp of the frame we're looking for
            targetTs = (int64_t)(position / (m_baseFrameRate * timeBase)) + startTs;

            posFound = 0;
            avcodec_flush_buffers(m_codecCtx);
        }
    } else if (m_isThreaded)
    {
        // the cache is not started but threading is possible
        // better not to read the stream directly => it may take some time, better to start caching
        if (startCache())
            return NULL;
        // Abnormal!!! could not start the cache, fall back on direct read
        m_isThreaded = false;
    }

    // find the correct frame, in case of streaming and no cache, it means just
    // return the next frame. This is not quite correct, may need more work
    while (av_read_frame(m_formatCtx, &packet) >= 0)
    {
        if (packet.stream_index == m_videoStream)
        {
            avcodec_decode_video2(m_codecCtx,
                m_frame, &frameFinished,
                &packet);
            // remember the dts to compute the exact frame number
            dts = packet.dts;
            if (frameFinished && !posFound)
            {
                if (dts >= targetTs)
                {
                    posFound = 1;
                }
            }

            if (frameFinished && posFound == 1)
            {
                AVFrame * input = m_frame;

                /* if all data pointers are NULL the frame wasn't read properly,
                 * this check stops crashing */
                if (input->data[0] == 0 && input->data[1] == 0 &&
                    input->data[2] == 0 && input->data[3] == 0)
                {
                    av_free_packet(&packet);
                    break;
                }

                if (m_deinterlace)
                {
                    if (avpicture_deinterlace(
                        (AVPicture*) m_frameDeinterlaced,
                        (const AVPicture*) m_frame,
                        m_codecCtx->pix_fmt,
                        m_codecCtx->width,
                        m_codecCtx->height) >= 0)
                    {
                        input = m_frameDeinterlaced;
                    }
                }
                // convert to RGB24
                sws_scale(m_imgConvertCtx,
                    input->data,
                    input->linesize,
                    0,
                    m_codecCtx->height,
                    m_frameRGB->data,
                    m_frameRGB->linesize);
                av_free_packet(&packet);
                frameLoaded = true;
                break;
            }
        }
        av_free_packet(&packet);
    }
    m_eof = m_isFile && !frameLoaded;
    if (frameLoaded)
    {
        m_curPosition = (long)((dts - startTs) * (m_baseFrameRate*timeBase) + 0.5);
        if (m_isThreaded)
        {
            // normal case for file: first locate, then start the cache
            if (!startCache())
            {
                // Abnormal!! could not start the cache, return to non-cache mode
                m_isThreaded = false;
            }
        }
        return m_frameRGB;
    }
    return NULL;
}


// python methods


// cast Image pointer to VideoFFmpeg
inline VideoFFmpeg * getVideoFFmpeg (PyImage * self)
{ return static_cast<VideoFFmpeg*>(self->m_image); }


// object initialization
static int VideoFFmpeg_init (PyObject * pySelf, PyObject * args, PyObject * kwds)
{
    PyImage * self = reinterpret_cast<PyImage*>(pySelf);
    // parameters - video source
    // file name or format type for capture (only for Linux: video4linux or dv1394)
    char * file = NULL;
    // capture device number
    short capt = -1;
    // capture width, only if capt is >= 0
    short width = 0;
    // capture height, only if capt is >= 0
    short height = 0;
    // capture rate, only if capt is >= 0
    float rate = 25.f;

    static const char *kwlist[] = {"file", "capture", "rate", "width", "height", NULL};

    // get parameters
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|hfhh",
        const_cast<char**>(kwlist), &file, &capt, &rate, &width, &height))
        return -1;

    try
    {
        // create video object
        Video_init<VideoFFmpeg>(self);

        // set thread usage
        getVideoFFmpeg(self)->initParams(width, height, rate);

        // open video source
        Video_open(getVideo(self), file, capt);
    }
    catch (Exception & exp)
    {
        exp.report();
        return -1;
    }
    // initialization succeeded
    return 0;
}

PyObject * VideoFFmpeg_getPreseek (PyImage *self, void * closure)
{
    return Py_BuildValue("h", getFFmpeg(self)->getPreseek());
}

// set preseek
int VideoFFmpeg_setPreseek (PyImage * self, PyObject * value, void * closure)
{
    // check validity of parameter
    if (value == NULL || !PyLong_Check(value))
    {
        PyErr_SetString(PyExc_TypeError, "The value must be an integer");
        return -1;
    }
    // set preseek
    getFFmpeg(self)->setPreseek(PyLong_AsSsize_t(value));
    // success
    return 0;
}

// get deinterlace
PyObject * VideoFFmpeg_getDeinterlace (PyImage * self, void * closure)
{
    if (getFFmpeg(self)->getDeinterlace())
        Py_RETURN_TRUE;
    else
        Py_RETURN_FALSE;
}

// set deinterlace
int VideoFFmpeg_setDeinterlace (PyImage * self, PyObject * value, void * closure)
{
    // check parameter, report failure
    if (value == NULL || !PyBool_Check(value))
    {
        PyErr_SetString(PyExc_TypeError, "The value must be a bool");
        return -1;
    }
    // set deinterlace
    getFFmpeg(self)->setDeinterlace(value == Py_True);
    // success
    return 0;
}

// methods structure
static PyMethodDef videoMethods[] =
{ // methods from VideoBase class
    {"play", (PyCFunction)Video_play, METH_NOARGS, "Play (restart) video"},
    {"pause", (PyCFunction)Video_pause, METH_NOARGS, "pause video"},
    {"stop", (PyCFunction)Video_stop, METH_NOARGS, "stop video (play will replay it from start)"},
    {"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh video - get its status"},
    {NULL}
};
// attributes structure
static PyGetSetDef videoGetSets[] =
{ // attributes from VideoBase class
    {(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
    {(char*)"range", (getter)Video_getRange, (setter)Video_setRange, (char*)"replay range", NULL},
    {(char*)"repeat", (getter)Video_getRepeat, (setter)Video_setRepeat, (char*)"repeat count, -1 for infinite repeat", NULL},
    {(char*)"framerate", (getter)Video_getFrameRate, (setter)Video_setFrameRate, (char*)"frame rate", NULL},
    // attributes from ImageBase class
    {(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
    {(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
    {(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
    {(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbor)", NULL},
    {(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
    {(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
    {(char*)"preseek", (getter)VideoFFmpeg_getPreseek, (setter)VideoFFmpeg_setPreseek, (char*)"nb of frames of preseek", NULL},
    {(char*)"deinterlace", (getter)VideoFFmpeg_getDeinterlace, (setter)VideoFFmpeg_setDeinterlace, (char*)"deinterlace image", NULL},
    {NULL}
};

// python type declaration
PyTypeObject VideoFFmpegType =
{
    PyVarObject_HEAD_INIT(NULL, 0)
    "VideoTexture.VideoFFmpeg",   /*tp_name*/
    sizeof(PyImage),          /*tp_basicsize*/
    0,                         /*tp_itemsize*/
    (destructor)Image_dealloc, /*tp_dealloc*/
    0,                         /*tp_print*/
    0,                         /*tp_getattr*/
    0,                         /*tp_setattr*/
    0,                         /*tp_compare*/
    0,                         /*tp_repr*/
    0,                         /*tp_as_number*/
    0,                         /*tp_as_sequence*/
    0,                         /*tp_as_mapping*/
    0,                         /*tp_hash */
    0,                         /*tp_call*/
    0,                         /*tp_str*/
    0,                         /*tp_getattro*/
    0,                         /*tp_setattro*/
    &imageBufferProcs,         /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT,        /*tp_flags*/
    "FFmpeg video source",     /* tp_doc */
    0,                         /* tp_traverse */
    0,                         /* tp_clear */
    0,                         /* tp_richcompare */
    0,                         /* tp_weaklistoffset */
    0,                         /* tp_iter */
    0,                         /* tp_iternext */
    videoMethods,              /* tp_methods */
    0,                         /* tp_members */
    videoGetSets,              /* tp_getset */
    0,                         /* tp_base */
    0,                         /* tp_dict */
    0,                         /* tp_descr_get */
    0,                         /* tp_descr_set */
    0,                         /* tp_dictoffset */
    (initproc)VideoFFmpeg_init, /* tp_init */
    0,                         /* tp_alloc */
    Image_allocNew,            /* tp_new */
};

// object initialization
static int ImageFFmpeg_init (PyObject * pySelf, PyObject * args, PyObject * kwds)
{
    PyImage * self = reinterpret_cast<PyImage*>(pySelf);
    // parameters - video source
    // file name or format type for capture (only for Linux: video4linux or dv1394)
    char * file = NULL;

    // get parameters
    if (!PyArg_ParseTuple(args, "s:ImageFFmpeg", &file))
        return -1;

    try
    {
        // create video object
        Video_init<VideoFFmpeg>(self);

        getVideoFFmpeg(self)->initParams(0, 0, 1.0, true);

        // open video source
        Video_open(getVideo(self), file, -1);
    }
    catch (Exception & exp)
    {
        exp.report();
        return -1;
    }
    // initialization succeeded
    return 0;
}

1271
1272 PyObject * Image_reload (PyImage * self, PyObject *args)
1273 {
1274         char * newname = NULL;
1275         if (!PyArg_ParseTuple(args, "|s:reload", &newname))
1276                 return NULL;
1277         if (self->m_image != NULL)
1278         {
1279                 VideoFFmpeg* video = getFFmpeg(self);
1280                 // check type of object
1281                 if (!newname)
1282                         newname = video->getImageName();
1283                 if (!newname) {
1284                         // if not set, retport error
1285                         PyErr_SetString(PyExc_RuntimeError, "No image file name given");
1286                         return NULL;
1287                 }
1288                 // make sure the previous file is cleared
1289                 video->release();
1290                 // open the new file
1291                 video->openFile(newname);
1292         }
1293         Py_RETURN_NONE;
1294 }

// methods structure
static PyMethodDef imageMethods[] =
{ // methods from VideoBase class
    {"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh image, i.e. load it"},
    {"reload", (PyCFunction)Image_reload, METH_VARARGS, "Reload image, i.e. reopen it"},
    {NULL}
};
// attributes structure
static PyGetSetDef imageGetSets[] =
{ // attributes from VideoBase class
    {(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
    // attributes from ImageBase class
    {(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
    {(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
    {(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
    {(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbor)", NULL},
    {(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
    {(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
    {NULL}
};

// python type declaration
PyTypeObject ImageFFmpegType =
{
    PyVarObject_HEAD_INIT(NULL, 0)
    "VideoTexture.ImageFFmpeg",   /*tp_name*/
    sizeof(PyImage),          /*tp_basicsize*/
    0,                         /*tp_itemsize*/
    (destructor)Image_dealloc, /*tp_dealloc*/
    0,                         /*tp_print*/
    0,                         /*tp_getattr*/
    0,                         /*tp_setattr*/
    0,                         /*tp_compare*/
    0,                         /*tp_repr*/
    0,                         /*tp_as_number*/
    0,                         /*tp_as_sequence*/
    0,                         /*tp_as_mapping*/
    0,                         /*tp_hash */
    0,                         /*tp_call*/
    0,                         /*tp_str*/
    0,                         /*tp_getattro*/
    0,                         /*tp_setattro*/
    &imageBufferProcs,         /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT,        /*tp_flags*/
    "FFmpeg image source",     /* tp_doc */
    0,                         /* tp_traverse */
    0,                         /* tp_clear */
    0,                         /* tp_richcompare */
    0,                         /* tp_weaklistoffset */
    0,                         /* tp_iter */
    0,                         /* tp_iternext */
    imageMethods,              /* tp_methods */
    0,                         /* tp_members */
    imageGetSets,              /* tp_getset */
    0,                         /* tp_base */
    0,                         /* tp_dict */
    0,                         /* tp_descr_get */
    0,                         /* tp_descr_set */
    0,                         /* tp_dictoffset */
    (initproc)ImageFFmpeg_init, /* tp_init */
    0,                         /* tp_alloc */
    Image_allocNew,            /* tp_new */
};
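
/* ImageFFmpeg loads a single still image through the same decoding path;
 * a usage sketch (file names illustrative only):
 *
 *   img = VideoTexture.ImageFFmpeg("//picture.jpg")
 *   img.refresh()               # decode the image
 *   img.reload("//other.jpg")   # reopen with a different file
 */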

#endif  //WITH_FFMPEG