VideoTexture: clean previous commit.
[blender.git] / source / gameengine / VideoTexture / VideoFFmpeg.cpp
1 /* $Id$
2 -----------------------------------------------------------------------------
3 This source file is part of VideoTexture library
4
5 Copyright (c) 2007 The Zdeno Ash Miklas
6
7 This program is free software; you can redistribute it and/or modify it under
8 the terms of the GNU Lesser General Public License as published by the Free Software
9 Foundation; either version 2 of the License, or (at your option) any later
10 version.
11
12 This program is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 Place - Suite 330, Boston, MA 02111-1307, USA, or go to
19 http://www.gnu.org/copyleft/lesser.txt.
20 -----------------------------------------------------------------------------
21 */
22
23 #ifdef WITH_FFMPEG
24
25 // INT64_C fix for some linux machines (C99ism)
26 #define __STDC_CONSTANT_MACROS
27 #include <stdint.h>
28
29
30 #include "MEM_guardedalloc.h"
31 #include "PIL_time.h"
32
33 #include <string>
34
35 #include "Exception.h"
36 #include "VideoFFmpeg.h"
37
38
// default framerate (frames/s) used when the stream does not report a usable one
const double defFrameRate = 25.0;
// time scale constant (milliseconds per second)
const long timeScale = 1000;

// macro for exception handling and logging:
// reports the exception and puts the video source in error state
#define CATCH_EXCP catch (Exception & exp) \
{ exp.report(); m_status = SourceError; }

// one-time ffmpeg initialization, defined elsewhere with C linkage
extern "C" void do_init_ffmpeg();
49
50 // class RenderVideo
51
// constructor: members are initialized to a safe "nothing opened" state;
// the actual ffmpeg objects are created later in openFile()/openCam()
VideoFFmpeg::VideoFFmpeg (HRESULT * hRslt) : VideoBase(),
m_codec(NULL), m_formatCtx(NULL), m_codecCtx(NULL),
m_frame(NULL), m_frameDeinterlaced(NULL), m_frameRGB(NULL), m_imgConvertCtx(NULL),
m_deinterlace(false), m_preseek(0), m_videoStream(-1), m_baseFrameRate(25.0),
m_lastFrame(-1), m_eof(false), m_externTime(false), m_curPosition(-1), m_startTime(0),
m_captWidth(0), m_captHeight(0), m_captRate(0.f), m_isImage(false),
m_isThreaded(false), m_isStreaming(false), m_stopThread(false), m_cacheStarted(false)
{
	// set video format
	m_format = RGB24;
	// force flip because ffmpeg always return the image in the wrong orientation for texture
	setFlip(true);
	// construction is OK
	*hRslt = S_OK;
	// thread list and the four cache queues start empty;
	// they are populated by startCache()
	m_thread.first = m_thread.last = NULL;
	pthread_mutex_init(&m_cacheMutex, NULL);
	m_frameCacheFree.first = m_frameCacheFree.last = NULL;
	m_frameCacheBase.first = m_frameCacheBase.last = NULL;
	m_packetCacheFree.first = m_packetCacheFree.last = NULL;
	m_packetCacheBase.first = m_packetCacheBase.last = NULL;
}
74
// destructor
VideoFFmpeg::~VideoFFmpeg () 
{
	// intentionally empty: ffmpeg objects and caches are freed in release()
	// NOTE(review): confirm callers always invoke release() before destruction,
	// otherwise the contexts allocated in openStream() would leak
}
79
80
// release components: free every ffmpeg object owned by this source and put it
// back in the stopped state. Always returns true.
bool VideoFFmpeg::release()
{
	// stop the cache thread first: it uses the contexts freed below
	stopCache();
	if (m_codecCtx)
	{
		avcodec_close(m_codecCtx);
		m_codecCtx = NULL;
	}
	if (m_formatCtx)
	{
		av_close_input_file(m_formatCtx);
		m_formatCtx = NULL;
	}
	if (m_frame)
	{
		av_free(m_frame);
		m_frame = NULL;
	}
	// for the next two frames the pixel buffer was allocated separately with
	// MEM_callocN (see openStream/allocFrameRGB): free it before the frame
	if (m_frameDeinterlaced)
	{
		MEM_freeN(m_frameDeinterlaced->data[0]);
		av_free(m_frameDeinterlaced);
		m_frameDeinterlaced = NULL;
	}
	if (m_frameRGB)
	{
		MEM_freeN(m_frameRGB->data[0]);
		av_free(m_frameRGB);
		m_frameRGB = NULL;
	}
	if (m_imgConvertCtx)
	{
		sws_freeContext(m_imgConvertCtx);
		m_imgConvertCtx = NULL;
	}
	// the codec itself is owned by ffmpeg, just forget the pointer
	m_codec = NULL;
	m_status = SourceStopped;
	m_lastFrame = -1;
	return true;
}
123
124 AVFrame *VideoFFmpeg::allocFrameRGB()
125 {
126         AVFrame *frame;
127         frame = avcodec_alloc_frame();
128         if (m_format == RGBA32)
129         {
130                 avpicture_fill((AVPicture*)frame, 
131                         (uint8_t*)MEM_callocN(avpicture_get_size(
132                                 PIX_FMT_RGBA,
133                                 m_codecCtx->width, m_codecCtx->height),
134                                 "ffmpeg rgba"),
135                         PIX_FMT_RGBA, m_codecCtx->width, m_codecCtx->height);
136         } else 
137         {
138                 avpicture_fill((AVPicture*)frame, 
139                         (uint8_t*)MEM_callocN(avpicture_get_size(
140                                 PIX_FMT_RGB24,
141                                 m_codecCtx->width, m_codecCtx->height),
142                                 "ffmpeg rgb"),
143                         PIX_FMT_RGB24, m_codecCtx->width, m_codecCtx->height);
144         }
145         return frame;
146 }
147
148 // set initial parameters
149 void VideoFFmpeg::initParams (short width, short height, float rate, bool image)
150 {
151         m_captWidth = width;
152         m_captHeight = height;
153         m_captRate = rate;
154         m_isImage = image;
155 }
156
157
158 int VideoFFmpeg::openStream(const char *filename, AVInputFormat *inputFormat, AVFormatParameters *formatParams)
159 {
160         AVFormatContext *formatCtx;
161         int                             i, videoStream;
162         AVCodec                 *codec;
163         AVCodecContext  *codecCtx;
164
165         if(av_open_input_file(&formatCtx, filename, inputFormat, 0, formatParams)!=0)
166                 return -1;
167
168         if(av_find_stream_info(formatCtx)<0) 
169         {
170                 av_close_input_file(formatCtx);
171                 return -1;
172         }
173
174         /* Find the first video stream */
175         videoStream=-1;
176         for(i=0; i<formatCtx->nb_streams; i++)
177         {
178                 if(formatCtx->streams[i] &&
179                         get_codec_from_stream(formatCtx->streams[i]) && 
180                         (get_codec_from_stream(formatCtx->streams[i])->codec_type==CODEC_TYPE_VIDEO))
181                 {
182                         videoStream=i;
183                         break;
184                 }
185         }
186
187         if(videoStream==-1) 
188         {
189                 av_close_input_file(formatCtx);
190                 return -1;
191         }
192
193         codecCtx = get_codec_from_stream(formatCtx->streams[videoStream]);
194
195         /* Find the decoder for the video stream */
196         codec=avcodec_find_decoder(codecCtx->codec_id);
197         if(codec==NULL) 
198         {
199                 av_close_input_file(formatCtx);
200                 return -1;
201         }
202         codecCtx->workaround_bugs = 1;
203         if(avcodec_open(codecCtx, codec)<0) 
204         {
205                 av_close_input_file(formatCtx);
206                 return -1;
207         }
208
209 #ifdef FFMPEG_OLD_FRAME_RATE
210         if(codecCtx->frame_rate>1000 && codecCtx->frame_rate_base==1)
211                 codecCtx->frame_rate_base=1000;
212         m_baseFrameRate = (double)codecCtx->frame_rate / (double)codecCtx->frame_rate_base;
213 #else
214         m_baseFrameRate = av_q2d(formatCtx->streams[videoStream]->r_frame_rate);
215 #endif
216         if (m_baseFrameRate <= 0.0) 
217                 m_baseFrameRate = defFrameRate;
218
219         m_codec = codec;
220         m_codecCtx = codecCtx;
221         m_formatCtx = formatCtx;
222         m_videoStream = videoStream;
223         m_frame = avcodec_alloc_frame();
224         m_frameDeinterlaced = avcodec_alloc_frame();
225
226         // allocate buffer if deinterlacing is required
227         avpicture_fill((AVPicture*)m_frameDeinterlaced, 
228                 (uint8_t*)MEM_callocN(avpicture_get_size(
229                 m_codecCtx->pix_fmt,
230                 m_codecCtx->width, m_codecCtx->height), 
231                 "ffmpeg deinterlace"), 
232                 m_codecCtx->pix_fmt, m_codecCtx->width, m_codecCtx->height);
233
234         // check if the pixel format supports Alpha
235         if (m_codecCtx->pix_fmt == PIX_FMT_RGB32 ||
236                 m_codecCtx->pix_fmt == PIX_FMT_BGR32 ||
237                 m_codecCtx->pix_fmt == PIX_FMT_RGB32_1 ||
238                 m_codecCtx->pix_fmt == PIX_FMT_BGR32_1) 
239         {
240                 // allocate buffer to store final decoded frame
241                 m_format = RGBA32;
242                 // allocate sws context
243                 m_imgConvertCtx = sws_getContext(
244                         m_codecCtx->width,
245                         m_codecCtx->height,
246                         m_codecCtx->pix_fmt,
247                         m_codecCtx->width,
248                         m_codecCtx->height,
249                         PIX_FMT_RGBA,
250                         SWS_FAST_BILINEAR,
251                         NULL, NULL, NULL);
252         } else
253         {
254                 // allocate buffer to store final decoded frame
255                 m_format = RGB24;
256                 // allocate sws context
257                 m_imgConvertCtx = sws_getContext(
258                         m_codecCtx->width,
259                         m_codecCtx->height,
260                         m_codecCtx->pix_fmt,
261                         m_codecCtx->width,
262                         m_codecCtx->height,
263                         PIX_FMT_RGB24,
264                         SWS_FAST_BILINEAR,
265                         NULL, NULL, NULL);
266         }
267         m_frameRGB = allocFrameRGB();
268
269         if (!m_imgConvertCtx) {
270                 avcodec_close(m_codecCtx);
271                 m_codecCtx = NULL;
272                 av_close_input_file(m_formatCtx);
273                 m_formatCtx = NULL;
274                 av_free(m_frame);
275                 m_frame = NULL;
276                 MEM_freeN(m_frameDeinterlaced->data[0]);
277                 av_free(m_frameDeinterlaced);
278                 m_frameDeinterlaced = NULL;
279                 MEM_freeN(m_frameRGB->data[0]);
280                 av_free(m_frameRGB);
281                 m_frameRGB = NULL;
282                 return -1;
283         }
284         return 0;
285 }
286
/*
 * This thread is used to load video frames asynchronously.
 * It provides a frame caching service.
 * The main thread is responsible for positioning the frame pointer in the
 * file correctly before calling startCache() which starts this thread.
 * The cache is organized in two layers: 1) a cache of 20-30 undecoded packets to keep
 * memory and CPU load low 2) a cache of 5 decoded frames.
 * If the main thread does not find the frame in the cache (because the video has restarted
 * or because the GE is lagging), it stops the cache with StopCache() (this is a synchronous
 * function: it sends a signal to stop the cache thread and waits for confirmation), then
 * changes the position in the stream and restarts the cache thread.
 */
// cache thread entry point: reads packets ahead of the main thread and
// decodes them into the frame cache (see the design comment above startCache)
void *VideoFFmpeg::cacheThread(void *data)
{
	VideoFFmpeg* video = (VideoFFmpeg*)data;
	// holds the frame that is being decoded
	CacheFrame *currentFrame = NULL;
	CachePacket *cachePacket;
	bool endOfFile = false;
	int frameFinished = 0;
	// conversion factor from packet timestamp units to seconds
	double timeBase = av_q2d(video->m_formatCtx->streams[video->m_videoStream]->time_base);
	int64_t startTs = video->m_formatCtx->streams[video->m_videoStream]->start_time;

	// streams without a start time report AV_NOPTS_VALUE: treat as 0
	if (startTs == AV_NOPTS_VALUE)
		startTs = 0;

	while (!video->m_stopThread)
	{
		// packet cache is used solely by this thread, no need to lock
		// In case the stream/file contains streams other than the one we are looking for,
		// allow a bit of cycling to get rid quickly of those frames
		frameFinished = 0;
		while (    !endOfFile 
				&& (cachePacket = (CachePacket *)video->m_packetCacheFree.first) != NULL 
				&& frameFinished < 25)
		{
			// free packet => packet cache is not full yet, just read more
			if (av_read_frame(video->m_formatCtx, &cachePacket->packet)>=0) 
			{
				if (cachePacket->packet.stream_index == video->m_videoStream)
				{
					// make sure fresh memory is allocated for the packet and move it to queue
					av_dup_packet(&cachePacket->packet);
					BLI_remlink(&video->m_packetCacheFree, cachePacket);
					BLI_addtail(&video->m_packetCacheBase, cachePacket);
					break;
				} else {
					// this is not a good packet for us, just leave it on free queue
					// Note: here we could handle sound packet
					av_free_packet(&cachePacket->packet);
					frameFinished++;
				}
				
			} else {
				if (video->m_isFile)
					// this marks the end of the file
					endOfFile = true;
				// if we cannot read a packet, no need to continue
				break;
			}
		}
		// frame cache is also used by main thread, lock
		if (currentFrame == NULL) 
		{
			// no current frame being decoded, take free one
			pthread_mutex_lock(&video->m_cacheMutex);
			if ((currentFrame = (CacheFrame *)video->m_frameCacheFree.first) != NULL)
				BLI_remlink(&video->m_frameCacheFree, currentFrame);
			pthread_mutex_unlock(&video->m_cacheMutex);
		}
		if (currentFrame != NULL)
		{
			// this frame is out of free and busy queue, we can manipulate it without locking
			frameFinished = 0;
			// decode packets until a complete frame is produced or the packet queue is empty
			while (!frameFinished && (cachePacket = (CachePacket *)video->m_packetCacheBase.first) != NULL)
			{
				BLI_remlink(&video->m_packetCacheBase, cachePacket);
				// use m_frame because when caching, it is not used in main thread
				// we can't use currentFrame directly because we need to convert to RGB first
				avcodec_decode_video(video->m_codecCtx, 
					video->m_frame, &frameFinished, 
					cachePacket->packet.data, cachePacket->packet.size);
				if(frameFinished) 
				{
					AVFrame * input = video->m_frame;

					/* all-NULL planes mean the data wasn't read properly;
					   this check stops crashing */
					if (   input->data[0]!=0 || input->data[1]!=0 
						|| input->data[2]!=0 || input->data[3]!=0)
					{
						if (video->m_deinterlace) 
						{
							if (avpicture_deinterlace(
								(AVPicture*) video->m_frameDeinterlaced,
								(const AVPicture*) video->m_frame,
								video->m_codecCtx->pix_fmt,
								video->m_codecCtx->width,
								video->m_codecCtx->height) >= 0)
							{
								// deinterlacing succeeded, convert from the deinterlaced picture
								input = video->m_frameDeinterlaced;
							}
						}
						// convert to RGB24
						sws_scale(video->m_imgConvertCtx,
							input->data,
							input->linesize,
							0,
							video->m_codecCtx->height,
							currentFrame->frame->data,
							currentFrame->frame->linesize);
						// move frame to queue, this frame is necessarily the next one
						// frame position = (dts - stream start) * rate, rounded to nearest
						video->m_curPosition = (long)((cachePacket->packet.dts-startTs) * (video->m_baseFrameRate*timeBase) + 0.5);
						currentFrame->framePosition = video->m_curPosition;
						pthread_mutex_lock(&video->m_cacheMutex);
						BLI_addtail(&video->m_frameCacheBase, currentFrame);
						pthread_mutex_unlock(&video->m_cacheMutex);
						currentFrame = NULL;
					}
				}
				// in all cases the packet was consumed: recycle it
				av_free_packet(&cachePacket->packet);
				BLI_addtail(&video->m_packetCacheFree, cachePacket);
			} 
			if (currentFrame && endOfFile) 
			{
				// no more packet and end of file => put a special frame that indicates that
				currentFrame->framePosition = -1;
				pthread_mutex_lock(&video->m_cacheMutex);
				BLI_addtail(&video->m_frameCacheBase, currentFrame);
				pthread_mutex_unlock(&video->m_cacheMutex);
				currentFrame = NULL;
				// no need to stay any longer in this thread
				break;
			}
		}
		// small sleep to avoid unnecessary looping
		PIL_sleep_ms(10);
	}
	// before quitting, put back the current frame to queue to allow freeing
	if (currentFrame)
	{
		pthread_mutex_lock(&video->m_cacheMutex);
		BLI_addtail(&video->m_frameCacheFree, currentFrame);
		pthread_mutex_unlock(&video->m_cacheMutex);
	}
	return 0;
}
433
434 // start thread to cache video frame from file/capture/stream
435 // this function should be called only when the position in the stream is set for the
436 // first frame to cache
437 bool VideoFFmpeg::startCache()
438 {
439         if (!m_cacheStarted && m_isThreaded)
440         {
441                 m_stopThread = false;
442                 for (int i=0; i<CACHE_FRAME_SIZE; i++)
443                 {
444                         CacheFrame *frame = new CacheFrame();
445                         frame->frame = allocFrameRGB();
446                         BLI_addtail(&m_frameCacheFree, frame);
447                 }
448                 for (int i=0; i<CACHE_PACKET_SIZE; i++) 
449                 {
450                         CachePacket *packet = new CachePacket();
451                         BLI_addtail(&m_packetCacheFree, packet);
452                 }
453                 BLI_init_threads(&m_thread, cacheThread, 1);
454                 BLI_insert_thread(&m_thread, this);
455                 m_cacheStarted = true;
456         }
457         return m_cacheStarted;
458 }
459
460 void VideoFFmpeg::stopCache()
461 {
462         if (m_cacheStarted)
463         {
464                 m_stopThread = true;
465                 BLI_end_threads(&m_thread);
466                 // now delete the cache
467                 CacheFrame *frame;
468                 CachePacket *packet;
469                 while ((frame = (CacheFrame *)m_frameCacheBase.first) != NULL)
470                 {
471                         BLI_remlink(&m_frameCacheBase, frame);
472                         MEM_freeN(frame->frame->data[0]);
473                         av_free(frame->frame);
474                         delete frame;
475                 }
476                 while ((frame = (CacheFrame *)m_frameCacheFree.first) != NULL)
477                 {
478                         BLI_remlink(&m_frameCacheFree, frame);
479                         MEM_freeN(frame->frame->data[0]);
480                         av_free(frame->frame);
481                         delete frame;
482                 }
483                 while((packet = (CachePacket *)m_packetCacheBase.first) != NULL)
484                 {
485                         BLI_remlink(&m_packetCacheBase, packet);
486                         av_free_packet(&packet->packet);
487                         delete packet;
488                 }
489                 while((packet = (CachePacket *)m_packetCacheFree.first) != NULL)
490                 {
491                         BLI_remlink(&m_packetCacheFree, packet);
492                         delete packet;
493                 }
494                 m_cacheStarted = false;
495         }
496 }
497
498 void VideoFFmpeg::releaseFrame(AVFrame* frame)
499 {
500         if (frame == m_frameRGB)
501         {
502                 // this is not a frame from the cache, ignore
503                 return;
504         }
505         // this frame MUST be the first one of the queue
506         pthread_mutex_lock(&m_cacheMutex);
507         CacheFrame *cacheFrame = (CacheFrame *)m_frameCacheBase.first;
508         assert (cacheFrame != NULL && cacheFrame->frame == frame);
509         BLI_remlink(&m_frameCacheBase, cacheFrame);
510         BLI_addtail(&m_frameCacheFree, cacheFrame);
511         pthread_mutex_unlock(&m_cacheMutex);
512 }
513
// open video file (or URL): build the decoding pipeline via openStream(),
// then configure seeking, range, streaming and threading behavior
void VideoFFmpeg::openFile (char * filename)
{
	// make sure ffmpeg is initialized before any ffmpeg call
	do_init_ffmpeg();

	if (openStream(filename, NULL, NULL) != 0)
		return;

	// number of frames to decode ahead after a seek, to resynchronize the decoder
	// (capped at 25; GOP size is the distance between key frames)
	if (m_codecCtx->gop_size)
		m_preseek = (m_codecCtx->gop_size < 25) ? m_codecCtx->gop_size+1 : 25;
	else if (m_codecCtx->has_b_frames)
		m_preseek = 25;	// should determine gopsize
	else
		m_preseek = 0;

	// get video time range (seconds)
	m_range[0] = 0.0;
	m_range[1] = (double)m_formatCtx->duration / AV_TIME_BASE;

	// open base class
	VideoBase::openFile(filename);

	if (
		// ffmpeg reports that http sources are actually non stream
		// but it is really not desirable to seek on http file, so force streaming.
		// It would be good to find this information from the context but there are no simple indication
		!strncmp(filename, "http://", 7) ||
#ifdef FFMPEG_PB_IS_POINTER
		(m_formatCtx->pb && m_formatCtx->pb->is_streamed)
#else
		m_formatCtx->pb.is_streamed
#endif
		)
	{
		// the file is in fact a streaming source, treat as cam to prevent seeking
		m_isFile = false;
		// but it's not handled exactly like a camera.
		m_isStreaming = true;
		// for streaming it is important to do non blocking read
		m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
	}

	if (m_isImage) 
	{
		// the file is to be treated as an image, i.e. load the first frame only
		m_isFile = false;
		// in case of reload, the filename is taken from m_imageName, no need to change it
		if (m_imageName.Ptr() != filename)
			m_imageName = filename;
		m_preseek = 0;
		m_avail = false;
		// load the single frame immediately
		play();
	}
	// check if we should do multi-threading?
	if (!m_isImage && BLI_system_thread_count() > 1)
	{
		// never thread image: there are no frame to read ahead
		// no need to thread if the system has a single core
		m_isThreaded =  true;
	}
}
575
576
// open video capture device
// file   : optional device/driver specification (see Linux comments below), may be NULL
// camIdx : camera index, used to build the default device name
void VideoFFmpeg::openCam (char * file, short camIdx)
{
	// open camera source
	AVInputFormat		*inputFormat;
	AVFormatParameters	formatParams;
	AVRational			frameRate;
	char				*p, filename[28], rateStr[20];

	// make sure ffmpeg is initialized before any ffmpeg call
	do_init_ffmpeg();

	memset(&formatParams, 0, sizeof(formatParams));
#ifdef WIN32
	// video capture on windows only through Video For Windows driver
	inputFormat = av_find_input_format("vfwcap");
	if (!inputFormat)
		// Video For Windows not supported??
		return;
	sprintf(filename, "%d", camIdx);
#else
	// In Linux we support two types of devices: VideoForLinux and DV1394. 
	// the user specify it with the filename:
	// [<device_type>][:<standard>]
	// <device_type> : 'v4l' for VideoForLinux, 'dv1394' for DV1394. By default 'v4l'
	// <standard>    : 'pal', 'secam' or 'ntsc'. By default 'ntsc'
	// The driver name is constructed automatically from the device type:
	// v4l   : /dev/video<camIdx>
	// dv1394: /dev/dv1394/<camIdx>
	// If you have different driver name, you can specify the driver name explicitly 
	// instead of device type. Examples of valid filename:
	//    /dev/v4l/video0:pal
	//    /dev/ieee1394/1:ntsc
	//    dv1394:secam
	//    v4l:pal
	if (file && strstr(file, "1394") != NULL) 
	{
		// the user specifies a driver, check if it is v4l or dv1394
		inputFormat = av_find_input_format("dv1394");
		sprintf(filename, "/dev/dv1394/%d", camIdx);
	} else 
	{
		inputFormat = av_find_input_format("video4linux");
		sprintf(filename, "/dev/video%d", camIdx);
	}
	if (!inputFormat)
		// these format should be supported, check ffmpeg compilation
		return;
	if (file && strncmp(file, "/dev", 4) == 0) 
	{
		// the user gave an explicit device path: use it instead of the
		// constructed name, stripping the optional ':<standard>' suffix
		strncpy(filename, file, sizeof(filename));
		filename[sizeof(filename)-1] = 0;
		if ((p = strchr(filename, ':')) != 0)
			*p = 0;
	}
	// pass the video standard (after ':') to the driver, if any
	if (file && (p = strchr(file, ':')) != NULL)
		formatParams.standard = p+1;
#endif
	//frame rate: fall back on the default when none was requested
	if (m_captRate <= 0.f)
		m_captRate = defFrameRate;
	sprintf(rateStr, "%f", m_captRate);
	// let ffmpeg parse the textual rate into a rational
	av_parse_video_frame_rate(&frameRate, rateStr);
	// populate format parameters
	// need to specify the time base = inverse of rate
	formatParams.time_base.num = frameRate.den;
	formatParams.time_base.den = frameRate.num;
	formatParams.width = m_captWidth;
	formatParams.height = m_captHeight;

	if (openStream(filename, inputFormat, &formatParams) != 0)
		return;

	// for video capture it is important to do non blocking read
	m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
	// open base class
	VideoBase::openCam(file, camIdx);
	// check if we should do multi-threading?
	if (BLI_system_thread_count() > 1)
	{
		// no need to thread if the system has a single core
		m_isThreaded =  true;
	}
}
661
662 // play video
663 bool VideoFFmpeg::play (void)
664 {
665         try
666         {
667                 // if object is able to play
668                 if (VideoBase::play())
669                 {
670                         // set video position
671                         setPositions();
672                         // return success
673                         return true;
674                 }
675         }
676         CATCH_EXCP;
677         return false;
678 }
679
680
681 // pause video
682 bool VideoFFmpeg::pause (void)
683 {
684         try
685         {
686                 if (VideoBase::pause())
687                 {
688                         return true;
689                 }
690         }
691         CATCH_EXCP;
692         return false;
693 }
694
695 // stop video
696 bool VideoFFmpeg::stop (void)
697 {
698         try
699         {
700                 VideoBase::stop();
701                 // force restart when play
702                 m_lastFrame = -1;
703                 return true;
704         }
705         CATCH_EXCP;
706         return false;
707 }
708
709
710 // set video range
711 void VideoFFmpeg::setRange (double start, double stop)
712 {
713         try
714         {
715                 // set range
716                 if (m_isFile)
717                 {
718                         VideoBase::setRange(start, stop);
719                         // set range for video
720                         setPositions();
721                 }
722         }
723         CATCH_EXCP;
724 }
725
// set framerate: no ffmpeg-specific handling needed, delegate to the base class
void VideoFFmpeg::setFrameRate (float rate)
{
	VideoBase::setFrameRate(rate);
}
731
732
// image calculation
// load frame from video
// Decides, based on elapsed (or explicitly passed) time, which frame number
// should currently be displayed; grabs it via grabFrame() and pushes it
// through the filter chain with process(). Handles end-of-range detection,
// repeat counting, and clock resynchronization for live streams.
// texId: OpenGL texture id (not used directly in this function, part of the
//        generic calcImage interface). ts: externally supplied timestamp in
//        seconds, or a negative value when no timestamp is provided.
void VideoFFmpeg::calcImage (unsigned int texId, double ts)
{
	// only produce frames while the source is playing
	if (m_status == SourcePlaying)
	{
		// get actual time
		double startTime = PIL_check_seconds_timer();
		double actTime;
		// timestamp passed from audio actuators can sometimes be slightly negative
		if (m_isFile && ts >= -0.5)
		{
			// allow setting timestamp only when not streaming
			actTime = ts;
			if (actTime * actFrameRate() < m_lastFrame) 
			{
				// user is asking to rewind, force a cache clear to make sure we will do a seek
				// note that this does not decrement m_repeat if ts didn't reach m_range[1]
				stopCache();
			}
		}
		else
		{
			// first frame of a stream: anchor the clock on the current time
			if (m_lastFrame == -1 && !m_isFile)
				m_startTime = startTime;
			actTime = startTime - m_startTime;
		}
		// if video has ended
		if (m_isFile && actTime * m_frameRate >= m_range[1])
		{
			// in any case, this resets the cache
			stopCache();
			// if repeats are set, decrease them
			if (m_repeat > 0) 
				--m_repeat;
			// if video has to be replayed (m_repeat < 0 means infinite repeat)
			if (m_repeat != 0)
			{
				// reset its position
				actTime -= (m_range[1] - m_range[0]) / m_frameRate;
				m_startTime += (m_range[1] - m_range[0]) / m_frameRate;
			}
			// if video has to be stopped, stop it
			else 
			{
				m_status = SourceStopped;
				return;
			}
		}
		// actual frame: for still images simply advance by one per refresh
		long actFrame = (m_isImage) ? m_lastFrame+1 : long(actTime * actFrameRate());
		// if actual frame differs from last frame
		if (actFrame != m_lastFrame)
		{
			AVFrame* frame;
			// get image
			if((frame = grabFrame(actFrame)) != NULL)
			{
				if (!m_isFile && !m_cacheStarted) 
				{
					// streaming without cache: detect synchronization problem
					double execTime = PIL_check_seconds_timer() - startTime;
					if (execTime > 0.005) 
					{
						// exec time is too long, it means that the function was blocking
						// resynchronize the stream from this time
						m_startTime += execTime;
					}
				}
				// save actual frame
				m_lastFrame = actFrame;
				// init image, if needed
				init(short(m_codecCtx->width), short(m_codecCtx->height));
				// process image
				process((BYTE*)(frame->data[0]));
				// finished with the frame, release it so that cache can reuse it
				releaseFrame(frame);
				// in case it is an image, automatically stop reading it
				if (m_isImage)
				{
					m_status = SourceStopped;
					// close the file as we don't need it anymore
					release();
				}
			} else if (m_isStreaming)
			{
				// we didn't get a frame and we are streaming, this may be due to
				// a delay in the network or because we are getting the frame too fast.
				// In the later case, shift time by a small amount to compensate for a drift
				m_startTime += 0.001;
			}
		}
	}
}
827
828
829 // set actual position
830 void VideoFFmpeg::setPositions (void)
831 {
832         // set video start time
833         m_startTime = PIL_check_seconds_timer();
834         // if file is played and actual position is before end position
835         if (!m_eof && m_lastFrame >= 0 && (!m_isFile || m_lastFrame < m_range[1] * actFrameRate()))
836                 // continue from actual position
837                 m_startTime -= double(m_lastFrame) / actFrameRate();
838         else {
839                 m_startTime -= m_range[0];
840                 // start from begining, stop cache just in case
841                 stopCache();
842         }
843 }
844
// position pointer in file, position in second
// Return the decoded RGB frame corresponding to frame number 'position',
// or NULL if no suitable frame is available.
// Three modes of operation:
//  1. cache active: only consume frames prepared by the cache thread;
//  2. file without cache: seek (or pre-read) to the requested frame, decode it;
//  3. stream without cache: decode and return the next incoming frame.
// On success m_curPosition is updated from the packet dts. Sets m_eof when a
// file could not deliver a frame.
AVFrame *VideoFFmpeg::grabFrame(long position)
{
	AVPacket packet;
	int frameFinished;
	// posFound == 1 means "no seek pending, accept the next decoded frame"
	int posFound = 1;
	bool frameLoaded = false;
	int64_t targetTs = 0;
	CacheFrame *frame;
	int64_t dts = 0;

	if (m_cacheStarted)
	{
		// when cache is active, we must not read the file directly
		do {
			// the mutex only protects the list links, not the frame data
			pthread_mutex_lock(&m_cacheMutex);
			frame = (CacheFrame *)m_frameCacheBase.first;
			pthread_mutex_unlock(&m_cacheMutex);
			// no need to remove the frame from the queue: the cache thread does not touch the head, only the tail
			if (frame == NULL)
			{
				// no frame in cache, in case of file it is an abnormal situation
				if (m_isFile)
				{
					// go back to no threaded reading
					stopCache();
					break;
				}
				return NULL;
			}
			if (frame->framePosition == -1) 
			{
				// this frame mark the end of the file (only used for file)
				// leave in cache to make sure we don't miss it
				m_eof = true;
				return NULL;
			}
			// for streaming, always return the next frame, 
			// that's what grabFrame does in non cache mode anyway.
			if (m_isStreaming || frame->framePosition == position)
			{
				return frame->frame;
			}
			// for cam, skip old frames to keep image realtime.
			// There should be no risk of clock drift since it all happens on the same CPU
			if (frame->framePosition > position) 
			{
				// this can happen after rewind if the seek didn't find the first frame
				// the frame in the buffer is ahead of time, just leave it there
				return NULL;
			}
			// this frame is not useful, release it
			pthread_mutex_lock(&m_cacheMutex);
			BLI_remlink(&m_frameCacheBase, frame);
			BLI_addtail(&m_frameCacheFree, frame);
			pthread_mutex_unlock(&m_cacheMutex);
		} while (true);
	}
	// conversion factor between stream timestamps and seconds
	double timeBase = av_q2d(m_formatCtx->streams[m_videoStream]->time_base);
	int64_t startTs = m_formatCtx->streams[m_videoStream]->start_time;
	if (startTs == AV_NOPTS_VALUE)
		startTs = 0;

	// come here when there is no cache or cache has been stopped
	// locate the frame, by seeking if necessary (seeking is only possible for files)
	if (m_isFile)
	{
		// first check if the position that we are looking for is in the preseek range
		// if so, just read the frame until we get there
		if (position > m_curPosition + 1 
			&& m_preseek 
			&& position - (m_curPosition + 1) < m_preseek) 
		{
			while(av_read_frame(m_formatCtx, &packet)>=0) 
			{
				if (packet.stream_index == m_videoStream) 
				{
					avcodec_decode_video(
						m_codecCtx, 
						m_frame, &frameFinished, 
						packet.data, packet.size);
					if (frameFinished)
					{
						// derive the frame number from the packet timestamp
						m_curPosition = (long)((packet.dts-startTs) * (m_baseFrameRate*timeBase) + 0.5);
					}
				}
				av_free_packet(&packet);
				if (position == m_curPosition+1)
					break;
			}
		}
		// if the position is not in preseek, do a direct jump
		if (position != m_curPosition + 1) 
		{ 
			// seek m_preseek frames early so the decoder can resynchronize on a keyframe
			int64_t pos = (int64_t)((position - m_preseek) / (m_baseFrameRate*timeBase));
			int seekres;

			if (pos < 0)
				pos = 0;

			pos += startTs;

			// no need to seek forward when we are already at end of file
			if (position <= m_curPosition || !m_eof)
			{
#if 0
				// Tried to make this work but couldn't: seeking on byte is ignored by the
				// format plugin and it will generally continue to read from last timestamp.
				// Too bad because frame seek is not always able to get the first frame
				// of the file.
				if (position <= m_preseek)
				{
					// we can safely go the begining of the file
					if (av_seek_frame(m_formatCtx, m_videoStream, 0, AVSEEK_FLAG_BYTE) >= 0)
					{
						// binary seek does not reset the timestamp, must do it now
						av_update_cur_dts(m_formatCtx, m_formatCtx->streams[m_videoStream], startTs);
						m_curPosition = 0;
					}
				}
				else
#endif
				{
					// current position is now lost, guess a value. 
					if (av_seek_frame(m_formatCtx, m_videoStream, pos, AVSEEK_FLAG_BACKWARD) >= 0)
					{
						// current position is now lost, guess a value. 
						// It's not important because it will be set at this end of this function
						m_curPosition = position - m_preseek - 1;
					}
				}
			}
			// this is the timestamp of the frame we're looking for
			targetTs = (int64_t)(position / (m_baseFrameRate * timeBase)) + startTs;

			// posFound == 0: decode and discard frames until targetTs is reached
			posFound = 0;
			avcodec_flush_buffers(m_codecCtx);
		}
	} else if (m_isThreaded)
	{
		// cache is not started but threading is possible
		// better not read the stream => make take some time, better start caching
		if (startCache())
			return NULL;
		// Abnormal!!! could not start cache, fall back on direct read
		m_isThreaded = false;
	}

	// find the correct frame, in case of streaming and no cache, it means just
	// return the next frame. This is not quite correct, may need more work
	while(av_read_frame(m_formatCtx, &packet)>=0) 
	{
		if(packet.stream_index == m_videoStream) 
		{
			avcodec_decode_video(m_codecCtx, 
				m_frame, &frameFinished, 
				packet.data, packet.size);
			// remember dts to compute exact frame number
			dts = packet.dts;
			if (frameFinished && !posFound) 
			{
				// still looking for the seek target: accept once dts catches up
				if (dts >= targetTs)
				{
					posFound = 1;
				}
			} 

			if (frameFinished && posFound == 1) 
			{
				AVFrame * input = m_frame;

				/* This means the data wasnt read properly, 
				this check stops crashing */
				if (   input->data[0]==0 && input->data[1]==0 
					&& input->data[2]==0 && input->data[3]==0)
				{
					av_free_packet(&packet);
					break;
				}

				if (m_deinterlace) 
				{
					// deinterlace in place into m_frameDeinterlaced; fall back
					// to the raw frame when deinterlacing fails
					if (avpicture_deinterlace(
						(AVPicture*) m_frameDeinterlaced,
						(const AVPicture*) m_frame,
						m_codecCtx->pix_fmt,
						m_codecCtx->width,
						m_codecCtx->height) >= 0)
					{
						input = m_frameDeinterlaced;
					}
				}
				// convert to RGB24
				sws_scale(m_imgConvertCtx,
					input->data,
					input->linesize,
					0,
					m_codecCtx->height,
					m_frameRGB->data,
					m_frameRGB->linesize);
				av_free_packet(&packet);
				frameLoaded = true;
				break;
			}
		}
		av_free_packet(&packet);
	}
	// for a file, failing to load a frame means end of file
	m_eof = m_isFile && !frameLoaded;
	if (frameLoaded)
	{
		// compute the exact frame number from the last packet timestamp
		m_curPosition = (long)((dts-startTs) * (m_baseFrameRate*timeBase) + 0.5);
		if (m_isThreaded)
		{
			// normal case for file: first locate, then start cache
			if (!startCache())
			{
				// Abnormal!! could not start cache, return to non-cache mode
				m_isThreaded = false;
			}
		}
		return m_frameRGB;
	}
	return NULL;
}
1068
1069
1070 // python methods
1071
1072
// cast Image pointer to VideoFFmpeg
// NOTE: unchecked downcast; callers must only use it on PyImage objects whose
// m_image was created as a VideoFFmpeg (see VideoFFmpeg_init/ImageFFmpeg_init)
inline VideoFFmpeg * getVideoFFmpeg (PyImage * self)
{ return static_cast<VideoFFmpeg*>(self->m_image); }
1076
1077
// object initialization
// tp_init for VideoTexture.VideoFFmpeg: parse the python arguments,
// create the VideoFFmpeg object and open the video source.
// Returns 0 on success, -1 (with a python exception set) on failure.
static int VideoFFmpeg_init (PyObject * pySelf, PyObject * args, PyObject * kwds)
{
	PyImage * self = reinterpret_cast<PyImage*>(pySelf);
	// parameters - video source
	// file name or format type for capture (only for Linux: video4linux or dv1394)
	char * file = NULL;
	// capture device number
	short capt = -1;
	// capture width, only if capt is >= 0
	short height = 0;
	// capture height, only if capt is >= 0
	short width = 0;
	// capture rate, only if capt is >= 0
	float rate = 25.f;

	static const char *kwlist[] = {"file", "capture", "rate", "width", "height", NULL};

	// get parameters
	if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|hfhh",
		const_cast<char**>(kwlist), &file, &capt, &rate, &width, &height))
		return -1; 

	try
	{
		// create video object
		Video_init<VideoFFmpeg>(self);

		// set thread usage
		getVideoFFmpeg(self)->initParams(width, height, rate);

		// open video source
		Video_open(getVideo(self), file, capt);
	}
	catch (Exception & exp)
	{
		// report() converts the C++ exception into a python error
		exp.report();
		return -1;
	}
	// initialization succeeded
	return 0;
}
1120
1121 PyObject * VideoFFmpeg_getPreseek (PyImage *self, void * closure)
1122 {
1123         return Py_BuildValue("h", getFFmpeg(self)->getPreseek());
1124 }
1125
1126 // set range
1127 int VideoFFmpeg_setPreseek (PyImage * self, PyObject * value, void * closure)
1128 {
1129         // check validity of parameter
1130         if (value == NULL || !PyLong_Check(value))
1131         {
1132                 PyErr_SetString(PyExc_TypeError, "The value must be an integer");
1133                 return -1;
1134         }
1135         // set preseek
1136         getFFmpeg(self)->setPreseek(PyLong_AsSsize_t(value));
1137         // success
1138         return 0;
1139 }
1140
1141 // get deinterlace
1142 PyObject * VideoFFmpeg_getDeinterlace (PyImage * self, void * closure)
1143 {
1144         if (getFFmpeg(self)->getDeinterlace())
1145                 Py_RETURN_TRUE;
1146         else
1147                 Py_RETURN_FALSE;
1148 }
1149
1150 // set flip
1151 int VideoFFmpeg_setDeinterlace (PyImage * self, PyObject * value, void * closure)
1152 {
1153         // check parameter, report failure
1154         if (value == NULL || !PyBool_Check(value))
1155         {
1156                 PyErr_SetString(PyExc_TypeError, "The value must be a bool");
1157                 return -1;
1158         }
1159         // set deinterlace
1160         getFFmpeg(self)->setDeinterlace(value == Py_True);
1161         // success
1162         return 0;
1163 }
1164
// methods structure
// python methods exposed on the VideoTexture.VideoFFmpeg type
// (all implemented generically in the VideoBase wrappers)
static PyMethodDef videoMethods[] =
{ // methods from VideoBase class
	{"play", (PyCFunction)Video_play, METH_NOARGS, "Play (restart) video"},
	{"pause", (PyCFunction)Video_pause, METH_NOARGS, "pause video"},
	{"stop", (PyCFunction)Video_stop, METH_NOARGS, "stop video (play will replay it from start)"},
	{"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh video - get its status"},
	{NULL}  // sentinel
};
// attributes structure
// python attributes (getters/setters) exposed on the VideoTexture.VideoFFmpeg type
static PyGetSetDef videoGetSets[] =
{ // methods from VideoBase class
	{(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
	{(char*)"range", (getter)Video_getRange, (setter)Video_setRange, (char*)"replay range", NULL},
	{(char*)"repeat", (getter)Video_getRepeat, (setter)Video_setRepeat, (char*)"repeat count, -1 for infinite repeat", NULL},
	{(char*)"framerate", (getter)Video_getFrameRate, (setter)Video_setFrameRate, (char*)"frame rate", NULL},
	// attributes from ImageBase class
	{(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
	{(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
	{(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
	{(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbour)", NULL},
	{(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
	{(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
	// attributes specific to VideoFFmpeg
	{(char*)"preseek", (getter)VideoFFmpeg_getPreseek, (setter)VideoFFmpeg_setPreseek, (char*)"nb of frames of preseek", NULL},
	{(char*)"deinterlace", (getter)VideoFFmpeg_getDeinterlace, (setter)VideoFFmpeg_setDeinterlace, (char*)"deinterlace image", NULL},
	{NULL}  // sentinel
};
1192
1193 // python type declaration
1194 PyTypeObject VideoFFmpegType =
1195
1196         PyVarObject_HEAD_INIT(NULL, 0)
1197         "VideoTexture.VideoFFmpeg",   /*tp_name*/
1198         sizeof(PyImage),          /*tp_basicsize*/
1199         0,                         /*tp_itemsize*/
1200         (destructor)Image_dealloc, /*tp_dealloc*/
1201         0,                         /*tp_print*/
1202         0,                         /*tp_getattr*/
1203         0,                         /*tp_setattr*/
1204         0,                         /*tp_compare*/
1205         0,                         /*tp_repr*/
1206         0,                         /*tp_as_number*/
1207         0,                         /*tp_as_sequence*/
1208         0,                         /*tp_as_mapping*/
1209         0,                         /*tp_hash */
1210         0,                         /*tp_call*/
1211         0,                         /*tp_str*/
1212         0,                         /*tp_getattro*/
1213         0,                         /*tp_setattro*/
1214         &imageBufferProcs,         /*tp_as_buffer*/
1215         Py_TPFLAGS_DEFAULT,        /*tp_flags*/
1216         "FFmpeg video source",       /* tp_doc */
1217         0,                             /* tp_traverse */
1218         0,                             /* tp_clear */
1219         0,                             /* tp_richcompare */
1220         0,                             /* tp_weaklistoffset */
1221         0,                             /* tp_iter */
1222         0,                             /* tp_iternext */
1223         videoMethods,    /* tp_methods */
1224         0,                   /* tp_members */
1225         videoGetSets,          /* tp_getset */
1226         0,                         /* tp_base */
1227         0,                         /* tp_dict */
1228         0,                         /* tp_descr_get */
1229         0,                         /* tp_descr_set */
1230         0,                         /* tp_dictoffset */
1231         (initproc)VideoFFmpeg_init,     /* tp_init */
1232         0,                         /* tp_alloc */
1233         Image_allocNew,           /* tp_new */
1234 };
1235
// object initialization
// tp_init for VideoTexture.ImageFFmpeg: parse the file name argument,
// create the underlying VideoFFmpeg object in image mode and open the file.
// Returns 0 on success, -1 (with a python exception set) on failure.
static int ImageFFmpeg_init (PyObject * pySelf, PyObject * args, PyObject * kwds)
{
	PyImage * self = reinterpret_cast<PyImage*>(pySelf);
	// parameters - video source
	// file name or format type for capture (only for Linux: video4linux or dv1394)
	char * file = NULL;

	// get parameters
	if (!PyArg_ParseTuple(args, "s:ImageFFmpeg", &file))
		return -1; 

	try
	{
		// create video object
		Video_init<VideoFFmpeg>(self);

		// image mode: no capture size, rate 1, image flag set
		getVideoFFmpeg(self)->initParams(0, 0, 1.0, true);

		// open video source (capture id -1 means "not a capture device")
		Video_open(getVideo(self), file, -1);
	}
	catch (Exception & exp)
	{
		// report() converts the C++ exception into a python error
		exp.report();
		return -1;
	}
	// initialization succeeded
	return 0;
}
1266
1267 PyObject * Image_reload (PyImage * self, PyObject *args)
1268 {
1269         char * newname = NULL;
1270         if (!PyArg_ParseTuple(args, "|s:reload", &newname))
1271                 return NULL;
1272         if (self->m_image != NULL)
1273         {
1274                 VideoFFmpeg* video = getFFmpeg(self);
1275                 // check type of object
1276                 if (!newname)
1277                         newname = video->getImageName();
1278                 if (!newname) {
1279                         // if not set, retport error
1280                         PyErr_SetString(PyExc_RuntimeError, "No image file name given");
1281                         return NULL;
1282                 }
1283                 // make sure the previous file is cleared
1284                 video->release();
1285                 // open the new file
1286                 video->openFile(newname);
1287         }
1288         Py_RETURN_NONE;
1289 }
1290
// methods structure
// python methods exposed on the VideoTexture.ImageFFmpeg type
static PyMethodDef imageMethods[] =
{ // methods from VideoBase class
	{"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh image, i.e. load it"},
	{"reload", (PyCFunction)Image_reload, METH_VARARGS, "Reload image, i.e. reopen it"},
	{NULL}  // sentinel
};
// attributes structure
// python attributes (getters/setters) exposed on the VideoTexture.ImageFFmpeg type
static PyGetSetDef imageGetSets[] =
{ // methods from VideoBase class
	{(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
	// attributes from ImageBase class
	{(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
	{(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
	{(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
	{(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbour)", NULL},
	{(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
	{(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
	{NULL}  // sentinel
};
1311
1312 // python type declaration
1313 PyTypeObject ImageFFmpegType =
1314
1315         PyVarObject_HEAD_INIT(NULL, 0)
1316         "VideoTexture.ImageFFmpeg",   /*tp_name*/
1317         sizeof(PyImage),          /*tp_basicsize*/
1318         0,                         /*tp_itemsize*/
1319         (destructor)Image_dealloc, /*tp_dealloc*/
1320         0,                         /*tp_print*/
1321         0,                         /*tp_getattr*/
1322         0,                         /*tp_setattr*/
1323         0,                         /*tp_compare*/
1324         0,                         /*tp_repr*/
1325         0,                         /*tp_as_number*/
1326         0,                         /*tp_as_sequence*/
1327         0,                         /*tp_as_mapping*/
1328         0,                         /*tp_hash */
1329         0,                         /*tp_call*/
1330         0,                         /*tp_str*/
1331         0,                         /*tp_getattro*/
1332         0,                         /*tp_setattro*/
1333         &imageBufferProcs,         /*tp_as_buffer*/
1334         Py_TPFLAGS_DEFAULT,        /*tp_flags*/
1335         "FFmpeg image source",       /* tp_doc */
1336         0,                             /* tp_traverse */
1337         0,                             /* tp_clear */
1338         0,                             /* tp_richcompare */
1339         0,                             /* tp_weaklistoffset */
1340         0,                             /* tp_iter */
1341         0,                             /* tp_iternext */
1342         imageMethods,    /* tp_methods */
1343         0,                   /* tp_members */
1344         imageGetSets,          /* tp_getset */
1345         0,                         /* tp_base */
1346         0,                         /* tp_dict */
1347         0,                         /* tp_descr_get */
1348         0,                         /* tp_descr_set */
1349         0,                         /* tp_dictoffset */
1350         (initproc)ImageFFmpeg_init,     /* tp_init */
1351         0,                         /* tp_alloc */
1352         Image_allocNew,           /* tp_new */
1353 };
1354
1355 #endif  //WITH_FFMPEG
1356
1357