2 -----------------------------------------------------------------------------
3 This source file is part of VideoTexture library
5 Copyright (c) 2007 The Zdeno Ash Miklas
7 This program is free software; you can redistribute it and/or modify it under
8 the terms of the GNU Lesser General Public License as published by the Free Software
9 Foundation; either version 2 of the License, or (at your option) any later
12 This program is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
16 You should have received a copy of the GNU Lesser General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 Place - Suite 330, Boston, MA 02111-1307, USA, or go to
19 http://www.gnu.org/copyleft/lesser.txt.
20 -----------------------------------------------------------------------------
25 // INT64_C fix for some linux machines (C99ism)
26 #define __STDC_CONSTANT_MACROS
30 #include "MEM_guardedalloc.h"
35 #include "Exception.h"
36 #include "VideoFFmpeg.h"
// default frame rate (fps) used when the stream does not report a usable one
40 const double defFrameRate = 25.0;
41 // time scale constant
42 const long timeScale = 1000;
44 // macro for exception handling and logging
45 #define CATCH_EXCP catch (Exception & exp) \
46 { exp.report(); m_status = SourceError; }
// one-time ffmpeg library initialization, implemented elsewhere with C linkage
48 extern "C" void do_init_ffmpeg();
// constructor: initialize all decoder, cache and capture state to empty defaults
53 VideoFFmpeg::VideoFFmpeg (HRESULT * hRslt) : VideoBase(),
54 m_codec(NULL), m_formatCtx(NULL), m_codecCtx(NULL),
55 m_frame(NULL), m_frameDeinterlaced(NULL), m_frameRGB(NULL), m_imgConvertCtx(NULL),
56 m_deinterlace(false), m_preseek(0), m_videoStream(-1), m_baseFrameRate(25.0),
57 m_lastFrame(-1), m_eof(false), m_externTime(false), m_curPosition(-1), m_startTime(0),
58 m_captWidth(0), m_captHeight(0), m_captRate(0.f), m_isImage(false),
59 m_isThreaded(false), m_stopThread(false), m_cacheStarted(false)
63 // force flip because ffmpeg always return the image in the wrong orientation for texture
67 m_thread.first = m_thread.last = NULL;
// mutex protects the frame cache lists shared with the cache thread
68 pthread_mutex_init(&m_cacheMutex, NULL);
// free/busy list pairs for decoded frames and undecoded packets
69 m_frameCacheFree.first = m_frameCacheFree.last = NULL;
70 m_frameCacheBase.first = m_frameCacheBase.last = NULL;
71 m_packetCacheFree.first = m_packetCacheFree.last = NULL;
72 m_packetCacheBase.first = m_packetCacheBase.last = NULL;
// destructor: releases ffmpeg resources (presumably via release() — body not shown here)
76 VideoFFmpeg::~VideoFFmpeg ()
// free codec, format context, frames and sws context; sets status to stopped
82 bool VideoFFmpeg::release()
88 avcodec_close(m_codecCtx);
93 av_close_input_file(m_formatCtx);
101 if (m_frameDeinterlaced)
// data[0] buffer was allocated with MEM_callocN in openStream, free it before the frame
103 MEM_freeN(m_frameDeinterlaced->data[0]);
104 av_free(m_frameDeinterlaced);
105 m_frameDeinterlaced = NULL;
109 MEM_freeN(m_frameRGB->data[0]);
115 sws_freeContext(m_imgConvertCtx);
116 m_imgConvertCtx = NULL;
// mark source stopped after teardown
119 m_status = SourceStopped;
// allocate a new AVFrame with an RGB(A) pixel buffer sized to the codec dimensions
124 AVFrame *VideoFFmpeg::allocFrameRGB()
127 frame = avcodec_alloc_frame();
// RGBA32 target needs a 4-byte-per-pixel RGBA buffer, otherwise packed RGB24
128 if (m_format == RGBA32)
130 avpicture_fill((AVPicture*)frame,
131 (uint8_t*)MEM_callocN(avpicture_get_size(
133 m_codecCtx->width, m_codecCtx->height),
135 PIX_FMT_RGBA, m_codecCtx->width, m_codecCtx->height);
138 avpicture_fill((AVPicture*)frame,
139 (uint8_t*)MEM_callocN(avpicture_get_size(
141 m_codecCtx->width, m_codecCtx->height),
143 PIX_FMT_RGB24, m_codecCtx->width, m_codecCtx->height);
148 // set initial parameters
// capture width/height/rate; image=true means load only the first frame
149 void VideoFFmpeg::initParams (short width, short height, float rate, bool image)
152 m_captHeight = height;
// open a video file/capture/stream, locate the first video stream, open its
// decoder and allocate conversion buffers. Returns 0 on success, nonzero on failure.
158 int VideoFFmpeg::openStream(const char *filename, AVInputFormat *inputFormat, AVFormatParameters *formatParams)
160 AVFormatContext *formatCtx;
163 AVCodecContext *codecCtx;
165 if(av_open_input_file(&formatCtx, filename, inputFormat, 0, formatParams)!=0)
168 if(av_find_stream_info(formatCtx)<0)
170 av_close_input_file(formatCtx);
174 /* Find the first video stream */
176 for(i=0; i<formatCtx->nb_streams; i++)
178 if(formatCtx->streams[i] &&
179 get_codec_from_stream(formatCtx->streams[i]) &&
180 (get_codec_from_stream(formatCtx->streams[i])->codec_type==CODEC_TYPE_VIDEO))
// no video stream found: clean up and fail
189 av_close_input_file(formatCtx);
193 codecCtx = get_codec_from_stream(formatCtx->streams[videoStream]);
195 /* Find the decoder for the video stream */
196 codec=avcodec_find_decoder(codecCtx->codec_id);
199 av_close_input_file(formatCtx);
202 codecCtx->workaround_bugs = 1;
203 if(avcodec_open(codecCtx, codec)<0)
205 av_close_input_file(formatCtx);
// determine the base frame rate; old ffmpeg exposes frame_rate/frame_rate_base
209 #ifdef FFMPEG_OLD_FRAME_RATE
210 if(codecCtx->frame_rate>1000 && codecCtx->frame_rate_base==1)
211 codecCtx->frame_rate_base=1000;
212 m_baseFrameRate = (double)codecCtx->frame_rate / (double)codecCtx->frame_rate_base;
214 m_baseFrameRate = av_q2d(formatCtx->streams[videoStream]->r_frame_rate);
216 if (m_baseFrameRate <= 0.0)
217 m_baseFrameRate = defFrameRate;
// store the opened contexts and allocate working frames
220 m_codecCtx = codecCtx;
221 m_formatCtx = formatCtx;
222 m_videoStream = videoStream;
223 m_frame = avcodec_alloc_frame();
224 m_frameDeinterlaced = avcodec_alloc_frame();
226 // allocate buffer if deinterlacing is required
227 avpicture_fill((AVPicture*)m_frameDeinterlaced,
228 (uint8_t*)MEM_callocN(avpicture_get_size(
230 m_codecCtx->width, m_codecCtx->height),
231 "ffmpeg deinterlace"),
232 m_codecCtx->pix_fmt, m_codecCtx->width, m_codecCtx->height);
234 // check if the pixel format supports Alpha
235 if (m_codecCtx->pix_fmt == PIX_FMT_RGB32 ||
236 m_codecCtx->pix_fmt == PIX_FMT_BGR32 ||
237 m_codecCtx->pix_fmt == PIX_FMT_RGB32_1 ||
238 m_codecCtx->pix_fmt == PIX_FMT_BGR32_1)
240 // allocate buffer to store final decoded frame
242 // allocate sws context
243 m_imgConvertCtx = sws_getContext(
254 // allocate buffer to store final decoded frame
256 // allocate sws context
257 m_imgConvertCtx = sws_getContext(
267 m_frameRGB = allocFrameRGB();
// conversion context could not be created: undo all allocations and fail
269 if (!m_imgConvertCtx) {
270 avcodec_close(m_codecCtx);
272 av_close_input_file(m_formatCtx);
276 MEM_freeN(m_frameDeinterlaced->data[0]);
277 av_free(m_frameDeinterlaced);
278 m_frameDeinterlaced = NULL;
279 MEM_freeN(m_frameRGB->data[0]);
288 * This thread is used to load video frame asynchronously.
289 * It provides a frame caching service.
290 * The main thread is responsible for positioning the frame pointer in the
291 * file correctly before calling startCache() which starts this thread.
292 * The cache is organized in two layers: 1) a cache of 20-30 undecoded packets to keep
293 * memory and CPU low 2) a cache of 5 decoded frames.
294 * If the main thread does not find the frame in the cache (because the video has restarted
295 * or because the GE is lagging), it stops the cache with StopCache() (this is a synchronous
296 * function: it sends a signal to stop the cache thread and wait for confirmation), then
297 * change the position in the stream and restarts the cache thread.
299 void *VideoFFmpeg::cacheThread(void *data)
301 VideoFFmpeg* video = (VideoFFmpeg*)data;
302 // holds the frame that is being decoded
303 CacheFrame *currentFrame = NULL;
304 CachePacket *cachePacket;
305 bool endOfFile = false;
306 int frameFinished = 0;
// main loop: fill the packet cache, decode into the frame cache, until asked to stop
308 while (!video->m_stopThread)
310 // packet cache is used solely by this thread, no need to lock
311 // In case the stream/file contains other stream than the one we are looking for,
312 // allow a bit of cycling to get rid quickly of those frames
315 && (cachePacket = (CachePacket *)video->m_packetCacheFree.first) != NULL
316 && frameFinished < 25)
318 // free packet => packet cache is not full yet, just read more
319 if (av_read_frame(video->m_formatCtx, &cachePacket->packet)>=0)
321 if (cachePacket->packet.stream_index == video->m_videoStream)
323 // make sure fresh memory is allocated for the packet and move it to queue
324 av_dup_packet(&cachePacket->packet);
325 BLI_remlink(&video->m_packetCacheFree, cachePacket);
326 BLI_addtail(&video->m_packetCacheBase, cachePacket);
329 // this is not a good packet for us, just leave it on free queue
330 // Note: here we could handle sound packet
331 av_free_packet(&cachePacket->packet);
337 // this mark the end of the file
339 // if we cannot read a packet, no need to continue
343 // frame cache is also used by main thread, lock
344 if (currentFrame == NULL)
346 // no current frame being decoded, take free one
347 pthread_mutex_lock(&video->m_cacheMutex);
348 if ((currentFrame = (CacheFrame *)video->m_frameCacheFree.first) != NULL)
349 BLI_remlink(&video->m_frameCacheFree, currentFrame);
350 pthread_mutex_unlock(&video->m_cacheMutex);
352 if (currentFrame != NULL)
354 // this frame is out of free and busy queue, we can manipulate it without locking
// decode queued packets until a full frame is produced
356 while (!frameFinished && (cachePacket = (CachePacket *)video->m_packetCacheBase.first) != NULL)
358 BLI_remlink(&video->m_packetCacheBase, cachePacket);
359 // use m_frame because when caching, it is not used in main thread
360 // we can't use currentFrame directly because we need to convert to RGB first
361 avcodec_decode_video(video->m_codecCtx,
362 video->m_frame, &frameFinished,
363 cachePacket->packet.data, cachePacket->packet.size);
366 AVFrame * input = video->m_frame;
368 /* This means the data wasn't read properly, this check stops crashing */
369 if ( input->data[0]!=0 || input->data[1]!=0
370 || input->data[2]!=0 || input->data[3]!=0)
372 if (video->m_deinterlace)
374 if (avpicture_deinterlace(
375 (AVPicture*) video->m_frameDeinterlaced,
376 (const AVPicture*) video->m_frame,
377 video->m_codecCtx->pix_fmt,
378 video->m_codecCtx->width,
379 video->m_codecCtx->height) >= 0)
381 input = video->m_frameDeinterlaced;
// convert the decoded frame to RGB directly into the cache frame buffer
385 sws_scale(video->m_imgConvertCtx,
389 video->m_codecCtx->height,
390 currentFrame->frame->data,
391 currentFrame->frame->linesize);
392 // move frame to queue, this frame is necessarily the next one
393 currentFrame->framePosition = ++video->m_curPosition;
394 pthread_mutex_lock(&video->m_cacheMutex);
395 BLI_addtail(&video->m_frameCacheBase, currentFrame);
396 pthread_mutex_unlock(&video->m_cacheMutex);
// packet fully consumed: return it to the free queue
400 av_free_packet(&cachePacket->packet);
401 BLI_addtail(&video->m_packetCacheFree, cachePacket);
403 if (currentFrame && endOfFile)
405 // no more packet and end of file => put a special frame that indicates that
406 currentFrame->framePosition = -1;
407 pthread_mutex_lock(&video->m_cacheMutex);
408 BLI_addtail(&video->m_frameCacheBase, currentFrame);
409 pthread_mutex_unlock(&video->m_cacheMutex);
411 // no need to stay any longer in this thread
415 // small sleep to avoid unnecessary looping
418 // before quitting, put back the current frame to queue to allow freeing
421 pthread_mutex_lock(&video->m_cacheMutex);
422 BLI_addtail(&video->m_frameCacheFree, currentFrame);
423 pthread_mutex_unlock(&video->m_cacheMutex);
428 // start thread to cache video frame from file/capture/stream
429 // this function should be called only when the position in the stream is set for the
430 // first frame to cache
431 bool VideoFFmpeg::startCache()
433 if (!m_cacheStarted && m_isThreaded)
435 m_stopThread = false;
// pre-allocate the decoded-frame cache
436 for (int i=0; i<CACHE_FRAME_SIZE; i++)
438 CacheFrame *frame = new CacheFrame();
439 frame->frame = allocFrameRGB();
440 BLI_addtail(&m_frameCacheFree, frame);
// pre-allocate the undecoded-packet cache
442 for (int i=0; i<CACHE_PACKET_SIZE; i++)
444 CachePacket *packet = new CachePacket();
445 BLI_addtail(&m_packetCacheFree, packet);
// spawn the single cache thread
447 BLI_init_threads(&m_thread, cacheThread, 1);
448 BLI_insert_thread(&m_thread, this);
449 m_cacheStarted = true;
451 return m_cacheStarted;
// synchronously stop the cache thread, then free every cached frame and packet
454 void VideoFFmpeg::stopCache()
459 BLI_end_threads(&m_thread);
460 // now delete the cache
// busy frames: free pixel buffer (MEM_callocN) before the AVFrame itself
463 while ((frame = (CacheFrame *)m_frameCacheBase.first) != NULL)
465 BLI_remlink(&m_frameCacheBase, frame);
466 MEM_freeN(frame->frame->data[0]);
467 av_free(frame->frame);
470 while ((frame = (CacheFrame *)m_frameCacheFree.first) != NULL)
472 BLI_remlink(&m_frameCacheFree, frame);
473 MEM_freeN(frame->frame->data[0]);
474 av_free(frame->frame);
// queued packets still own duplicated data and must be released
477 while((packet = (CachePacket *)m_packetCacheBase.first) != NULL)
479 BLI_remlink(&m_packetCacheBase, packet);
480 av_free_packet(&packet->packet);
483 while((packet = (CachePacket *)m_packetCacheFree.first) != NULL)
485 BLI_remlink(&m_packetCacheFree, packet);
488 m_cacheStarted = false;
// return a cache frame to the free queue once the main thread is done with it
492 void VideoFFmpeg::releaseFrame(AVFrame* frame)
494 if (frame == m_frameRGB)
496 // this is not a frame from the cache, ignore
499 // this frame MUST be the first one of the queue
500 pthread_mutex_lock(&m_cacheMutex);
501 CacheFrame *cacheFrame = (CacheFrame *)m_frameCacheBase.first;
502 assert (cacheFrame != NULL && cacheFrame->frame == frame);
503 BLI_remlink(&m_frameCacheBase, cacheFrame);
504 BLI_addtail(&m_frameCacheFree, cacheFrame);
505 pthread_mutex_unlock(&m_cacheMutex);
// open a video file; derives preseek from GOP size, computes the time range,
// and decides streaming/threading behavior
509 void VideoFFmpeg::openFile (char * filename)
513 if (openStream(filename, NULL, NULL) != 0)
// estimate how many frames must be decoded after a seek (GOP dependent)
516 if (m_codecCtx->gop_size)
517 m_preseek = (m_codecCtx->gop_size < 25) ? m_codecCtx->gop_size+1 : 25;
518 else if (m_codecCtx->has_b_frames)
519 m_preseek = 25; // should determine gopsize
523 // get video time range
525 m_range[1] = (double)m_formatCtx->duration / AV_TIME_BASE;
528 VideoBase::openFile(filename);
531 // ffmpeg reports that http source are actually non stream
532 // but it is really not desirable to seek on http file, so force streaming.
533 // It would be good to find this information from the context but there are no simple indication
534 !strncmp(filename, "http://", 7) ||
535 #ifdef FFMPEG_PB_IS_POINTER
536 (m_formatCtx->pb && m_formatCtx->pb->is_streamed)
538 m_formatCtx->pb.is_streamed
542 // the file is in fact a streaming source, prevent seeking
544 // for streaming it is important to do non blocking read
545 m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
550 // the file is to be treated as an image, i.e. load the first frame only
552 // in case of reload, the filename is taken from m_imageName, no need to change it
553 if (m_imageName.Ptr() != filename)
554 m_imageName = filename;
559 // check if we should do multi-threading?
560 if (!m_isImage && BLI_system_thread_count() > 1)
562 // never thread image: there are no frame to read ahead
563 // no need to thread if the system has a single core
569 // open video capture device
// camIdx selects the device; 'file' optionally names a driver and/or TV standard
570 void VideoFFmpeg::openCam (char * file, short camIdx)
572 // open camera source
573 AVInputFormat *inputFormat;
574 AVFormatParameters formatParams;
575 AVRational frameRate;
576 char *p, filename[28], rateStr[20];
580 memset(&formatParams, 0, sizeof(formatParams));
582 // video capture on windows only through Video For Windows driver
583 inputFormat = av_find_input_format("vfwcap");
585 // Video For Windows not supported??
587 sprintf(filename, "%d", camIdx);
589 // In Linux we support two types of devices: VideoForLinux and DV1394.
590 // the user specify it with the filename:
591 // [<device_type>][:<standard>]
592 // <device_type> : 'v4l' for VideoForLinux, 'dv1394' for DV1394. By default 'v4l'
593 // <standard> : 'pal', 'secam' or 'ntsc'. By default 'ntsc'
594 // The driver name is constructed automatically from the device type:
595 // v4l : /dev/video<camIdx>
596 // dv1394: /dev/dv1394/<camIdx>
597 // If you have different driver name, you can specify the driver name explicitly
598 // instead of device type. Examples of valid filename:
599 // /dev/v4l/video0:pal
600 // /dev/ieee1394/1:ntsc
603 if (file && strstr(file, "1394") != NULL)
605 // the user specifies a driver, check if it is v4l or d41394
606 inputFormat = av_find_input_format("dv1394");
607 sprintf(filename, "/dev/dv1394/%d", camIdx);
610 inputFormat = av_find_input_format("video4linux");
611 sprintf(filename, "/dev/video%d", camIdx);
614 // these format should be supported, check ffmpeg compilation
616 if (file && strncmp(file, "/dev", 4) == 0)
618 // user does not specify a driver
619 strncpy(filename, file, sizeof(filename));
620 filename[sizeof(filename)-1] = 0;
621 if ((p = strchr(filename, ':')) != 0)
624 if (file && (p = strchr(file, ':')) != NULL)
625 formatParams.standard = p+1;
// fall back to the default capture rate, then express it as a rational time base
628 if (m_captRate <= 0.f)
629 m_captRate = defFrameRate;
630 sprintf(rateStr, "%f", m_captRate);
631 av_parse_video_frame_rate(&frameRate, rateStr);
632 // populate format parameters
633 // need to specify the time base = inverse of rate
634 formatParams.time_base.num = frameRate.den;
635 formatParams.time_base.den = frameRate.num;
636 formatParams.width = m_captWidth;
637 formatParams.height = m_captHeight;
639 if (openStream(filename, inputFormat, &formatParams) != 0)
642 // for video capture it is important to do non blocking read
643 m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
645 VideoBase::openCam(file, camIdx);
646 // check if we should do multi-threading?
647 if (BLI_system_thread_count() > 1)
649 // no need to thread if the system has a single core
// start/restart playback: delegate to base class, then reposition the stream
655 bool VideoFFmpeg::play (void)
659 // if object is able to play
660 if (VideoBase::play())
662 // set video position
// pause playback via the base class
674 bool VideoFFmpeg::pause (void)
678 if (VideoBase::pause())
// stop playback; position state is reset so play() restarts from the beginning
688 bool VideoFFmpeg::stop (void)
693 // force restart when play
// set the replay range [start, stop] in seconds
703 void VideoFFmpeg::setRange (double start, double stop)
710 VideoBase::setRange(start, stop);
711 // set range for video
// set the playback frame rate factor (delegated to base class)
719 void VideoFFmpeg::setFrameRate (float rate)
721 VideoBase::setFrameRate(rate);
// image refresh entry point; ts is the requested timestamp (body not shown here)
726 void VideoFFmpeg::calcImage (unsigned int texId, double ts)
732 // load frame from video
// computes which frame should be shown now, fetches it (from cache or stream)
// and pushes its pixels into the texture image
733 void VideoFFmpeg::loadFrame (double ts)
735 if (m_status == SourcePlaying)
// current system time, used to derive the frame index to display
738 double startTime = PIL_check_seconds_timer();
740 if (m_isFile && ts >= 0.0)
742 // allow setting timestamp only when not streaming
744 if (m_eof && actTime * actFrameRate() < m_lastFrame)
746 // user is asking to rewind while the playback is already finished in the cache.
747 // we must clean the cache otherwise the eof condition will prevent any further reading.
753 if (m_lastFrame == -1 && !m_isFile)
754 m_startTime = startTime;
755 actTime = startTime - m_startTime;
757 // if video has ended
758 if (m_isFile && actTime * m_frameRate >= m_range[1])
760 // in any case, this resets the cache
762 // if repeats are set, decrease them
765 // if video has to be replayed
768 // reset its position
769 actTime -= (m_range[1] - m_range[0]) / m_frameRate;
770 m_startTime += (m_range[1] - m_range[0]) / m_frameRate;
772 // if video has to be stopped, stop it
775 m_status = SourceStopped;
// frame index that should be displayed now; images just advance by one
780 long actFrame = (m_isImage) ? m_lastFrame+1 : long(actTime * actFrameRate());
781 // if actual frame differs from last frame
782 if (actFrame != m_lastFrame)
786 if((frame = grabFrame(actFrame)) != NULL)
788 if (!m_isFile && !m_cacheStarted)
790 // streaming without cache: detect synchronization problem
791 double execTime = PIL_check_seconds_timer() - startTime;
792 if (execTime > 0.005)
794 // exec time is too long, it means that the function was blocking
795 // resynchronize the stream from this time
796 m_startTime += execTime;
800 m_lastFrame = actFrame;
801 // init image, if needed
802 init(short(m_codecCtx->width), short(m_codecCtx->height));
// hand the RGB pixel data to the image processing pipeline
804 process((BYTE*)(frame->data[0]));
805 // finished with the frame, release it so that cache can reuse it
807 // in case it is an image, automatically stop reading it
810 m_status = SourceStopped;
811 // close the file as we don't need it anymore
814 } else if (!m_isFile)
816 // we didn't get a frame and we are streaming, this may be due to
817 // a delay in the network or because we are getting the frame too fast.
818 // In the later case, shift time by a small amount to compensate for a drift
826 // set actual position
827 void VideoFFmpeg::setPositions (void)
829 // set video start time
830 m_startTime = PIL_check_seconds_timer();
831 // if file is played and actual position is before end position
832 if (!m_eof && m_lastFrame >= 0 && (!m_isFile || m_lastFrame < m_range[1] * actFrameRate()))
833 // continue from actual position
834 m_startTime -= double(m_lastFrame) / actFrameRate();
836 m_startTime -= m_range[0];
837 // start from beginning, stop cache just in case
842 // position pointer in file, position in second
// fetch the frame at 'position': from the cache when it runs, otherwise by
// seeking/decoding directly. Returns NULL when no frame is available.
843 AVFrame *VideoFFmpeg::grabFrame(long position)
848 bool frameLoaded = false;
849 long long targetTs = 0;
854 // when cache is active, we must not read the file directly
856 pthread_mutex_lock(&m_cacheMutex);
857 frame = (CacheFrame *)m_frameCacheBase.first;
858 pthread_mutex_unlock(&m_cacheMutex);
859 // no need to remove the frame from the queue: the cache thread does not touch the head, only the tail
862 // no frame in cache, in case of file it is an abnormal situation
865 // go back to no threaded reading
871 if (frame->framePosition == -1)
873 // this frame mark the end of the file (only used for file)
874 // leave in cache to make sure we don't miss it
878 // for streaming, always return the next frame,
879 // that's what grabFrame does in non cache mode anyway.
880 if (!m_isFile || frame->framePosition == position)
884 // this frame is not useful, release it
885 pthread_mutex_lock(&m_cacheMutex);
886 BLI_remlink(&m_frameCacheBase, frame);
887 BLI_addtail(&m_frameCacheFree, frame);
888 pthread_mutex_unlock(&m_cacheMutex);
891 // come here when there is no cache or cache has been stopped
892 // locate the frame, by seeking if necessary (seeking is only possible for files)
895 // first check if the position that we are looking for is in the preseek range
896 // if so, just read the frame until we get there
897 if (position > m_curPosition + 1
899 && position - (m_curPosition + 1) < m_preseek)
// decode forward frame-by-frame instead of seeking
901 while(av_read_frame(m_formatCtx, &packet)>=0)
903 if (packet.stream_index == m_videoStream)
905 avcodec_decode_video(
907 m_frame, &frameFinished,
908 packet.data, packet.size);
912 av_free_packet(&packet);
913 if (position == m_curPosition+1)
917 // if the position is not in preseek, do a direct jump
918 if (position != m_curPosition + 1)
// convert the wanted frame index into a stream timestamp
920 double timeBase = av_q2d(m_formatCtx->streams[m_videoStream]->time_base);
921 int64_t pos = (int64_t)((position - m_preseek) / (m_baseFrameRate*timeBase));
922 int64_t startTs = m_formatCtx->streams[m_videoStream]->start_time;
928 if (startTs != AV_NOPTS_VALUE)
931 if (position <= m_curPosition || !m_eof)
934 // Tried to make this work but couldn't: seeking on byte is ignored by the
935 // format plugin and it will generally continue to read from last timestamp.
936 // Too bad because frame seek is not always able to get the first frame
938 if (position <= m_preseek)
940 // we can safely go the beginning of the file
941 if (av_seek_frame(m_formatCtx, m_videoStream, 0, AVSEEK_FLAG_BYTE) >= 0)
943 // binary seek does not reset the timestamp, must do it now
944 av_update_cur_dts(m_formatCtx, m_formatCtx->streams[m_videoStream], startTs);
951 // current position is now lost, guess a value.
952 if (av_seek_frame(m_formatCtx, m_videoStream, pos, AVSEEK_FLAG_BACKWARD) >= 0)
954 // current position is now lost, guess a value.
955 // It's not important because it will be set at this end of this function
956 m_curPosition = position - m_preseek - 1;
960 // this is the timestamp of the frame we're looking for
961 targetTs = (int64_t)(position / (m_baseFrameRate * timeBase));
962 if (startTs != AV_NOPTS_VALUE)
// drop any buffered frames from before the seek
966 avcodec_flush_buffers(m_codecCtx);
968 } else if (m_isThreaded)
970 // cache is not started but threading is possible
971 // better not read the stream => make take some time, better start caching
974 // Abnormal!!! could not start cache, fall back on direct read
975 m_isThreaded = false;
978 // find the correct frame, in case of streaming and no cache, it means just
979 // return the next frame. This is not quite correct, may need more work
980 while(av_read_frame(m_formatCtx, &packet)>=0)
982 if(packet.stream_index == m_videoStream)
984 avcodec_decode_video(m_codecCtx,
985 m_frame, &frameFinished,
986 packet.data, packet.size);
988 if (frameFinished && !posFound)
990 if (packet.dts >= targetTs)
994 if(frameFinished && posFound == 1)
996 AVFrame * input = m_frame;
998 /* This means the data wasn't read properly,
999 this check stops crashing */
1000 if ( input->data[0]==0 && input->data[1]==0
1001 && input->data[2]==0 && input->data[3]==0)
1003 av_free_packet(&packet);
// optional deinterlace before RGB conversion
1009 if (avpicture_deinterlace(
1010 (AVPicture*) m_frameDeinterlaced,
1011 (const AVPicture*) m_frame,
1012 m_codecCtx->pix_fmt,
1014 m_codecCtx->height) >= 0)
1016 input = m_frameDeinterlaced;
1020 sws_scale(m_imgConvertCtx,
1026 m_frameRGB->linesize);
1027 av_free_packet(&packet);
1032 av_free_packet(&packet);
// only a file can reach a true end-of-stream condition
1034 m_eof = m_isFile && !frameLoaded;
1037 m_curPosition = position;
1040 // normal case for file: first locate, then start cache
1043 // Abnormal!! could not start cache, return to non-cache mode
1044 m_isThreaded = false;
1056 // cast Image pointer to VideoFFmpeg
1057 inline VideoFFmpeg * getVideoFFmpeg (PyImage * self)
1058 { return static_cast<VideoFFmpeg*>(self->m_image); }
1061 // object initialization
// Python tp_init for VideoTexture.VideoFFmpeg; returns 0 on success, -1 on error
1062 static int VideoFFmpeg_init (PyObject * pySelf, PyObject * args, PyObject * kwds)
1064 PyImage * self = reinterpret_cast<PyImage*>(pySelf);
1065 // parameters - video source
1066 // file name or format type for capture (only for Linux: video4linux or dv1394)
1068 // capture device number
1070 // capture width, only if capt is >= 0
1072 // capture height, only if capt is >= 0
1074 // capture rate, only if capt is >= 0
1077 static char *kwlist[] = {"file", "capture", "rate", "width", "height", NULL};
// parse required file name plus optional capture parameters
1080 if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|hfhh", kwlist, &file, &capt,
1081 &rate, &width, &height))
1086 // create video object
1087 Video_init<VideoFFmpeg>(self);
1090 getVideoFFmpeg(self)->initParams(width, height, rate);
1092 // open video source
1093 Video_open(getVideo(self), file, capt);
1095 catch (Exception & exp)
1100 // initialization succeeded
// getter: number of frames decoded ahead after a seek, as a Python int
1104 PyObject * VideoFFmpeg_getPreseek (PyImage *self, void * closure)
1106 return Py_BuildValue("h", getFFmpeg(self)->getPreseek());
// setter for the preseek attribute; returns 0 on success, -1 on type error
1110 int VideoFFmpeg_setPreseek (PyImage * self, PyObject * value, void * closure)
1112 // check validity of parameter
1113 if (value == NULL || !PyLong_Check(value))
1115 PyErr_SetString(PyExc_TypeError, "The value must be an integer");
1119 getFFmpeg(self)->setPreseek(PyLong_AsSsize_t(value));
// getter: deinterlace flag exposed as a Python bool
1125 PyObject * VideoFFmpeg_getDeinterlace (PyImage * self, void * closure)
1127 if (getFFmpeg(self)->getDeinterlace())
// setter for the deinterlace flag; requires a Python bool, returns -1 on type error
1134 int VideoFFmpeg_setDeinterlace (PyImage * self, PyObject * value, void * closure)
1136 // check parameter, report failure
1137 if (value == NULL || !PyBool_Check(value))
1139 PyErr_SetString(PyExc_TypeError, "The value must be a bool")
1143 getFFmpeg(self)->setDeinterlace(value == Py_True);
1148 // methods structure
// Python method table for the VideoFFmpeg type
1149 static PyMethodDef videoMethods[] =
1150 { // methods from VideoBase class
1151 {"play", (PyCFunction)Video_play, METH_NOARGS, "Play (restart) video"},
1152 {"pause", (PyCFunction)Video_pause, METH_NOARGS, "pause video"},
1153 {"stop", (PyCFunction)Video_stop, METH_NOARGS, "stop video (play will replay it from start)"},
1154 {"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh video - get its status"},
1157 // attributes structure
// Python getter/setter table for the VideoFFmpeg type
1158 static PyGetSetDef videoGetSets[] =
1159 { // methods from VideoBase class
1160 {(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
1161 {(char*)"range", (getter)Video_getRange, (setter)Video_setRange, (char*)"replay range", NULL},
1162 {(char*)"repeat", (getter)Video_getRepeat, (setter)Video_setRepeat, (char*)"repeat count, -1 for infinite repeat", NULL},
1163 {(char*)"framerate", (getter)Video_getFrameRate, (setter)Video_setFrameRate, (char*)"frame rate", NULL},
1164 // attributes from ImageBase class
1165 {(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
1166 {(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
1167 {(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbour)", NULL},
1168 {(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
1169 {(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
1170 {(char*)"preseek", (getter)VideoFFmpeg_getPreseek, (setter)VideoFFmpeg_setPreseek, (char*)"nb of frames of preseek", NULL},
1171 {(char*)"deinterlace", (getter)VideoFFmpeg_getDeinterlace, (setter)VideoFFmpeg_setDeinterlace, (char*)"deinterlace image", NULL},
1175 // python type declaration
// CPython type object for VideoTexture.VideoFFmpeg
1176 PyTypeObject VideoFFmpegType =
1178 PyVarObject_HEAD_INIT(NULL, 0)
1179 "VideoTexture.VideoFFmpeg", /*tp_name*/
1180 sizeof(PyImage), /*tp_basicsize*/
1182 (destructor)Image_dealloc, /*tp_dealloc*/
1189 0, /*tp_as_sequence*/
1190 0, /*tp_as_mapping*/
1197 Py_TPFLAGS_DEFAULT, /*tp_flags*/
1198 "FFmpeg video source", /* tp_doc */
1199 0, /* tp_traverse */
1201 0, /* tp_richcompare */
1202 0, /* tp_weaklistoffset */
1204 0, /* tp_iternext */
1205 videoMethods, /* tp_methods */
1207 videoGetSets, /* tp_getset */
1210 0, /* tp_descr_get */
1211 0, /* tp_descr_set */
1212 0, /* tp_dictoffset */
1213 (initproc)VideoFFmpeg_init, /* tp_init */
1215 Image_allocNew, /* tp_new */
1218 // object initialization
// Python tp_init for VideoTexture.ImageFFmpeg (single-frame image source)
1219 static int ImageFFmpeg_init (PyObject * pySelf, PyObject * args, PyObject * kwds)
1221 PyImage * self = reinterpret_cast<PyImage*>(pySelf);
1222 // parameters - video source
1223 // file name or format type for capture (only for Linux: video4linux or dv1394)
1227 if (!PyArg_ParseTuple(args, "s:ImageFFmpeg", &file))
1232 // create video object
1233 Video_init<VideoFFmpeg>(self);
// image=true: only the first frame will be loaded
1235 getVideoFFmpeg(self)->initParams(0, 0, 1.0, true);
1237 // open video source
1238 Video_open(getVideo(self), file, -1);
1240 catch (Exception & exp)
1245 // initialization succeeded
// Python method: reload the image, optionally from a new file name
1249 PyObject * Image_reload (PyImage * self, PyObject *args)
1251 char * newname = NULL;
1252 if (!PyArg_ParseTuple(args, "|s:reload", &newname))
1254 if (self->m_image != NULL)
1256 VideoFFmpeg* video = getFFmpeg(self);
1257 // check type of object
// no new name given: fall back to the name stored at open time
1259 newname = video->getImageName();
1261 // if not set, report error
1262 PyErr_SetString(PyExc_RuntimeError, "No image file name given");
1265 // make sure the previous file is cleared
1267 // open the new file
1268 video->openFile(newname);
1273 // methods structure
// Python method table for the ImageFFmpeg type
1274 static PyMethodDef imageMethods[] =
1275 { // methods from VideoBase class
1276 {"refresh", (PyCFunction)Video_refresh, METH_NOARGS, "Refresh image, i.e. load it"},
1277 {"reload", (PyCFunction)Image_reload, METH_VARARGS, "Reload image, i.e. reopen it"},
1280 // attributes structure
// Python getter/setter table for the ImageFFmpeg type
1281 static PyGetSetDef imageGetSets[] =
1282 { // methods from VideoBase class
1283 {(char*)"status", (getter)Video_getStatus, NULL, (char*)"video status", NULL},
1284 // attributes from ImageBase class
1285 {(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
1286 {(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
1287 {(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbour)", NULL},
1288 {(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
1289 {(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
1293 // python type declaration
// CPython type object for VideoTexture.ImageFFmpeg
1294 PyTypeObject ImageFFmpegType =
1296 PyVarObject_HEAD_INIT(NULL, 0)
1297 "VideoTexture.ImageFFmpeg", /*tp_name*/
1298 sizeof(PyImage), /*tp_basicsize*/
1300 (destructor)Image_dealloc, /*tp_dealloc*/
1307 0, /*tp_as_sequence*/
1308 0, /*tp_as_mapping*/
1315 Py_TPFLAGS_DEFAULT, /*tp_flags*/
1316 "FFmpeg image source", /* tp_doc */
1317 0, /* tp_traverse */
1319 0, /* tp_richcompare */
1320 0, /* tp_weaklistoffset */
1322 0, /* tp_iternext */
1323 imageMethods, /* tp_methods */
1325 imageGetSets, /* tp_getset */
1328 0, /* tp_descr_get */
1329 0, /* tp_descr_set */
1330 0, /* tp_dictoffset */
1331 (initproc)ImageFFmpeg_init, /* tp_init */
1333 Image_allocNew, /* tp_new */
1336 #endif //WITH_FFMPEG