Example #1 (votes: 0)
File: movie2.py — Project: RalRal/psychopy
    def _reset(self):
        """Restore all per-movie state to its unloaded defaults.

        Frees the OpenGL texture (if one was allocated) and clears the
        video- and audio-stream bookkeeping so a new movie can be loaded.
        """
        self.duration = None
        self.status = NOT_STARTED
        self._numpy_frame = None
        # Release the GL texture so a reloaded movie starts from scratch.
        if self._texID is not None:
            GL.glDeleteTextures(1, self._texID)
            self._texID = None
        # Video-stream bookkeeping: all unknown until loadMovie() runs.
        for attr_name in ('_total_frame_count', '_video_width',
                          '_video_height', '_video_frame_rate',
                          '_inter_frame_interval', '_prev_frame_sec',
                          '_next_frame_sec', '_next_frame_index',
                          '_prev_frame_index', '_video_perc_done'):
            setattr(self, attr_name, None)
        # TODO: Read depth from video source
        self._video_frame_depth = 3
        self._next_frame_displayed = False
        self._video_track_clock = Clock()

        # Audio-stream (VLC) bookkeeping.
        self._audio_stream_clock = Clock()
        for attr_name in ('_vlc_instance', '_audio_stream',
                          '_audio_stream_player',
                          '_audio_stream_event_manager'):
            setattr(self, attr_name, None)
        self._audio_stream_started = False
Example #2 (votes: 0)
File: movie2.py — Project: 9173860/psychopy
 def _reset(self):
     """Restore all per-movie state to its unloaded defaults.

     Clears the cached frame/texture handles, the video-stream
     bookkeeping and the VLC audio-stream handles so that a new movie
     can be loaded cleanly.
     """
     self.duration = None
     self.status = NOT_STARTED
     # Cached frame data and texture interface for the current frame.
     self._numpy_frame = None
     self._frame_texture = None
     self._frame_data_interface = None
     self._video_stream = None
     # Video-stream bookkeeping: all unknown until a movie is loaded.
     self._total_frame_count = None
     self._video_width = None
     self._video_height = None
     # TODO: Read depth from video source
     self._video_frame_depth = 3
     self._video_frame_rate = None
     self._inter_frame_interval = None
     self._prev_frame_sec = None
     self._next_frame_sec = None
     self._next_frame_index = None
     self._prev_frame_index = None
     self._video_perc_done = None
     self._last_video_flip_time = None
     self._next_frame_displayed = False
     self._video_track_clock = Clock()
     # Audio-stream (VLC) bookkeeping.
     self._vlc_instance = None
     self._audio_stream = None
     self._audio_stream_player = None
     self._audio_stream_started = False
     self._audio_stream_event_manager=None
     # Baseline timestamps used to estimate drift between the audio
     # callbacks and the computer clock (core.getTime()).
     self._last_audio_callback_time = core.getTime()
     self._last_audio_stream_time = None
     self._first_audio_callback_time = None
     self._audio_computer_time_drift = None
Example #3 (votes: 0)
File: movie3.py — Project: papr/psychopy
    def __init__(self, win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0,0.0),
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0,1.0,1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,
                 noAudio=False,
                 vframe_callback=None,
                 fps=None,
                 interpolate = True,
        ):
        """Create a movie stimulus bound to *win* and load *filename*.

        :Parameters:

            filename :
                a string giving the relative or absolute path to the movie.
            flipVert : True or *False*
                If True then the movie will be top-bottom flipped
            flipHoriz : True or *False*
                If True then the movie will be right-left flipped
            volume :
                The nominal level is 100, and 0 is silence.
            loop : bool, optional
                Whether to start the movie over from the beginning if draw is
                called and the movie is done.

        NOTE(review): color, colorSpace, fps and vframe_callback are
        accepted but not referenced in this body — confirm whether they
        are consumed elsewhere or are dead parameters.
        """
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        # Base-class init with logging deferred; autoLog is set last, below.
        super(MovieStim3, self).__init__(win, units=units, name=name,
                                         autoLog=False)

        # Screen refresh rate: prefer the window's stored rate, then a
        # measured rate, then fall back to 60 Hz.
        retraceRate = win._monitorFrameRate
        if retraceRate is None:
            retraceRate = win.getActualFrameRate()
        if retraceRate is None:
            logging.warning("FrameRate could not be supplied by psychopy; defaulting to 60.0")
            retraceRate = 60.0
        self._retraceInterval = 1.0/retraceRate
        self.filename = filename
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.depth = depth
        self.opacity = float(opacity)
        self.interpolate = interpolate
        self.noAudio = noAudio
        self._audioStream = None
        # Prefer glTexSubImage2D updates (reuse one texture allocation).
        self.useTexSubImage2D = True

        self._videoClock = Clock()
        # loadMovie() also sets self._mov and self.duration.
        self.loadMovie(self.filename)
        self.setVolume(volume)
        self.nDroppedFrames = 0

        #size: default to the movie's native pixel size
        if size is None:
            self.size = numpy.array([self._mov.w, self._mov.h],
                                   float)
        else:
            self.size = val2array(size)
        self.ori = ori
        self._updateVertices()
        #set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created %s = %s" %(self.name, str(self)))
Example #4 (votes: 0)
File: movie3.py — Project: papr/psychopy
class MovieStim3(BaseVisualStim, ContainerMixin):
    """A stimulus class for playing movies (mpeg, avi, etc...) in PsychoPy
    that does not require avbin. Video frames are decoded with moviepy's
    `VideoFileClip` and audio (when enabled) is played via psychopy's
    `sound` module.

    **Example**::

        See Movie2Stim.py for demo.
    """
    def __init__(self, win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0,0.0),
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0,1.0,1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,
                 noAudio=False,
                 vframe_callback=None,
                 fps=None,
                 interpolate = True,
        ):
        """Create a movie stimulus bound to *win* and load *filename*.

        :Parameters:

            filename :
                a string giving the relative or absolute path to the movie.
            flipVert : True or *False*
                If True then the movie will be top-bottom flipped
            flipHoriz : True or *False*
                If True then the movie will be right-left flipped
            volume :
                The nominal level is 100, and 0 is silence.
                NOTE(review): setVolume() is currently a no-op, so this
                parameter has no effect yet.
            loop : bool, optional
                Whether to start the movie over from the beginning if draw is
                called and the movie is done.

        """
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        # Base-class init with logging deferred; autoLog is set last, below.
        super(MovieStim3, self).__init__(win, units=units, name=name,
                                         autoLog=False)

        # Screen refresh rate: prefer the window's stored rate, then a
        # measured rate, then fall back to 60 Hz.
        retraceRate = win._monitorFrameRate
        if retraceRate is None:
            retraceRate = win.getActualFrameRate()
        if retraceRate is None:
            logging.warning("FrameRate could not be supplied by psychopy; defaulting to 60.0")
            retraceRate = 60.0
        self._retraceInterval = 1.0/retraceRate
        self.filename = filename
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.depth = depth
        self.opacity = float(opacity)
        self.interpolate = interpolate
        self.noAudio = noAudio
        self._audioStream = None
        # Prefer glTexSubImage2D updates (reuse one texture allocation).
        self.useTexSubImage2D = True

        self._videoClock = Clock()
        # loadMovie() also sets self._mov and self.duration.
        self.loadMovie(self.filename)
        self.setVolume(volume)
        self.nDroppedFrames = 0

        # size: default to the movie's native pixel size
        if size is None:
            self.size = numpy.array([self._mov.w, self._mov.h],
                                   float)
        else:
            self.size = val2array(size)
        self.ori = ori
        self._updateVertices()
        # set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created %s = %s" %(self.name, str(self)))

    def reset(self):
        """Clear the cached frame, frame-time cursor and GL texture id,
        returning the stimulus to the NOT_STARTED state."""
        self._numpyFrame = None
        self._nextFrameT = None
        self._texID = None
        self.status = NOT_STARTED

    def setMovie(self, filename, log=True):
        """See `~MovieStim.loadMovie` (the functions are identical).
        This form is provided for syntactic consistency with other visual stimuli.
        """
        self.loadMovie(filename, log=log)

    def loadMovie(self, filename, log=True):
        """Load a movie from file

        :Parameters:

            filename: string
                The name of the file, including path if necessary

        :Raises: IOError if *filename* does not exist.

        After the file is loaded MovieStim.duration is updated with the movie
        duration (in seconds).
        """
        self.reset() #set status and timestamps etc

        # Create Video Stream stuff
        if os.path.isfile(filename):
            # audio=(1 - noAudio): truthy when audio should be decoded.
            self._mov = VideoFileClip(filename, audio= (1-self.noAudio))
            if (not self.noAudio) and (self._mov.audio is not None):
                self._audioStream = sound.Sound(self._mov.audio.to_soundarray(),
                                            sampleRate = self._mov.audio.fps)
            else: #make sure we set to None (in case prev clip did have audio)
                self._audioStream = None
        else:
            raise IOError("Movie file '%s' was not found" %filename)
        #mov has attributes:
            # size, duration, fps
        #mov.audio has attributes
            #duration, fps (aka sampleRate), to_soundarray()
        self._frameInterval = 1.0/self._mov.fps
        self.duration = self._mov.duration
        self.filename = filename
        self._updateFrameTexture()
        logAttrib(self, log, 'movie', filename)

    def play(self, log=True):
        """Continue a paused movie from current position.
        """
        status = self.status
        if self._audioStream is not None:
            self._audioStream.play()
        if status != PLAYING:
            self.status = PLAYING
            # Re-anchor the video clock so getTime() equals the current
            # frame time.
            self._videoClock.reset(-self.getCurrentFrameTime())

            if status == PAUSED:
                # Rebuild the audio stream from the resume position.
                self._audioSeek(self.getCurrentFrameTime())

            if log and self.autoLog:
                self.win.logOnFlip("Set %s playing" %(self.name),
                                   level=logging.EXP, obj=self)
            self._updateFrameTexture()

    def pause(self, log=True):
        """
        Pause the current point in the movie (sound will stop, current frame
        will not advance).  If play() is called again both will restart.

        Returns True if the movie was playing (and is now paused), else False.
        """
        if self.status == PLAYING:
            self.status = PAUSED
            if self._audioStream:
                self._audioStream.stop()
            if log and self.autoLog:
                self.win.logOnFlip("Set %s paused" %(self.name), level=logging.EXP, obj=self)
            return True
        if log and self.autoLog:
            self.win.logOnFlip("Failed Set %s paused" %(self.name), level=logging.EXP, obj=self)
        return False

    def stop(self, log=True):
        """
        Stop the current point in the movie (sound will stop, current frame
        will not advance). Once stopped the movie cannot be restarted - it must
        be loaded again. Use pause() if you may need to restart the movie.
        """
        if self.status != STOPPED:
            self.status = STOPPED
            self._unload()
            # BUG FIX: this class defines reset(), not _reset();
            # self._reset() raised AttributeError here.
            self.reset()
            if log and self.autoLog:
                self.win.logOnFlip("Set %s stopped" %(self.name),
                    level=logging.EXP,obj=self)

    def setVolume(self, volume):
        # NOTE(review): not implemented — the 'volume' __init__ parameter
        # is currently ignored for this stimulus class.
        pass #to do

    def setFlipHoriz(self, newVal=True, log=True):
        """If set to True then the movie will be flipped horizontally (left-to-right).
        Note that this is relative to the original, not relative to the current state.
        """
        self.flipHoriz = newVal
        logAttrib(self, log, 'flipHoriz')

    def setFlipVert(self, newVal=True, log=True):
        """If set to True then the movie will be flipped vertically (top-to-bottom).
        Note that this is relative to the original, not relative to the current state.
        """
        # BUG FIX: was 'self.flipVert = not newVal', which contradicted
        # this docstring, setFlipHoriz(), and __init__ (which stores the
        # flipVert argument un-negated).
        self.flipVert = newVal
        logAttrib(self, log, 'flipVert')

    def getFPS(self):
        """
        Returns the movie frames per second playback speed.
        """
        return self._mov.fps

    def getCurrentFrameTime(self):
        """
        Get the time that the movie file specified the current video frame as
        having.
        """
        # _nextFrameT is the time of the frame about to be shown; back up
        # one frame interval to get the currently displayed frame's time.
        return self._nextFrameT - self._frameInterval

    def _updateFrameTexture(self):
        """Fetch the frame for the current playback time and upload it to
        the GL texture (allocating the texture on first use)."""
        if self._nextFrameT is None:
            # movie has no current position, need to reset the clock
            # to zero in order to have the timing logic work
            # otherwise the video stream would skip frames until the
            # time since creating the movie object has passed
            self._videoClock.reset()
            self._nextFrameT = 0

        #only advance if next frame (half of next retrace rate)
        if self._nextFrameT > self.duration:
            self._onEos()
            if self.status == FINISHED:
                # BUG FIX: when not looping, _onEos() -> stop() unloads
                # the movie (self._mov is None); fetching another frame
                # would raise AttributeError. When looping, _onEos() has
                # seeked back to 0 and we continue below.
                return None
        elif (self._numpyFrame is not None) and \
            (self._nextFrameT > (self._videoClock.getTime()-self._retraceInterval/2.0)):
            return None
        self._numpyFrame = self._mov.get_frame(self._nextFrameT)
        useSubTex = self.useTexSubImage2D
        if self._texID is None:
            self._texID = GL.GLuint()
            GL.glGenTextures(1, ctypes.byref(self._texID))
            # A fresh texture must be fully defined with glTexImage2D once.
            useSubTex = False

        #bind the texture in openGL
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)#bind that name to the target
        GL.glTexParameteri(GL.GL_TEXTURE_2D,GL.GL_TEXTURE_WRAP_S,GL.GL_REPEAT) #makes the texture map wrap (this is actually default anyway)
        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)  # data from PIL/numpy is packed, but default for GL is 4 bytes
        #important if using bits++ because GL_LINEAR
        #sometimes extrapolates to pixel vals outside range
        if self.interpolate:
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                    self._numpyFrame.shape[1],self._numpyFrame.shape[0], 0,
                    GL.GL_RGB, GL.GL_UNSIGNED_BYTE, self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                    self._numpyFrame.shape[1], self._numpyFrame.shape[0],
                    GL.GL_RGB, GL.GL_UNSIGNED_BYTE, self._numpyFrame.ctypes)

        else:
            GL.glTexParameteri(GL.GL_TEXTURE_2D,GL.GL_TEXTURE_MAG_FILTER,GL.GL_NEAREST)
            GL.glTexParameteri(GL.GL_TEXTURE_2D,GL.GL_TEXTURE_MIN_FILTER,GL.GL_NEAREST)
            # NOTE(review): this branch uploads as GL_BGR while the
            # interpolate branch uses GL_RGB — colours would differ between
            # the two filter modes. Verify which channel order the decoded
            # frames actually use.
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1],self._numpyFrame.shape[0], 0,
                                GL.GL_BGR, GL.GL_UNSIGNED_BYTE, self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                    self._numpyFrame.shape[1], self._numpyFrame.shape[0],
                    GL.GL_BGR, GL.GL_UNSIGNED_BYTE, self._numpyFrame.ctypes)
        GL.glTexEnvi(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE, GL.GL_MODULATE)#?? do we need this - think not!

        if self.status != PAUSED:
            self._nextFrameT += self._frameInterval

    def draw(self, win=None):
        """
        Draw the current frame to a particular visual.Window (or to the
        default win for this object if not specified). The current position in
        the movie will be determined automatically.

        This method should be called on every frame that the movie is meant to
        appear"""

        if self.status==NOT_STARTED or (self.status==FINISHED and self.loop):
            self.play()
        elif self.status == FINISHED and not self.loop:
            return
        if win is None:
            win = self.win
        self._selectWindow(win)
        self._updateFrameTexture() #will check if it's needed yet in the function

        #scale the drawing frame and get to centre of field
        GL.glPushMatrix()#push before drawing, pop after
        GL.glPushClientAttrib(GL.GL_CLIENT_ALL_ATTRIB_BITS)#push the data for client attributes

        self.win.setScale('pix')
        #move to centre of stimulus and rotate
        vertsPix = self.verticesPix

        #bind textures
        GL.glActiveTexture (GL.GL_TEXTURE1)
        GL.glBindTexture (GL.GL_TEXTURE_2D,0)
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glActiveTexture (GL.GL_TEXTURE0)
        GL.glBindTexture (GL.GL_TEXTURE_2D, self._texID)
        GL.glEnable(GL.GL_TEXTURE_2D)

        GL.glColor4f(1, 1, 1, self.opacity)  # sets opacity (1,1,1 = RGB placeholder)

        # Interleaved T2F_V3F data: (tex u, tex v, x, y, z) per corner.
        array = (GL.GLfloat * 32)(
             1,  1, #texture coords
             vertsPix[0,0], vertsPix[0,1],    0.,  #vertex
             0,  1,
             vertsPix[1,0], vertsPix[1,1],    0.,
             0, 0,
             vertsPix[2,0], vertsPix[2,1],    0.,
             1, 0,
             vertsPix[3,0], vertsPix[3,1],    0.,
             )

        #2D texture array, 3D vertex array
        GL.glInterleavedArrays(GL.GL_T2F_V3F, 0, array)
        GL.glDrawArrays(GL.GL_QUADS, 0, 4)
        GL.glPopClientAttrib(GL.GL_CLIENT_ALL_ATTRIB_BITS)
        # NOTE(review): glPopAttrib without a matching glPushAttrib in this
        # method — confirm a push happens elsewhere or this underflows the
        # attribute stack.
        GL.glPopAttrib(GL.GL_ENABLE_BIT)
        GL.glPopMatrix()
        #unbind the textures
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        GL.glEnable(GL.GL_TEXTURE_2D)#implicitly disables 1D

    def seek(self, t):
        """Go to a specific point in time for both the audio and video streams
        """
        #video is easy: set both times to t and update the frame texture
        self._nextFrameT = t
        self._videoClock.reset(t)
        self._audioSeek(t)

    def _audioSeek(self, t):
        #for sound we need to extract the array again and just begin at new loc
        if self._audioStream is None:
            return #do nothing
        self._audioStream.stop()
        sndArray = self._mov.audio.to_soundarray()
        startIndex = int(t*self._mov.audio.fps)
        self._audioStream = sound.Sound(sndArray[startIndex:,:], sampleRate = self._mov.audio.fps)
        self._audioStream.play()

    def _getAudioStreamTime(self):
        # NOTE(review): self._audio_stream_clock is never assigned in this
        # class — calling this method would raise AttributeError. Confirm
        # whether it is a leftover from MovieStim2.
        return self._audio_stream_clock.getTime()

    def _unload(self):
        """Drop the movie, cached frame, audio stream and GL texture."""
        try:
            self.clearTextures()#remove textures from graphics card to prevent crash
        except Exception:
            # best-effort: texture clean-up may fail if GL context is gone
            pass
        self._mov = None
        self._numpyFrame = None
        self._audioStream = None
        self.status = FINISHED

    def _onEos(self):
        """Handle end-of-stream: loop back to 0, or finish and stop."""
        if self.loop:
            self.seek(0.0)
        else:
            self.status = FINISHED
            self.stop()

        if self.autoLog:
            self.win.logOnFlip("Set %s finished" %(self.name),
                level=logging.EXP,obj=self)

    def __del__(self):
        self._unload()

    def setAutoDraw(self, val, log=None):
        """Add or remove a stimulus from the list of stimuli that will be
        automatically drawn on each flip

        :parameters:
            - val: True/False
                True to add the stimulus to the draw list, False to remove it
        """
        if val:
            self.play(log=False)  # set to play in case stopped
        else:
            self.pause(log=False)
        #add to drawing list and update status
        setAttribute(self, 'autoDraw', val, log)
Example #5 (votes: 0)
File: movie2.py — Project: RalRal/psychopy
class MovieStim2(BaseVisualStim, ContainerMixin):
    """A stimulus class for playing movies (mpeg, avi, etc...) in PsychoPy
    that does not require avbin. Instead it requires the cv2 python package
    for OpenCV. The VLC media player also needs to be installed on the
    psychopy computer.

    **Example**::

        See Movie2Stim.py for demo.
    """

    def __init__(
        self,
        win,
        filename="",
        units="pix",
        size=None,
        pos=(0.0, 0.0),
        ori=0.0,
        flipVert=False,
        flipHoriz=False,
        color=(1.0, 1.0, 1.0),
        colorSpace="rgb",
        opacity=1.0,
        volume=1.0,
        name="",
        loop=False,
        autoLog=True,
        depth=0.0,
        noAudio=False,
        vframe_callback=None,
        fps=None,
        interpolate=True,
    ):
        """Create a movie stimulus bound to *win* and load *filename*.

        :Parameters:

            filename :
                a string giving the relative or absolute path to the movie.
            flipVert : True or *False*
                If True then the movie will be top-bottom flipped
            flipHoriz : True or *False*
                If True then the movie will be right-left flipped
            volume :
                The nominal level is 100, and 0 is silence.
            loop : bool, optional
                Whether to start the movie over from the beginning if draw is
                called and the movie is done.

        """
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove("self")
        super(MovieStim2, self).__init__(win, units=units, name=name, autoLog=False)
        # check for pyglet
        if win.winType != "pyglet":
            logging.error("Movie stimuli can only be used with a pyglet window")
            core.quit()
        # Screen refresh rate: stored rate, else measured, else 60 Hz.
        self._retracerate = win._monitorFrameRate
        if self._retracerate is None:
            self._retracerate = win.getActualFrameRate()
        if self._retracerate is None:
            logging.warning("FrameRate could not be supplied by psychopy; defaulting to 60.0")
            self._retracerate = 60.0
        self.filename = filename
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.depth = depth
        self.opacity = float(opacity)
        self.volume = volume
        # Empirically chosen offset (sec) between audio and video clocks.
        self._av_stream_time_offset = 0.145
        self._no_audio = noAudio
        self._vframe_callback = vframe_callback
        self.interpolate = interpolate

        self.useTexSubImage2D = True

        self._texID = None
        self._video_stream = cv2.VideoCapture()

        self._reset()
        self.loadMovie(self.filename)
        self.setVolume(volume)
        self.nDroppedFrames = 0

        self.aspectRatio = self._video_width / float(self._video_height)
        # BUG FIX: 'long' exists only on Python 2 and raised NameError on
        # Python 3; build the numeric-type tuple so both versions work.
        try:
            numeric_types = (int, float, long)  # noqa: F821 (Python 2)
        except NameError:
            numeric_types = (int, float)
        # size
        if size is None:
            self.size = numpy.array([self._video_width, self._video_height], float)
        elif isinstance(size, numeric_types):
            # treat size as desired width, and calc a height
            # that maintains the aspect ratio of the video.
            self.size = numpy.array([size, size / self.aspectRatio], float)
        else:
            self.size = val2array(size)
        self.ori = ori
        self._updateVertices()
        # set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created %s = %s" % (self.name, str(self)))

    def _reset(self):
        """Restore all per-movie state to its unloaded defaults.

        Frees the OpenGL texture (if one was allocated) and clears the
        video- and audio-stream bookkeeping so a new movie can be loaded.
        """
        self.duration = None
        self.status = NOT_STARTED
        self._numpy_frame = None
        # Release the GL texture so a reloaded movie starts from scratch.
        if self._texID is not None:
            GL.glDeleteTextures(1, self._texID)
            self._texID = None
        # Video-stream bookkeeping: all unknown until loadMovie() runs.
        for attr_name in ('_total_frame_count', '_video_width',
                          '_video_height', '_video_frame_rate',
                          '_inter_frame_interval', '_prev_frame_sec',
                          '_next_frame_sec', '_next_frame_index',
                          '_prev_frame_index', '_video_perc_done'):
            setattr(self, attr_name, None)
        # TODO: Read depth from video source
        self._video_frame_depth = 3
        self._next_frame_displayed = False
        self._video_track_clock = Clock()

        # Audio-stream (VLC) bookkeeping.
        self._audio_stream_clock = Clock()
        for attr_name in ('_vlc_instance', '_audio_stream',
                          '_audio_stream_player',
                          '_audio_stream_event_manager'):
            setattr(self, attr_name, None)
        self._audio_stream_started = False

    def setMovie(self, filename, log=True):
        """See `~MovieStim.loadMovie` (the functions are identical).
        This form is provided for syntactic consistency with other visual stimuli.

        :Parameters:

            filename: string
                The name of the movie file, including path if necessary.
            log: bool
                Whether to log the attribute change.
        """
        self.loadMovie(filename, log=log)

    def loadMovie(self, filename, log=True):
        """Load a movie from file

        :Parameters:

            filename: string
                The name of the file, including path if necessary

        :Raises: RuntimeError if the video stream cannot be opened.

        After the file is loaded MovieStim.duration is updated with the movie
        duration (in seconds).
        """
        self._unload()
        self._reset()
        if self._no_audio is False:
            self._createAudioStream()

        # Create Video Stream stuff
        self._video_stream.open(filename)
        # BUG FIX: the original raised RuntimeError inside this loop on the
        # very first iteration, defeating the intended 1-second wait for
        # cv2 to open the stream. Poll instead, then check once.
        vfstime = core.getTime()
        while not self._video_stream.isOpened() and core.getTime() - vfstime < 1.0:
            pass

        if not self._video_stream.isOpened():
            raise RuntimeError("Error when reading image file")

        self._total_frame_count = self._video_stream.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
        # cv2's .get() returns floats; cast dimensions to int so they can
        # be used as array shapes (numpy rejects float dimensions).
        self._video_width = int(self._video_stream.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
        self._video_height = int(self._video_stream.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
        self._format = self._video_stream.get(cv2.cv.CV_CAP_PROP_FORMAT)
        # TODO: Read depth from video source
        self._video_frame_depth = 3

        cv_fps = self._video_stream.get(cv2.cv.CV_CAP_PROP_FPS)

        self._video_frame_rate = cv_fps

        self._inter_frame_interval = 1.0 / self._video_frame_rate

        # Create a numpy array that can hold one video frame, as returned by cv2.
        self._numpy_frame = numpy.zeros(
            (self._video_height, self._video_width, self._video_frame_depth), dtype=numpy.uint8
        )
        self.duration = self._total_frame_count * self._inter_frame_interval
        self.status = NOT_STARTED

        self.filename = filename
        logAttrib(self, log, "movie", filename)

    def _createAudioStream(self):
        """
        Create the audio stream player for the video using pyvlc.

        Builds a VLC instance (video disabled), a media player for
        ``self.filename`` and attaches the time-changed / end-reached
        event callbacks.
        """
        if not os.access(self.filename, os.R_OK):
            raise RuntimeError("Error: %s file not readable" % self.filename)
        # '--novideo': VLC handles only the audio track; video frames are
        # decoded separately via cv2.
        self._vlc_instance = vlc.Instance("--novideo")
        try:
            self._audio_stream = self._vlc_instance.media_new(self.filename)
        except NameError:
            raise ImportError("NameError: %s vs LibVLC %s" % (vlc.__version__, vlc.libvlc_get_version()))
        self._audio_stream_player = self._vlc_instance.media_player_new()
        self._audio_stream_player.set_media(self._audio_stream)
        self._audio_stream_event_manager = self._audio_stream_player.event_manager()
        # Callbacks get a weakref so the event manager does not keep this
        # stimulus alive.
        self._audio_stream_event_manager.event_attach(
            vlc.EventType.MediaPlayerTimeChanged, _audioTimeCallback, weakref.ref(self), self._audio_stream_player
        )
        self._audio_stream_event_manager.event_attach(
            vlc.EventType.MediaPlayerEndReached, _audioEndCallback, weakref.ref(self)
        )

    def _releaseeAudioStream(self):
        """Stop and tear down the VLC audio player, event hooks and instance.

        NOTE(review): the method name contains a typo ('releasee'); kept
        as-is because callers elsewhere may reference it by this name.
        """
        if self._audio_stream_player:
            self._audio_stream_player.stop()

        if self._audio_stream_event_manager:
            self._audio_stream_event_manager.event_detach(vlc.EventType.MediaPlayerTimeChanged)
            self._audio_stream_event_manager.event_detach(vlc.EventType.MediaPlayerEndReached)

        if self._audio_stream:
            self._audio_stream.release()

        if self._vlc_instance:
            self._vlc_instance.vlm_release()
            self._vlc_instance.release()

        # Drop all handles so a subsequent load starts clean.
        self._audio_stream = None
        self._audio_stream_event_manager = None
        self._audio_stream_player = None
        self._vlc_instance = None

    def _flipCallback(self):
        # Registered via win.callOnFlip(); records that the pending video
        # frame has actually been displayed on screen.
        self._next_frame_displayed = True

    def play(self, log=True):
        """Continue a paused movie from current position.

        Returns the index of the next video frame when playback was
        (re)started; returns None if the movie was already playing.
        """
        cstat = self.status
        if cstat != PLAYING:
            self.status = PLAYING

            if self._next_frame_sec is None:
                # movie has no current position, need to reset the clock
                # to zero in order to have the timing logic work
                # otherwise the video stream would skip frames until the
                # time since creating the movie object has passed
                self._video_track_clock.reset()

            if cstat == PAUSED:
                # toggle audio pause
                if self._audio_stream_player:
                    self._audio_stream_player.pause()
                    self._audio_stream_clock.reset(-self._audio_stream_player.get_time() / 1000.0)
                if self._next_frame_sec:
                    # Re-anchor the video clock to the paused position.
                    self._video_track_clock.reset(-self._next_frame_sec)
            else:
                # Fresh start: decode the first frame and anchor the clock
                # to its timestamp.
                nt = self._getNextFrame()
                self._video_track_clock.reset(-nt)

            if log and self.autoLog:
                self.win.logOnFlip("Set %s playing" % (self.name), level=logging.EXP, obj=self)

            self._updateFrameTexture()
            # Mark the frame as displayed on the next window flip.
            self.win.callOnFlip(self._flipCallback)
            return self._next_frame_index

    def pause(self, log=True):
        """
        Pause the current point in the movie (sound will stop, current frame
        will not advance).  If play() is called again both will restart.

        Returns True if the movie was playing (and is now paused), else False.
        """
        if self.status != PLAYING:
            if log and self.autoLog:
                self.win.logOnFlip("Failed Set %s paused" % (self.name), level=logging.EXP, obj=self)
            return False
        self.status = PAUSED
        player = self._audio_stream_player
        if player and player.can_pause():
            player.pause()
        if log and self.autoLog:
            self.win.logOnFlip("Set %s paused" % (self.name), level=logging.EXP, obj=self)
        return True

    def stop(self, log=True):
        """
        Stop the current point in the movie (sound will stop, current frame
        will not advance). Once stopped the movie cannot be restarted - it must
        be loaded again. Use pause() if you may need to restart the movie.
        """
        if self.status == STOPPED:
            return
        self.status = STOPPED
        # Tear down streams and return all bookkeeping to its defaults.
        self._unload()
        self._reset()
        if log and self.autoLog:
            self.win.logOnFlip("Set %s stopped" % (self.name), level=logging.EXP, obj=self)

    def seek(self, timestamp, log=True):
        """ Seek to a particular timestamp in the movie.

        Only has an effect while the movie is PLAYING or PAUSED. A
        timestamp of 0.0 (or less) reloads the movie from scratch;
        playback resumes in both cases.
        """
        if self.status in [PLAYING, PAUSED]:
            if timestamp > 0.0:
                if self.status == PLAYING:
                    self.pause()

                if self._audio_stream_player and self._audio_stream_player.is_seekable():
                    # VLC takes milliseconds; clocks run in seconds.
                    self._audio_stream_player.set_time(int(timestamp * 1000.0))
                    self._audio_stream_clock.reset(-timestamp)

                self._video_stream.set(cv2.cv.CV_CAP_PROP_POS_MSEC, timestamp * 1000.0)
                self._video_track_clock.reset(-timestamp)
                # Re-read the actual position cv2 landed on (may differ
                # from the requested timestamp).
                self._next_frame_index = self._video_stream.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)
                self._next_frame_sec = self._video_stream.get(cv2.cv.CV_CAP_PROP_POS_MSEC) / 1000.0
            else:
                # Seeking to the start: full reload gives a clean state.
                self.stop()
                self.loadMovie(self.filename)
            if log:
                logAttrib(self, log, "seek", timestamp)

            self.play()

    def setFlipHoriz(self, newVal=True, log=True):
        """If set to True then the movie will be flipped horizontally (left-to-right).
        Note that this is relative to the original, not relative to the current state.
        """
        # Absolute state (not a toggle).
        self.flipHoriz = newVal
        logAttrib(self, log, "flipHoriz")

    def setFlipVert(self, newVal=True, log=True):
        """If set to True then the movie will be flipped vertically (top-to-bottom).
        Note that this is relative to the original, not relative to the current state.

        :Parameters:
            newVal : bool
                Absolute flip state to store (not a toggle).
            log : bool
                Whether to log the attribute change.
        """
        # Fix: store newVal directly, mirroring setFlipHoriz. The previous
        # code stored `not newVal`, which inverted the documented meaning
        # (setFlipVert(True) left the stored state False).
        self.flipVert = newVal
        logAttrib(self, log, "flipVert")

    def setVolume(self, v):
        """
        Set the audio track volume. 0 = mute, 100 = 0 dB. float values
        between 0.0 and 1.0 are also accepted, and scaled to an int between 0
        and 100.

        :Parameters:
            v : int or float
                Floats in [0.0, 1.0] are scaled to 0-100; any other number is
                truncated to int and used directly.

        Has no effect when there is no audio stream player.
        """
        if self._audio_stream_player:
            if isinstance(v, float) and 0.0 <= v <= 1.0:
                # Fractional volume: scale to VLC's 0-100 integer range.
                v = int(v * 100)
            else:
                v = int(v)
            self.volume = v
            # Removed a redundant re-check of self._audio_stream_player here;
            # it is guaranteed truthy inside this branch.
            self._audio_stream_player.audio_set_volume(v)

    def getVolume(self):
        """
        Returns the current movie audio volume. 0 is no audio, 100 is max audio
        volume.
        """
        player = self._audio_stream_player
        if player:
            # Refresh the cached value from the live player.
            self.volume = player.audio_get_volume()
        return self.volume

    def getFPS(self):
        """Return the movie playback speed in frames per second."""
        return self._video_frame_rate

    def getTimeToNextFrameDraw(self):
        """
        Get the number of sec.msec remaining until the next movie video frame
        should be drawn.

        Returns 0.0 (after logging a warning) when the timing state is
        incomplete, e.g. before the first frame has been read.
        """
        try:
            # Target one retrace early so the frame is on screen at its
            # nominal presentation time.
            return (self._next_frame_sec - 1.0 / self._retracerate) - self._video_track_clock.getTime()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            logging.warning("MovieStim2.getTimeToNextFrameDraw failed.")
            return 0.0

    def shouldDrawVideoFrame(self):
        """
        True if the next movie frame should be drawn, False if it is not yet
        time. See getTimeToNextFrameDraw().
        """
        remaining = self.getTimeToNextFrameDraw()
        return remaining <= 0.0

    def getCurrentFrameNumber(self):
        """
        Get the current movie frame number. The first frame number in a file is
        1.
        """
        frame_num = self._next_frame_index
        return frame_num

    def getCurrentFrameTime(self):
        """
        Get the time that the movie file specified the current video frame as
        having.
        """
        frame_sec = self._next_frame_sec
        return frame_sec

    def getPercentageComplete(self):
        """
        Provides a value between 0.0 and 100.0, indicating the amount of the
        movie that has been already played.
        """
        done = self._video_perc_done
        return done

    def isCurrentFrameVisible(self):
        """
        The current video frame goes through two stages; the first being when
        the movie frame is being loaded, but is not visible on the display.
        The second is when the frame has actually been presented on the display.
        Returns False if the frame is in the first stage, True when in stage 2.
        """
        displayed = self._next_frame_displayed
        return displayed

    def _getNextFrame(self):
        # Advance the cv2 stream to the next frame. grab() only moves the
        # stream position; the pixel data is decoded later by
        # _updateFrameTexture() via retrieve().
        # get next frame info ( do not decode frame yet)
        while self.status == PLAYING:
            if self._video_stream.grab():
                self._prev_frame_index = self._next_frame_index
                self._prev_frame_sec = self._next_frame_sec
                # Read back the position reached by grab().
                self._next_frame_index = self._video_stream.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)
                self._next_frame_sec = self._video_stream.get(cv2.cv.CV_CAP_PROP_POS_MSEC) / 1000.0
                self._video_perc_done = self._video_stream.get(cv2.cv.CV_CAP_PROP_POS_AVI_RATIO)
                self._next_frame_displayed = False
                # Keep the frame unless it is already more than half an
                # inter-frame interval late; otherwise drop it and loop on.
                if self.getTimeToNextFrameDraw() > -self._inter_frame_interval / 2.0:
                    return self._next_frame_sec
                else:
                    self.nDroppedFrames += 1
                    if self.nDroppedFrames < reportNDroppedFrames:
                        logging.warning("MovieStim2 dropping video frame index: %d" % (self._next_frame_index))
                    elif self.nDroppedFrames == reportNDroppedFrames:
                        logging.warning(
                            "Multiple Movie frames have " "occurred - I'll stop bothering you " "about them!"
                        )
            else:
                # grab() failed: end of stream (or a read error).
                self._onEos()
                break

    def _updateFrameTexture(self):
        # Decode the frame last grab()ed from the cv2 stream into
        # self._numpy_frame and upload it to the OpenGL texture self._texID.
        # Raises RuntimeError if the frame cannot be retrieved.
        # decode frame into np array and move to opengl tex
        ret, self._numpy_frame = self._video_stream.retrieve()
        if ret:
            useSubTex = self.useTexSubImage2D
            if self._texID is None:
                # First upload: allocate a texture name; a full glTexImage2D
                # is required before glTexSubImage2D can be used.
                self._texID = GL.GLuint()
                GL.glGenTextures(1, ctypes.byref(self._texID))
                useSubTex = False

            # bind the texture in openGL
            GL.glEnable(GL.GL_TEXTURE_2D)
            GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)  # bind that name to the target
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT
            )  # makes the texture map wrap (this is actually default anyway)
            GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)  # data from PIL/numpy is packed, but default for GL is 4 bytes
            # important if using bits++ because GL_LINEAR
            # sometimes extrapolates to pixel vals outside range
            if self.interpolate:
                GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
                if self.useShaders:  # GL_GENERATE_MIPMAP was only available from OpenGL 1.4
                    GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
                    GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_GENERATE_MIPMAP, GL.GL_TRUE)
                    # Frames come from cv2 in BGR byte order, hence GL_BGR.
                    if useSubTex is False:
                        GL.glTexImage2D(
                            GL.GL_TEXTURE_2D,
                            0,
                            pyglet.gl.GL_RGB8,
                            self._numpy_frame.shape[1],
                            self._numpy_frame.shape[0],
                            0,
                            GL.GL_BGR,
                            GL.GL_UNSIGNED_BYTE,
                            self._numpy_frame.ctypes,
                        )
                    else:
                        GL.glTexSubImage2D(
                            GL.GL_TEXTURE_2D,
                            0,
                            0,
                            0,
                            self._numpy_frame.shape[1],
                            self._numpy_frame.shape[0],
                            GL.GL_BGR,
                            GL.GL_UNSIGNED_BYTE,
                            self._numpy_frame.ctypes,
                        )

                else:  # use glu
                    GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_NEAREST)
                    GL.gluBuild2DMipmaps(
                        GL.GL_TEXTURE_2D,
                        GL.GL_RGB8,
                        self._numpy_frame.shape[1],
                        self._numpy_frame.shape[0],
                        GL.GL_BGR,
                        GL.GL_UNSIGNED_BYTE,
                        self._numpy_frame.ctypes,
                    )
            else:
                # No interpolation: nearest-neighbour filtering, no mipmaps.
                GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
                GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
                if useSubTex is False:
                    GL.glTexImage2D(
                        GL.GL_TEXTURE_2D,
                        0,
                        GL.GL_RGB8,
                        self._numpy_frame.shape[1],
                        self._numpy_frame.shape[0],
                        0,
                        GL.GL_BGR,
                        GL.GL_UNSIGNED_BYTE,
                        self._numpy_frame.ctypes,
                    )
                else:
                    GL.glTexSubImage2D(
                        GL.GL_TEXTURE_2D,
                        0,
                        0,
                        0,
                        self._numpy_frame.shape[1],
                        self._numpy_frame.shape[0],
                        GL.GL_BGR,
                        GL.GL_UNSIGNED_BYTE,
                        self._numpy_frame.ctypes,
                    )
            GL.glTexEnvi(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE, GL.GL_MODULATE)  # ?? do we need this - think not!
        else:
            raise RuntimeError("Could not load video frame data.")

    def _getVideoAudioTimeDiff(self):
        if self._audio_stream_started is False:
            return 0
        return self.getCurrentFrameTime() - self._getAudioStreamTime()

    def draw(self, win=None):
        """
        Draw the current frame to a particular visual.Window (or to the
        default win for this object if not specified). The current position in
        the movie will be determined automatically.

        This method should be called on every frame that the movie is meant to
        appear"""
        # Auto-start on first draw; when looping, restart after FINISHED.
        if self.status == NOT_STARTED or (self.status == FINISHED and self.loop):
            self.play()
        elif self.status == FINISHED and not self.loop:
            return
        return_next_frame_index = False
        if win is None:
            win = self.win
        self._selectWindow(win)

        # Delay audio startup until the video clock passes the configured
        # A/V offset (_av_stream_time_offset, set elsewhere on the instance).
        if (
            self._no_audio is False
            and not self._audio_stream_started
            and self._video_track_clock.getTime() >= self._av_stream_time_offset
        ):
            self._startAudio()

        # Once the previous frame has actually been flipped to screen:
        # if video is ahead of audio by more than one frame interval,
        # resync the video clock; otherwise advance to the next frame.
        if self._next_frame_displayed:
            if self._getVideoAudioTimeDiff() > self._inter_frame_interval:
                self._video_track_clock.reset(-self._next_frame_sec)
            else:
                self._getNextFrame()

        # Upload new pixel data only when the frame is due and not yet shown.
        if self.shouldDrawVideoFrame() and not self._next_frame_displayed:
            self._updateFrameTexture()
            return_next_frame_index = True

        # make sure that textures are on and GL_TEXTURE0 is active
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glColor4f(1, 1, 1, self.opacity)  # sets opacity (1,1,1 = RGB placeholder)
        GL.glPushMatrix()
        self.win.setScale("pix")
        # move to centre of stimulus and rotate
        vertsPix = self.verticesPix

        # Interleaved texcoord(2f) + vertex(3f) data for a textured quad.
        array = (GL.GLfloat * 32)(
            1,
            1,  # texture coords
            vertsPix[0, 0],
            vertsPix[0, 1],
            0.0,  # vertex
            0,
            1,
            vertsPix[1, 0],
            vertsPix[1, 1],
            0.0,
            0,
            0,
            vertsPix[2, 0],
            vertsPix[2, 1],
            0.0,
            1,
            0,
            vertsPix[3, 0],
            vertsPix[3, 1],
            0.0,
        )
        GL.glPushAttrib(GL.GL_ENABLE_BIT)
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        GL.glPushClientAttrib(GL.GL_CLIENT_VERTEX_ARRAY_BIT)
        # 2D texture array, 3D vertex array
        GL.glInterleavedArrays(GL.GL_T2F_V3F, 0, array)
        GL.glDrawArrays(GL.GL_QUADS, 0, 4)
        GL.glPopClientAttrib()
        GL.glPopAttrib()
        GL.glPopMatrix()
        # GL.glActiveTexture(0)
        # GL.glDisable(GL.GL_TEXTURE_2D)
        # Schedule _flipCallback so the frame is marked as displayed at the
        # actual flip time.
        if return_next_frame_index:
            self.win.callOnFlip(self._flipCallback)
            return self._next_frame_index

    def setContrast(self):
        """Not yet implemented for MovieStim.

        Present only for API compatibility with other visual stimuli;
        calling it has no effect.
        """
        return None

    def _startAudio(self):
        """
        Start the audio playback stream.
        """
        if self._audio_stream_player:
            self._audio_stream_started = True

            self._audio_stream_player.play()
            self._audio_stream_clock.reset(-self._audio_stream_player.get_time() / 1000.0)

    def _getAudioStreamTime(self):
        return self._audio_stream_clock.getTime()

    def _unload(self):
        # Release the cv2 capture and audio resources, then mark FINISHED.
        # NOTE(review): _video_stream is assumed non-None here (the guard
        # below was commented out); __del__ calls this, so deleting an
        # instance whose loadMovie never completed may raise -- confirm.
        # if self._video_stream:
        self._video_stream.release()
        # self._video_stream = None
        self._numpy_frame = None

        # NOTE: the "releasee" spelling is the method's actual name; it is
        # kept here because that is how it is defined.
        self._releaseeAudioStream()

        self.status = FINISHED

    def _onEos(self):
        # End of stream: wrap around when looping, otherwise finish and
        # tear down via stop().
        if not self.loop:
            self.status = FINISHED
            self.stop()
        else:
            self.seek(0.0)

        if self.autoLog:
            self.win.logOnFlip("Set %s finished" % (self.name), level=logging.EXP, obj=self)

    def __del__(self):
        # Best-effort release of cv2/VLC resources at garbage collection.
        # NOTE(review): _unload assumes a loaded video stream; deleting a
        # partially-constructed instance may raise here -- confirm intended.
        self._unload()

    def setAutoDraw(self, val, log=None):
        """Add or remove a stimulus from the list of stimuli that will be
        automatically drawn on each flip

        :parameters:
            - val: True/False
                True to add the stimulus to the draw list, False to remove it
        """
        # Autodraw implies playback; removal pauses rather than stops so the
        # movie can resume from the same point.
        if not val:
            self.pause(log=False)
        else:
            self.play(log=False)  # set to play in case stopped
        # add to drawing list and update status
        setAttribute(self, "autoDraw", val, log)
예제 #6
0
class MovieStim2(BaseVisualStim, ContainerMixin):
    """A stimulus class for playing movies (mpeg, avi, etc...) in PsychoPy
    that does not require avbin. Instead it requires the cv2 python package
    for OpenCV. The VLC media player also needs to be installed on the
    psychopy computer.

    **Example**::

        See Movie2Stim.py for demo.
    """
    def __init__(self, win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0,0.0),
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0,1.0,1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,
                 noAudio=False,
                 vframe_callback=None,
                 fps=None
        ):
        """
        :Parameters:

            filename :
                a string giving the relative or absolute path to the movie.
            flipVert : True or *False*
                If True then the movie will be top-bottom flipped
            flipHoriz : True or *False*
                If True then the movie will be right-left flipped
            volume :
                The nominal level is 100, and 0 is silence.
            loop : bool, optional
                Whether to start the movie over from the beginning if draw is
                called and the movie is done.
            noAudio : bool
                If True, no VLC audio stream is created.
            vframe_callback : callable, optional
                Called with (frame_index, frame_array) for each decoded frame;
                may return a modified frame array.
            fps : float, optional
                Requested playback rate; only honoured when audio is disabled.

        """
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        super(MovieStim2, self).__init__(win, units=units, name=name,
                                         autoLog=False)
        #check for pyglet
        if win.winType != 'pyglet':
            logging.error('Movie stimuli can only be used with a pyglet window')
            core.quit()
        # Retrace rate drives frame-draw timing; fall back to measuring it,
        # then to 60 Hz if that also fails.
        self._retracerate = win._monitorFrameRate
        if self._retracerate is None:
            self._retracerate = win.getActualFrameRate()
        if self._retracerate is None:
            logging.warning("FrameRate could not be supplied by psychopy; defaulting to 60.0")
            self._retracerate = 60.0
        self.filename = filename
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.depth = depth
        self.opacity = float(opacity)
        self.volume = volume
        # Fixed delay (sec) before starting audio relative to the video clock.
        self._av_stream_time_offset = 0.145
        self._no_audio = noAudio
        self._requested_fps = fps
        self._vframe_callback = vframe_callback
        self._reset()
        self.loadMovie(self.filename)
        self.setVolume(volume)
        self.nDroppedFrames = 0

        self.aspectRatio = self._video_width/float(self._video_height)
        #size
        if size is None:
            self.size = numpy.array([self._video_width, self._video_height],
                                   float)
        elif isinstance(size, (int, float, long)):
            # treat size as desired width, and calc a height
            # that maintains the aspect ratio of the video.
            # NOTE: `long` is Python 2 only.
            self.size = numpy.array([size, size/self.aspectRatio], float)
        else:
            self.size = val2array(size)
        self.ori = ori
        self._updateVertices()
        #set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created %s = %s" %(self.name, str(self)))

    def _reset(self):
        """Return all playback state to its pre-load defaults."""
        # General state
        self.duration = None
        self.status = NOT_STARTED
        # Frame buffer / pyglet texture interfaces
        self._numpy_frame = None
        self._frame_texture = None
        self._frame_data_interface = None
        # cv2 video stream properties
        self._video_stream = None
        self._total_frame_count = None
        self._video_width = None
        self._video_height = None
        # TODO: Read depth from video source
        self._video_frame_depth = 3
        self._video_frame_rate = None
        self._inter_frame_interval = None
        # Frame timing bookkeeping
        self._prev_frame_sec = None
        self._next_frame_sec = None
        self._next_frame_index = None
        self._prev_frame_index = None
        self._video_perc_done = None
        self._last_video_flip_time = None
        self._next_frame_displayed = False
        self._video_track_clock = Clock()
        # VLC audio stream state
        self._audio_stream_clock = Clock()
        self._vlc_instance = None
        self._audio_stream = None
        self._audio_stream_player = None
        self._audio_stream_started = False
        self._audio_stream_event_manager = None

    def setMovie(self, filename, log=True):
        """See `~MovieStim.loadMovie` (the functions are identical).
        This form is provided for syntactic consistency with other visual stimuli.
        """
        return self.loadMovie(filename, log=log)

    def loadMovie(self, filename, log=True):
        """Load a movie from file

        :Parameters:

            filename: string
                The name of the file, including path if necessary

        :Raises:
            RuntimeError if the file cannot be opened by cv2.

        After the file is loaded MovieStim.duration is updated with the movie
        duration (in seconds).
        """
        # Drop any previously loaded movie before loading the new one.
        self._unload()
        self._reset()
        if self._no_audio is False:
            self._createAudioStream()

        # Create Video Stream stuff
        self._video_stream = cv2.VideoCapture()
        self._video_stream.open(filename)
        if not self._video_stream.isOpened():
          raise RuntimeError( "Error when reading image file")

        self._total_frame_count = self._video_stream.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
        self._video_width = self._video_stream.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
        self._video_height = self._video_stream.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
        self._format = self._video_stream.get(cv2.cv.CV_CAP_PROP_FORMAT)
        # TODO: Read depth from video source
        self._video_frame_depth = 3

        # A custom fps is only honoured without audio: audio playback cannot
        # be resampled, so a mismatching request disables the audio stream.
        cv_fps = self._video_stream.get(cv2.cv.CV_CAP_PROP_FPS)
        if self._requested_fps:
            if self._no_audio is False and cv_fps != self._requested_fps:
                self._no_audio = True
                logging.error("MovieStim2 video fps != requested fps. Disabling Audio Stream.")
                logging.flush()

        if self._no_audio and self._requested_fps:
            self._video_frame_rate = self._requested_fps
        else:
            self._video_frame_rate = self._video_stream.get(cv2.cv.CV_CAP_PROP_FPS)

        self._inter_frame_interval = 1.0/self._video_frame_rate

        # Create a numpy array that can hold one video frame, as returned by cv2.
        self._numpy_frame = numpy.zeros((self._video_height,
                                          self._video_width,
                                          self._video_frame_depth),
                                         dtype=numpy.uint8)

        # Uses a preallocated numpy array as the pyglet ImageData data
        self._frame_data_interface = ArrayInterfaceImage(self._numpy_frame,
                                                         allow_copy=False,
                                                         rectangle=True,
                                                         force_rectangle=True)
        #frame texture; transformed so it looks right in psychopy
        self._frame_texture = self._frame_data_interface.texture.get_transform(flip_x=not self.flipHoriz,
                                                    flip_y=not self.flipVert)

        self.duration = self._total_frame_count * self._inter_frame_interval
        self.status = NOT_STARTED

        self.filename = filename
        logAttrib(self, log, 'movie', filename)

    def _createAudioStream(self):
        """
        Create the audio stream player for the video using pyvlc.

        :Raises:
            RuntimeError if self.filename is not readable.
            ImportError if vlc media creation fails (version mismatch).
        """
        if not os.access(self.filename, os.R_OK):
            raise RuntimeError('Error: %s file not readable' % self.filename)
        # '--novideo': VLC is used for the audio track only; video frames
        # are decoded by cv2.
        self._vlc_instance = vlc.Instance('--novideo')
        try:
            self._audio_stream = self._vlc_instance.media_new(self.filename)
        except NameError:
            raise ImportError('NameError: %s vs LibVLC %s' % (vlc.__version__,
                                                       vlc.libvlc_get_version()))
        self._audio_stream_player = self._vlc_instance.media_player_new()
        self._audio_stream_player.set_media(self._audio_stream)
        # Track audio time and end-of-stream via VLC event callbacks.
        self._audio_stream_event_manager = self._audio_stream_player.event_manager()
        self._audio_stream_event_manager.event_attach(vlc.EventType.MediaPlayerTimeChanged, self._audio_time_callback, self._audio_stream_player)
        self._audio_stream_event_manager.event_attach(vlc.EventType.MediaPlayerEndReached, self._audio_end_callback)

    def _releaseeAudioStream(self):
        # Stop playback, detach VLC event callbacks and release all VLC
        # objects, then clear the references.
        # NOTE(review): method name has a typo ("releasee") but is kept
        # because callers (e.g. _unload) use this spelling.
        if self._audio_stream_player:
            self._audio_stream_player.stop()

        if self._audio_stream_event_manager:
            self._audio_stream_event_manager.event_detach(vlc.EventType.MediaPlayerTimeChanged)
            self._audio_stream_event_manager.event_detach(vlc.EventType.MediaPlayerEndReached)

        if self._audio_stream:
            self._audio_stream.release()

        if self._vlc_instance:
            self._vlc_instance.vlm_release()
            self._vlc_instance.release()

        self._audio_stream = None
        self._audio_stream_event_manager = None
        self._audio_stream_player = None
        self._vlc_instance = None

    def _flipCallback(self):
        import inspect
        flip_time = inspect.currentframe().f_back.f_locals.get('now')
        if PRINT_FRAME_FLIP_TIMES:
            if self._last_video_flip_time is None:
                self._last_video_flip_time=flip_time
            print 'Frame %d\t%.4f\t%.4f'%(self.getCurrentFrameIndex(), flip_time,
                                          flip_time-self._last_video_flip_time)
        if flip_time is None:
            raise RuntimeError("Movie2._flipCallback: Can not access the currect flip time.")
        self._last_video_flip_time = flip_time
        self._next_frame_displayed = True

    def play(self, log=True):
        """Continue a paused movie from current position.

        Returns the index of the next frame to be displayed, or None if the
        movie was already playing.
        """
        cstat = self.status
        if cstat != PLAYING:
            self.status = PLAYING

            if self._next_frame_sec is None:
                # movie has no current position, need to reset the clock
                # to zero in order to have the timing logic work
                # otherwise the video stream would skip frames until the
                # time since creating the movie object has passed
                self._video_track_clock.reset()

            if cstat == PAUSED:
                # toggle audio pause
                if self._audio_stream_player:
                    self._audio_stream_player.pause()
                    self._audio_stream_clock.reset(-self._audio_stream_player.get_time()/1000.0)
                if self._next_frame_sec:
                    # Resume the video clock from the paused frame time.
                    self._video_track_clock.reset(-self._next_frame_sec)
            else:
                # Fresh start: advance to the first frame and align the clock
                # with its timestamp.
                nt = self._getNextFrame()
                self._video_track_clock.reset(-nt)

            if log and self.autoLog:
                    self.win.logOnFlip("Set %s playing" %(self.name),
                                       level=logging.EXP, obj=self)

            # Upload the frame now; _flipCallback marks it displayed on flip.
            self._updateFrameTexture()
            self.win.callOnFlip(self._flipCallback)
            return self._next_frame_index

    def pause(self, log=True):
        """
        Pause the current point in the movie (sound will stop, current frame
        will not advance).  If play() is called again both will restart.

        Returns True if the movie was playing and is now paused, False
        otherwise.
        """
        if self.status != PLAYING:
            # Only a playing movie can be paused.
            if log and self.autoLog:
                self.win.logOnFlip("Failed Set %s paused" %(self.name), level=logging.EXP, obj=self)
            return False
        self.status = PAUSED
        player = self._audio_stream_player
        if player and player.can_pause():
            player.pause()
        if log and self.autoLog:
            self.win.logOnFlip("Set %s paused" %(self.name), level=logging.EXP, obj=self)
        return True

    def stop(self, log=True):
        """
        Stop the current point in the movie (sound will stop, current frame
        will not advance). Once stopped the movie cannot be restarted - it must
        be loaded again. Use pause() if you may need to restart the movie.
        """
        if self.status == STOPPED:
            return
        self.status = STOPPED
        # Tear down stream resources and reset all playback state.
        self._unload()
        self._reset()
        if log and self.autoLog:
            self.win.logOnFlip("Set %s stopped" %(self.name),
                level=logging.EXP,obj=self)


    def seek(self, timestamp, log=True):
        """ Seek to a particular timestamp in the movie.

        :Parameters:
            timestamp : float
                Target position in seconds. Values <= 0.0 restart the movie
                by reloading it instead of seeking.
            log : bool
                Whether to log the seek via logAttrib.
        """
        # Seeking is only meaningful once playback has begun.
        if self.status in [PLAYING, PAUSED]:
            if timestamp > 0.0:
                if self.status == PLAYING:
                    self.pause()

                # Audio: VLC positions are integer milliseconds. Realign the
                # audio clock so _getAudioStreamTime() reports the new position.
                if self._audio_stream_player and self._audio_stream_player.is_seekable():
                    self._audio_stream_player.set_time(int(timestamp*1000.0))
                    self._audio_stream_clock.reset(-timestamp)

                # Video: reposition the cv2 stream, then read back the frame
                # index/time actually reached (may differ from the request).
                self._video_stream.set(cv2.cv.CV_CAP_PROP_POS_MSEC,
                                        timestamp*1000.0)
                self._video_track_clock.reset(-timestamp)
                self._next_frame_index = self._video_stream.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)
                self._next_frame_sec = self._video_stream.get(cv2.cv.CV_CAP_PROP_POS_MSEC)/1000.0
            else:
                # Non-positive timestamp: restart from the beginning by
                # fully reloading the movie file.
                self.stop()
                self.loadMovie(self.filename)
            if log:
                logAttrib(self, log, 'seek', timestamp)

            self.play()

    def setFlipHoriz(self, newVal=True, log=True):
        """If set to True then the movie will be flipped horizontally (left-to-right).
        Note that this is relative to the original, not relative to the current state.
        """
        # Absolute state (not a toggle).
        self.flipHoriz = newVal
        logAttrib(self, log, 'flipHoriz')

    def setFlipVert(self, newVal=True, log=True):
        """If set to True then the movie will be flipped vertically (top-to-bottom).
        Note that this is relative to the original, not relative to the current state.

        :Parameters:
            newVal : bool
                Absolute flip state to store (not a toggle).
            log : bool
                Whether to log the attribute change.
        """
        # Fix: store newVal directly, mirroring setFlipHoriz. The previous
        # code stored `not newVal`, which inverted the documented meaning
        # (setFlipVert(True) left the stored state False).
        self.flipVert = newVal
        logAttrib(self, log, 'flipVert')

    def setVolume(self, v):
        """
        Set the audio track volume. 0 = mute, 100 = 0 dB. float values
        between 0.0 and 1.0 are also accepted, and scaled to an int between 0
        and 100.

        :Parameters:
            v : int or float
                Floats in [0.0, 1.0] are scaled to 0-100; any other number is
                truncated to int and used directly.

        Has no effect when there is no audio stream player.
        """
        if self._audio_stream_player:
            if isinstance(v, float) and 0.0 <= v <= 1.0:
                # Fractional volume: scale to VLC's 0-100 integer range.
                v = int(v*100)
            else:
                v = int(v)
            self.volume = v
            self._audio_stream_player.audio_set_volume(v)

    def getVolume(self):
        """
        Returns the current movie audio volume. 0 is no audio, 100 is max audio
        volume.
        """
        player = self._audio_stream_player
        if player:
            # Refresh the cached value from the live player.
            self.volume = player.audio_get_volume()
        return self.volume

    def getFPS(self):
        """Return the movie playback speed in frames per second."""
        return self._video_frame_rate

    def setFPS(self, fps):
        """
        If the movie was created with noAudio = True kwarg, then the movie
        playback speed can be changed from the original frame rate. For example,
        if the movie being played has 30 fps and you would like to play it at 2x
        normal speed, setFPS(60) will do that.

        Raises ValueError when the movie has an audio stream (the audio
        cannot be resampled to another rate).
        """
        if not self._no_audio:
            raise ValueError("Error calling movie.setFPS(): MovieStim must be created with kwarg noAudio=True.")
        # Override the detected rate; frame timing uses the interval below.
        self._requested_fps = fps
        self._video_frame_rate = fps
        self._inter_frame_interval = 1.0/fps

    def getTimeToNextFrameDraw(self):
        """
        Get the number of sec.msec remaining until the next movie video frame
        should be drawn.

        Returns 0.0 (after logging a warning) when the timing state is
        incomplete, e.g. before the first frame has been read.
        """
        try:
            # Target one retrace early so the frame is on screen at its
            # nominal presentation time.
            return (self._next_frame_sec - 1.0/self._retracerate) - self._video_track_clock.getTime()
        except Exception:
            # Fix: this called logging.WARNING(...) -- the int level constant,
            # not a function -- so the handler itself raised TypeError. Also
            # narrowed the bare except so KeyboardInterrupt is not swallowed.
            logging.warning("MovieStim2.getTimeToNextFrameDraw failed.")
            return 0.0

    def shouldDrawVideoFrame(self):
        """
        True if the next movie frame should be drawn, False if it is not yet
        time. See getTimeToNextFrameDraw().
        """
        remaining = self.getTimeToNextFrameDraw()
        return remaining <= 0.0

    def getCurrentFrameNumber(self):
        """
        Get the current movie frame number. The first frame number in a file is
        1.
        """
        frame_num = self._next_frame_index
        return frame_num

    def getCurrentFrameTime(self):
        """
        Get the time that the movie file specified the current video frame as
        having.
        """
        frame_sec = self._next_frame_sec
        return frame_sec

    def getPercentageComplete(self):
        """
        Provides a value between 0.0 and 100.0, indicating the amount of the
        movie that has been already played.
        """
        done = self._video_perc_done
        return done

    def isCurrentFrameVisible(self):
        """
        The current video frame goes through two stages; the first being when
        the movie frame is being loaded, but is not visible on the display.
        The second is when the frame has actually been presented on the display.
        Returns False if the frame is in the first stage, True when in stage 2.
        """
        displayed = self._next_frame_displayed
        return displayed

    def _getNextFrame(self):
        # Advance the cv2 stream to the next frame. grab() only moves the
        # stream position; pixel data is decoded later by
        # _updateFrameTexture() via retrieve().
        # get next frame info ( do not decode frame yet)
        while self.status == PLAYING:
            if self._video_stream.grab():
                self._prev_frame_index = self._next_frame_index
                self._prev_frame_sec = self._next_frame_sec
                self._next_frame_index = self._video_stream.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)
                # With a user-requested fps (audio disabled), derive the frame
                # time from the index instead of the container timestamp.
                if self._requested_fps and self._no_audio:
                    self._next_frame_sec = self._next_frame_index/self._requested_fps#*self._video_stream.get(cv2.cv.CV_CAP_PROP_POS_MSEC)/1000.0
                else:
                    self._next_frame_sec = self._video_stream.get(cv2.cv.CV_CAP_PROP_POS_MSEC)/1000.0
                self._video_perc_done = self._video_stream.get(cv2.cv.CV_CAP_PROP_POS_AVI_RATIO)
                self._next_frame_displayed = False
                # Keep the frame unless it is already more than half an
                # inter-frame interval late; otherwise drop it and loop on.
                if self.getTimeToNextFrameDraw() > -self._inter_frame_interval/2.0:
                    return self._next_frame_sec
                else:
                    self.nDroppedFrames += 1
                    if self.nDroppedFrames < reportNDroppedFrames:
                        logging.warning("MovieStim2 dropping video frame index: %d"%(self._next_frame_index))
                    elif self.nDroppedFrames == reportNDroppedFrames:
                        logging.warning("Multiple Movie frames have "
                                        "occurred - I'll stop bothering you "
                                        "about them!")
            else:
                # grab() failed: end of stream (or a read error).
                self._onEos()
                break


    def _updateFrameTexture(self):
        # decode frame into np array and move to opengl tex
        # retrieve() decodes the frame most recently grab()'ed by
        # _getNextFrame(); raises RuntimeError when decoding fails.
        ret, f = self._video_stream.retrieve()
        if ret:
            # cv2 delivers BGR; convert to RGB for the texture upload
            frame_array = cv2.cvtColor(f, cv2.COLOR_BGR2RGB)
            if callable(self._vframe_callback):
                # give user code a chance to post-process the frame; if the
                # callback raises, fall back to the unmodified frame
                try:
                    frame_array = self._vframe_callback(self._next_frame_index, frame_array)
                except:
                    print "MovieStim2 Error: vframe_callback raised an exception. Using original frame data."
                    import traceback
                    traceback.print_exc()
            #self._numpy_frame[:] = f[...,::-1]
            numpy.copyto(self._numpy_frame, frame_array)
            # flag the texture data interface as changed so the new pixels
            # get re-uploaded to the GL texture
            self._frame_data_interface.dirty()
        else:
            raise RuntimeError("Could not load video frame data.")

    def _getVideoAudioTimeDiff(self):
        if self._audio_stream_started is False:
            return 0
        return self.getCurrentFrameTime()-self._getAudioStreamTime()

    def draw(self, win=None):
        """
        Draw the current frame to a particular visual.Window (or to the
        default win for this object if not specified). The current position in
        the movie will be determined automatically.

        This method should be called on every frame that the movie is meant to
        appear"""
        # auto-start on first draw, and restart when looping past the end
        if self.status==NOT_STARTED or (self.status==FINISHED and self.loop):
            self.play()
        elif self.status == FINISHED and not self.loop:
            return
        return_next_frame_index = False
        if win is None:
            win = self.win
        self._selectWindow(win)

        # start the audio stream once the video clock reaches the a/v offset
        if self._no_audio is False and not self._audio_stream_started and self._video_track_clock.getTime() >= self._av_stream_time_offset:
            self._startAudio()

        if self._next_frame_displayed:
            # current frame already shown: if video leads audio by more than
            # one frame interval, re-sync the video clock to this frame's
            # time; otherwise advance to the next frame
            if self._getVideoAudioTimeDiff() > self._inter_frame_interval:
                self._video_track_clock.reset(-self._next_frame_sec)
            else:
                self._getNextFrame()

        if self.shouldDrawVideoFrame() and not self._next_frame_displayed:
            self._updateFrameTexture()
            return_next_frame_index = True

        #make sure that textures are on and GL_TEXTURE0 is active
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glColor4f(1, 1, 1, self.opacity)  # sets opacity (1,1,1 = RGB placeholder)
        GL.glPushMatrix()
        self.win.setScale('pix')
        #move to centre of stimulus and rotate
        vertsPix = self.verticesPix
        t=self._frame_texture.tex_coords
        # interleaved array: 2 texture coords + 3 vertex coords per corner
        array = (GL.GLfloat * 32)(
             t[0],  t[1],
             vertsPix[0,0], vertsPix[0,1],    0.,  #vertex
             t[3],  t[4],
             vertsPix[1,0], vertsPix[1,1],    0.,
             t[6],  t[7],
             vertsPix[2,0], vertsPix[2,1],    0.,
             t[9],  t[10],
             vertsPix[3,0], vertsPix[3,1],    0.,
             )
        GL.glPushAttrib(GL.GL_ENABLE_BIT)
        GL.glEnable(self._frame_texture.target)
        GL.glBindTexture(self._frame_texture.target, self._frame_texture.id)
        GL.glPushClientAttrib(GL.GL_CLIENT_VERTEX_ARRAY_BIT)
        #2D texture array, 3D vertex array
        GL.glInterleavedArrays(GL.GL_T2F_V3F, 0, array)
        GL.glDrawArrays(GL.GL_QUADS, 0, 4)
        GL.glPopClientAttrib()
        GL.glPopAttrib()
        GL.glPopMatrix()
        if return_next_frame_index:
            # a fresh frame was uploaded: record its display time on the
            # next window flip and report its index to the caller
            self.win.callOnFlip(self._flipCallback)
            return self._next_frame_index

    def setContrast(self):
        """Not yet implemented for MovieStim"""

    def _startAudio(self):
        """
        Start the audio playback stream.
        """
        if self._audio_stream_player:
            self._audio_stream_started = True

            self._audio_stream_player.play()
            self._audio_stream_clock.reset(-self._audio_stream_player.get_time()/1000.0)

    def _getAudioStreamTime(self):
        return self._audio_stream_clock.getTime()

    def _audio_time_callback(self, event, player):
        """
        Called by VLC every few hundred msec providing the current audio track
        time. This info is used to pace the display of video frames read using
        cv2.
        """
        self._audio_stream_clock.reset(-event.u.new_time/1000.0)

    def _audio_end_callback(self, event):
        """
        Called by VLC when the audio track ends. Right now, when this is called
        the video is stopped.
        """
        self._onEos()

    def _unload(self):
        # Release the cv2 capture, drop the frame buffers / texture data
        # interface, tear down the audio stream, and mark playback finished.
        if self._video_stream:
            self._video_stream.release()
        self._video_stream = None
        self._frame_data_interface = None
        self._numpy_frame = None

        # NOTE(review): method name carries a double 'e'
        # (_releaseeAudioStream); presumably it matches the definition
        # elsewhere in this file -- confirm before renaming.
        self._releaseeAudioStream()

        self.status = FINISHED

    def _onEos(self):
        if self.loop:
            self.seek(0.0)
        else:
            self.status = FINISHED
            self.stop()

        if self.autoLog:
            self.win.logOnFlip("Set %s finished" %(self.name),
                level=logging.EXP,obj=self)

    def __del__(self):
        self._unload()

    def setAutoDraw(self, val, log=None):
        """Add or remove a stimulus from the list of stimuli that will be
        automatically drawn on each flip

        :parameters:
            - val: True/False
                True to add the stimulus to the draw list, False to remove it
        """
        if not val:
            self.pause(log=False)
        else:
            # set to play in case the movie was stopped
            self.play(log=False)
        # record the attribute change and update the draw list / status
        setAttribute(self, 'autoDraw', val, log)
예제 #7
0
    def __init__(self,
                 win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0, 0.0),
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0, 1.0, 1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,
                 noAudio=False,
                 vframe_callback=None,
                 fps=None,
                 interpolate=True):
        """
        :Parameters:

            filename :
                a string giving the relative or absolute path to the movie.
            flipVert : True or *False*
                If True then the movie will be top-bottom flipped
            flipHoriz : True or *False*
                If True then the movie will be right-left flipped
            volume :
                The nominal level is 100, and 0 is silence.
            loop : bool, optional
                Whether to start the movie over from the beginning if draw is
                called and the movie is done.

        """
        # NOTE(review): this def sits outside any class and is byte-identical
        # to MovieStim3.__init__ defined later in the file -- presumably a
        # paste/scrape duplicate; verify which copy is actually used.
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        super(MovieStim3, self).__init__(win,
                                         units=units,
                                         name=name,
                                         autoLog=False)

        # estimate the monitor refresh rate; fall back to a measured rate,
        # then to 60 Hz so frame pacing still works
        retraceRate = win._monitorFrameRate
        if retraceRate is None:
            retraceRate = win.getActualFrameRate()
        if retraceRate is None:
            logging.warning("FrameRate could not be supplied by psychopy; "
                            "defaulting to 60.0")
            retraceRate = 60.0
        self._retraceInterval = 1.0 / retraceRate
        self.filename = pathToString(filename)
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.depth = depth
        self.opacity = float(opacity)
        self.interpolate = interpolate
        self.noAudio = noAudio
        self._audioStream = None
        self.useTexSubImage2D = True

        if noAudio:  # to avoid dependency problems in silent movies
            self.sound = None
        else:
            from psychopy import sound
            self.sound = sound

        # set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created %s = %s" % (self.name, str(self)))

        self._videoClock = Clock()
        self.loadMovie(self.filename)
        self.setVolume(volume)
        self.nDroppedFrames = 0

        # size (defaults to the movie's native resolution)
        if size is None:
            self.size = numpy.array([self._mov.w, self._mov.h], float)
        else:
            self.size = val2array(size)
        self.ori = ori
        self._updateVertices()
예제 #8
0
class MovieStim3(BaseVisualStim, ContainerMixin, TextureMixin):
    """A stimulus class for playing movies (mpeg, avi, etc...) in PsychoPy.

    This implementation reads the video with moviepy (``VideoFileClip``),
    so avbin is not required, and unless ``noAudio`` is given it plays the
    sound track through the psychopy ``sound`` backend.

    **Example**::

        See Movie2Stim.py for demo.
    """
    def __init__(self,
                 win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0, 0.0),
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0, 1.0, 1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,
                 noAudio=False,
                 vframe_callback=None,
                 fps=None,
                 interpolate=True):
        """
        :Parameters:

            filename :
                a string giving the relative or absolute path to the movie.
            flipVert : True or *False*
                If True then the movie will be top-bottom flipped
            flipHoriz : True or *False*
                If True then the movie will be right-left flipped
            volume :
                The nominal level is 100, and 0 is silence.
            loop : bool, optional
                Whether to start the movie over from the beginning if draw is
                called and the movie is done.

        """
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        super(MovieStim3, self).__init__(win,
                                         units=units,
                                         name=name,
                                         autoLog=False)

        # estimate the monitor refresh rate; fall back to a measured rate,
        # then to 60 Hz so frame pacing still works
        retraceRate = win._monitorFrameRate
        if retraceRate is None:
            retraceRate = win.getActualFrameRate()
        if retraceRate is None:
            logging.warning("FrameRate could not be supplied by psychopy; "
                            "defaulting to 60.0")
            retraceRate = 60.0
        self._retraceInterval = 1.0 / retraceRate
        self.filename = pathToString(filename)
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.depth = depth
        self.opacity = float(opacity)
        self.interpolate = interpolate
        self.noAudio = noAudio
        self._audioStream = None
        self.useTexSubImage2D = True

        if noAudio:  # to avoid dependency problems in silent movies
            self.sound = None
        else:
            from psychopy import sound
            self.sound = sound

        # set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created %s = %s" % (self.name, str(self)))

        self._videoClock = Clock()
        self.loadMovie(self.filename)
        self.setVolume(volume)
        self.nDroppedFrames = 0

        # size (defaults to the movie's native resolution)
        if size is None:
            self.size = numpy.array([self._mov.w, self._mov.h], float)
        else:
            self.size = val2array(size)
        self.ori = ori
        self._updateVertices()

    def reset(self):
        # Clear per-movie playback state (frame buffer, next-frame time,
        # GL texture id) and mark the stim as not started.
        self._numpyFrame = None
        self._nextFrameT = None
        self._texID = None
        self.status = NOT_STARTED

    def setMovie(self, filename, log=True):
        """See `~MovieStim.loadMovie` (the functions are identical).

        This form is provided for syntactic consistency with other visual
        stimuli.
        """
        self.loadMovie(filename, log=log)

    def loadMovie(self, filename, log=True):
        """Load a movie from file

        :Parameters:

            filename: string
                The name of the file, including path if necessary

        After the file is loaded MovieStim.duration is updated with the movie
        duration (in seconds).
        """
        filename = pathToString(filename)
        self.reset()  # set status and timestamps etc

        # Create Video Stream stuff
        if os.path.isfile(filename):
            # audio flag: 1 - noAudio evaluates to 0 when audio is disabled
            self._mov = VideoFileClip(filename, audio=(1 - self.noAudio))
            if (not self.noAudio) and (self._mov.audio is not None):
                sound = self.sound
                try:
                    self._audioStream = sound.Sound(
                        self._mov.audio.to_soundarray(),
                        sampleRate=self._mov.audio.fps)
                except:
                    # JWE added this as a patch for a moviepy oddity where the
                    # duration is inflated in the saved file causes the
                    # audioclip to be the wrong length, so round down and it
                    # should work
                    jwe_tmp = self._mov.subclip(0, round(self._mov.duration))
                    self._audioStream = sound.Sound(
                        jwe_tmp.audio.to_soundarray(),
                        sampleRate=self._mov.audio.fps)
                    del (jwe_tmp)
            else:  # make sure we set to None (in case prev clip had audio)
                self._audioStream = None
        else:
            raise IOError("Movie file '%s' was not found" % filename)
        # mov has attributes:
        # size, duration, fps
        # mov.audio has attributes
        # duration, fps (aka sampleRate), to_soundarray()
        self._frameInterval = 1.0 / self._mov.fps
        self.duration = self._mov.duration
        self.filename = filename
        self._updateFrameTexture()
        logAttrib(self, log, 'movie', filename)

    def play(self, log=True):
        """Continue a paused movie from current position.
        """
        status = self.status
        if status != PLAYING:
            self.status = PLAYING  #moved this to get better audio behavior - JK
            #Added extra check to prevent audio doubling - JK
            if self._audioStream is not None and self._audioStream.status is not PLAYING:
                self._audioStream.play()
            if status == PAUSED:
                if self.getCurrentFrameTime(
                ) < 0:  #Check for valid timestamp, correct if needed -JK
                    self._audioSeek(0)
                else:
                    self._audioSeek(self.getCurrentFrameTime())
            # reset with the negated frame time so getTime() resumes from it
            self._videoClock.reset(-self.getCurrentFrameTime())
            if log and self.autoLog:
                self.win.logOnFlip("Set %s playing" % (self.name),
                                   level=logging.EXP,
                                   obj=self)
            self._updateFrameTexture()

    def pause(self, log=True):
        """
        Pause the current point in the movie (sound will stop, current frame
        will not advance).  If play() is called again both will restart.
        """
        if self.status == PLAYING:
            self.status = PAUSED
            if self._audioStream:
                # NOTE(review): this compares the pref to the *list*
                # ['sounddevice'] -- verify that is the stored pref format
                if prefs.hardware['audioLib'] == ['sounddevice']:
                    self._audioStream.pause(
                    )  #sounddevice has a "pause" function -JK
                else:
                    self._audioStream.stop()
            if log and self.autoLog:
                self.win.logOnFlip("Set %s paused" % (self.name),
                                   level=logging.EXP,
                                   obj=self)
            return True
        if log and self.autoLog:
            self.win.logOnFlip("Failed Set %s paused" % (self.name),
                               level=logging.EXP,
                               obj=self)
        return False

    def stop(self, log=True):
        """Stop the current point in the movie (sound will stop, current frame
        will not advance). Once stopped the movie cannot be restarted -
        it must be loaded again. Use pause() if you may need to restart
        the movie.
        """
        if self.status != STOPPED:
            self._unload()
            self.reset()
            self.status = STOPPED  # set status to STOPPED after _unload
            if log and self.autoLog:
                self.win.logOnFlip("Set %s stopped" % (self.name),
                                   level=logging.EXP,
                                   obj=self)

    def setVolume(self, volume):
        # not implemented for this backend
        pass  # to do

    def setFlipHoriz(self, newVal=True, log=True):
        """If set to True then the movie will be flipped horizontally
        (left-to-right). Note that this is relative to the original,
        not relative to the current state.
        """
        self.flipHoriz = newVal
        logAttrib(self, log, 'flipHoriz')
        self._needVertexUpdate = True

    def setFlipVert(self, newVal=True, log=True):
        """If set to True then the movie will be flipped vertically
        (top-to-bottom). Note that this is relative to the original,
        not relative to the current state.
        """
        self.flipVert = newVal
        logAttrib(self, log, 'flipVert')
        self._needVertexUpdate = True

    def getFPS(self):
        """
        Returns the movie frames per second playback speed.
        """
        return self._mov.fps

    def getCurrentFrameTime(self):
        """Get the time that the movie file specified the current
        video frame as having.
        """
        # _nextFrameT points one interval past the frame being shown
        return self._nextFrameT - self._frameInterval

    def _updateFrameTexture(self):
        # Fetch the frame due at _nextFrameT from moviepy and upload it to
        # the GL texture; advances _nextFrameT when playing.
        if self._nextFrameT is None or self._nextFrameT < 0:
            # movie has no current position (or invalid position -JK),
            # need to reset the clock to zero in order to have the
            # timing logic work otherwise the video stream would skip
            # frames until the time since creating the movie object has passed
            self._videoClock.reset()
            self._nextFrameT = 0.0

        # only advance if next frame (half of next retrace rate)
        if self._nextFrameT > self.duration:
            self._onEos()
        elif self._numpyFrame is not None:
            if self._nextFrameT > (self._videoClock.getTime() -
                                   self._retraceInterval / 2.0):
                return None
        try:
            self._numpyFrame = self._mov.get_frame(self._nextFrameT)
        except OSError:
            if self.autoLog:
                logging.warning(
                    "Frame {} not found, moving one frame and trying again".
                    format(self._nextFrameT),
                    obj=self)
            # skip the missing frame and retry recursively
            self._nextFrameT += self._frameInterval
            self._updateFrameTexture()
        useSubTex = self.useTexSubImage2D
        if self._texID is None:
            # first upload must allocate storage with glTexImage2D
            self._texID = GL.GLuint()
            GL.glGenTextures(1, ctypes.byref(self._texID))
            useSubTex = False

        # bind the texture in openGL
        GL.glEnable(GL.GL_TEXTURE_2D)
        # bind that name to the target
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        # makes the texture map wrap (this is actually default anyway)
        GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP)
        GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP)
        # data from PIL/numpy is packed, but default for GL is 4 bytes
        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
        # important if using bits++ because GL_LINEAR
        # sometimes extrapolates to pixel vals outside range
        if self.interpolate:
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER,
                               GL.GL_LINEAR)
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER,
                               GL.GL_LINEAR)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1],
                                self._numpyFrame.shape[0], 0, GL.GL_RGB,
                                GL.GL_UNSIGNED_BYTE, self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                                   self._numpyFrame.shape[1],
                                   self._numpyFrame.shape[0], GL.GL_RGB,
                                   GL.GL_UNSIGNED_BYTE,
                                   self._numpyFrame.ctypes)
        else:
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER,
                               GL.GL_NEAREST)
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER,
                               GL.GL_NEAREST)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1],
                                self._numpyFrame.shape[0], 0, GL.GL_BGR,
                                GL.GL_UNSIGNED_BYTE, self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                                   self._numpyFrame.shape[1],
                                   self._numpyFrame.shape[0], GL.GL_BGR,
                                   GL.GL_UNSIGNED_BYTE,
                                   self._numpyFrame.ctypes)
        GL.glTexEnvi(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE,
                     GL.GL_MODULATE)  # ?? do we need this - think not!

        if self.status == PLAYING:
            self._nextFrameT += self._frameInterval

    def draw(self, win=None):
        """Draw the current frame to a particular visual.Window (or to the
        default win for this object if not specified). The current
        position in the movie will be determined automatically.

        This method should be called on every frame that the movie is
        meant to appear.
        """

        # auto-start on first draw; restart when looping past the end
        if (self.status == NOT_STARTED
                or (self.status == FINISHED and self.loop)):
            self.play()
        elif self.status == FINISHED and not self.loop:
            return
        if win is None:
            win = self.win
        self._selectWindow(win)
        self._updateFrameTexture()  # will check if it's needed

        # scale the drawing frame and get to centre of field
        GL.glPushMatrix()  # push before drawing, pop after
        # push the data for client attributes
        GL.glPushClientAttrib(GL.GL_CLIENT_ALL_ATTRIB_BITS)

        self.win.setScale('pix')
        # move to centre of stimulus and rotate
        vertsPix = self.verticesPix

        # bind textures
        GL.glActiveTexture(GL.GL_TEXTURE1)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        GL.glEnable(GL.GL_TEXTURE_2D)

        # sets opacity (1,1,1 = RGB placeholder)
        GL.glColor4f(1, 1, 1, self.opacity)

        # interleaved array: 2 texture coords + 3 vertex coords per corner
        array = (GL.GLfloat * 32)(
            1,
            1,  # texture coords
            vertsPix[0, 0],
            vertsPix[0, 1],
            0.,  # vertex
            0,
            1,
            vertsPix[1, 0],
            vertsPix[1, 1],
            0.,
            0,
            0,
            vertsPix[2, 0],
            vertsPix[2, 1],
            0.,
            1,
            0,
            vertsPix[3, 0],
            vertsPix[3, 1],
            0.,
        )

        # 2D texture array, 3D vertex array
        GL.glInterleavedArrays(GL.GL_T2F_V3F, 0, array)
        GL.glDrawArrays(GL.GL_QUADS, 0, 4)
        GL.glPopClientAttrib()
        GL.glPopMatrix()
        # unbind the textures
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        GL.glEnable(GL.GL_TEXTURE_2D)  # implicitly disables 1D

    def seek(self, t):
        """Go to a specific point in time for both the audio and video streams
        """
        # video is easy: set both times to zero and update the frame texture
        self._nextFrameT = t
        # NOTE(review): play() resets this clock with the *negated* frame
        # time; the positive t here looks inconsistent -- verify against
        # the Clock.reset semantics.
        self._videoClock.reset(t)
        self._audioSeek(t)

    def _audioSeek(self, t):
        # Move the audio stream to time t (sec); rebuilds the sound from the
        # movie's audio array when the backend has no native seek.
        sound = self.sound
        if self._audioStream is None:
            return  # do nothing
        #check if sounddevice  is being used. If so we can use seek. If not we have to
        #reload the audio stream and begin at the new loc
        if prefs.hardware['audioLib'] == ['sounddevice']:
            self._audioStream.seek(t)
        else:
            self._audioStream.stop()
            sndArray = self._mov.audio.to_soundarray()
            startIndex = int(t * self._mov.audio.fps)
            self._audioStream = sound.Sound(sndArray[startIndex:, :],
                                            sampleRate=self._mov.audio.fps)
            if self.status != PAUSED:  #Allows for seeking while paused - JK
                self._audioStream.play()

    def _getAudioStreamTime(self):
        # NOTE(review): _audio_stream_clock is never assigned anywhere in
        # this class (it belongs to the MovieStim2/VLC implementation) --
        # presumably dead code copied over; verify before relying on it.
        return self._audio_stream_clock.getTime()

    def _unload(self):
        # remove textures from graphics card to prevent crash
        self.clearTextures()
        if self._mov is not None:
            self._mov.close()
        self._mov = None
        self._numpyFrame = None
        if self._audioStream is not None:
            self._audioStream.stop()
        self._audioStream = None
        self.status = FINISHED

    def _onEos(self):
        # End-of-stream: rewind when looping, otherwise finish and stop.
        if self.loop:
            self.seek(0.0)
        else:
            self.status = FINISHED
            self.stop()

        if self.autoLog:
            self.win.logOnFlip("Set %s finished" % self.name,
                               level=logging.EXP,
                               obj=self)

    def __del__(self):
        # release movie/audio/texture resources on garbage collection
        self._unload()

    def setAutoDraw(self, val, log=None):
        """Add or remove a stimulus from the list of stimuli that will be
        automatically drawn on each flip

        :parameters:
            - val: True/False
                True to add the stimulus to the draw list, False to remove it
        """
        if val:
            self.play(log=False)  # set to play in case stopped
        else:
            self.pause(log=False)
        # add to drawing list and update status
        setAttribute(self, 'autoDraw', val, log)
예제 #9
0
파일: movie2.py 프로젝트: unshur/psychopy
class MovieStim2(BaseVisualStim, ContainerMixin):
    """A stimulus class for playing movies (mpeg, avi, etc...) in PsychoPy
    that does not require avbin. Instead it requires the cv2 python package
    for OpenCV. The VLC media player also needs to be installed on the
    psychopy computer.

    **Example**::

        See Movie2Stim.py for demo.
    """
    def __init__(self, win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0,0.0),
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0,1.0,1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,
                 noAudio=False,
                 vframe_callback=None,
                 fps=None
        ):
        """
        :Parameters:

            filename :
                a string giving the relative or absolute path to the movie.
            flipVert : True or *False*
                If True then the movie will be top-bottom flipped
            flipHoriz : True or *False*
                If True then the movie will be right-left flipped
            volume :
                The nominal level is 100, and 0 is silence.
            loop : bool, optional
                Whether to start the movie over from the beginning if draw is
                called and the movie is done.

        """
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        super(MovieStim2, self).__init__(win, units=units, name=name,
                                         autoLog=False)
        #check for pyglet
        if win.winType != 'pyglet':
            logging.error('Movie stimuli can only be used with a pyglet window')
            core.quit()
        self._retracerate = win._monitorFrameRate
        if self._retracerate is None:
            self._retracerate = win.getActualFrameRate()
        if self._retracerate is None:
            logging.warning("FrameRate could not be supplied by psychopy; defaulting to 60.0")
            self._retracerate = 60.0
        self.filename = filename
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.depth = depth
        self.opacity = float(opacity)
        self.volume = volume
        self._av_stream_time_offset = 0.145
        self._no_audio = noAudio
        if self._no_audio:
            self._requested_fps = fps
        else:
            self._requested_fps = None
        self._vframe_callback = vframe_callback
        self._reset()
        self.loadMovie(self.filename)
        self.setVolume(volume)

        self.aspectRatio = self._video_width/float(self._video_height)
        #size
        if size is None:
            self.size = numpy.array([self._video_width, self._video_height],
                                   float)
        elif isinstance(size, (int, float, long)):
            # treat size as desired width, and calc a height
            # that maintains the aspect ratio of the video.
            self.size = numpy.array([size, size/self.aspectRatio], float)
        else:
            self.size = val2array(size)
        self.ori = ori
        self._updateVertices()
        #set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created %s = %s" %(self.name, str(self)))

    def _reset(self):
        self.duration = None
        self.status = NOT_STARTED
        self._numpy_frame = None
        self._frame_texture = None
        self._frame_data_interface = None
        self._video_stream = None
        self._total_frame_count = None
        self._video_width = None
        self._video_height = None
        # TODO: Read depth from video source
        self._video_frame_depth = 3
        self._video_frame_rate = None
        self._inter_frame_interval = None
        self._prev_frame_sec = None
        self._next_frame_sec = None
        self._next_frame_index = None
        self._prev_frame_index = None
        self._video_perc_done = None
        self._last_video_flip_time = None
        self._next_frame_displayed = False
        self._video_track_clock = Clock()

        self._audio_stream_clock=Clock()
        self._vlc_instance = None
        self._audio_stream = None
        self._audio_stream_player = None
        self._audio_stream_started = False
        self._audio_stream_event_manager=None

    def setMovie(self, filename, log=True):
        """See `~MovieStim.loadMovie` (the functions are identical).
        This form is provided for syntactic consistency with other visual stimuli.
        """
        self.loadMovie(filename, log=log)

    def loadMovie(self, filename, log=True):
        """Load a movie from file

        :Parameters:

            filename: string
                The name of the file, including path if necessary


        After the file is loaded MovieStim.duration is updated with the movie
        duration (in seconds).
        """
        self._unload()
        self._reset()
        if self._no_audio is False:
            self._createAudioStream()

        # Create Video Stream stuff
        self._video_stream = cv2.VideoCapture()
        self._video_stream.open(filename)
        if not self._video_stream.isOpened():
          raise RuntimeError( "Error when reading image file")

        self._total_frame_count = self._video_stream.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
        self._video_width = self._video_stream.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
        self._video_height = self._video_stream.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
        self._format = self._video_stream.get(cv2.cv.CV_CAP_PROP_FORMAT)
        # TODO: Read depth from video source
        self._video_frame_depth = 3
        if self._no_audio:
            self._video_frame_rate = self._requested_fps
        else:
            self._video_frame_rate = self._video_stream.get(cv2.cv.CV_CAP_PROP_FPS)
        self._inter_frame_interval = 1.0/self._video_frame_rate

        # Create a numpy array that can hold one video frame, as returned by cv2.
        self._numpy_frame = numpy.zeros((self._video_height,
                                          self._video_width,
                                          self._video_frame_depth),
                                         dtype=numpy.uint8)

        # Uses a preallocated numpy array as the pyglet ImageData data
        self._frame_data_interface = ArrayInterfaceImage(self._numpy_frame,
                                                         allow_copy=False,
                                                         rectangle=True,
                                                         force_rectangle=True)
        #frame texture; transformed so it looks right in psychopy
        self._frame_texture = self._frame_data_interface.texture.get_transform(flip_x=not self.flipHoriz,
                                                    flip_y=not self.flipVert)

        self.duration = self._total_frame_count * self._inter_frame_interval
        self.status = NOT_STARTED

        self.filename = filename
        logAttrib(self, log, 'movie', filename)

    def _createAudioStream(self):
        """
        Create the audio stream player for the video using pyvlc.
        """
        if not os.access(self.filename, os.R_OK):
            raise RuntimeError('Error: %s file not readable' % self.filename)
        self._vlc_instance = vlc.Instance('--novideo')
        try:
            self._audio_stream = self._vlc_instance.media_new(self.filename)
        except NameError:
            raise ImportError('NameError: %s vs LibVLC %s' % (vlc.__version__,
                                                       vlc.libvlc_get_version()))
        self._audio_stream_player = self._vlc_instance.media_player_new()
        self._audio_stream_player.set_media(self._audio_stream)
        self._audio_stream_event_manager = self._audio_stream_player.event_manager()
        self._audio_stream_event_manager.event_attach(vlc.EventType.MediaPlayerTimeChanged, self._audio_time_callback, self._audio_stream_player)
        self._audio_stream_event_manager.event_attach(vlc.EventType.MediaPlayerEndReached, self._audio_end_callback)

    def _releaseeAudioStream(self):
        if self._audio_stream_player:
            self._audio_stream_player.stop()

        if self._audio_stream_event_manager:
            self._audio_stream_event_manager.event_detach(vlc.EventType.MediaPlayerTimeChanged)
            self._audio_stream_event_manager.event_detach(vlc.EventType.MediaPlayerEndReached)

        if self._audio_stream:
            self._audio_stream.release()

        if self._vlc_instance:
            self._vlc_instance.vlm_release()
            self._vlc_instance.release()

        self._audio_stream = None
        self._audio_stream_event_manager = None
        self._audio_stream_player = None
        self._vlc_instance = None

    def _flipCallback(self):
        import inspect
        flip_time = inspect.currentframe().f_back.f_locals.get('now')
        if PRINT_FRAME_FLIP_TIMES:
            if self._last_video_flip_time is None:
                self._last_video_flip_time=flip_time
            print 'Frame %d\t%.4f\t%.4f'%(self.getCurrentFrameIndex(), flip_time,
                                          flip_time-self._last_video_flip_time)
        if flip_time is None:
            raise RuntimeError("Movie2._flipCallback: Can not access the currect flip time.")
        self._last_video_flip_time = flip_time
        self._next_frame_displayed = True

    def play(self, log=True):
        """Continue a paused movie from current position.
        """
        if self.status != PLAYING:

            if self.status == PAUSED:
                # toggle audio pause
                if self._audio_stream_player:
                    self._audio_stream_player.pause()
                    self._audio_stream_clock.reset(-self._audio_stream_player.get_time()/1000.0)

            self.status = PLAYING
            if log and self.autoLog:
                    self.win.logOnFlip("Set %s playing" %(self.name),
                                       level=logging.EXP, obj=self)

            self._video_track_clock.reset(-self._getNextFrame())
            self._updateFrameTexture()
            self.win.callOnFlip(self._flipCallback)
            return self._next_frame_index

    def pause(self, log=True):
        """
        Pause the current point in the movie (sound will stop, current frame
        will not advance).  If play() is called again both will restart.
        """
        if self.status == PLAYING:
            self.status = PAUSED
            if self._audio_stream_player and self._audio_stream_player.can_pause():
                self._audio_stream_player.pause()
            if log and self.autoLog:
                self.win.logOnFlip("Set %s paused" %(self.name), level=logging.EXP, obj=self)
            return True
        if log and self.autoLog:
            self.win.logOnFlip("Failed Set %s paused" %(self.name), level=logging.EXP, obj=self)
        return False

    def stop(self, log=True):
        """
        Stop the current point in the movie (sound will stop, current frame
        will not advance). Once stopped the movie cannot be restarted - it must
        be loaded again. Use pause() if you may need to restart the movie.
        """
        if self.status != STOPPED:
            self.status = STOPPED
            self._unload()
            self._reset()
            if log and self.autoLog:
                self.win.logOnFlip("Set %s stopped" %(self.name),
                    level=logging.EXP,obj=self)


    def seek(self, timestamp, log=True):
        """ Seek to a particular timestamp in the movie.
        """
        if self.status in [PLAYING, PAUSED]:
            if self.status == PLAYING:
                self.pause()
                if self._audio_stream_player and self._audio_stream_player.is_seekable():
                    self._audio_stream_player.set_time(int(timestamp*1000.0))
                self._video_stream.set(cv2.cv.CV_CAP_PROP_POS_MSEC,
                                        timestamp*1000.0)
                self.play()
                if log:
                    logAttrib(self, log, 'seek', timestamp)

    def setFlipHoriz(self, newVal=True, log=True):
        """If set to True then the movie will be flipped horizontally (left-to-right).
        Note that this is relative to the original, not relative to the current state.
        """
        self.flipHoriz = newVal
        logAttrib(self, log, 'flipHoriz')

    def setFlipVert(self, newVal=True, log=True):
        """If set to True then the movie will be flipped vertically (top-to-bottom).
        Note that this is relative to the original, not relative to the current state.
        """
        self.flipVert = not newVal
        logAttrib(self, log, 'flipVert')

    def setVolume(self, v):
        """
        Set the audio track volume. 0 = mute, 100 = 0 dB. float values
        between 0.0 and 1.0 are also accepted, and scaled to an int between 0
        and 100.
        """
        if self._audio_stream_player:
            if 0.0 <= v <= 1.0 and isinstance(v, (float,)):
                v = int(v*100)
            else:
                v = int(v)
            self.volume = v
            self._audio_stream_player.audio_set_volume(v)

    def getVolume(self):
        """
        Returns the current movie audio volume. 0 is no audio, 100 is max audio
        volume.
        """
        if self._audio_stream_player:
            self.volume = self._audio_stream_player.audio_get_volume()
        return self.volume

    def getFPS(self):
        """
        Returns the movie frames per second playback speed.
        """
        return self._video_frame_rate

    def setFPS(self, fps):
        """
        If the movie was created with noAudio = True kwarg, then the movie
        playback speed can be changed from the original frame rate. For example,
        if the movie being played has 30 fps and you would like to play it at 2x
        normal speed, setFPS(60) will do that.
        """
        if self._no_audio:
            self._requested_fps = fps
            self._video_frame_rate = fps
            self._inter_frame_interval = 1.0/self._video_frame_rate
            return
        raise ValueError("Error calling movie.setFPS(): MovieStim must be created with kwarg noAudio=True.")

    def getTimeToNextFrameDraw(self):
        """
        Get the number of sec.msec remaining until the next movie video frame
        should be drawn.
        """
        try:
            rt = (self._next_frame_sec - 1.0/self._retracerate) - self._video_track_clock.getTime()
            return rt
        except:
            #import traceback
            #traceback.print_exc()
            logging.WARNING("MovieStim2.getTimeToNextFrameDraw failed.")
            return 0.0

    def shouldDrawVideoFrame(self):
        """
        True if the next movie frame should be drawn, False if it is not yet
        time. See getTimeToNextFrameDraw().
        """
        return self.getTimeToNextFrameDraw() <= 0.0

    def getCurrentFrameNumber(self):
        """
        Get the current movie frame number. The first frame number in a file is
        1.
        """
        return self._next_frame_index

    def getCurrentFrameTime(self):
        """
        Get the time that the movie file specified the current video frame as
        having.
        """
        return self._next_frame_sec

    def getPercentageComplete(self):
        """
        Provides a value between 0.0 and 100.0, indicating the amount of the
        movie that has been already played.
        """
        return self._video_perc_done

    def isCurrentFrameVisible(self):
        """
        The current video frame goes through two stages; the first being when
        the movie frame is being loaded, but is not visible on the display.
        The second is when the frame has actually been presented on the display.
        Returns False if the frame is in the first stage, True when in stage 2.
        """
        return self._next_frame_displayed

    def _getNextFrame(self):
        # get next frame info ( do not decode frame yet)
        if self._video_stream.grab():
            self._prev_frame_index = self._next_frame_index
            self._prev_frame_sec = self._next_frame_sec
            self._next_frame_sec = self._video_stream.get(cv2.cv.CV_CAP_PROP_POS_MSEC)/1000.0
            self._next_frame_index = self._video_stream.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)
            self._video_perc_done = self._video_stream.get(cv2.cv.CV_CAP_PROP_POS_AVI_RATIO)
            self._next_frame_displayed = False
            return self._next_frame_sec
        else:
            self._onEos()

    def _updateFrameTexture(self):
        # decode frame into np array and move to opengl tex
        ret, f = self._video_stream.retrieve()
        if ret:
            frame_array = cv2.cvtColor(f, cv2.COLOR_BGR2RGB)
            if callable(self._vframe_callback):
                try:
                    frame_array = self._vframe_callback(self._next_frame_index, frame_array)
                except:
                    print "MovieStim2 Error: vframe_callback raised an exception. Using original frame data."
                    import traceback
                    traceback.print_exc()
            #self._numpy_frame[:] = f[...,::-1]
            numpy.copyto(self._numpy_frame, frame_array)
            self._frame_data_interface.dirty()
        else:
            raise RuntimeError("Could not load video frame data.")

    def _getVideoAudioTimeDiff(self):
        if self._audio_stream_started is False:
            return 0
        return self.getCurrentFrameTime()-self._getAudioStreamTime()

    def draw(self, win=None):
        """
        Draw the current frame to a particular visual.Window (or to the
        default win for this object if not specified). The current position in
        the movie will be determined automatically.

        This method should be called on every frame that the movie is meant to
        appear"""
        if self.status != PLAYING:
            return

        return_next_frame_index = False
        if win is None:
            win = self.win
        self._selectWindow(win)

        if self._no_audio is False and not self._audio_stream_started and self._video_track_clock.getTime() >= self._av_stream_time_offset:
            self._startAudio()

        if self._next_frame_displayed:
            if self._getVideoAudioTimeDiff() > self._inter_frame_interval:
                self._video_track_clock.reset(-self._next_frame_sec)
            else:
                self._getNextFrame()

        if self.shouldDrawVideoFrame() and not self._next_frame_displayed:
            self._updateFrameTexture()
            return_next_frame_index = True

        #make sure that textures are on and GL_TEXTURE0 is active
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glColor4f(1, 1, 1, self.opacity)  # sets opacity (1,1,1 = RGB placeholder)
        GL.glPushMatrix()
        self.win.setScale('pix')
        #move to centre of stimulus and rotate
        vertsPix = self.verticesPix
        t=self._frame_texture.tex_coords
        array = (GL.GLfloat * 32)(
             t[0],  t[1],
             vertsPix[0,0], vertsPix[0,1],    0.,  #vertex
             t[3],  t[4],
             vertsPix[1,0], vertsPix[1,1],    0.,
             t[6],  t[7],
             vertsPix[2,0], vertsPix[2,1],    0.,
             t[9],  t[10],
             vertsPix[3,0], vertsPix[3,1],    0.,
             )
        GL.glPushAttrib(GL.GL_ENABLE_BIT)
        GL.glEnable(self._frame_texture.target)
        GL.glBindTexture(self._frame_texture.target, self._frame_texture.id)
        GL.glPushClientAttrib(GL.GL_CLIENT_VERTEX_ARRAY_BIT)
        #2D texture array, 3D vertex array
        GL.glInterleavedArrays(GL.GL_T2F_V3F, 0, array)
        GL.glDrawArrays(GL.GL_QUADS, 0, 4)
        GL.glPopClientAttrib()
        GL.glPopAttrib()
        GL.glPopMatrix()
        if return_next_frame_index:
            self.win.callOnFlip(self._flipCallback)
            return self._next_frame_index

    def setContrast(self):
        """Not yet implemented for MovieStim"""
        pass

    def _startAudio(self):
        """
        Start the audio playback stream.
        """
        if self._audio_stream_player:
            self._audio_stream_started = True

            self._audio_stream_player.play()
            self._audio_stream_clock.reset(-self._audio_stream_player.get_time()/1000.0)

    def _getAudioStreamTime(self):
        return self._audio_stream_clock.getTime()

    def _audio_time_callback(self, event, player):
        """
        Called by VLC every few hundred msec providing the current audio track
        time. This info is used to pace the display of video frames read using
        cv2.
        """
        self._audio_stream_clock.reset(-event.u.new_time/1000.0)

    def _audio_end_callback(self, event):
        """
        Called by VLC when the audio track ends. Right now, when this is called
        the video is stopped.
        """
        self._onEos()

    def _unload(self):
        if self._video_stream:
            self._video_stream.release()
        self._video_stream = None
        self._frame_data_interface = None
        self._numpy_frame = None

        self._releaseeAudioStream()

        self.status = FINISHED

    def _onEos(self):
        if self.loop:
            self.seek(0.0)
        else:
            self.status = FINISHED
            self.stop()

        if self.autoLog:
            self.win.logOnFlip("Set %s finished" %(self.name),
                level=logging.EXP,obj=self)

    def __del__(self):
        self._unload()
def main():
    """Run the target-clicking game: show targets, track score/lives across
    three levels, then display the outcome and a per-level overview, and
    save results to a CSV named after the subject ID.
    """
    # Set up the display size and background colour
    DISPSIZE = (1400, 800)
    BGC = (-1, -1, -1)

    # Game state variables
    score = 0
    lives = 3
    level = 1

    # Create the core objects
    win = Window(size=DISPSIZE, units='pix', fullscr=False, color=BGC)

    mouse = Mouse(win)

    target = ImageStim(win, 'target.png', size=(420, 420))

    # Three text stimuli shown to the player during the game
    lives_count = TextStim(
        win,
        text=f'Lives = {lives}',
        height=35,
        color=(1, 0.2, 0.6),
        pos=(100, 330),
    )

    score_count = TextStim(win,
                           text=f'Score = {score}',
                           height=35,
                           color=(0.2, 0.2, 0.8),
                           pos=(450, 330))

    level_count = TextStim(win,
                           text=f'Level = {level}',
                           height=35,
                           color=(1, -0.5, 1),
                           pos=(850, 330))

    # Messages showing the player the outcome of the game
    you_have_lost = TextStim(
        win,
        text='Boo! Not a great game, pal... Get it together!',
        height=35,
        color=(0.2, 0.2, 0.8),
        pos=(250, 230))

    you_have_won = TextStim(win,
                            text='Yey! Well done, champ! Time to celebrate!',
                            height=35,
                            color=(0.2, 0.2, 0.8),
                            pos=(250, 230))

    # Images used for the winning and losing scenarios
    loser = ImageStim(win, 'failed.jpg', pos=(0, -100), size=(420, 420))
    winner = ImageStim(win, 'tiny_trash.jpg', pos=(0, -100), size=(420, 420))

    # Dialog to collect the user's ID
    user_id_dialog = gui.Dlg(title="Target Game")
    user_id_dialog.addText('Please write your subject ID: a 4-digit code')
    user_id_dialog.addField('Subject ID:')
    ok_data = user_id_dialog.show()  # show dialog and wait for OK or Cancel

    if not user_id_dialog.OK:
        print('user cancelled')

    # NOW THE GAME WILL START:

    # If enabled, intro will play:
    enable_intro = True

    if enable_intro:
        show_intro(win)

    # One results list per level (index = level - 1)
    target_hits_per_level = [
        [],
        [],
        [],
        [],
    ]

    move_target_at_random_pos(
        target)  # first the target is shown on the screen

    lives_timer = CountdownTimer(
        5)  # Level 1 starts with 5 sec to hit the target
    mouse_click_clock = Clock()
    reaction_time_clock = Clock()
    change_target = False

    while level < 4 and lives > 0:
        target.draw()
        target_x, target_y = target.pos
        lives_count.draw()
        score_count.draw()
        level_count.draw()

        win.flip()

        keys_pressed = getKeys()
        if 'q' in keys_pressed:
            break
        # was `mouse.getPressed()[0] == True`; truthiness is the idiom
        mouse_is_pressed = mouse.getPressed()[0]

        mouse_x, mouse_y = mouse.getPos()
        level_count.setText(f'Level = {level}')

        # if the player does not click, the target moves and the player loses a life
        if lives_timer.getTime() <= 0:
            lives -= 1
            lives_count.setText(f'Lives = {lives}')
            mouse_in_target = None
            mouse_in_target_x = None
            mouse_in_target_y = None
            change_target = True

        # Check for a mouse click every 0.2s, so that we don't accept more than 1
        # press on mouse hold
        if mouse_is_pressed and mouse_click_clock.getTime() > 0.2:
            mouse_click_clock.reset()
            change_target = True

            if mouse_clicked_in_target(mouse, target):
                mouse_in_target = True
                # click position relative to the target centre
                mouse_in_target_x = mouse_x - target_x
                mouse_in_target_y = mouse_y - target_y
                score += 1
                score_count.setText(f'Score = {score}')

            else:
                lives -= 1
                lives_count.setText(f'Lives = {lives}')
                mouse_in_target = False
                mouse_in_target_x = None
                mouse_in_target_y = None

        if change_target:

            mouse_click = {
                'mouse_x': mouse_in_target_x,
                'mouse_y': mouse_in_target_y,
                'reaction_time': reaction_time_clock.getTime(),
                'mouse_in_target': mouse_in_target,
            }

            target_hits_per_level[level - 1].append(
                mouse_click)  # indexes start from 0 --> level - 1

            # Level thresholds: each level shortens the time to hit the target
            if score == 5:
                lives_timer.reset(3)
                level = 2
            elif score == 10:
                lives_timer.reset(1)
                level = 3
            elif score == 15:
                level = 4

            move_target_at_random_pos(target)
            lives_timer.reset()
            reaction_time_clock.reset()
            change_target = False

    # Here we display the outcome of the game:
    if level == 4:
        you_have_won.draw()
        winner.draw()
    else:
        you_have_lost.draw()
        loser.draw()

    win.flip()
    wait(3)

    # Finally, we draw the overview for the player

    draw_overview_target(
        win=win,
        level=1,
        target_pos=(-450, 0),
        text_pos=(50, 300),
        mouse_clicks_all_levels=target_hits_per_level,
    )

    draw_overview_target(
        win=win,
        level=2,
        target_pos=(0, 0),
        text_pos=(450, 300),
        mouse_clicks_all_levels=target_hits_per_level,
    )

    draw_overview_target(
        win=win,
        level=3,
        target_pos=(450, 0),
        text_pos=(850, 300),
        mouse_clicks_all_levels=target_hits_per_level,
    )

    win.flip()
    wait(4)

    # The user has not clicked Cancel on the subject ID window
    if ok_data is not None:
        write_results(target_hits_per_level, 'results-' + ok_data[0] + '.csv')

    win.close()
Example #11
0
File: movie2.py  Project: qenops/psychopy
class MovieStim2(BaseVisualStim, ContainerMixin):
    """A stimulus class for playing movies (mpeg, avi, etc...) in PsychoPy
    that does not require avbin. Instead it requires the cv2 python package
    for OpenCV. The VLC media player also needs to be installed on the
    psychopy computer.

    **Example**::

        See Movie2Stim.py for demo.
    """
    def __init__(self,
                 win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0, 0.0),
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0, 1.0, 1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,
                 noAudio=False,
                 vframe_callback=None,
                 fps=None,
                 interpolate=True):
        """
        :Parameters:

            filename :
                a string giving the relative or absolute path to the movie.
            flipVert : True or *False*
                If True then the movie will be top-bottom flipped
            flipHoriz : True or *False*
                If True then the movie will be right-left flipped
            volume :
                The nominal level is 100, and 0 is silence.
            loop : bool, optional
                Whether to start the movie over from the beginning if draw is
                called and the movie is done.
            size : None, number or sequence, optional
                None uses the native video resolution; a single number is
                treated as the desired width (height keeps aspect ratio).

        """
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        super(MovieStim2, self).__init__(win,
                                         units=units,
                                         name=name,
                                         autoLog=False)
        # check for pyglet
        if win.winType != 'pyglet':
            logging.error(
                'Movie stimuli can only be used with a pyglet window')
            core.quit()
        # Refresh rate drives the frame-draw timing; prefer the window's
        # measured rate, then an on-the-fly measurement, then 60 Hz.
        self._retracerate = win._monitorFrameRate
        if self._retracerate is None:
            self._retracerate = win.getActualFrameRate()
        if self._retracerate is None:
            logging.warning("FrameRate could not be supplied by psychopy; "
                            "defaulting to 60.0")
            self._retracerate = 60.0
        self.filename = filename
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.depth = depth
        self.opacity = float(opacity)
        self.volume = volume
        # Fixed audio/video start offset (sec) compensating for vlc startup.
        self._av_stream_time_offset = 0.145
        self._no_audio = noAudio
        self._vframe_callback = vframe_callback
        self.interpolate = interpolate

        self.useTexSubImage2D = True

        self._texID = None
        self._video_stream = cv2.VideoCapture()

        self._reset()
        self.loadMovie(self.filename)
        self.setVolume(volume)
        self.nDroppedFrames = 0

        self.aspectRatio = self._video_width / float(self._video_height)
        # size
        if size is None:
            self.size = numpy.array([self._video_width, self._video_height],
                                    float)
        elif isinstance(size, (int, float)):
            # Bug fix: the tuple was (int, float, int) — the duplicated int
            # was a leftover from a Python 2 `long` entry.
            # treat size as desired width, and calc a height
            # that maintains the aspect ratio of the video.
            self.size = numpy.array([size, size / self.aspectRatio], float)
        else:
            self.size = val2array(size)
        self.ori = ori
        self._updateVertices()
        # set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created {} = {}".format(self.name, self))

    def _reset(self):
        """Return all per-movie state to its initial (unloaded) values.

        Releases the current GL texture if one exists; the cv2 capture
        object itself is kept for reuse by the next loadMovie() call.
        """
        if self._texID is not None:
            GL.glDeleteTextures(1, self._texID)
            self._texID = None
        # Everything that is simply "unknown" until a movie is loaded.
        for attr in ('duration', '_numpy_frame', '_total_frame_count',
                     '_video_width', '_video_height', '_video_frame_rate',
                     '_inter_frame_interval', '_prev_frame_sec',
                     '_next_frame_sec', '_next_frame_index',
                     '_prev_frame_index', '_video_perc_done',
                     '_vlc_instance', '_audio_stream',
                     '_audio_stream_player', '_audio_stream_event_manager'):
            setattr(self, attr, None)
        self.status = NOT_STARTED
        # TODO: Read depth from video source
        self._video_frame_depth = 3
        self._next_frame_displayed = False
        self._audio_stream_started = False
        self._video_track_clock = Clock()
        self._audio_stream_clock = Clock()

    def setMovie(self, filename, log=True):
        """Alias of `loadMovie`, provided so movie stimuli follow the
        ``setX`` naming convention used by the other visual stimuli.
        """
        self.loadMovie(filename, log=log)

    def loadMovie(self, filename, log=True):
        """Load a movie from file

        :Parameters:

            filename: string
                The name of the file, including path if necessary

        :Raises: RuntimeError if the file cannot be opened by cv2.

        After the file is loaded MovieStim.duration is updated with the movie
        duration (in seconds).
        """
        self._unload()
        self._reset()
        if self._no_audio is False:
            self._createAudioStream()

        # Open the video stream with cv2 and read its basic properties.
        # (The previous version checked `isOpened()` twice, the first time
        # with a dead elapsed-time condition; both branches raised the same
        # error, so a single check is equivalent.)
        self._video_stream.open(filename)
        if not self._video_stream.isOpened():
            raise RuntimeError("Error when reading image file")

        self._total_frame_count = self._video_stream.get(
            cv2.CAP_PROP_FRAME_COUNT)
        self._video_width = int(
            self._video_stream.get(cv2.CAP_PROP_FRAME_WIDTH))
        self._video_height = int(
            self._video_stream.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self._format = self._video_stream.get(cv2.CAP_PROP_FORMAT)
        # TODO: Read depth from video source
        self._video_frame_depth = 3

        # NOTE(review): cv2 reports 0 FPS for some containers, which would
        # make the division below raise ZeroDivisionError — confirm whether
        # a fallback to the `fps` init parameter is wanted here.
        cv_fps = self._video_stream.get(cv2.CAP_PROP_FPS)

        self._video_frame_rate = cv_fps

        self._inter_frame_interval = 1.0 / self._video_frame_rate

        # Pre-allocate a buffer large enough for one decoded BGR frame, as
        # returned by cv2.
        self._numpy_frame = numpy.zeros(
            (self._video_height, self._video_width, self._video_frame_depth),
            dtype=numpy.uint8)
        self.duration = self._total_frame_count * self._inter_frame_interval
        self.status = NOT_STARTED

        self.filename = filename
        logAttrib(self, log, 'movie', filename)

    def _createAudioStream(self):
        """
        Create the audio stream player for the video using pyvlc.
        """
        # Fail early with a clear message; vlc's own errors are unhelpful
        # for unreadable files.
        if not os.access(self.filename, os.R_OK):
            raise RuntimeError('Error: %s file not readable' % self.filename)
        # '--novideo': vlc handles the audio track only; video frames are
        # decoded separately via cv2.
        self._vlc_instance = vlc.Instance('--novideo')
        try:
            self._audio_stream = self._vlc_instance.media_new(self.filename)
        except NameError:
            msg = 'NameError: %s vs LibVLC %s'
            raise ImportError(msg %
                              (vlc.__version__, vlc.libvlc_get_version()))
        self._audio_stream_player = self._vlc_instance.media_player_new()
        self._audio_stream_player.set_media(self._audio_stream)
        self._audio_stream_event_manager = self._audio_stream_player.event_manager(
        )
        # weakref.ref(self) avoids a reference cycle between the stimulus
        # and the vlc callbacks that would keep the stimulus alive.
        self._audio_stream_event_manager.event_attach(
            vlc.EventType.MediaPlayerTimeChanged, _audioTimeCallback,
            weakref.ref(self), self._audio_stream_player)
        self._audio_stream_event_manager.event_attach(
            vlc.EventType.MediaPlayerEndReached, _audioEndCallback,
            weakref.ref(self))

    def _releaseeAudioStream(self):
        """Stop audio playback and release all vlc resources.

        NOTE(review): the name contains a typo ("releasee"); it is kept
        because _unload() calls it by this spelling.
        """
        if self._audio_stream_player:
            self._audio_stream_player.stop()

        # Detach the callbacks registered in _createAudioStream before
        # releasing the player/instance.
        if self._audio_stream_event_manager:
            self._audio_stream_event_manager.event_detach(
                vlc.EventType.MediaPlayerTimeChanged)
            self._audio_stream_event_manager.event_detach(
                vlc.EventType.MediaPlayerEndReached)

        if self._audio_stream:
            self._audio_stream.release()

        if self._vlc_instance:
            self._vlc_instance.vlm_release()
            self._vlc_instance.release()

        # Drop all references so a later _createAudioStream starts clean.
        self._audio_stream = None
        self._audio_stream_event_manager = None
        self._audio_stream_player = None
        self._vlc_instance = None

    def _flipCallback(self):
        """Window-flip hook: mark the queued video frame as now visible."""
        self._next_frame_displayed = True

    def play(self, log=True):
        """Continue a paused movie from current position.

        Returns the index of the next video frame when the playback state
        actually changed; returns None if the movie was already playing.
        """
        cstat = self.status
        if cstat != PLAYING:
            self.status = PLAYING

            if self._next_frame_sec is None:
                # movie has no current position, need to reset the clock
                # to zero in order to have the timing logic work
                # otherwise the video stream would skip frames until the
                # time since creating the movie object has passed
                self._video_track_clock.reset()

            if cstat == PAUSED:
                # toggle audio pause
                if self._audio_stream_player:
                    self._audio_stream_player.pause()
                    self._audio_stream_clock.reset(
                        -self._audio_stream_player.get_time() / 1000.0)
                if self._next_frame_sec:
                    self._video_track_clock.reset(-self._next_frame_sec)
            else:
                # Starting fresh: grab the first frame's timestamp and sync
                # the video clock to it.
                nt = self._getNextFrame()
                self._video_track_clock.reset(-nt)

            if log and self.autoLog:
                self.win.logOnFlip("Set %s playing" % (self.name),
                                   level=logging.EXP,
                                   obj=self)

            # Upload the pending frame and learn on the next flip when it
            # actually reached the screen.
            self._updateFrameTexture()
            self.win.callOnFlip(self._flipCallback)
            return self._next_frame_index

    def pause(self, log=True):
        """Freeze the movie at the current frame and pause the sound.

        Call play() to resume both. Returns True if the movie was playing
        and is now paused, False otherwise.
        """
        if self.status != PLAYING:
            # Nothing to pause; report the failed request.
            if log and self.autoLog:
                self.win.logOnFlip("Failed Set %s paused" % self.name,
                                   level=logging.EXP,
                                   obj=self)
            return False
        self.status = PAUSED
        player = self._audio_stream_player
        if player and player.can_pause():
            player.pause()
        if log and self.autoLog:
            self.win.logOnFlip("Set %s paused" % self.name,
                               level=logging.EXP,
                               obj=self)
        return True

    def stop(self, log=True):
        """Halt playback and unload the movie entirely.

        Sound stops and the frame will not advance. A stopped movie cannot
        be resumed - it must be loaded again. Use pause() if you may need
        to restart the movie.
        """
        if self.status == STOPPED:
            return
        self.status = STOPPED
        self._unload()
        self._reset()
        if log and self.autoLog:
            self.win.logOnFlip("Set %s stopped" % (self.name),
                               level=logging.EXP,
                               obj=self)

    def seek(self, timestamp, log=True):
        """Seek to a particular timestamp in the movie.

        Only effective while PLAYING or PAUSED; playback resumes after the
        seek. A timestamp <= 0 reloads the movie from the start.
        """
        if self.status in [PLAYING, PAUSED]:
            if timestamp > 0.0:
                if self.status == PLAYING:
                    self.pause()
                # Move the audio track first, then re-sync its clock.
                player = self._audio_stream_player
                if player and player.is_seekable():
                    player.set_time(int(timestamp * 1000.0))
                    self._audio_stream_clock.reset(-timestamp)

                MSEC = cv2.CAP_PROP_POS_MSEC
                FRAMES = cv2.CAP_PROP_POS_FRAMES
                self._video_stream.set(MSEC, timestamp * 1000.0)
                self._video_track_clock.reset(-timestamp)
                # Read back the position actually reached; cv2 may land on
                # the nearest decodable frame, not the exact time requested.
                self._next_frame_index = self._video_stream.get(FRAMES)
                self._next_frame_sec = self._video_stream.get(MSEC) / 1000.0
            else:
                # Seeking to (or before) the start: reload from scratch.
                self.stop()
                self.loadMovie(self.filename)
            if log:
                logAttrib(self, log, 'seek', timestamp)

            self.play()

    def setFlipHoriz(self, newVal=True, log=True):
        """Mirror the movie left/right.

        The flag is absolute (relative to the original movie), not a
        toggle of the current state.
        """
        self.flipHoriz = newVal
        logAttrib(self, log, 'flipHoriz')

    def setFlipVert(self, newVal=True, log=True):
        """If set to True then the movie will be flipped vertically
        (top-to-bottom). Note that this is relative to the original,
        not relative to the current state.
        """
        # Bug fix: this previously stored `not newVal`, inverting the
        # requested state — inconsistent with setFlipHoriz and with
        # __init__, which both store the value unmodified.
        self.flipVert = newVal
        logAttrib(self, log, 'flipVert')

    def setVolume(self, v):
        """Set the audio track volume. 0 = mute, 100 = 0 dB. float values
        between 0.0 and 1.0 are also accepted, and scaled to an int
        between 0 and 100.

        Has no effect when the movie was loaded with noAudio=True.
        """
        if self._audio_stream_player:
            # Scale a float in [0.0, 1.0] to the 0-100 int range vlc uses;
            # anything else is truncated to int as-is.
            if 0.0 <= v <= 1.0 and isinstance(v, float):
                v = int(v * 100)
            else:
                v = int(v)
            self.volume = v
            # (Removed a redundant inner re-check of _audio_stream_player;
            # the enclosing guard already guarantees it is set.)
            self._audio_stream_player.audio_set_volume(v)

    def getVolume(self):
        """Current audio volume as an int: 0 = no audio, 100 = max.

        Queries the vlc player when one exists, caching the value on
        ``self.volume`` before returning it.
        """
        player = self._audio_stream_player
        if player:
            self.volume = player.audio_get_volume()
        return self.volume

    def getFPS(self):
        """Nominal playback rate of the loaded movie, in frames per
        second (as reported by the video stream).
        """
        return self._video_frame_rate

    def getTimeToNextFrameDraw(self):
        """Get the number of sec.msec remaining until the next
        movie video frame should be drawn.

        Returns 0.0 (i.e. "draw now") when the remaining time cannot be
        computed, e.g. before any frame has been queued.
        """
        try:
            # Aim one retrace early so the flip lands on the frame's time.
            _tm = self._video_track_clock.getTime()
            return self._next_frame_sec - 1.0 / self._retracerate - _tm
        except Exception:
            # _next_frame_sec may still be None here; deliberate broad
            # catch keeps the draw loop running.
            logging.warning("MovieStim2.getTimeToNextFrameDraw failed.")
            return 0.0

    def shouldDrawVideoFrame(self):
        """Whether it is time to draw the next movie frame.

        See getTimeToNextFrameDraw() for how the deadline is computed.
        """
        remaining = self.getTimeToNextFrameDraw()
        return remaining <= 0.0

    def getCurrentFrameNumber(self):
        """Index of the movie frame currently queued for display.

        Frame numbering starts at 1 for the first frame in a file.
        """
        return self._next_frame_index

    def getCurrentFrameTime(self):
        """Movie-file timestamp (in seconds) of the current video frame,
        as specified by the movie file itself.
        """
        return self._next_frame_sec

    def getPercentageComplete(self):
        """How much of the movie has already been played, as a value
        between 0.0 and 100.0.
        """
        return self._video_perc_done

    def isCurrentFrameVisible(self):
        """Whether the current frame has reached the display yet.

        Each video frame passes through two stages: first it is loaded
        but not yet visible (returns False), then it has actually been
        presented on the display (returns True).
        """
        return self._next_frame_displayed

    def _getNextFrame(self):
        """get next frame info ( do not decode frame yet)

        Grabs frames until one is still timely, updating the prev/next
        bookkeeping; late frames are counted as dropped. Returns the
        timely frame's presentation time in seconds, or None when the
        stream ends (or the movie is no longer PLAYING).
        """
        while self.status == PLAYING:
            if self._video_stream.grab():
                # The frame just grabbed becomes "next"; the old "next"
                # becomes "prev".
                self._prev_frame_index = self._next_frame_index
                self._prev_frame_sec = self._next_frame_sec
                self._next_frame_index = self._video_stream.get(
                    cv2.CAP_PROP_POS_FRAMES)
                self._next_frame_sec = self._video_stream.get(
                    cv2.CAP_PROP_POS_MSEC) / 1000.0
                self._video_perc_done = self._video_stream.get(
                    cv2.CAP_PROP_POS_AVI_RATIO)
                self._next_frame_displayed = False
                # Tolerate being up to half a frame interval late.
                halfInterval = self._inter_frame_interval / 2.0
                if self.getTimeToNextFrameDraw() > -halfInterval:
                    return self._next_frame_sec
                else:
                    # Too late to show this frame: drop it and grab another.
                    self.nDroppedFrames += 1
                    if self.nDroppedFrames < reportNDroppedFrames:
                        msg = "MovieStim2 dropping video frame index: %d"
                        logging.warning(msg % self._next_frame_index)
                    elif self.nDroppedFrames == reportNDroppedFrames:
                        msg = ("Multiple Movie frames have occurred - "
                               "I'll stop bothering you about them!")
                        logging.warning(msg)
            else:
                # grab() failed: end of stream (or read error).
                self._onEos()
                break

    def _updateFrameTexture(self):
        """Decode frame into np array and move to opengl tex.

        Raises RuntimeError if the previously grabbed frame cannot be
        retrieved/decoded.
        """
        ret, self._numpy_frame = self._video_stream.retrieve()
        if not ret:
            raise RuntimeError("Could not load video frame data.")
        useSubTex = self.useTexSubImage2D
        if self._texID is None:
            self._texID = GL.GLuint()
            GL.glGenTextures(1, ctypes.byref(self._texID))
            # A brand-new texture has no storage yet, so the first upload
            # must be a full glTexImage2D.
            useSubTex = False

        # bind the texture in openGL
        GL.glEnable(GL.GL_TEXTURE_2D)
        # bind that name to the target
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        # makes the texture map wrap (this is actually default anyway)
        GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S,
                           GL.GL_REPEAT)
        # data from PIL/numpy is packed, but default for GL is 4 bytes
        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
        # important if using bits++ because GL_LINEAR
        # sometimes extrapolates to pixel vals outside range
        # The two upload paths previously duplicated here differed only in
        # the min/mag filter, so select the filter once and upload once.
        texFilter = GL.GL_LINEAR if self.interpolate else GL.GL_NEAREST
        GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER,
                           texFilter)
        GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER,
                           texFilter)
        if useSubTex is False:
            # Allocate (or reallocate) texture storage and upload the frame.
            GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                            self._numpy_frame.shape[1],
                            self._numpy_frame.shape[0], 0, GL.GL_BGR,
                            GL.GL_UNSIGNED_BYTE,
                            self._numpy_frame.ctypes)
        else:
            # Update the existing storage in place (cheaper per frame).
            GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                               self._numpy_frame.shape[1],
                               self._numpy_frame.shape[0], GL.GL_BGR,
                               GL.GL_UNSIGNED_BYTE,
                               self._numpy_frame.ctypes)
        GL.glTexEnvi(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE,
                     GL.GL_MODULATE)  # ?? do we need this - think not!

    def _getVideoAudioTimeDiff(self):
        """Seconds the video track is ahead of the audio track; 0 until
        the audio stream has actually been started.
        """
        if not self._audio_stream_started:
            return 0
        return self.getCurrentFrameTime() - self._getAudioStreamTime()

    def draw(self, win=None):
        """Draw the current frame to a particular visual.Window (or to the
        default win for this object if not specified).
        The current position in the movie will be determined automatically.

        This method should be called on every frame that the movie is meant
        to appear.

        Returns the index of the frame queued for the upcoming flip when a
        new frame was uploaded this call, otherwise None.
        """
        if self.status == NOT_STARTED or (self.status == FINISHED
                                          and self.loop):
            self.play()
        elif self.status == FINISHED and not self.loop:
            return
        return_next_frame_index = False
        if win is None:
            win = self.win
        self._selectWindow(win)

        # Start the audio stream once the video clock passes the fixed
        # a/v offset, so sound and picture line up.
        vtClock = self._video_track_clock
        if (self._no_audio is False and not self._audio_stream_started
                and vtClock.getTime() >= self._av_stream_time_offset):
            self._startAudio()

        # After the queued frame has been flipped: either re-sync the video
        # clock to the audio (when video lags a full frame interval) or
        # advance to the next frame's info.
        if self._next_frame_displayed:
            if self._getVideoAudioTimeDiff() > self._inter_frame_interval:
                vtClock.reset(-self._next_frame_sec)
            else:
                self._getNextFrame()

        if self.shouldDrawVideoFrame() and not self._next_frame_displayed:
            self._updateFrameTexture()
            return_next_frame_index = True

        # make sure that textures are on and GL_TEXTURE0 is active
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glEnable(GL.GL_TEXTURE_2D)
        # sets opacity (1,1,1 = RGB placeholder)
        GL.glColor4f(1, 1, 1, self.opacity)
        GL.glPushMatrix()
        self.win.setScale('pix')
        # move to centre of stimulus and rotate
        vertsPix = self.verticesPix

        # Interleaved (u, v, x, y, z) data for one textured quad.
        array = (GL.GLfloat * 32)(
            1,
            1,  # texture coords
            vertsPix[0, 0],
            vertsPix[0, 1],
            0.,  # vertex
            0,
            1,
            vertsPix[1, 0],
            vertsPix[1, 1],
            0.,
            0,
            0,
            vertsPix[2, 0],
            vertsPix[2, 1],
            0.,
            1,
            0,
            vertsPix[3, 0],
            vertsPix[3, 1],
            0.,
        )
        GL.glPushAttrib(GL.GL_ENABLE_BIT)
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        GL.glPushClientAttrib(GL.GL_CLIENT_VERTEX_ARRAY_BIT)
        # 2D texture array, 3D vertex array
        GL.glInterleavedArrays(GL.GL_T2F_V3F, 0, array)
        GL.glDrawArrays(GL.GL_QUADS, 0, 4)
        GL.glPopClientAttrib()
        GL.glPopAttrib()
        GL.glPopMatrix()
        # GL.glActiveTexture(0)
        # GL.glDisable(GL.GL_TEXTURE_2D)
        if return_next_frame_index:
            self.win.callOnFlip(self._flipCallback)
            return self._next_frame_index

    def setContrast(self):
        """Not yet implemented for MovieStim.

        Present only so this class matches the interface of the other
        visual stimuli; calling it is a no-op.
        """
        pass

    def _startAudio(self):
        """Start the vlc audio stream and sync the audio clock to the
        player's reported position (in seconds).
        """
        player = self._audio_stream_player
        if player:
            self._audio_stream_started = True
            player.play()
            self._audio_stream_clock.reset(-player.get_time() / 1000.0)

    def _getAudioStreamTime(self):
        """Current audio playback position, in seconds."""
        return self._audio_stream_clock.getTime()

    def _unload(self):
        """Release the video/audio resources for the current movie.

        The cv2 VideoCapture object itself is kept alive; only its current
        file is released, so loadMovie() can reuse it.
        """
        # if self._video_stream:
        self._video_stream.release()
        # self._video_stream = None
        self._numpy_frame = None
        self._releaseeAudioStream()
        self.status = FINISHED

    def _onEos(self):
        """Handle end-of-stream: restart from the beginning when looping,
        otherwise mark FINISHED and stop/unload the movie.
        """
        if self.loop:
            self.seek(0.0)
        else:
            self.status = FINISHED
            self.stop()
        if self.autoLog:
            self.win.logOnFlip("Set %s finished" % self.name,
                               level=logging.EXP,
                               obj=self)

    def __del__(self):
        """Best-effort cleanup when the stimulus is garbage collected."""
        # Guard the cleanup: during interpreter shutdown module globals
        # used by _unload (cv2, vlc) may already be torn down, and
        # __del__ must never raise.
        try:
            self._unload()
        except Exception:
            pass

    def setAutoDraw(self, val, log=None):
        """Add or remove this stimulus from the window's autodraw list.

        :parameters:
            - val: True/False
                True to add the stimulus to the draw list, False to remove it
        """
        # Playback follows the autodraw state: enabling autodraw starts
        # (or resumes) the movie; disabling it pauses at the current frame.
        if not val:
            self.pause(log=False)
        else:
            self.play(log=False)  # resume/start in case stopped
        # add to drawing list and update status
        setAttribute(self, 'autoDraw', val, log)
예제 #12
0
    def __init__(self,
                 win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0, 0.0),
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0, 1.0, 1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,
                 noAudio=False,
                 vframe_callback=None,
                 fps=None,
                 interpolate=True):
        """
        :Parameters:

            filename :
                a string giving the relative or absolute path to the movie.
            flipVert : True or *False*
                If True then the movie will be top-bottom flipped
            flipHoriz : True or *False*
                If True then the movie will be right-left flipped
            volume :
                The nominal level is 100, and 0 is silence.
            loop : bool, optional
                Whether to start the movie over from the beginning if draw is
                called and the movie is done.

        """
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        super(VlcMovieStim, self).__init__(win,
                                           units=units,
                                           name=name,
                                           autoLog=False)
        # check for pyglet
        if win.winType != 'pyglet':
            logging.error(
                'Movie stimuli can only be used with a pyglet window')
            core.quit()
        # Refresh rate for frame-timing: window's measured rate, then an
        # on-the-fly measurement, then 60 Hz as a last resort.
        self._retracerate = win._monitorFrameRate
        if self._retracerate is None:
            self._retracerate = win.getActualFrameRate()
        if self._retracerate is None:
            logging.warning("FrameRate could not be supplied by psychopy; "
                            "defaulting to 60.0")
            self._retracerate = 60.0
        self.filename = pathToString(filename)
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        # NOTE(review): with the default size=None this passes None
        # straight to numpy.asarray; MovieStim2 instead derives the size
        # from the video dimensions - confirm this is handled downstream.
        self.size = numpy.asarray(size, float)
        self.depth = depth
        self.opacity = float(opacity)
        self.volume = volume
        self.no_audio = noAudio

        self.interpolate = interpolate
        # One GL texture is allocated up front and reused for every frame.
        self._texture_id = GL.GLuint()
        GL.glGenTextures(1, ctypes.byref(self._texture_id))

        self._pause_time = 0
        self._vlc_clock = Clock()
        self._vlc_initialized = False
        self._reset()
        self.loadMovie(self.filename)
        self.setVolume(volume)
        self.nDroppedFrames = 0

        self.ori = ori
        # set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created {} = {}".format(self.name, self))
예제 #13
0
class VlcMovieStim(BaseVisualStim, ContainerMixin):
    """A stimulus class for playing movies (mpeg, avi, etc...) in PsychoPy
    that uses VLC and does not require avbin. The VLC media player must be
    installed on the psychopy computer.

    Video frames are decoded by libvlc into a shared RGBA pixel buffer
    (filled via the vlc*Callback functions registered in ``_vlc_start``)
    and uploaded to an OpenGL texture on each ``draw()``.
    """
    def __init__(self,
                 win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0, 0.0),
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0, 1.0, 1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,
                 noAudio=False,
                 vframe_callback=None,
                 fps=None,
                 interpolate=True):
        """
        :Parameters:

            filename :
                a string giving the relative or absolute path to the movie.
            flipVert : True or *False*
                If True then the movie will be top-bottom flipped
            flipHoriz : True or *False*
                If True then the movie will be right-left flipped
            volume :
                The nominal level is 100, and 0 is silence.
            loop : bool, optional
                Whether to start the movie over from the beginning if draw is
                called and the movie is done.

        """
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        super(VlcMovieStim, self).__init__(win,
                                           units=units,
                                           name=name,
                                           autoLog=False)
        # check for pyglet
        if win.winType != 'pyglet':
            logging.error(
                'Movie stimuli can only be used with a pyglet window')
            core.quit()
        # Use the window's reported refresh rate, fall back to measuring it,
        # and finally default to 60 Hz so timing code always has a value.
        self._retracerate = win._monitorFrameRate
        if self._retracerate is None:
            self._retracerate = win.getActualFrameRate()
        if self._retracerate is None:
            logging.warning("FrameRate could not be supplied by psychopy; "
                            "defaulting to 60.0")
            self._retracerate = 60.0
        self.filename = pathToString(filename)
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.size = numpy.asarray(size, float)
        self.depth = depth
        self.opacity = float(opacity)
        self.volume = volume
        self.no_audio = noAudio

        self.interpolate = interpolate
        # GL texture that will receive the decoded RGBA frames.
        self._texture_id = GL.GLuint()
        GL.glGenTextures(1, ctypes.byref(self._texture_id))

        self._pause_time = 0
        self._vlc_clock = Clock()
        self._vlc_initialized = False
        self._reset()
        self.loadMovie(self.filename)
        self.setVolume(volume)
        self.nDroppedFrames = 0

        self.ori = ori
        # set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created {} = {}".format(self.name, self))

    def _reset(self):
        """Return playback bookkeeping to its pre-load state and release
        any VLC resources held from a previously loaded movie.
        """
        self.frame_counter = 0
        self.current_frame = 0
        self.duration = None
        self.status = NOT_STARTED
        self.width = None
        self.height = None
        self.frame_rate = None

        if self._vlc_initialized:
            self._release_vlc()

    def setMovie(self, filename, log=True):
        """See `~MovieStim.loadMovie` (the functions are identical).

        This form is provided for syntactic consistency with other
        visual stimuli.
        """
        self.loadMovie(filename, log=log)

    def loadMovie(self, filename, log=True):
        """Load a movie from file

        :Parameters:

            filename: string
                The name of the file, including path if necessary

        Due to VLC oddness, .duration is not correct until the movie starts playing.
        """
        self._reset()
        self.filename = pathToString(filename)

        # Initialize VLC
        self._vlc_start()

        self.status = NOT_STARTED
        logAttrib(self, log, 'movie', filename)

    def _vlc_start(self):
        """
        Create the vlc stream player for the video using python-vlc.

        Sets up the libvlc instance/media/player, registers the pixel-buffer
        lock/unlock/display callbacks and the time / end-of-stream event
        handlers, and keeps references to everything so it can be released
        later in ``_release_vlc``.
        """
        if not os.access(self.filename, os.R_OK):
            raise RuntimeError('Error: %s file not readable' % self.filename)
        if self.no_audio:
            instance = vlc.Instance("--no-audio")
        else:
            instance = vlc.Instance()
        try:
            stream = instance.media_new(self.filename)
        except NameError:
            # Raised when the installed libvlc does not match python-vlc.
            msg = 'NameError: %s vs LibVLC %s'
            raise ImportError(msg %
                              (vlc.__version__, vlc.libvlc_get_version()))

        player = instance.media_player_new()
        player.set_media(stream)

        # Load up the file
        stream.parse()
        size = player.video_get_size()
        self.video_width = size[0]
        self.video_height = size[1]
        self.frame_rate = player.get_fps()
        self.frame_counter = 0

        # TODO: Why is duration -1 still even after parsing? Newer vlc docs seem to hint this won't work until playback starts
        duration = player.get_length()
        logging.warning(
            "Video is %ix%i, duration %s, fps %s" %
            (self.video_width, self.video_height, duration, self.frame_rate))
        logging.flush()

        # We assume we can use the RGBA format here; the pitch is
        # width * 4 bytes per pixel (hence the << 2).
        player.video_set_format("RGBA", self.video_width, self.video_height,
                                self.video_width << 2)

        # Configure a lock and a buffer for the pixels coming from VLC
        self.pixel_lock = threading.Lock()
        self.pixel_buffer = (ctypes.c_ubyte * self.video_width *
                             self.video_height * 4)()

        # Once you set these callbacks, you are in complete control of what to do with the video buffer
        selfref = ctypes.cast(ctypes.pointer(ctypes.py_object(self)),
                              ctypes.c_void_p)
        player.video_set_callbacks(vlcLockCallback, vlcUnlockCallback,
                                   vlcDisplayCallback, selfref)

        manager = player.event_manager()
        manager.event_attach(vlc.EventType.MediaPlayerTimeChanged,
                             vlcTimeCallback, weakref.ref(self), player)
        manager.event_attach(vlc.EventType.MediaPlayerEndReached,
                             vlcEndReached, weakref.ref(self), player)

        # Keep references so the ctypes/VLC objects are not garbage
        # collected while libvlc may still call back into them.
        self._self_ref = selfref
        self._instance = instance
        self._player = player
        self._stream = stream
        self._manager = manager

        logging.info("Initialized VLC...")
        self._vlc_initialized = True

    def _release_vlc(self):
        """Detach event callbacks and release the VLC player, stream and
        instance created in ``_vlc_start``.
        """
        logging.info("Releasing VLC...")

        if self._manager:
            self._manager.event_detach(vlc.EventType.MediaPlayerTimeChanged)
            self._manager.event_detach(vlc.EventType.MediaPlayerEndReached)
        if self._player:
            self._player.stop()
        if self._stream:
            self._stream.release()
        if self._instance:
            self._instance.release()

        self._stream = None
        # Bug fix: previously cleared a never-set attribute
        # (_stream_event_manager), leaving _manager dangling after release.
        self._manager = None
        self._player = None
        self._instance = None
        self._vlc_initialized = False

    def _update_texture(self):
        """
        Take the pixel buffer (assumed to be RGBA)
        and cram it into the GL texture

        Holds ``pixel_lock`` so the VLC decode thread cannot write the
        buffer mid-upload.
        """
        with self.pixel_lock:
            GL.glEnable(GL.GL_TEXTURE_2D)
            GL.glBindTexture(GL.GL_TEXTURE_2D, self._texture_id)
            GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
            if self.interpolate:
                interpolation = GL.GL_LINEAR
            else:
                interpolation = GL.GL_NEAREST
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER,
                               interpolation)
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER,
                               interpolation)
            # NOTE(review): internal format is GL_RGB while the source data
            # is RGBA -- the alpha channel is discarded on upload; confirm
            # this is intended.
            GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB, self.video_width,
                            self.video_height, 0, GL.GL_RGBA,
                            GL.GL_UNSIGNED_BYTE, self.pixel_buffer)
            GL.glDisable(GL.GL_TEXTURE_2D)

    def play(self, log=True):
        """Start or continue a paused movie from current position.

        Returns the current frame number when playback was actually
        (re)started, otherwise None.
        """
        cstat = self.status
        if cstat != PLAYING:
            self.status = PLAYING

            # Resuming: rewind the clock to where we paused.
            if self._pause_time:
                self._vlc_clock.reset(self._pause_time)

            if self._player:
                if cstat == PAUSED:
                    # VLC pause() toggles, so this resumes playback.
                    self._player.pause()
                else:
                    self._player.play()

            if log and self.autoLog:
                self.win.logOnFlip("Set %s playing" % (self.name),
                                   level=logging.EXP,
                                   obj=self)

            self._update_texture()
            return self.current_frame

    def pause(self, log=True):
        """Pause the current point in the movie.

        Returns True when the movie was playing and is now paused,
        False otherwise.
        """
        if self.status == PLAYING:
            self.status = PAUSED
            player = self._player
            if player and player.can_pause():
                player.pause()
            if log and self.autoLog:
                self.win.logOnFlip("Set %s paused" % self.name,
                                   level=logging.EXP,
                                   obj=self)
            # Remember where we stopped so play() can reset the clock.
            self._pause_time = self._vlc_clock.getTime()
            return True
        if log and self.autoLog:
            self.win.logOnFlip("Failed Set %s paused" % self.name,
                               level=logging.EXP,
                               obj=self)
        return False

    def stop(self, log=True):
        """Stop the current point in the movie (sound will stop,
        current frame will not advance). Once stopped the movie cannot
        be restarted - it must be loaded again.

        Use pause() if you may need to restart the movie.
        """
        if self.status != STOPPED:
            self.status = STOPPED
            self._reset()
            if log and self.autoLog:
                self.win.logOnFlip("Set %s stopped" % (self.name),
                                   level=logging.EXP,
                                   obj=self)

    def seek(self, timestamp, log=True):
        """Seek to a particular timestamp (in seconds) in the movie.

        Only effective while the movie is playing or paused and the
        underlying VLC stream is seekable.
        """
        if self.status in [PLAYING, PAUSED]:
            player = self._player
            if player and player.is_seekable():
                # VLC expects milliseconds.
                player.set_time(int(timestamp * 1000.0))
                self._vlc_clock.reset(timestamp)

                if self.status == PAUSED:
                    self._pause_time = timestamp

            logAttrib(self, log, 'seek', timestamp)

    def setFlipHoriz(self, newVal=True, log=True):
        """If set to True then the movie will be flipped horizontally
        (left-to-right). Note that this is relative to the original,
        not relative to the current state.
        """
        self.flipHoriz = newVal
        logAttrib(self, log, 'flipHoriz')

    def setFlipVert(self, newVal=True, log=True):
        """If set to True then the movie will be flipped vertically
        (top-bottom). Note that this is relative to the original,
        not relative to the current state.
        """
        # Bug fix: store the requested value directly (was `not newVal`,
        # which contradicted the docstring and setFlipHoriz).
        self.flipVert = newVal
        logAttrib(self, log, 'flipVert')

    def setVolume(self, v):
        """Set the audio track volume. 0 = mute, 100 = 0 dB. float values
        between 0.0 and 1.0 are also accepted, and scaled to an int
        between 0 and 100.
        """
        if self._player:
            # A float in [0, 1] is interpreted as a fraction of full volume.
            if 0.0 <= v <= 1.0 and isinstance(v, float):
                v = int(v * 100)
            else:
                v = int(v)
            self.volume = v
            self._player.audio_set_volume(v)

    def getVolume(self):
        """Returns the current movie audio volume.

        0 is no audio, 100 is max audio volume.
        """
        if self._player:
            self.volume = self._player.audio_get_volume()
        return self.volume

    def getFPS(self):
        """
        Returns the movie frames per second playback speed.
        """
        return self.frame_rate

    def getCurrentFrameNumber(self):
        """Get the current movie frame number.
        The first frame number in a file is 1.
        """
        return self.frame_counter

    def getCurrentFrameTime(self):
        """Get the time that the movie file specified the current
        video frame as having.
        """
        return self._vlc_clock.getTime()

    def getPercentageComplete(self):
        """Provides a value between 0.0 and 100.0, indicating the
        amount of the movie that has been already played.
        """
        # Guard: the player does not exist before load / after release.
        if not self._player:
            return 0.0
        return self._player.get_position() * 100.0

    def _draw_rectangle(self, win):
        """Draw the current frame texture as a textured quad at the
        stimulus' vertices."""
        # make sure that textures are on and GL_TEXTURE0 is active
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glEnable(GL.GL_TEXTURE_2D)
        # sets opacity (1,1,1 = RGB placeholder)
        GL.glColor4f(1, 1, 1, self.opacity)
        GL.glPushMatrix()
        self.win.setScale('pix')
        # move to centre of stimulus and rotate
        vertsPix = self.verticesPix

        # Interleaved T2F_V3F data: (u, v, x, y, z) per corner.
        array = (GL.GLfloat * 32)(
            1,
            1,  # texture coords
            vertsPix[0, 0],
            vertsPix[0, 1],
            0.,  # vertex
            0,
            1,
            vertsPix[1, 0],
            vertsPix[1, 1],
            0.,
            0,
            0,
            vertsPix[2, 0],
            vertsPix[2, 1],
            0.,
            1,
            0,
            vertsPix[3, 0],
            vertsPix[3, 1],
            0.,
        )
        GL.glPushAttrib(GL.GL_ENABLE_BIT)
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texture_id)
        GL.glPushClientAttrib(GL.GL_CLIENT_VERTEX_ARRAY_BIT)
        # 2D texture array, 3D vertex array
        GL.glInterleavedArrays(GL.GL_T2F_V3F, 0, array)
        GL.glDrawArrays(GL.GL_QUADS, 0, 4)
        GL.glPopClientAttrib()
        GL.glPopAttrib()
        GL.glPopMatrix()

    def draw(self, win=None):
        """Draw the current frame to a particular visual.Window (or to the
        default win for this object if not specified).
        The current position in the movie will be determined automatically.

        This method should be called on every frame that the movie is meant
        to appear.

        Returns True on the first draw of each new video frame, None
        otherwise.
        """
        if self.status == NOT_STARTED or (self.status == FINISHED
                                          and self.loop):
            self.play()
        elif self.status == FINISHED and not self.loop:
            return
        if win is None:
            win = self.win
        self._selectWindow(win)

        self._update_texture()
        self._draw_rectangle(win)

        # frame_counter is advanced by the VLC time callback; report when a
        # new frame has been shown for the first time.
        if self.current_frame != self.frame_counter:
            self.current_frame = self.frame_counter
            return True

    def setContrast(self):
        """Not yet implemented
        """
        pass

    def _unload(self):
        """Release VLC resources and the GL texture."""
        if self._vlc_initialized:
            self._release_vlc()
        if self._texture_id is not None:
            GL.glDeleteTextures(1, self._texture_id)
            self._texture_id = None
        self.status = FINISHED

    def _onEos(self):
        """End-of-stream handler: loop back to the start or finish."""
        if self.loop:
            self.seek(0.0)
        else:
            self.status = FINISHED
            self.stop()
        if self.autoLog:
            self.win.logOnFlip("Set %s finished" % self.name,
                               level=logging.EXP,
                               obj=self)

    def __del__(self):
        try:
            self._unload()
        except (ImportError, TypeError):
            # Interpreter may be shutting down and modules already gone.
            # (ModuleNotFoundError is a subclass of ImportError.)
            pass

    def setAutoDraw(self, val, log=None):
        """Add or remove a stimulus from the list of stimuli that will be
        automatically drawn on each flip

        :parameters:
            - val: True/False
                True to add the stimulus to the draw list, False to remove it
        """
        if val:
            self.play(log=False)  # set to play in case stopped
        else:
            self.pause(log=False)
        # add to drawing list and update status
        setAttribute(self, 'autoDraw', val, log)
Example #14
0
File: movie2.py  Project: hanke/psychopy
class MovieStim2(BaseVisualStim, ContainerMixin):
    """A stimulus class for playing movies (mpeg, avi, etc...) in PsychoPy
    that does not require avbin. Instead it requires the cv2 python package
    for OpenCV. The VLC media player also needs to be installed on the
    psychopy computer.

    **Example**::

        See Movie2Stim.py for demo.
    """
    def __init__(self, win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0,0.0),
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0,1.0,1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,):
        """
        :Parameters:

            filename :
                a string giving the relative or absolute path to the movie.
            flipVert : True or *False*
                If True then the movie will be top-bottom flipped
            flipHoriz : True or *False*
                If True then the movie will be right-left flipped
            volume :
                The nominal level is 100, and 0 is silence.
            loop : bool, optional
                Whether to start the movie over from the beginning if draw is
                called and the movie is done.

        """
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        super(MovieStim2, self).__init__(win, units=units, name=name,
                                         autoLog=False)
        #check for pyglet
        if win.winType != 'pyglet':
            logging.error('Movie stimuli can only be used with a pyglet window')
            core.quit()
        self._retracerate = win._monitorFrameRate
        if self._retracerate is None:
            self._retracerate = win.getActualFrameRate()
        self.filename = filename
        self.loop = loop
        if loop: #and pyglet.version>='1.2':
            logging.error("looping of movies is not currently supported")
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.depth = depth
        self.opacity = float(opacity)
        self.volume = volume
        # Empirical audio-vs-video start offset (seconds): audio playback is
        # started this long after the video track clock starts.
        self._av_stream_time_offset = 0.145

        self._reset()
        self.loadMovie(self.filename)
        self.setVolume(volume)

        self.aspectRatio = self._video_width/float(self._video_height)
        # size: default to the native video size; a plain number is treated
        # as the desired width with height derived from the aspect ratio.
        if size is None:
            self.size = numpy.array([self._video_width, self._video_height],
                                   float)
        elif isinstance(size, (int, float)):
            # Bug fix: the original tested `(int, float, long)`, which is a
            # NameError on Python 3 (`long` no longer exists).
            self.size = numpy.array([size, size/self.aspectRatio], float)
        else:
            self.size = val2array(size)
        self.ori = ori
        self._updateVertices()
        #set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created %s = %s" %(self.name, str(self)))

    def _reset(self):
        """Clear all per-movie state ahead of loading a new file."""
        self.duration = None
        self.status = NOT_STARTED

        # Video decoding state (cv2 / pyglet texture).
        self._video_stream = None
        self._numpy_frame = None
        self._frame_texture = None
        self._frame_data_interface = None
        self._total_frame_count = None
        self._video_width = None
        self._video_height = None
        # TODO: Read depth from video source
        self._video_frame_depth = 3
        self._video_frame_rate = None
        self._inter_frame_interval = None

        # Frame scheduling / display bookkeeping.
        self._prev_frame_sec = None
        self._next_frame_sec = None
        self._prev_frame_index = None
        self._next_frame_index = None
        self._video_perc_done = None
        self._last_video_flip_time = None
        self._next_frame_displayed = False
        self._video_track_clock = Clock()

        # Audio (VLC) state.
        self._vlc_instance = None
        self._vlc_event_manager = None
        self._audio_stream = None
        self._audio_stream_player = None
        self._audio_stream_started = False
        self._last_audio_callback_time = core.getTime()
        self._last_audio_stream_time = None
        self._first_audio_callback_time = None
        self._audio_computer_time_drift = None

    def setMovie(self, filename, log=True):
        """Identical to `~MovieStim.loadMovie`.

        Provided so this class offers the same setter-style interface as
        other visual stimuli.
        """
        self.loadMovie(filename, log=log)

    def loadMovie(self, filename, log=True):
        """Load a movie from file

        :Parameters:

            filename: string
                The name of the file, including path if necessary


        After the file is loaded MovieStim.duration is updated with the movie
        duration (in seconds).
        """
        # Drop state from any previously loaded movie before opening the
        # new one; audio goes through VLC, video frames through cv2.
        self._reset()
        self._unload()
        self._createAudioStream()
        self._video_stream = cv2.VideoCapture()
        self._video_stream.open(filename)
        if not self._video_stream.isOpened():
          raise RuntimeError( "Error when reading image file")

        # Query stream geometry/timing from the cv2 capture.
        self._total_frame_count = self._video_stream.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
        self._video_width = self._video_stream.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
        self._video_height = self._video_stream.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
        self._format = self._video_stream.get(cv2.cv.CV_CAP_PROP_FORMAT)
        # TODO: Read depth from video source
        self._video_frame_depth = 3
        self._video_frame_rate = self._video_stream.get(cv2.cv.CV_CAP_PROP_FPS)
        self._inter_frame_interval = 1.0/self._video_frame_rate

        # Create a numpy array that can hold one video frame, as returned by cv2.
        self._numpy_frame = numpy.zeros((self._video_height,
                                          self._video_width,
                                          self._video_frame_depth),
                                         dtype=numpy.uint8)

        # Uses a preallocated numpy array as the pyglet ImageData data
        self._frame_data_interface = ArrayInterfaceImage(self._numpy_frame,
                                                         allow_copy=False,
                                                         rectangle=True,
                                                         force_rectangle=True)
        #frame texture; transformed so it looks right in psychopy
        self._frame_texture = self._frame_data_interface.texture.get_transform(flip_x=not self.flipHoriz,
                                                    flip_y=not self.flipVert)

        self.duration = self._total_frame_count * self._inter_frame_interval
        self.status = NOT_STARTED

        self.filename = filename
        logAttrib(self, log, 'movie', filename)

    def _createAudioStream(self):
        """
        Create the audio stream player for the video using pyvlc.
        """
        if not os.access(self.filename, os.R_OK):
            raise RuntimeError('Error: %s file not readable' % self.filename)
        # '--novideo': VLC handles audio only; video frames come from cv2.
        self._vlc_instance = vlc.Instance('--novideo')
        try:
            self._audio_stream = self._vlc_instance.media_new(self.filename)
        except NameError:
            # Raised when the installed libvlc does not match python-vlc.
            raise ImportError('NameError: %s vs LibVLC %s' % (vlc.__version__,
                                                       vlc.libvlc_get_version()))
        self._audio_stream_player = self._vlc_instance.media_player_new()
        self._audio_stream_player.set_media(self._audio_stream)
        # Event callbacks keep audio time and end-of-stream state in sync.
        self._vlc_event_manager = self._audio_stream_player.event_manager()
        self._vlc_event_manager.event_attach(vlc.EventType.MediaPlayerTimeChanged, self._audio_time_callback, self._audio_stream_player)
        self._vlc_event_manager.event_attach(vlc.EventType.MediaPlayerEndReached, self._audio_end_callback)

    def _flipCallback(self):
        """Record the win.flip() time for the video frame just displayed.

        Registered via win.callOnFlip(); it reaches into the calling
        frame's locals to read the flip timestamp variable `now`.
        """
        import inspect
        caller_locals = inspect.currentframe().f_back.f_locals
        flip_time = caller_locals.get('now')
        if flip_time is None:
            raise RuntimeError("Movie2._flipCallback: Can not access the currect flip time.")
        self._last_video_flip_time = flip_time
        self._next_frame_displayed = True

    def play(self, log=True):
        """Continue a paused movie from current position.
        """
        if self.status != PLAYING:

            if self.status == PAUSED:
                # toggle audio pause
                self._audio_stream_player.pause()
            self.status = PLAYING
            if log and self.autoLog:
                    self.win.logOnFlip("Set %s playing" %(self.name),
                                       level=logging.EXP, obj=self)
            #print '### PLAY ###'
            # Zero the video clock at the first frame's timestamp so frame
            # scheduling is relative to playback start, then prime the
            # texture and register the flip-time callback.
            self._video_track_clock.reset(-self._getNextFrame())
            self._updateFrameTexture()
            self.win.callOnFlip(self._flipCallback)
            #self._player._on_eos=self._onEos

    def pause(self, log=True):
        """Pause the current point in the movie (sound will stop, current frame
        will not advance).  If play() is called again both will restart.

        Completely untested in all regards.
        """
        # Only the audio stream is explicitly paused; video frames stop
        # advancing because draw() returns early unless status == PLAYING.
        if self.status == PLAYING and self._audio_stream_player:
            if self._audio_stream_player.can_pause():
                self.status = PAUSED
                self._audio_stream_player.pause()
                #print '### PAUSE ###'
                if log and self.autoLog:
                    self.win.logOnFlip("Set %s paused" %(self.name), level=logging.EXP, obj=self)
                return True
        if log and self.autoLog:
            self.win.logOnFlip("Failed Set %s paused" %(self.name), level=logging.EXP, obj=self)
        return False

    def stop(self, log=True):
        """Stop the movie (sound stops, the current frame does not
        advance). A stopped movie cannot be restarted; it must be loaded
        again. Use pause() if you may need to restart the movie.
        """
        self.status = STOPPED
        self._unload()
        self._reset()
        if log and self.autoLog:
            self.win.logOnFlip("Set %s stopped" % (self.name),
                               level=logging.EXP, obj=self)


    def seek(self, timestamp, log=True):
        """ Seek to a particular timestamp in the movie.
        Completely untested in all regards.
        Does not currently work.
        """
        if self._audio_stream_player:
            if self.status in [PLAYING, PAUSED] and self._audio_stream_player.is_seekable():
                if self.status == PLAYING:
                    self.pause()
                # Both VLC and the cv2 position property use milliseconds.
                aresult = self._audio_stream_player.set_time(int(timestamp*1000.0))
                vresult = self._video_stream.set(cv2.cv.CV_CAP_PROP_POS_MSEC,
                                        timestamp*1000.0)
                self.play()
                if log:
                    logAttrib(self, log, 'seek', timestamp)

    def setFlipHoriz(self, newVal=True, log=True):
        """Flip the movie horizontally (left-to-right) when True.

        The flip is defined relative to the original movie, not to its
        current state.
        """
        self.flipHoriz = newVal
        logAttrib(self, log, 'flipHoriz')

    def setFlipVert(self, newVal=True, log=True):
        """Flip the movie vertically (top-to-bottom) when True.

        The flip is defined relative to the original movie, not to its
        current state.
        """
        # Bug fix: store the requested value directly. The previous
        # `self.flipVert = not newVal` inverted the request, contradicting
        # the docstring and the behaviour of setFlipHoriz().
        self.flipVert = newVal
        logAttrib(self, log, 'flipVert')

    def setVolume(self, v):
        """
        Set the audio track volume. 0 = mute, 100 = 0 dB. float values
        between 0.0 and 1.0 are also accepted, and scaled to an int between 0
        and 100.
        """
        # A float in [0, 1] is interpreted as a fraction of full volume.
        is_fraction = 0.0 <= v <= 1.0 and isinstance(v, (float,))
        v = int(v * 100) if is_fraction else int(v)
        self.volume = v
        if self._audio_stream_player:
            self._audio_stream_player.audio_set_volume(v)

    def getVolume(self):
        """Return the current audio volume (0-100), refreshed from the VLC
        player when one exists."""
        if self._audio_stream_player:
            self.volume = self._audio_stream_player.audio_get_volume()
        return self.volume

    def getTimeToNextFrameDraw(self):
        """Return seconds remaining until the next video frame should be
        drawn; zero or negative means the frame is already due.

        On any internal error (e.g. called before a movie has been loaded,
        so the timing attributes are still None) the traceback is printed
        and 0.0 is returned so drawing can proceed.
        """
        try:
            # Aim to draw one retrace ahead of the frame's presentation time.
            rt = ((self._next_frame_sec - 1.0 / self._retracerate)
                  - self._video_track_clock.getTime())
            return rt
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; narrowed while keeping best-effort behaviour.
            import traceback
            traceback.print_exc()
            return 0.0

    def shouldDrawVideoFrame(self):
        """True when the next video frame's draw deadline has been reached."""
        time_remaining = self.getTimeToNextFrameDraw()
        return time_remaining <= 0.0

    def getCurrentFrameIndex(self):
        """Return the frame index of the next frame to be displayed, as
        reported by the cv2 capture."""
        return self._next_frame_index

    def getCurrentFrameTime(self):
        """Return the movie time (seconds) of the next frame to be displayed."""
        return self._next_frame_sec

    def getPercentageComplete(self):
        """Return how much of the movie has been played, as reported by the
        cv2 POS_AVI_RATIO property in _getNextFrame."""
        return self._video_perc_done

    def getCurrentFrameDisplayed(self):
        """True once the most recently drawn frame has appeared on screen
        (set by the win.flip() callback, cleared in _getNextFrame)."""
        return self._next_frame_displayed

    def _getNextFrame(self):
        """Advance the video stream one frame without decoding it; return
        the new frame's movie time (seconds), or None at end of stream."""
        # get next frame info ( do not decode frame yet)
        # TODO: Implement frame skipping (multiple grabs) if _next_frame_sec < video_track_clock - framerate
        if self._video_stream.grab():
            # grab() advanced the stream; record the new frame's time and
            # index and remember the previous ones.
            self._prev_frame_index = self._next_frame_index
            self._prev_frame_sec = self._next_frame_sec
            self._next_frame_sec = self._video_stream.get(cv2.cv.CV_CAP_PROP_POS_MSEC)/1000.0
            self._next_frame_index = self._video_stream.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)
            self._video_perc_done = self._video_stream.get(cv2.cv.CV_CAP_PROP_POS_AVI_RATIO)
            self._next_frame_displayed = False
            return self._next_frame_sec
        else:
            # grab() failing is treated as end of stream.
            self.status = FINISHED
            if self._audio_stream_player:
                self._audio_stream_player.stop()
            self._onEos()

    def _updateFrameTexture(self):
        """Decode the frame last grab()bed by _getNextFrame into the
        preallocated numpy buffer and flag the pyglet texture for re-upload."""
        # decode frame into np array and move to opengl tex
        ret, f = self._video_stream.retrieve()
        if ret:
            #self._numpy_frame[:] = f[...,::-1]
            # cv2 delivers BGR; convert to RGB into the preallocated buffer
            # that backs the pyglet image, then mark it dirty.
            numpy.copyto(self._numpy_frame, cv2.cvtColor(f, cv2.COLOR_BGR2RGB))
            self._frame_data_interface.dirty()
        else:
            raise RuntimeError("Could not load video frame data.")

    def draw(self, win=None):
        """Draw the current frame to a particular visual.Window (or to the
        default win for this object if not specified). The current position in
        the movie will be determined automatically.

        This method should be called on every frame that the movie is meant to
        appear.

        Returns the next frame index when a new frame was drawn for the
        first time, None otherwise.
        """
        if self.status != PLAYING:
            return

        return_next_frame_index = False
        if win is None:
            win = self.win
        self._selectWindow(win)

        # Start the audio stream once the video clock passes the configured
        # AV offset, so audio and video stay aligned.
        if not self._audio_stream_started and self._video_track_clock.getTime() >= self._av_stream_time_offset:
            self._startAudio()
        # Previous frame has hit the screen: line up the next one.
        if self._next_frame_displayed:
            self._getNextFrame()
        if self.shouldDrawVideoFrame() and not self._next_frame_displayed:
            self._updateFrameTexture()
            return_next_frame_index = True

        #make sure that textures are on and GL_TEXTURE0 is active
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glColor4f(1, 1, 1, self.opacity)  # sets opacity (1,1,1 = RGB placeholder)
        GL.glPushMatrix()
        self.win.setScale('pix')
        #move to centre of stimulus and rotate
        vertsPix = self.verticesPix
        t=self._frame_texture.tex_coords
        # Interleaved T2F_V3F data: (u, v, x, y, z) per quad corner.
        array = (GL.GLfloat * 32)(
             t[0],  t[1],
             vertsPix[0,0], vertsPix[0,1],    0.,  #vertex
             t[3],  t[4],
             vertsPix[1,0], vertsPix[1,1],    0.,
             t[6],  t[7],
             vertsPix[2,0], vertsPix[2,1],    0.,
             t[9],  t[10],
             vertsPix[3,0], vertsPix[3,1],    0.,
             )
        GL.glPushAttrib(GL.GL_ENABLE_BIT)
        GL.glEnable(self._frame_texture.target)
        GL.glBindTexture(self._frame_texture.target, self._frame_texture.id)
        GL.glPushClientAttrib(GL.GL_CLIENT_VERTEX_ARRAY_BIT)
        #2D texture array, 3D vertex array
        GL.glInterleavedArrays(GL.GL_T2F_V3F, 0, array)
        GL.glDrawArrays(GL.GL_QUADS, 0, 4)
        GL.glPopClientAttrib()
        GL.glPopAttrib()
        GL.glPopMatrix()
        if return_next_frame_index:
            # Record the actual flip time for this new frame.
            self.win.callOnFlip(self._flipCallback)
            return self._next_frame_index

    def setContrast(self):
        """Not yet implemented for MovieStim; intentionally a no-op."""

    def _startAudio(self):
        """Begin playback of the VLC audio stream.

        Records the wall-clock start time so getAudioStreamTime() can
        extrapolate between VLC time callbacks.
        """
        self._audio_stream_started = True
        self._last_audio_callback_time = core.getTime()
        self._audio_stream_player.play()

    def getAudioStreamTime(self):
        """Return the current sec.msec audio track time.

        Computed as the last audio stream time reported by VLC plus the
        wall-clock time elapsed since _audio_time_callback last ran.
        """
        # TODO: This will not be correct if video is paused. Fix.
        since_last_callback = core.getTime() - self._last_audio_callback_time
        return self._last_audio_stream_time + since_last_callback

    def _audio_time_callback(self, event, player):
        """VLC callback, fired every few hundred msec with the current audio
        track time. Used to pace the display of video frames read via cv2.
        """
        now = core.getTime()
        stream_time = player.get_time() / 1000.0
        self._last_audio_callback_time = now
        self._last_audio_stream_time = stream_time
        # On the first callback, anchor the audio clock to the computer clock.
        if self._first_audio_callback_time is None:
            self._first_audio_callback_time = now - stream_time
        # Drift = audio stream time minus elapsed computer time since anchor.
        elapsed_wall = now - self._first_audio_callback_time
        self._audio_computer_time_drift = stream_time - elapsed_wall


    def _audio_end_callback(self, event):
        """VLC callback fired when the audio track ends; currently this also
        finishes the video via the end-of-stream handler."""
        self.status = FINISHED
        self._onEos()

    def _unload(self):
        """Release the cv2 video stream and VLC audio player and drop the
        frame buffers; leaves the stimulus in the FINISHED state."""
        stream = self._video_stream
        if stream:
            stream.release()
        player = self._audio_stream_player
        if player:
            player.stop()
        self._video_stream = None
        self._audio_stream_player = None
        self._frame_data_interface = None
        self._numpy_frame = None
        self.status = FINISHED

    def __del__(self):
        """Best-effort resource cleanup at garbage collection.

        __del__ may run on a partially constructed instance (if __init__
        raised before all attributes were set) or during interpreter
        shutdown, where _unload() can hit AttributeError/NameError.
        Finalizers must never raise, so swallow any error here.
        """
        try:
            self._unload()
        except Exception:
            pass

    def _onEos(self):
        """Handle end-of-stream: restart playback when looping, otherwise
        mark the movie FINISHED. Logs the event on the next flip if
        autoLog is enabled."""
        if not self.loop:
            self.status = FINISHED
        else:
            # Looping: reload from the start and resume playback.
            self.loadMovie(self.filename)
            self.play()
            self.status = PLAYING

        if self.autoLog:
            self.win.logOnFlip("Set %s finished" % (self.name),
                               level=logging.EXP, obj=self)
Example #15 (score: 0)
File: movie3.py  Project: apitiot/psychopy
    def __init__(self,
                 win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0, 0.0),
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0, 1.0, 1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,
                 noAudio=False,
                 vframe_callback=None,
                 fps=None,
                 interpolate=True):
        """Movie stimulus (MovieStim3 backend).

        :Parameters:
            win : the psychopy Window to draw in
            filename : path of the movie file to load
            size : display size in stimulus units; defaults to the movie's
                native width/height when None
            volume : audio volume passed to setVolume()
            noAudio : if True, skip importing psychopy.sound entirely
                (useful to avoid audio-backend dependencies for silent movies)
            loop : whether playback restarts at the end of the movie

        NOTE(review): `vframe_callback` and `fps` are accepted but not used
        in this visible section — presumably consumed by loadMovie or kept
        for API compatibility; confirm against the full class.
        """
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        # autoLog=False here: logging is enabled below once params are set.
        super(MovieStim3, self).__init__(win,
                                         units=units,
                                         name=name,
                                         autoLog=False)

        # Determine the monitor retrace rate, measuring it if the window
        # does not already know it; fall back to 60 Hz with a warning.
        retraceRate = win._monitorFrameRate
        if retraceRate is None:
            retraceRate = win.getActualFrameRate()
        if retraceRate is None:
            logging.warning("FrameRate could not be supplied by psychopy; "
                            "defaulting to 60.0")
            retraceRate = 60.0
        self._retraceInterval = 1.0 / retraceRate
        self.filename = pathToString(filename)
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.depth = depth
        self.opacity = opacity
        self.interpolate = interpolate
        self.noAudio = noAudio
        self._audioStream = None
        self.useTexSubImage2D = True

        if noAudio:  # to avoid dependency problems in silent movies
            self.sound = None
        else:
            from psychopy import sound
            self.sound = sound

        # set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created %s = %s" % (self.name, str(self)))

        self._videoClock = Clock()
        # Load the movie now so self._mov exists before the size logic below.
        self.loadMovie(self.filename)
        self.setVolume(volume)
        self.nDroppedFrames = 0

        # size: default to the movie's native pixel dimensions
        if size is None:
            self.size = numpy.array([self._mov.w, self._mov.h], float)
        else:
            self.size = val2array(size)
        self.ori = ori
        self._updateVertices()