Example #1
def test_write_gif(util, clip_class, opt, loop, with_mask, pixel_format):
    filename = os.path.join(util.TMP_DIR, "moviepy_write_gif.gif")
    if os.path.isfile(filename):
        os.remove(filename)

    fps = 10

    if clip_class == "BitmapClip":
        original_clip = BitmapClip([["R"], ["G"], ["B"]],
                                   fps=fps).with_duration(0.3)
    else:
        original_clip = concatenate_videoclips([
            ColorClip(
                (1, 1),
                color=color,
            ).with_duration(0.1).with_fps(fps)
            for color in [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
        ])
    if with_mask:
        original_clip = original_clip.with_mask(
            ColorClip((1, 1), color=1,
                      is_mask=True).with_fps(fps).with_duration(0.3))

    kwargs = {}
    if pixel_format is not None:
        kwargs["pixel_format"] = pixel_format

    write_gif(
        original_clip,
        filename,
        fps=fps,
        with_mask=with_mask,
        program="ffmpeg",
        logger=None,
        opt=opt,
        loop=loop,
        **kwargs,
    )

    if pixel_format != "invalid":

        final_clip = VideoFileClip(filename)

        r, g, b = final_clip.get_frame(0)[0][0]
        assert r == 252
        assert g == 0
        assert b == 0

        r, g, b = final_clip.get_frame(0.1)[0][0]
        assert r == 0
        assert g == 252
        assert b == 0

        r, g, b = final_clip.get_frame(0.2)[0][0]
        assert r == 0
        assert g == 0
        assert b == 255

        assert final_clip.duration == (loop or 1) * round(
            original_clip.duration, 6)
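
The test above drives the low-level write_gif helper directly. For comparison, a minimal sketch of the public clip.write_gif API it exercises, assuming the moviepy v2-style names the test itself uses (the output filename is hypothetical):

from moviepy import ColorClip, concatenate_videoclips

clip = concatenate_videoclips([
    ColorClip((64, 64), color=c).with_duration(0.1)
    for c in [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
]).with_fps(10)

# loop=0 means loop forever; GIF palette quantization is why the test
# above accepts 252 instead of 255 on the red and green channels.
clip.write_gif("rgb_cycle.gif", fps=10, loop=0)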
Example #2
class VideoStim(Stim, CollectionStimMixin):

    ''' A video. '''

    def __init__(self, filename, onset=None):

        self.clip = VideoFileClip(filename)
        self.fps = self.clip.fps
        self.width = self.clip.w
        self.height = self.clip.h

        self.n_frames = int(self.fps * self.clip.duration)
        duration = self.clip.duration

        super(VideoStim, self).__init__(filename, onset, duration)

    def __iter__(self):
        """ Frame iteration. """
        for i, f in enumerate(self.clip.iter_frames()):
            yield VideoFrameStim(self, i, data=f)

    @property
    def frames(self):
        return [f for f in self.clip.iter_frames()]

    def get_frame(self, index=None, onset=None):
        if index is not None:
            onset = float(index) / self.fps
        else:
            index = int(onset * self.fps)
        return VideoFrameStim(self, index, data=self.clip.get_frame(onset))
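
A hypothetical usage sketch for the VideoStim class above (the filename is an assumption):

stim = VideoStim("example.mp4")
print(stim.fps, stim.width, stim.height, stim.n_frames)

# get_frame accepts either a frame index or an onset in seconds
middle = stim.get_frame(index=stim.n_frames // 2)

# iteration yields one VideoFrameStim per decoded frame, in order
for frame_stim in stim:
    pass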
Example #3
def main(args):

    if args.use:
        frames = []
        for filename in sorted(args.use):
            with open(filename) as f:
                frames.extend(
                    imagehash.hex_to_hash(frame) for frame in json.load(f))
    else:
        clip = VideoFileClip(args.file)
        frames = [
            imagehash.dhash(Image.fromarray(frame))
            for frame in clip.iter_frames()
        ]

    if args.save:
        with open(args.file + '.json', 'w') as f:
            json.dump([str(frame) for frame in frames], f)

    duplicate_lists = defaultdict(list)
    for i, frame in enumerate(frames):
        duplicate_lists[str(frame)].append(i)

    if args.common_frames:
        most_common_frames = sorted(duplicate_lists.values(),
                                    reverse=True,
                                    key=lambda l: len(l))[:args.common_frames]

        clip = VideoFileClip(args.file)
        for i, frame_list in enumerate(most_common_frames):
            frame = Image.fromarray(clip.get_frame(frame_list[0] / clip.fps))
            frame.save(str(i) + '.jpg')

    scores = [len(duplicate_lists[str(frame)]) for frame in frames]
    print(json.dumps(scores))
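
The script relies on two imagehash behaviours: str() and hex_to_hash round-trip a hash for JSON storage, and subtracting two hashes gives their Hamming distance. A minimal sketch (the frame paths are hypothetical):

import imagehash
from PIL import Image

h1 = imagehash.dhash(Image.open("frame_a.jpg"))
h2 = imagehash.dhash(Image.open("frame_b.jpg"))

print(h1 - h2)  # Hamming distance; 0 means the frames hash identically
assert imagehash.hex_to_hash(str(h1)) == h1  # the --save/--use round-trip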
Example #4
def compute_thumbnail(self):
    if self.thumbnail is None:
        _, _, _, _, _, thumbnail = ExifTool.get_metadata(self.media_path)
        if thumbnail is not None:
            # drop the leading "base64:" marker (7 characters) from
            # ExifTool's encoded binary tag before decoding
            self.thumbnail = base64.b64decode(thumbnail[7:])
            thb = Image.open(io.BytesIO(self.thumbnail))
            thb.thumbnail((100, 100), resample=NEAREST)
            bytes_output = io.BytesIO()
            thb.save(bytes_output, format='JPEG')
            self.thumbnail = bytes_output.getvalue()
        elif self.extension in IMAGE_TYPE:
            image = Image.open(self.media_path)
            image.thumbnail((100, 100), resample=NEAREST)
            bytes_output = io.BytesIO()
            if image.mode in ("RGBA", "P"):
                image = image.convert("RGB")
            image.save(bytes_output, format='JPEG')
            self.thumbnail = bytes_output.getvalue()
        else:
            # fall back to grabbing the first video frame as the thumbnail
            clip = None
            try:
                clip = VideoFileClip(self.media_path)
                frame_at_second = 0
                frame = clip.get_frame(frame_at_second)
                new_image = Image.fromarray(frame)
                new_image.thumbnail((100, 100), resample=NEAREST)
                bytes_output = io.BytesIO()
                new_image.save(bytes_output, format='JPEG')
                self.thumbnail = bytes_output.getvalue()
            finally:
                if clip is not None:
                    clip.close()
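
The video branch of the method above, as a self-contained sketch (the input path is hypothetical):

import io

from moviepy.video.io.VideoFileClip import VideoFileClip
from PIL import Image

clip = VideoFileClip("example.mp4")
try:
    # the first frame of the video becomes the thumbnail source
    img = Image.fromarray(clip.get_frame(0))
    img.thumbnail((100, 100), resample=Image.NEAREST)
    buf = io.BytesIO()
    img.save(buf, format="JPEG")
    thumbnail_bytes = buf.getvalue()
finally:
    clip.close()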
Example #5
def index():
    bot = Bot(BOT_TOKEN, request=req)

    if 'filename' not in request.params:
        raise HTTPError(400, 'filename param is missing')
    filename = os.path.basename(request.params['filename'])

    with TemporaryDirectory() as d:
        fpath = os.path.join(d, filename)
        with open(fpath, 'wb') as f:
            body = request.body
            while True:
                chunk = body.read(0xFFFF)
                if not chunk:
                    break
                f.write(chunk)
        if filename.endswith('.jpg'):
            bot.send_photo(CHAT_ID, open(fpath, 'rb'))
        else:
            thumb = os.path.join(d, 'thumb.jpg')
            clip = VideoFileClip(fpath)
            frame = clip.get_frame(t=1)
            im = Image.fromarray(frame)
            im.thumbnail((320, 320), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10
            im.save(thumb)
            bot.send_video(CHAT_ID,
                           open(fpath, 'rb'),
                           clip.duration,
                           width=clip.size[0],
                           height=clip.size[1],
                           supports_streaming=True,
                           thumb=open(thumb, 'rb'))
    return 'OK'
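
A hypothetical client for the endpoint above, streaming the raw request body that the handler reads in 64 KiB chunks (host, port and route are assumptions):

import requests

with open("clip.mp4", "rb") as f:
    r = requests.post("http://localhost:8080/?filename=clip.mp4", data=f)
print(r.text)  # "OK" on success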
Example #6
class VideoStim(Stim, CollectionStimMixin):
    ''' A video.
    Args:
        filename (str): Path to input file, if one exists.
        onset (float): Optional onset of the video file (in seconds) with
            respect to some more general context or timeline the user wishes
            to keep track of.
    '''
    def __init__(self, filename=None, onset=None, url=None):
        if url is not None:
            filename = url
        self.filename = filename
        self._load_clip()
        self.fps = self.clip.fps
        self.width = self.clip.w
        self.height = self.clip.h
        self.n_frames = int(self.fps * self.clip.duration)
        duration = self.clip.duration

        super(VideoStim, self).__init__(filename, onset, duration)

    def _load_clip(self):
        self.clip = VideoFileClip(self.filename)

    def __iter__(self):
        """ Frame iteration. """
        for i, f in enumerate(self.clip.iter_frames()):
            yield VideoFrameStim(self, i, data=f)

    def __getstate__(self):
        d = self.__dict__.copy()
        d['clip'] = None
        return d

    def __setstate__(self, d):
        self.__dict__ = d
        self._load_clip()

    @property
    def frames(self):
        return (f for f in self.clip.iter_frames())

    def get_frame(self, index=None, onset=None):
        if index is not None:
            onset = float(index) / self.fps
        else:
            index = int(onset * self.fps)
        return VideoFrameStim(self, index, data=self.clip.get_frame(onset))

    @contextmanager
    def get_filename(self):
        if self.filename is None or not os.path.exists(self.filename):
            tf = tempfile.mktemp() + '.mp4'
            self.clip.write_videofile(tf)
            yield tf
            os.remove(tf)
        else:
            yield self.filename
Example #7
def test_ffmpeg_resizing():
    """Test FFmpeg resizing, to include downscaling."""
    video_file = "media/big_buck_bunny_432_433.webm"
    target_resolutions = [(128, 128), (128, None), (None, 128), (None, 256)]
    for target_resolution in target_resolutions:
        video = VideoFileClip(video_file, target_resolution=target_resolution)
        frame = video.get_frame(0)
        for (target, observed) in zip(target_resolution, frame.shape):
            if target is not None:
                assert target == observed
        video.close()
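
As the zip against frame.shape shows, target_resolution is ordered (height, width), and a None entry lets FFmpeg pick that dimension to preserve the aspect ratio. A small sketch against the same sample file:

clip = VideoFileClip("media/big_buck_bunny_432_433.webm",
                     target_resolution=(None, 256))
frame = clip.get_frame(0)
print(frame.shape)  # (scaled_height, 256, 3): width pinned, aspect kept
clip.close()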
Example #8
def test_ffmpeg_resizing():
    """Test FFmpeg resizing, to include downscaling."""
    video_file = 'media/big_buck_bunny_432_433.webm'
    target_resolution = (128, 128)
    video = VideoFileClip(video_file, target_resolution=target_resolution)
    frame = video.get_frame(0)
    assert frame.shape[0:2] == target_resolution

    target_resolution = (128, None)
    video = VideoFileClip(video_file, target_resolution=target_resolution)
    frame = video.get_frame(0)
    assert frame.shape[0] == target_resolution[0]

    target_resolution = (None, 128)
    video = VideoFileClip(video_file, target_resolution=target_resolution)
    frame = video.get_frame(0)
    assert frame.shape[1] == target_resolution[1]

    # Test upscaling
    target_resolution = (None, 2048)
    video = VideoFileClip(video_file, target_resolution=target_resolution)
    frame = video.get_frame(0)
    assert frame.shape[1] == target_resolution[1]
Example #10
class VideoStim(Stim, CollectionStimMixin):
    ''' A video. '''
    def __init__(self, filename, onset=None):

        self.filename = filename
        self._load_clip()
        self.fps = self.clip.fps
        self.width = self.clip.w
        self.height = self.clip.h
        self.n_frames = int(self.fps * self.clip.duration)
        duration = self.clip.duration

        super(VideoStim, self).__init__(filename, onset, duration)

    def _load_clip(self):
        self.clip = VideoFileClip(self.filename)

    def __iter__(self):
        """ Frame iteration. """
        for i, f in enumerate(self.clip.iter_frames()):
            yield VideoFrameStim(self, i, data=f)

    def __getstate__(self):
        d = self.__dict__.copy()
        d['clip'] = None
        return d

    def __setstate__(self, d):
        self.__dict__ = d
        self._load_clip()

    @property
    def frames(self):
        return (f for f in self.clip.iter_frames())

    def get_frame(self, index=None, onset=None):
        if index is not None:
            onset = float(index) / self.fps
        else:
            index = int(onset * self.fps)
        return VideoFrameStim(self, index, data=self.clip.get_frame(onset))
Example #11
class MovieStim3(BaseVisualStim, ContainerMixin, TextureMixin):
    """A stimulus class for playing movies.

    This class uses MoviePy and FFMPEG as a backend for loading and decoding
    video data from files.

    Parameters
    ----------
    filename : str
        A string giving the relative or absolute path to the movie.
    flipVert : True or *False*
        If True then the movie will be top-bottom flipped
    flipHoriz : True or *False*
        If True then the movie will be right-left flipped
    volume :
        The nominal level is 100, and 0 is silence.
    loop : bool, optional
        Whether to start the movie over from the beginning if draw is called and
        the movie is done.

    Examples
    --------
    See Movie2Stim.py for demo.

    """
    def __init__(self,
                 win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0, 0.0),
                 anchor="center",
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0, 1.0, 1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,
                 noAudio=False,
                 vframe_callback=None,
                 fps=None,
                 interpolate=True):
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        super(MovieStim3, self).__init__(win,
                                         units=units,
                                         name=name,
                                         autoLog=False)

        retraceRate = win._monitorFrameRate
        if retraceRate is None:
            retraceRate = win.getActualFrameRate()
        if retraceRate is None:
            logging.warning("FrameRate could not be supplied by psychopy; "
                            "defaulting to 60.0")
            retraceRate = 60.0
        self._retraceInterval = 1.0 / retraceRate
        self.filename = pathToString(filename)
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.anchor = anchor
        self.depth = depth
        self.opacity = opacity
        self.interpolate = interpolate
        self.noAudio = noAudio
        self._audioStream = None
        self.useTexSubImage2D = True

        if noAudio:  # to avoid dependency problems in silent movies
            self.sound = None
        else:
            from psychopy import sound
            self.sound = sound

        # set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created %s = %s" % (self.name, str(self)))

        self._videoClock = Clock()
        self.loadMovie(self.filename)
        self.setVolume(volume)
        self.nDroppedFrames = 0

        # size
        if size is None:
            self.size = numpy.array([self._mov.w, self._mov.h], float)
        else:
            self.size = val2array(size)
        self.ori = ori
        self._updateVertices()

    @property
    def interpolate(self):
        """Enable linear interpolation (`bool').

        If `True` linear filtering will be applied to the video making the image
        less pixelated if scaled.
        """
        return self._interpolate

    @interpolate.setter
    def interpolate(self, value):
        self._interpolate = value
        self._texFilterNeedsUpdate = True

    @property
    def duration(self):
        """Duration of the video clip in seconds (`float`). Only valid after
        loading a clip, always returning `0.0` if not.
        """
        if self._mov is None:
            return 0.0

        return self._mov.duration

    @property
    def frameInterval(self):
        """Time in seconds each frame is to be presented on screen (`float`).
        Value is `0.0` if no movie is loaded.
        """
        if self._mov is None:
            return 0.0

        return 1. / self._mov.fps

    def reset(self):
        self._numpyFrame = None
        self._nextFrameT = 0.0
        self._texID = None
        self.status = NOT_STARTED
        self.nDroppedFrames = 0

    def setMovie(self, filename, log=True):
        """See `~MovieStim.loadMovie` (the functions are identical).

        This form is provided for syntactic consistency with other visual
        stimuli.

        Parameters
        ----------
        filename : str
            The name of the file, including path if necessary.
        log : bool
            Log this event.

        """
        self.loadMovie(filename, log=log)

    def loadMovie(self, filename, log=True):
        """Load a movie from file.

        After the file is loaded `MovieStim.duration` is updated with the movie
        duration (in seconds).

        Parameters
        ----------
        filename : str
            The name of the file, including path if necessary.
        log : bool
            Log this event.

        """
        filename = pathToString(filename)
        self.reset()  # set status and timestamps etc

        # Create Video Stream stuff
        if os.path.isfile(filename):
            self._mov = VideoFileClip(filename, audio=(1 - self.noAudio))
            if (not self.noAudio) and (self._mov.audio is not None):
                sound = self.sound
                try:
                    self._audioStream = sound.Sound(
                        self._mov.audio.to_soundarray(),
                        sampleRate=self._mov.audio.fps)
                except:
                    # JWE added this as a patch for a moviepy oddity where the
                    # duration is inflated in the saved file causes the
                    # audioclip to be the wrong length, so round down and it
                    # should work
                    jwe_tmp = self._mov.subclip(0, round(self._mov.duration))
                    self._audioStream = sound.Sound(
                        jwe_tmp.audio.to_soundarray(),
                        sampleRate=self._mov.audio.fps)
                    del (jwe_tmp)
            else:  # make sure we set to None (in case prev clip had audio)
                self._audioStream = None
        else:
            raise IOError("Movie file '%s' was not found" % filename)
        # mov has attributes:
        # size, duration, fps
        # mov.audio has attributes
        # duration, fps (aka sampleRate), to_soundarray()
        self._frameInterval = 1.0 / self._mov.fps
        # self.duration = self._mov.duration
        self.filename = filename
        self._updateFrameTexture()
        logAttrib(self, log, 'movie', filename)

    def play(self, log=True):
        """Continue a paused movie from current position.
        """
        status = self.status
        if status != PLAYING:
            self.status = PLAYING  # moved this to get better audio behavior - JK
            # Added extra check to prevent audio doubling - JK
            if self._audioStream is not None and self._audioStream.status is not PLAYING:
                self._audioStream.play()
            if status == PAUSED:
                if self.getCurrentFrameTime(
                ) < 0:  # Check for valid timestamp, correct if needed -JK
                    self._audioSeek(0)
                else:
                    self._audioSeek(self.getCurrentFrameTime())
            self._videoClock.reset(-self.getCurrentFrameTime())
            if log and self.autoLog:
                self.win.logOnFlip("Set %s playing" % (self.name),
                                   level=logging.EXP,
                                   obj=self)
            self._updateFrameTexture()

    def pause(self, log=True):
        """
        Pause the current point in the movie (sound will stop, current frame
        will not advance).  If play() is called again both will restart.
        """
        if self.status == PLAYING:
            self.status = PAUSED
            if self._audioStream:
                if prefs.hardware['audioLib'] in ['sounddevice', 'PTB']:
                    self._audioStream.pause(
                    )  # sounddevice and PTB have a "pause" function -JK
                else:
                    self._audioStream.stop()
            if log and self.autoLog:
                self.win.logOnFlip("Set %s paused" % (self.name),
                                   level=logging.EXP,
                                   obj=self)
            return True
        if log and self.autoLog:
            self.win.logOnFlip("Failed Set %s paused" % (self.name),
                               level=logging.EXP,
                               obj=self)
        return False

    def stop(self, log=True):
        """Stop the current point in the movie (sound will stop, current frame
        will not advance). Once stopped the movie cannot be restarted -
        it must be loaded again. Use pause() if you may need to restart
        the movie.
        """
        if self.status != STOPPED:
            self._unload()
            self.reset()
            self.status = STOPPED  # set status to STOPPED after _unload
            if log and self.autoLog:
                self.win.logOnFlip("Set %s stopped" % (self.name),
                                   level=logging.EXP,
                                   obj=self)

    def setVolume(self, volume):
        pass  # to do

    def setFlipHoriz(self, newVal=True, log=True):
        """If set to True then the movie will be flipped horizontally
        (left-to-right). Note that this is relative to the original,
        not relative to the current state.
        """
        self.flipHoriz = newVal
        logAttrib(self, log, 'flipHoriz')
        self._needVertexUpdate = True

    def setFlipVert(self, newVal=True, log=True):
        """If set to True then the movie will be flipped vertically
        (top-to-bottom). Note that this is relative to the original,
        not relative to the current state.
        """
        self.flipVert = newVal
        logAttrib(self, log, 'flipVert')
        self._needVertexUpdate = True

    def getFPS(self):
        """Get the movie frames per second.

        Returns
        -------
        float
            Frames per second.

        """
        return float(self._mov.fps)

    def getCurrentFrameTime(self):
        """Get the time that the movie file specified the current
        video frame as having.
        """
        return self._nextFrameT - self.frameInterval

    def _updateFrameTexture(self):
        """Update texture pixel store to contain the present frame. Decoded
        frame image samples are streamed to the texture buffer.

        """
        if self._nextFrameT is None or self._nextFrameT < 0:
            # movie has no current position (or invalid position -JK),
            # need to reset the clock to zero in order to have the
            # timing logic work otherwise the video stream would skip
            # frames until the time since creating the movie object has passed
            self._videoClock.reset()
            self._nextFrameT = 0.0

        # only advance if next frame (half of next retrace rate)
        if self._nextFrameT > self.duration:
            self._onEos()
        elif self._numpyFrame is not None:
            if self._nextFrameT > (self._videoClock.getTime() -
                                   self._retraceInterval / 2.0):
                return None

        while self._nextFrameT <= (self._videoClock.getTime() -
                                   self._frameInterval * 2):
            self.nDroppedFrames += 1
            if self.nDroppedFrames <= reportNDroppedFrames:
                logging.warning(
                    "{}: Video catchup needed, advancing self._nextFrameT from"
                    " {} to {}".format(self._videoClock.getTime(),
                                       self._nextFrameT,
                                       self._nextFrameT + self._frameInterval))
            if self.nDroppedFrames == reportNDroppedFrames:
                logging.warning(
                    "Max reportNDroppedFrames reached, will not log any more dropped frames"
                )

            self._nextFrameT += self._frameInterval

        try:
            self._numpyFrame = self._mov.get_frame(self._nextFrameT)
        except OSError:
            if self.autoLog:
                logging.warning(
                    "Frame {} not found, moving one frame and trying again".
                    format(self._nextFrameT),
                    obj=self)
            self._nextFrameT += self._frameInterval
            self._updateFrameTexture()
        useSubTex = self.useTexSubImage2D
        if self._texID is None:
            self._texID = GL.GLuint()
            GL.glGenTextures(1, ctypes.byref(self._texID))
            useSubTex = False

        GL.glActiveTexture(GL.GL_TEXTURE0)
        # bind that name to the target
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        # bind the texture in openGL
        GL.glEnable(GL.GL_TEXTURE_2D)
        # makes the texture map wrap (this is actually default anyway)
        GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP)
        GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP)
        # data from PIL/numpy is packed, but default for GL is 4 bytes
        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
        # important if using bits++ because GL_LINEAR
        # sometimes extrapolates to pixel vals outside range
        if self.interpolate:
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER,
                               GL.GL_LINEAR)
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER,
                               GL.GL_LINEAR)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1],
                                self._numpyFrame.shape[0], 0, GL.GL_RGB,
                                GL.GL_UNSIGNED_BYTE, self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                                   self._numpyFrame.shape[1],
                                   self._numpyFrame.shape[0], GL.GL_RGB,
                                   GL.GL_UNSIGNED_BYTE,
                                   self._numpyFrame.ctypes)
        else:
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER,
                               GL.GL_NEAREST)
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER,
                               GL.GL_NEAREST)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1],
                                self._numpyFrame.shape[0], 0, GL.GL_BGR,
                                GL.GL_UNSIGNED_BYTE, self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                                   self._numpyFrame.shape[1],
                                   self._numpyFrame.shape[0], GL.GL_BGR,
                                   GL.GL_UNSIGNED_BYTE,
                                   self._numpyFrame.ctypes)
        GL.glTexEnvi(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE,
                     GL.GL_MODULATE)  # ?? do we need this - think not!

        if self.status == PLAYING:
            self._nextFrameT += self._frameInterval

    def draw(self, win=None):
        """Draw the current frame to a particular visual.Window (or to the
        default win for this object if not specified). The current position in
        the movie will be determined automatically.

        This method should be called on every frame that the movie is meant to
        appear.

        Parameters
        ----------
        win : :class:`~psychopy.visual.Window` or None
            Window the video is being drawn to. If `None`, the window specified
            by property `win` will be used. Default is `None`.

        """
        if (self.status == NOT_STARTED
                or (self.status == FINISHED and self.loop)):
            self.play()
        elif self.status == FINISHED and not self.loop:
            return
        if win is None:
            win = self.win
        self._selectWindow(win)
        self._updateFrameTexture()  # will check if it's needed

        # scale the drawing frame and get to centre of field
        GL.glPushMatrix()  # push before drawing, pop after
        # push the data for client attributes
        GL.glPushClientAttrib(GL.GL_CLIENT_ALL_ATTRIB_BITS)

        self.win.setScale('pix')
        # move to centre of stimulus and rotate
        vertsPix = self.verticesPix

        # bind textures
        GL.glActiveTexture(GL.GL_TEXTURE1)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        GL.glEnable(GL.GL_TEXTURE_2D)

        # sets opacity (1,1,1 = RGB placeholder)
        GL.glColor4f(1, 1, 1, self.opacity)

        array = (GL.GLfloat * 32)(
            1,
            1,  # texture coords
            vertsPix[0, 0],
            vertsPix[0, 1],
            0.,  # vertex
            0,
            1,
            vertsPix[1, 0],
            vertsPix[1, 1],
            0.,
            0,
            0,
            vertsPix[2, 0],
            vertsPix[2, 1],
            0.,
            1,
            0,
            vertsPix[3, 0],
            vertsPix[3, 1],
            0.,
        )

        # 2D texture array, 3D vertex array
        GL.glInterleavedArrays(GL.GL_T2F_V3F, 0, array)
        GL.glDrawArrays(GL.GL_QUADS, 0, 4)
        GL.glPopClientAttrib()
        GL.glPopMatrix()
        # unbind the textures
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        GL.glEnable(GL.GL_TEXTURE_2D)  # implicitly disables 1D

    def seek(self, t):
        """Go to a specific point in time for both the audio and video streams
        """
        # video is easy: set both times to zero and update the frame texture
        self._nextFrameT = t
        self._videoClock.reset(t)
        self._audioSeek(t)

    def _audioSeek(self, t):
        sound = self.sound
        if self._audioStream is None:
            return  # do nothing
        # check if sounddevice or PTB is being used. If so we can use seek. If not we
        # have to reload the audio stream and begin at the new loc
        if prefs.hardware['audioLib'] in ['sounddevice', 'PTB']:
            self._audioStream.seek(t)
        else:
            self._audioStream.stop()
            sndArray = self._mov.audio.to_soundarray()
            startIndex = int(t * self._mov.audio.fps)
            self._audioStream = sound.Sound(sndArray[startIndex:, :],
                                            sampleRate=self._mov.audio.fps)
            if self.status != PAUSED:  # Allows for seeking while paused - JK
                self._audioStream.play()

    def _getAudioStreamTime(self):
        return self._audio_stream_clock.getTime()

    def _unload(self):
        # remove textures from graphics card to prevent crash
        self.clearTextures()
        if self._mov is not None:
            self._mov.close()
        self._mov = None
        self._numpyFrame = None
        if self._audioStream is not None:
            self._audioStream.stop()
        self._audioStream = None
        self.status = FINISHED

    def _onEos(self):
        if self.loop:
            self.seek(0.0)
        else:
            self.status = FINISHED
            self.stop()

        if self.autoLog:
            self.win.logOnFlip("Set %s finished" % self.name,
                               level=logging.EXP,
                               obj=self)

    def __del__(self):
        try:
            self._unload()
        except (ImportError, ModuleNotFoundError, TypeError):
            pass  # has probably been garbage-collected already

    def setAutoDraw(self, val, log=None):
        """Add or remove a stimulus from the list of stimuli that will be
        automatically drawn on each flip.

        Parameters
        ----------
        val : bool
            True to add the stimulus to the draw list, False to remove it.

        """
        if val:
            self.play(log=False)  # set to play in case stopped
        else:
            self.pause(log=False)
        # add to drawing list and update status
        setAttribute(self, 'autoDraw', val, log)
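
A hypothetical PsychoPy draw loop for the MovieStim3 class above (window size and filename are assumptions; noAudio avoids the sound backend):

from psychopy import core, visual
from psychopy.constants import FINISHED

win = visual.Window((800, 600))
mov = MovieStim3(win, filename="example.mp4", noAudio=True)
while mov.status != FINISHED:
    mov.draw()   # decodes and uploads the next frame texture as needed
    win.flip()
win.close()
core.quit()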
Example #12
import glob
import numpy as np

from moviepy.video.io.VideoFileClip import VideoFileClip

from advanced_lane_lines import camera_calibration
from advanced_lane_lines.pipeline import Pipeline

images = glob.glob('./camera_cal/calibration*.jpg')
mtx, dist, img = camera_calibration(images)

#################
# Load video parameter
video_name = './test_videos/project_video.mp4'
video_clip = VideoFileClip(video_name)

pipeline = Pipeline(mtx, dist)
pipeline.warp_coordinates(video_clip.get_frame(0))
video_result = video_clip.fl_image(pipeline.pipeline)
video_result.write_videofile('./output_videos/result.mp4')

data = np.array(pipeline.lane.list_curvatures)
min_values = data.min(axis=0, initial=0)
max_values = data.max(axis=0, initial=0)
print(min_values)
print(max_values)
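
fl_image, used above to run the lane-finding pipeline, maps a frame-level function over every frame of the clip. A minimal sketch with a trivial grayscale transform (the paths are hypothetical):

import numpy as np
from moviepy.video.io.VideoFileClip import VideoFileClip

def to_gray(frame):
    # collapse RGB to a gray value, then back to 3 channels
    g = frame.mean(axis=2).astype("uint8")
    return np.dstack([g, g, g])

clip = VideoFileClip("input.mp4")
clip.fl_image(to_gray).write_videofile("gray.mp4")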
Example #13
class VideoFrameCollectionStim(Stim):

    ''' A collection of video frames.

    Args:
        filename (str): Path to input file, if one exists.
        frame_index (list): List of indices of frames retained from the
            original video. Uses every frame by default
            (i.e. for normal VideoStims).
        onset (float): Optional onset of the video file (in seconds) with
            respect to some more general context or timeline the user wishes
            to keep track of.
        url (str): Optional url source for a video.
        clip (VideoFileClip): Optional moviepy VideoFileClip to initialize
            from.
    '''

    _default_file_extension = '.mp4'

    def __init__(self, filename=None, frame_index=None, onset=None, url=None,
                 clip=None):
        if url is not None:
            filename = url
        self.filename = filename
        if clip:
            self.clip = clip
        else:
            self._load_clip()
        self.fps = self.clip.fps
        self.width = self.clip.w
        self.height = self.clip.h
        if frame_index:
            self.frame_index = frame_index
        else:
            self.frame_index = range(int(ceil(self.fps * self.clip.duration)))
        duration = self.clip.duration
        self.n_frames = len(self.frame_index)
        super(VideoFrameCollectionStim, self).__init__(filename,
                                                       onset=onset,
                                                       duration=duration,
                                                       url=url)

    def _load_clip(self):
        audio_fps = AudioStim.get_sampling_rate(self.filename)
        self.clip = VideoFileClip(self.filename, audio_fps=audio_fps)

    def __iter__(self):
        """ Frame iteration. """
        for i, f in enumerate(self.frame_index):
            yield self.get_frame(i)

    @property
    def frames(self):
        return (f for f in self)

    def get_frame(self, index):
        ''' Get video frame at the specified index.

        Args:
            index (int): Positional index of the desired frame.
        '''

        frame_num = self.frame_index[index]
        onset = float(frame_num) / self.fps

        if index < self.n_frames - 1:
            next_frame_num = self.frame_index[index + 1]
            end = float(next_frame_num) / self.fps
        else:
            end = float(self.duration)

        duration = end - onset if end > onset else 0.0

        return VideoFrameStim(self, frame_num,
                              data=self.clip.get_frame(onset),
                              duration=duration)

    def __getstate__(self):
        d = self.__dict__.copy()
        d['clip'] = None
        return d

    def __setstate__(self, d):
        self.__dict__ = d
        self._load_clip()

    def save(self, path):
        ''' Save source video to file.

        Args:
            path (str): Filename to save to.

        Notes: Saves entire source video to file, not just currently selected
            frames.
        '''
        # IMPORTANT WARNING: saves entire source video
        self.clip.write_videofile(path, audio_fps=self.clip.audio.fps)
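
A hypothetical use of VideoFrameCollectionStim above: keep every 10th frame of a (hypothetical) file and address frames by position within the retained subset:

stim = VideoFrameCollectionStim("example.mp4",
                                frame_index=list(range(0, 300, 10)))
print(stim.n_frames)       # == len(frame_index), not the full frame count
first = stim.get_frame(0)  # VideoFrameStim for source frame 0
for frame_stim in stim:    # iterates only the retained frames
    pass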
Example #14
    def from_movie(filename, foldername=None, tt=None, fps=None,
                   crop=(0, 0), thumbnails_width=120, sig_dim=(2, 2)):
        """
        Extracts frames from a movie and turns them into thumbnails.

        Parameters
        ==========

        filename
          Name of the legally obtained video file (batman.avi, superman.mp4,
          etc.)

        foldername
          The extracted frames and more infos will be stored in that directory.

        tt
          An array of times [t1, t2, ...] at which to extract the frames.
          Optional if ``fps`` is provided.

        crop
          Number of seconds to crop at the beginning and the end of the video,
          to avoid opening and end credits.
          (seconds_cropped_at_the_beginning, seconds_cropped_at_the_end)

        thumbnails_width
          Width in pixels of the thumbnails obtained by resizing the frames of
          the movie.

        sig_dim
          Number of pixels to consider when reducing the frames and thumbnails
          to simple (representative) signatures.
          sig_dim=(3,2) means 3x2 (WxH) pixels.
        """
        if foldername is None:
            name, ext = os.path.splitext(filename)
            foldername = name

        if not os.path.exists(foldername):
            os.mkdir(foldername)

        clip = VideoFileClip(filename).fx(resize, width=thumbnails_width)

        if tt is None:
            tt = np.arange(0, clip.duration, 1.0 / fps)
            t1, t2 = crop[0], clip.duration - crop[1]
            tt = tt[(tt >= t1) * (tt <= t2)]

        signatures = []

        result = np.zeros((clip.h*len(tt), clip.w, 3))

        for i, t in vtqdm(enumerate(sorted(tt)), total=len(tt)):
            frame = clip.get_frame(t)
            result[i*clip.h:(i+1)*clip.h] = frame
            signatures.append(colors_signature(frame, sig_dim[0], sig_dim[1]))

        target = os.path.join(foldername, "all_frames.png")
        ffmpeg_write_image(target, result)

        for (obj, name) in [(signatures, 'signatures'), (tt, 'times'),
                            (sig_dim, 'signatures_dim')]:
            target = os.path.join(foldername, name+'.txt')
            np.savetxt(target, np.array(obj))

        return MovieFrames(foldername)
Example #15
def test_ffmpeg_write_video(
    util,
    codec,
    is_valid_codec,
    ext,
    write_logfile,
    with_mask,
    bitrate,
    threads,
):
    filename = os.path.join(util.TMP_DIR, f"moviepy_ffmpeg_write_video{ext}")
    if os.path.isfile(filename):
        try:
            os.remove(filename)
        except PermissionError:
            pass

    logfile_name = filename + ".log"
    if os.path.isfile(logfile_name):
        os.remove(logfile_name)

    clip = BitmapClip([["R"], ["G"], ["B"]], fps=10).with_duration(0.3)
    if with_mask:
        clip = clip.with_mask(
            BitmapClip([["W"], ["O"], ["O"]], fps=10,
                       is_mask=True).with_duration(0.3))

    kwargs = dict(
        logger=None,
        write_logfile=write_logfile,
        with_mask=with_mask,
    )
    if codec is not None:
        kwargs["codec"] = codec
    if bitrate is not None:
        kwargs["bitrate"] = bitrate
    if threads is not None:
        kwargs["threads"] = threads

    ffmpeg_write_video(clip, filename, 10, **kwargs)

    if is_valid_codec:
        assert os.path.isfile(filename)

        final_clip = VideoFileClip(filename)

        r, g, b = final_clip.get_frame(0)[0][0]
        assert r == 254
        assert g == 0
        assert b == 0

        r, g, b = final_clip.get_frame(0.1)[0][0]
        assert r == (0 if not with_mask else 1)
        assert g == (255 if not with_mask else 1)
        assert b == 1

        r, g, b = final_clip.get_frame(0.2)[0][0]
        assert r == 0
        assert g == 0
        assert b == (255 if not with_mask else 0)

    if write_logfile:
        assert os.path.isfile(logfile_name)
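
BitmapClip, used by this test and Example #1, builds frames from letter grids via a default color map ("R"/"G"/"B" are pure red/green/blue, "W" white, "O" black, which is why the white/black mask above darkens the frames after t=0). A minimal sketch, assuming the moviepy v2 import path:

from moviepy import BitmapClip

clip = BitmapClip([["RG", "BO"]], fps=1)  # a single 2x2 frame
print(clip.get_frame(0)[0][0])            # [255 0 0] for the "R" pixel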
Example #16
class MovieStim3(BaseVisualStim, ContainerMixin):
    """A stimulus class for playing movies (mpeg, avi, etc...) in PsychoPy
    that does not require avbin. It uses MoviePy and FFMPEG as the backend
    for loading and decoding video data from files.

    **Example**::

        See Movie2Stim.py for demo.
    """

    def __init__(self, win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0, 0.0),
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0, 1.0, 1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,
                 noAudio=False,
                 vframe_callback=None,
                 fps=None,
                 interpolate=True):
        """
        :Parameters:

            filename :
                a string giving the relative or absolute path to the movie.
            flipVert : True or *False*
                If True then the movie will be top-bottom flipped
            flipHoriz : True or *False*
                If True then the movie will be right-left flipped
            volume :
                The nominal level is 100, and 0 is silence.
            loop : bool, optional
                Whether to start the movie over from the beginning if draw is
                called and the movie is done.

        """
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        super(MovieStim3, self).__init__(win, units=units, name=name,
                                         autoLog=False)

        retraceRate = win._monitorFrameRate
        if retraceRate is None:
            retraceRate = win.getActualFrameRate()
        if retraceRate is None:
            logging.warning("FrameRate could not be supplied by psychopy; "
                            "defaulting to 60.0")
            retraceRate = 60.0
        self._retraceInterval = old_div(1.0, retraceRate)
        self.filename = filename
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.depth = depth
        self.opacity = float(opacity)
        self.interpolate = interpolate
        self.noAudio = noAudio
        self._audioStream = None
        self.useTexSubImage2D = True

        if noAudio:  # to avoid dependency problems in silent movies
            self.sound = None
        else:
            from psychopy import sound
            self.sound = sound

        self._videoClock = Clock()
        self.loadMovie(self.filename)
        self.setVolume(volume)
        self.nDroppedFrames = 0

        # size
        if size is None:
            self.size = numpy.array([self._mov.w, self._mov.h],
                                    float)
        else:
            self.size = val2array(size)
        self.ori = ori
        self._updateVertices()
        # set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created %s = %s" % (self.name, str(self)))

    def reset(self):
        self._numpyFrame = None
        self._nextFrameT = None
        self._texID = None
        self.status = NOT_STARTED

    def setMovie(self, filename, log=True):
        """See `~MovieStim.loadMovie` (the functions are identical).

        This form is provided for syntactic consistency with other visual
        stimuli.
        """
        self.loadMovie(filename, log=log)

    def loadMovie(self, filename, log=True):
        """Load a movie from file

        :Parameters:

            filename: string
                The name of the file, including path if necessary

        After the file is loaded MovieStim.duration is updated with the movie
        duration (in seconds).
        """
        self.reset()  # set status and timestamps etc

        # Create Video Stream stuff
        if os.path.isfile(filename):
            self._mov = VideoFileClip(filename, audio=(1 - self.noAudio))
            if (not self.noAudio) and (self._mov.audio is not None):
                sound = self.sound
                try:
                    self._audioStream = sound.Sound(
                        self._mov.audio.to_soundarray(),
                        sampleRate=self._mov.audio.fps)
                except:
                    # JWE added this as a patch for a moviepy oddity where the
                    # duration is inflated in the saved file causes the
                    # audioclip to be the wrong length, so round down and it
                    # should work
                    jwe_tmp = self._mov.subclip(0, round(self._mov.duration))
                    self._audioStream = sound.Sound(
                        jwe_tmp.audio.to_soundarray(),
                        sampleRate=self._mov.audio.fps)
                    del(jwe_tmp)
            else:  # make sure we set to None (in case prev clip had audio)
                self._audioStream = None
        else:
            raise IOError("Movie file '%s' was not found" % filename)
        # mov has attributes: size, duration, fps
        # mov.audio has attributes: duration, fps (aka sampleRate),
        # to_soundarray()
        self._frameInterval = old_div(1.0, self._mov.fps)
        self.duration = self._mov.duration
        self.filename = filename
        self._updateFrameTexture()
        logAttrib(self, log, 'movie', filename)

    def play(self, log=True):
        """Continue a paused movie from current position.
        """
        status = self.status
        if status != PLAYING:
            if self._audioStream is not None:
                self._audioStream.play()
            if status == PAUSED:
                if self.getCurrentFrameTime() < 0:
                    self._audioSeek(0)
                else:
                    self._audioSeek(self.getCurrentFrameTime())
            self.status = PLAYING
            self._videoClock.reset(-self.getCurrentFrameTime())

            if log and self.autoLog:
                self.win.logOnFlip("Set %s playing" % (self.name),
                                   level=logging.EXP, obj=self)
            self._updateFrameTexture()

    def pause(self, log=True):
        """
        Pause the current point in the movie (sound will stop, current frame
        will not advance).  If play() is called again both will restart.
        """
        if self.status == PLAYING:
            self.status = PAUSED
            if self._audioStream:
                self._audioStream.stop()
            if log and self.autoLog:
                self.win.logOnFlip("Set %s paused" %
                                   (self.name), level=logging.EXP, obj=self)
            return True
        if log and self.autoLog:
            self.win.logOnFlip("Failed Set %s paused" %
                               (self.name), level=logging.EXP, obj=self)
        return False

    def stop(self, log=True):
        """Stop the current point in the movie (sound will stop, current frame
        will not advance). Once stopped the movie cannot be restarted -
        it must be loaded again. Use pause() if you may need to restart
        the movie.
        """
        if self.status != STOPPED:
            self.status = STOPPED
            self._unload()
            self.reset()
            if log and self.autoLog:
                self.win.logOnFlip("Set %s stopped" % (self.name),
                                   level=logging.EXP, obj=self)

    def setVolume(self, volume):
        pass  # to do

    def setFlipHoriz(self, newVal=True, log=True):
        """If set to True then the movie will be flipped horizontally
        (left-to-right). Note that this is relative to the original,
        not relative to the current state.
        """
        self.flipHoriz = newVal
        logAttrib(self, log, 'flipHoriz')
        self._needVertexUpdate = True

    def setFlipVert(self, newVal=True, log=True):
        """If set to True then the movie will be flipped vertically
        (top-to-bottom). Note that this is relative to the original,
        not relative to the current state.
        """
        self.flipVert = newVal
        logAttrib(self, log, 'flipVert')
        self._needVertexUpdate = True

    def getFPS(self):
        """
        Returns the movie frames per second playback speed.
        """
        return self._mov.fps

    def getCurrentFrameTime(self):
        """Get the time that the movie file specified the current
        video frame as having.
        """
        return self._nextFrameT - self._frameInterval

    def _updateFrameTexture(self):
        if self._nextFrameT is None:
            # movie has no current position, need to reset the clock
            # to zero in order to have the timing logic work
            # otherwise the video stream would skip frames until the
            # time since creating the movie object has passed
            self._videoClock.reset()
            self._nextFrameT = 0

        # only advance if next frame (half of next retrace rate)
        if self._nextFrameT > self.duration:
            self._onEos()
        elif self._numpyFrame is not None:
            if self._nextFrameT > (self._videoClock.getTime() -
                                   old_div(self._retraceInterval, 2.0)):
                return None
        self._numpyFrame = self._mov.get_frame(self._nextFrameT)
        useSubTex = self.useTexSubImage2D
        if self._texID is None:
            self._texID = GL.GLuint()
            GL.glGenTextures(1, ctypes.byref(self._texID))
            useSubTex = False

        # bind the texture in openGL
        GL.glEnable(GL.GL_TEXTURE_2D)
        # bind that name to the target
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        # makes the texture map wrap (this is actually default anyway)
        GL.glTexParameteri(
            GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT)
        # data from PIL/numpy is packed, but default for GL is 4 bytes
        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
        # important if using bits++ because GL_LINEAR
        # sometimes extrapolates to pixel vals outside range
        if self.interpolate:
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1],
                                self._numpyFrame.shape[0], 0,
                                GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
                                self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                                   self._numpyFrame.shape[1],
                                   self._numpyFrame.shape[0],
                                   GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
                                   self._numpyFrame.ctypes)
        else:
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1],
                                self._numpyFrame.shape[0], 0,
                                GL.GL_BGR, GL.GL_UNSIGNED_BYTE,
                                self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                                   self._numpyFrame.shape[1],
                                   self._numpyFrame.shape[0],
                                   GL.GL_BGR, GL.GL_UNSIGNED_BYTE,
                                   self._numpyFrame.ctypes)
        GL.glTexEnvi(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE,
                     GL.GL_MODULATE)  # ?? do we need this - think not!

        if not self.status == PAUSED:
            self._nextFrameT += self._frameInterval

    def draw(self, win=None):
        """Draw the current frame to a particular visual.Window (or to the
        default win for this object if not specified). The current
        position in the movie will be determined automatically.

        This method should be called on every frame that the movie is
        meant to appear.
        """

        if (self.status == NOT_STARTED or
                (self.status == FINISHED and self.loop)):
            self.play()
        elif self.status == FINISHED and not self.loop:
            return
        if win is None:
            win = self.win
        self._selectWindow(win)
        self._updateFrameTexture()  # will check if it's needed

        # scale the drawing frame and get to centre of field
        GL.glPushMatrix()  # push before drawing, pop after
        # push the data for client attributes
        GL.glPushClientAttrib(GL.GL_CLIENT_ALL_ATTRIB_BITS)

        self.win.setScale('pix')
        # move to centre of stimulus and rotate
        vertsPix = self.verticesPix

        # bind textures
        GL.glActiveTexture(GL.GL_TEXTURE1)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        GL.glEnable(GL.GL_TEXTURE_2D)

        # sets opacity (1,1,1 = RGB placeholder)
        GL.glColor4f(1, 1, 1, self.opacity)

        array = (GL.GLfloat * 32)(
            1, 1,  # texture coords
            vertsPix[0, 0], vertsPix[0, 1], 0.,  # vertex
            0, 1,
            vertsPix[1, 0], vertsPix[1, 1], 0.,
            0, 0,
            vertsPix[2, 0], vertsPix[2, 1], 0.,
            1, 0,
            vertsPix[3, 0], vertsPix[3, 1], 0.,
        )

        # 2D texture array, 3D vertex array
        GL.glInterleavedArrays(GL.GL_T2F_V3F, 0, array)
        GL.glDrawArrays(GL.GL_QUADS, 0, 4)
        GL.glPopClientAttrib()
        GL.glPopAttrib()
        GL.glPopMatrix()
        # unbind the textures
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        GL.glEnable(GL.GL_TEXTURE_2D)  # implicitly disables 1D

    def seek(self, t):
        """Go to a specific point in time for both the audio and video streams
        """
        # video is easy: set both times to zero and update the frame texture
        self._nextFrameT = t
        self._videoClock.reset(t)
        self._audioSeek(t)

    def _audioSeek(self, t):
        sound = self.sound
        # for sound we need to extract the array again and just begin at new
        # loc
        if self._audioStream is None:
            return  # do nothing
        self._audioStream.stop()
        sndArray = self._mov.audio.to_soundarray()
        startIndex = int(t * self._mov.audio.fps)
        self._audioStream = sound.Sound(
            sndArray[startIndex:, :], sampleRate=self._mov.audio.fps)
        self._audioStream.play()

    def _getAudioStreamTime(self):
        return self._audio_stream_clock.getTime()

    def _unload(self):
        try:
            # remove textures from graphics card to prevent crash
            self.clearTextures()
        except Exception:
            pass
        self._mov = None
        self._numpyFrame = None
        self._audioStream = None
        self.status = FINISHED

    def _onEos(self):
        if self.loop:
            self.seek(0.0)
        else:
            self.status = FINISHED
            self.stop()

        if self.autoLog:
            self.win.logOnFlip("Set %s finished" % self.name,
                               level=logging.EXP, obj=self)

    def __del__(self):
        self._unload()

    def setAutoDraw(self, val, log=None):
        """Add or remove a stimulus from the list of stimuli that will be
        automatically drawn on each flip

        :parameters:
            - val: True/False
                True to add the stimulus to the draw list, False to remove it
        """
        if val:
            self.play(log=False)  # set to play in case stopped
        else:
            self.pause(log=False)
        # add to drawing list and update status
        setAttribute(self, 'autoDraw', val, log)
Exemple #17
0
        subprocess.check_call(
            'youtube-dl --no-continue --output "{}" "{}"'.format(
                args.tmp_filepath,
                args.youtube_url,
            ),
            shell=True,
        )

    poop = args.tmp_filepath + ".mp4"
    print(poop)

    clip = VideoFileClip(
        poop,
        audio=False,
    )
    clip_frame0 = clip.get_frame(0)
    clip_resolution = (len(clip_frame0), len(clip_frame0[0]))
    if clip_resolution != TARGET_RESOLUTION:
        clip.reader.close()
        clip = VideoFileClip(
            poop,
            audio=False,
            target_resolution=TARGET_RESOLUTION,
            resize_algorithm='fast_bilinear',
        )
    med_res_clip = VideoFileClip(
        poop,
        audio=False,
        target_resolution=MID_RES_TARGET_RESOLUTION,
        resize_algorithm='fast_bilinear',
    )
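
A minimal sketch of the probe-then-reload pattern above: load the clip once to
check its native resolution, and reload it with decoder-side scaling only when
it differs from the target. Assumes moviepy 2.x; the file path and the
TARGET_RESOLUTION value are illustrative.

from moviepy import VideoFileClip  # moviepy >= 2.0

TARGET_RESOLUTION = (720, 1280)  # (height, width); an assumed example value

clip = VideoFileClip("input.mp4", audio=False)  # hypothetical path
if clip.get_frame(0).shape[:2] != TARGET_RESOLUTION:
    clip.reader.close()
    # Reload scaled; fast_bilinear trades image quality for decoding speed.
    clip = VideoFileClip(
        "input.mp4",
        audio=False,
        target_resolution=TARGET_RESOLUTION,
        resize_algorithm="fast_bilinear",
    )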
Exemple #18
0
class MainWidget(QWidget):
    def __init__(self):

        super().__init__()
        self.save_path = ""
        self.video_path = None

        layout = QVBoxLayout()

        hlayout = QHBoxLayout()

        btn = QPushButton()
        btn.setIcon(self.style().standardIcon(
            getattr(QStyle, "SP_DialogOpenButton")))
        btn.clicked.connect(self.get_path)
        self.selected_video = QLabel()
        self.selected_video.setText("Selected Video: None")

        hlayout.addWidget(self.selected_video)
        hlayout.addStretch()
        hlayout.addWidget(btn)

        layout.addLayout(hlayout)

        self.selected_corner_points = QLabel(
            "Select corner points: 1. top left, 2. bottom right")
        layout.addWidget(self.selected_corner_points)

        self.img_widget = ImageWidget(self)
        scroll = QScrollArea()
        scroll.setWidget(self.img_widget)

        layout.addWidget(scroll)

        hlayout = QHBoxLayout()
        self.save_path_label = QLabel("Save As: ")
        hlayout.addWidget(self.save_path_label)
        hlayout.addStretch()
        save_as_btn = QPushButton()
        save_as_btn.setIcon(self.style().standardIcon(
            getattr(QStyle, "SP_DialogOpenButton")))
        save_as_btn.clicked.connect(self.file_save)
        hlayout.addWidget(save_as_btn)

        layout.addLayout(hlayout)

        codecs = ["libx264", "mpeg4", "png", "rawvideo"]
        codec_buttons = [QRadioButton(codec) for codec in codecs]
        codec_buttons[0].setChecked(True)

        self.codec_btngroup = QButtonGroup()
        for btn in codec_buttons:
            self.codec_btngroup.addButton(btn)

        hlayout = QHBoxLayout()
        for btn in codec_buttons:
            hlayout.addWidget(btn)

        layout.addLayout(hlayout)

        hlayout = QHBoxLayout()

        flayout = QFormLayout()
        self.fps_field = QLineEdit()
        self.fps_label = QLabel("fps: ")
        flayout.addRow(self.fps_label, self.fps_field)
        hlayout.addLayout(flayout)

        render_btn = QPushButton("Render")
        render_btn.clicked.connect(self.render_video)

        hlayout.addWidget(render_btn)

        layout.addLayout(hlayout)

        self.progressbar = QProgressBar()
        self.progressbar.setValue(0)
        self.progressbar.hide()
        layout.addWidget(self.progressbar)

        self.setLayout(layout)
        self.setWindowTitle("Simple Video Edits")

    def select_roi(self, img, roi):
        return img[roi[1]:roi[3], roi[0]:roi[2], :]

    def render_video(self):
        try:
            fps = int(self.fps_field.text())
            fps = fps if fps != -1 else None

            if not self.img_widget.selected_corners:
                roi = [(0, 0), self.img_widget.image_size]
            else:
                roi = self.img_widget.selected_corners
            roi = list(roi[0]) + list(roi[1])

            if self.save_path == "":
                QMessageBox.about(self, "Error", "Invalid Save Path")
                return

            output_clip = self.video_clip.fl_image(
                lambda img: self.select_roi(img, roi))
            self.progressbar.show()
            logger = MyProgressBarLogger(
                self.progressbar, (fps if fps else self.video_clip.fps) *
                self.video_clip.duration)

            checked_codec = str(self.codec_btngroup.checkedButton().text())

            output_clip.write_videofile(self.save_path,
                                        audio=False,
                                        fps=fps,
                                        logger=logger,
                                        codec=checked_codec)
        except Exception:
            QMessageBox.about(self, "Error", "Invalid FPS or corners")

    def get_path(self):
        output = QFileDialog.getOpenFileName(self, "")
        self.video_path = output[0]

        try:
            self.video_clip = VideoFileClip(self.video_path)
            self.fps_label.setText("fps: (max " +
                                   "{:.2f}".format(self.video_clip.fps) +
                                   ", type -1 for max fps)")
            self.img_widget.set_frame(self.video_clip.get_frame(0))
            self.selected_video.setText("Selected Video: " + self.video_path)
            self.progressbar.hide()

            Image.fromarray(
                self.video_clip.get_frame(0)).save("/tmp/thumb.png")
        except Exception:
            QMessageBox.about(self, "Error", "Invalid Path.")

    def file_save(self):
        name = QFileDialog.getSaveFileName(self, 'Save File')[0]
        self.save_path_label.setText("Save As: " + name)
        if not name.lower().endswith("mp4") and not name.lower().endswith(
                ".avi"):
            QMessageBox.about(
                self, "Error",
                "Invalid Name. File should end with .mp4 or .avi")
        else:
            self.save_path = name

    def update_rect(self):
        pt1, pt2 = self.img_widget.selected_corners

        append = "(" + str(pt1[0]) + "," + str(pt1[1]) + ") (" + str(
            pt2[0]) + "," + str(pt2[1]) + ")"
        self.selected_corner_points.setText(
            "Select corner points: 1. top left, 2. bottom right " + append)
Exemple #19
0
class Decoder(object):
    """ This class loads a video file that can be played. It can 
	be passed a callback function to which decoded video frames should be passed. 
	"""
    def __init__(self, mediafile=None, videorenderfunc=None, play_audio=True):
        """
		Constructor.

		Parameters
		----------
		mediafile : str, optional
			The path to the mediafile to be loaded (default: None)
		videorenderfunc : callable (default: None)
			Callback function that takes care of the actual
			rendering of the videoframe.
			The specified renderfunc should be able to accept the following
			arguments:
				- frame (numpy.ndarray): the videoframe to be rendered
		play_audio : bool, optional
			Whether audio of the clip should be played.
		"""
        # Create an internal timer
        self.clock = Timer()

        # Load a video file if specified, but allow users to do this later
        # by initializing all variables to None
        if not self.load_media(mediafile, play_audio):
            self.reset()

        # Set callback function if set
        self.set_videoframerender_callback(videorenderfunc)

        # Store instance variables
        self.play_audio = play_audio

    @property
    def frame_interval(self):
        """ Duration in seconds of a single frame. """
        return self.clock.frame_interval

    @property
    def current_frame_no(self):
        """ Current frame_no of video. """
        return self.clock.current_frame

    @property
    def current_videoframe(self):
        """ Representation of current video frame as a numpy array. """
        return self.__current_videoframe

    @property
    def current_playtime(self):
        """ Clocks current runtime in seconds. """
        return self.clock.time

    @property
    def loop(self):
        """ Indicates whether the playback should loop. """
        return self._loop

    @loop.setter
    def loop(self, value):
        """ Indicates whether the playback should loop. 

		Parameters
		----------
		value : bool
			True if playback should loop, False if not.

		"""
        if not isinstance(value, bool):
            raise TypeError("can only be True or False")
        self._loop = value

    def reset(self):
        """ Resets the player and discards loaded data. """
        self.clip = None
        self.loaded_file = None

        self.fps = None
        self.duration = None

        self.status = UNINITIALIZED
        self.clock.reset()

        self.loop_count = 0

    def load_media(self, mediafile, play_audio=True):
        """ Loads a media file to decode. 

		If an audiostream is detected, its parameters will be stored in a
		dictionary in the variable `audioformat`. This contains the fields 

		:nbytes: the number of bytes per sample (2 means 16-bit sound).
		:nchannels: the number of channels (2 for stereo, 1 for mono)
		:fps: the sampling rate of the sound (e.g. 44100 Hz).
		:buffersize: the audioframes per buffer.
		
		If play_audio was set to False, or the video does not have an audiotrack,
		`audioformat` will be None.

		Parameters
		----------
		mediafile : str
			The path to the media file to load.
		play_audio : bool, optional
			Indicates whether the audio of a movie should be played.

		Raises
		------
		IOError
			When the file could not be found or loaded.
		"""
        if mediafile is not None:
            if os.path.isfile(mediafile):
                self.clip = VideoFileClip(mediafile, audio=play_audio)

                self.loaded_file = os.path.split(mediafile)[1]

                ## Timing variables
                # Clip duration
                self.duration = self.clip.duration
                self.clock.max_duration = self.clip.duration
                logger.debug("Video clip duration: {}s".format(self.duration))

                # Frames per second of clip
                self.fps = self.clip.fps
                self.clock.fps = self.clip.fps
                logger.debug("Video clip FPS: {}".format(self.fps))

                if play_audio and self.clip.audio:
                    buffersize = int(self.frame_interval * self.clip.audio.fps)
                    self.audioformat = {
                        'nbytes': 2,
                        'nchannels': self.clip.audio.nchannels,
                        'fps': self.clip.audio.fps,
                        'buffersize': buffersize
                    }
                    logger.debug("Audio loaded: \n{}".format(self.audioformat))
                    logger.debug("Creating audio buffer of length: "
                                 " {}".format(queue_length))
                    self.audioqueue = Queue(queue_length)
                else:
                    self.audioformat = None

                logger.debug('Loaded {0}'.format(mediafile))
                self.status = READY
                return True
            else:
                raise IOError("File not found: {0}".format(mediafile))
        return False

    def set_videoframerender_callback(self, func):
        """ Sets the function to call when a new frame is available. 
		This function is passed the frame (in the form of a numpy.ndarray) and
		should take care of the rendering. 

		Parameters
		----------
		func : callable
			The function to pass the new frame to once it becomes available.
		"""

        # Check if renderfunc is indeed a function
        if func is not None and not callable(func):
            raise TypeError(
                "The object passed for videorenderfunc is not a function")
        self.__videorenderfunc = func

    def set_audiorenderer(self, renderer):
        """ Sets the SoundRenderer object. This should take care of processing 
		the audioframes set in audioqueue.

		Parameters
		----------
		renderer : soundrenderers.SoundRenderer
			A subclass of soundrenderers.SoundRenderer that takes care of the
			audio rendering.

		Raises
		------
		RuntimeError
			If no information about the audiostream is available. This could be
			because no video has been loaded yet, or because no embedded 
			audiostream could be detected in the video, or play_sound was set
			to False.
		"""
        if not hasattr(self, 'audioqueue') or self.audioqueue is None:
            raise RuntimeError("No video has been loaded, or no audiostream "
                               "was detected.")
        if not isinstance(renderer, SoundRenderer):
            raise TypeError("Invalid renderer object. Not a subclass of "
                            "SoundRenderer")
        self.soundrenderer = renderer
        self.soundrenderer.queue = self.audioqueue

    def play(self):
        """ Start the playback of the video. 
		The playback loop is run in a separate thread, so this function returns 
		immediately. This allows one to implement things such as event handling 
		loops (e.g. check for key presses) elsewhere.
		"""
        ### First do some status checks

        # Make sure a file is loaded
        if self.status == UNINITIALIZED or self.clip is None:
            raise RuntimeError("Player uninitialized or no file loaded")

        # Check if playback has already finished (rewind needs to be called first)
        if self.status == EOS:
            logger.debug("End of stream has already been reached")
            return

        # Check if playback hasn't already been started (and thus if play()
        # has not been called before from another thread for instance)
        if self.status in [PLAYING, PAUSED]:
            logger.warning("Video already started")
            return

        ### If all is in order start the general playing loop
        if self.status == READY:
            self.status = PLAYING

        self.last_frame_no = 0

        if not hasattr(self, "renderloop") or not self.renderloop.is_alive():
            if self.audioformat:
                # Chop the total stream into separate audio chunks that are the
                # length of a video frame (this way the index of each chunk
                # corresponds to the video frame it belongs to.)
                self.__calculate_audio_frames()
                # Start audio handling thread. This thread places audioframes
                # into a sound buffer, until this buffer is full.
                self.audioframe_handler = threading.Thread(
                    target=self.__audiorender_thread)
                self.audioframe_handler.start()

            # Start main rendering loop.
            self.renderloop = threading.Thread(target=self.__render)
            self.renderloop.start()
        else:
            logger.warning("Rendering thread already running!")

    def pause(self):
        """ Pauses or resumes the video and/or audio stream. """

        # Change playback status only if current status is PLAYING or PAUSED
        # (and not READY).
        logger.debug("Pausing playback")
        if self.status == PAUSED:
            # Recalculate audio stream position to make sure it is not out of
            # sync with the video
            self.__calculate_audio_frames()
            self.status = PLAYING
            self.clock.pause()
        elif self.status == PLAYING:
            self.status = PAUSED
            self.clock.pause()

    def stop(self):
        """ Stops the video stream and resets the clock. """

        logger.debug("Stopping playback")
        # Stop the clock
        self.clock.stop()
        # Set player status to ready
        self.status = READY

    def seek(self, value):
        """ Seek to the specified time.

		Parameters
		----------
		value : str or int
			The time to seek to. Can be any of the following formats:

		    >>> 15.4 -> 15.4 # seconds
		    >>> (1,21.5) -> 81.5 # (min,sec)
		    >>> (1,1,2) -> 3662 # (hr, min, sec)
		    >>> '01:01:33.5' -> 3693.5  #(hr,min,sec)
		    >>> '01:01:33.045' -> 3693.045
		    >>> '01:01:33,5' #comma works too
		"""
        # Pause the stream
        self.pause()
        # Make sure the movie starts at 0.5s, as 0s gives trouble.
        self.clock.time = max(0.5, value)
        logger.debug("Seeking to {} seconds; frame {}".format(
            self.clock.time, self.clock.current_frame))
        if self.audioformat:
            self.__calculate_audio_frames()
        # Resume the stream
        self.pause()

    def rewind(self):
        """ Rewinds the video to the beginning.
		Convenience function simply calling seek(0). """
        self.seek(0.5)

    def __calculate_audio_frames(self):
        """ Aligns audio with video. 
		This should be called for instance after a seeking operation or resuming 
		from a pause. """

        if self.audioformat is None:
            return
        start_frame = self.clock.current_frame
        totalsize = int(self.clip.audio.fps * self.clip.audio.duration)
        self.audio_times = list(
            range(0, totalsize, self.audioformat['buffersize'])) + [totalsize]
        # Remove audio segments up to the starting frame
        del (self.audio_times[0:start_frame])

    def __render(self):
        """ Main render loop. 

		Checks clock if new video and audio frames need to be rendered. 
		If so, it passes the frames to functions that take care 
		of rendering these frames. """

        # Render first frame
        self.__render_videoframe()

        # Start videoclock with start of this thread
        self.clock.start()

        logger.debug("Started rendering loop.")
        # Main rendering loop
        while self.status in [PLAYING, PAUSED]:
            current_frame_no = self.clock.current_frame

            # Check if end of clip has been reached
            if self.clock.time >= self.duration:
                logger.debug("End of stream reached at {}".format(
                    self.clock.time))
                if self.loop:
                    logger.debug("Looping: restarting stream")
                    # Seek to the start
                    self.rewind()
                    self.loop_count += 1
                else:
                    # End of stream has been reached
                    self.status = EOS
                    break

            if self.last_frame_no != current_frame_no:
                # A new frame is available. Get it from the stream
                self.__render_videoframe()

            self.last_frame_no = current_frame_no

            # Sleeping is a good idea to give the other threads some breathing
            # space to do their work.
            time.sleep(0.005)

        # Stop the clock.
        self.clock.stop()
        logger.debug("Rendering stopped.")

    def __render_videoframe(self):
        """ Retrieves a new videoframe from the stream.

		Sets the frame as the __current_video_frame and passes it on to
		__videorenderfunc() if it is set. """

        new_videoframe = self.clip.get_frame(self.clock.time)
        # Pass it to the callback function if this is set
        if callable(self.__videorenderfunc):
            self.__videorenderfunc(new_videoframe)
        # Store it as the current videoframe
        self.__current_videoframe = new_videoframe

    def __audiorender_thread(self):
        """ Thread that takes care of the audio rendering. Do not call directly,
		but only as the target of a thread. """
        new_audioframe = None
        logger.debug("Started audio rendering thread.")

        while self.status in [PLAYING, PAUSED]:
            # Retrieve audiochunk
            if self.status == PLAYING:
                if new_audioframe is None:
                    # Get a new frame from the audiostream, skip to the next one
                    # if the current one gives a problem
                    try:
                        start = self.audio_times.pop(0)
                        stop = self.audio_times[0]
                    except IndexError:
                        logger.debug("Audio times could not be obtained")
                        time.sleep(0.02)
                        continue

                    # Get the frame numbers to extract from the audio stream.
                    chunk = (1.0 / self.audioformat['fps']) * np.arange(
                        start, stop)

                    try:
                        # Extract the frames from the audio stream. Does not always
                        # succeed (e.g. with bad streams missing frames), so make
                        # sure this doesn't crash the whole program.
                        new_audioframe = self.clip.audio.to_soundarray(
                            tt=chunk,
                            buffersize=self.frame_interval *
                            self.clip.audio.fps,
                            quantize=True)
                    except OSError as e:
                        logger.warning("Sound decoding error: {}".format(e))
                        new_audioframe = None
                # Put audioframe in buffer/queue for soundrenderer to pick up. If
                # the queue is full, try again after a timeout (this allows to check
                # if the status is still PLAYING after a pause.)
                if new_audioframe is not None:
                    try:
                        self.audioqueue.put(new_audioframe, timeout=.05)
                        new_audioframe = None
                    except Full:
                        pass

            time.sleep(0.005)

        logger.debug("Stopped audio rendering thread.")

    def __repr__(self):
        """ Create a string representation for when print() is called. """
        return "Decoder [file loaded: {0}]".format(self.loaded_file)
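
A hedged wiring sketch for the Decoder above. Timer, the status constants and
the SoundRenderer base class come from the surrounding package, so the
renderer class and the file path here are illustrative only.

import time

decoder = Decoder(mediafile="movie.mp4",  # hypothetical path
                  videorenderfunc=lambda frame: None,  # e.g. blit to screen
                  play_audio=True)
if decoder.audioformat is not None:
    # MySoundRenderer stands in for any SoundRenderer subclass (e.g. one
    # backed by pyaudio) that drains decoder.audioqueue.
    decoder.set_audiorenderer(MySoundRenderer(decoder.audioformat))
decoder.loop = False
decoder.play()  # returns immediately; rendering runs in worker threads
while decoder.status == PLAYING:
    time.sleep(0.1)  # an event loop (key handling etc.) would go here
decoder.stop()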
Exemple #20
0
class FrameSamplingFilter():
    def __init__(self, every=None, hertz=None, top_n=None):
        if every is None and hertz is None and top_n is None:
            raise ValueError("When initializing the FrameSamplingFilter, "
                             "one of the 'every', 'hertz', or 'top_n' must "
                             "be specified.")
        self.every = every
        self.hertz = hertz
        self.top_n = top_n

    def get_audio_sampling_rate(self, filename: str):
        infos = ffmpeg_parse_infos(filename)
        fps = infos.get('audio_fps', 44100)
        if fps == 'unknown':
            fps = 44100
        return fps

    def load_clip(self, filename: str):
        audio_fps = self.get_audio_sampling_rate(filename)
        # Pass audio_fps as a keyword: the second positional arg is has_mask.
        self.clip = VideoFileClip(filename, audio_fps=audio_fps)

    def initialize_video(self, filename: str):
        self.filename = filename
        self.load_clip(filename)
        self.fps = self.clip.fps
        self.width = self.clip.w
        self.height = self.clip.h
        self.frame_index = range(int(ceil(self.fps * self.clip.duration)))
        self.duration = self.clip.duration
        self.n_frames = len(self.frame_index)

    def get_audio_vector(self, new_sampling_rate: int = 16000):
        fd, fp = tempfile.mkstemp()
        audio = f'{fp}.wav'
        self.clip.audio.write_audiofile(audio)
        data, sampling_rate = sf.read(audio, dtype='float32')
        os.close(fd)
        os.remove(audio)
        return np.array(
            librosa.resample(data.T, sampling_rate, new_sampling_rate))

    def transform(self, filename: str):
        self.initialize_video(filename)

        if (self.every is not None):
            new_idx = range(self.n_frames)[::self.every]
        elif (self.hertz is not None):
            interval = self.fps / float(self.hertz)
            new_idx = np.arange(0, self.n_frames, interval).astype(int)
            new_idx = list(new_idx)
        elif self.top_n is not None:
            diffs = []
            for i, img in enumerate(range(self.n_frames)):
                if i == 0:
                    last = img
                    continue
                pixel_diffs = cv2.sumElems(
                    cv2.absdiff(self.get_frame(last), self.get_frame(img)))
                diffs.append(sum(pixel_diffs))
                last = img
            new_idx = sorted(range(len(diffs)),
                             key=lambda i: diffs[i],
                             reverse=True)[:self.top_n]

        result = []
        for index in new_idx:
            result.append(self.get_frame(index))
        return result

    def get_frame(self, index: int):
        # VideoFileClip.get_frame expects a time in seconds, so convert the
        # frame index to a timestamp first.
        return self.clip.get_frame(index / self.fps)

    def iter_frames(self):
        for i, f in enumerate(self.frame_index):
            yield self.get_frame(f)
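
A short usage sketch for FrameSamplingFilter; the filename is illustrative,
and moviepy, OpenCV (cv2) and soundfile/librosa are assumed to be installed:

# Keep one frame per second of video, whatever the native frame rate is.
sampler = FrameSamplingFilter(hertz=1)
frames = sampler.transform("movie.mp4")  # hypothetical path

# Alternatively, keep the 10 frames that differ most from their predecessor.
key_frames = FrameSamplingFilter(top_n=10).transform("movie.mp4")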
Exemple #21
0
class Decoder(object):
	""" This class loads a video file that can be played. It can 
	be passed a callback function to which decoded video frames should be passed. 
	"""

	def __init__(self, mediafile=None, videorenderfunc=None, play_audio=True):
		"""
		Constructor.

		Parameters
		----------
		mediafile : str, optional
			The path to the mediafile to be loaded (default: None)
		videorenderfunc : callable (default: None)
			Callback function that takes care of the actual
			rendering of the videoframe.
			The specified renderfunc should be able to accept the following
			arguments:
				- frame (numpy.ndarray): the videoframe to be rendered
		play_audio : bool, optional
			Whether audio of the clip should be played.
		"""
		# Create an internal timer
		self.clock = Timer()

		# Load a video file if specified, but allow users to do this later
		# by initializing all variables to None
		if not self.load_media(mediafile, play_audio):
			self.reset()

		# Set callback function if set
		self.set_videoframerender_callback(videorenderfunc)

		# Store instance variables
		self.play_audio = play_audio

	@property
	def frame_interval(self):
		""" Duration in seconds of a single frame. """
		return self.clock.frame_interval

	@property
	def current_frame_no(self):
		""" Current frame_no of video. """
		return self.clock.current_frame

	@property
	def current_videoframe(self):
		""" Representation of current video frame as a numpy array. """
		return self.__current_videoframe

	@property
	def current_playtime(self):
		""" Clocks current runtime in seconds. """
		return self.clock.time

	@property
	def loop(self):
		""" Indicates whether the playback should loop. """
		return self._loop
	
	@loop.setter
	def loop(self, value):
		""" Indicates whether the playback should loop. 

		Parameters
		----------
		value : bool
			True if playback should loop, False if not.

		"""
		if not isinstance(value, bool):
			raise TypeError("can only be True or False")
		self._loop = value

	def reset(self):
		""" Resets the player and discards loaded data. """
		self.clip = None
		self.loaded_file = None

		self.fps = None
		self.duration = None

		self.status = UNINITIALIZED
		self.clock.reset()

	def load_media(self, mediafile, play_audio=True):
		""" Loads a media file to decode. 

		If an audiostream is detected, its parameters will be stored in a
		dictionary in the variable `audioformat`. This contains the fields 

		:nbytes: the number of bytes per sample (2 means 16-bit sound).
		:nchannels: the number of channels (2 for stereo, 1 for mono)
		:fps: the sampling rate of the sound (e.g. 44100 Hz).
		:buffersize: the audioframes per buffer.
		
		If play_audio was set to False, or the video does not have an audiotrack,
		`audioformat` will be None.

		Parameters
		----------
		mediafile : str
			The path to the media file to load.
		play_audio : bool, optional
			Indicates whether the audio of a movie should be played.

		Raises
		------
		IOError
			When the file could not be found or loaded.
		"""
		if mediafile is not None:
			if os.path.isfile(mediafile):
				self.clip = VideoFileClip(mediafile, audio=play_audio)

				self.loaded_file = os.path.split(mediafile)[1]

				## Timing variables
				# Clip duration
				self.duration = self.clip.duration
				self.clock.max_duration = self.clip.duration
				logger.debug("Video clip duration: {}s".format(self.duration))

				# Frames per second of clip
				self.fps = self.clip.fps
				self.clock.fps = self.clip.fps
				logger.debug("Video clip FPS: {}".format(self.fps))

				if play_audio and self.clip.audio:
					buffersize = int(self.frame_interval*self.clip.audio.fps)
					self.audioformat = {
						'nbytes': 2,
						'nchannels': self.clip.audio.nchannels,
						'fps': self.clip.audio.fps,
						'buffersize': buffersize
					}
					logger.debug("Audio loaded: \n{}".format(self.audioformat))
					logger.debug("Creating audio buffer of length: "
						" {}".format(queue_length))
					self.audioqueue = Queue(queue_length)
				else:
					self.audioformat = None

				logger.debug('Loaded {0}'.format(mediafile))
				self.status = READY
				return True
			else:
				raise IOError("File not found: {0}".format(mediafile))
		return False

	def set_videoframerender_callback(self, func):
		""" Sets the function to call when a new frame is available. 
		This function is passed the frame (in the form of a numpy.ndarray) and
		should take care of the rendering. 

		Parameters
		----------
		func : callable
			The function to pass the new frame to once it becomes available.
		"""

		# Check if renderfunc is indeed a function
		if func is not None and not callable(func):
			raise TypeError("The object passed for videorenderfunc is not a function")
		self.__videorenderfunc = func

	def set_audiorenderer(self, renderer):
		""" Sets the SoundRenderer object. This should take care of processing 
		the audioframes set in audioqueue.

		Parameters
		----------
		renderer : soundrenderers.SoundRenderer
			A subclass of soundrenderers.SoundRenderer that takes care of the
			audio rendering.

		Raises
		------
		RuntimeError
			If no information about the audiostream is available. This could be
			because no video has been loaded yet, or because no embedded 
			audiostream could be detected in the video, or play_sound was set
			to False.
		"""
		if not hasattr(self, 'audioqueue') or self.audioqueue is None:
			raise RuntimeError("No video has been loaded, or no audiostream "
				"was detected.")
		if not isinstance(renderer, SoundRenderer):
			raise TypeError("Invalid renderer object. Not a subclass of "
				"SoundRenderer")
		self.soundrenderer = renderer
		self.soundrenderer.queue = self.audioqueue

	def play(self):
		""" Start the playback of the video. 
		The playback loop is run in a separate thread, so this function returns 
		immediately. This allows one to implement things such as event handling 
		loops (e.g. check for key presses) elsewhere.
		"""
		### First do some status checks

		# Make sure a file is loaded
		if self.status == UNINITIALIZED or self.clip is None:
			raise RuntimeError("Player uninitialized or no file loaded")

		# Check if playback has already finished (rewind needs to be called first)
		if self.status == EOS:
			logger.debug("End of stream has already been reached")
			return

		# Check if playback hasn't already been started (and thus if play()
		# has not been called before from another thread for instance)
		if self.status in [PLAYING,PAUSED]:
			logger.warning("Video already started")
			return

		### If all is in order start the general playing loop
		if self.status == READY:
			self.status = PLAYING

		self.last_frame_no = 0

		if not hasattr(self, "renderloop") or not self.renderloop.is_alive():
			if self.audioformat:
				# Chop the total stream into separate audio chunks that are the
				# length of a video frame (this way the index of each chunk
				# corresponds to the video frame it belongs to.)
				self.__calculate_audio_frames()
				# Start audio handling thread. This thread places audioframes
				# into a sound buffer, until this buffer is full.
				self.audioframe_handler = threading.Thread(
					target=self.__audiorender_thread)
				self.audioframe_handler.start()

			# Start main rendering loop.
			self.renderloop = threading.Thread(target=self.__render)
			self.renderloop.start()
		else:
			logger.warning("Rendering thread already running!")

	def pause(self):
		""" Pauses or resumes the video and/or audio stream. """

		# Change playback status only if current status is PLAYING or PAUSED 
		# (and not READY).
		logger.debug("Pausing playback")
		if self.status == PAUSED:
			# Recalculate audio stream position to make sure it is not out of
			# sync with the video
			self.__calculate_audio_frames()
			self.status = PLAYING
			self.clock.pause()
		elif self.status == PLAYING:
			self.status = PAUSED
			self.clock.pause()

	def stop(self):
		""" Stops the video stream and resets the clock. """

		logger.debug("Stopping playback")
		# Stop the clock
		self.clock.stop()
		# Set player status to ready
		self.status = READY

	def seek(self, value):
		""" Seek to the specified time.

		Parameters
		----------
		value : str or int
			The time to seek to. Can be any of the following formats:

		    >>> 15.4 -> 15.4 # seconds
		    >>> (1,21.5) -> 81.5 # (min,sec)
		    >>> (1,1,2) -> 3662 # (hr, min, sec)
		    >>> '01:01:33.5' -> 3693.5  #(hr,min,sec)
		    >>> '01:01:33.045' -> 3693.045
		    >>> '01:01:33,5' #comma works too
		"""
		# Pause the stream
		self.pause()
		self.clock.time = value
		logger.debug("Seeking to {} seconds; frame {}".format(self.clock.time, 
			self.clock.current_frame))
		if self.audioformat:
			self.__calculate_audio_frames()
		# Resume the stream
		self.pause()

	def rewind(self):
		""" Rewinds the video to the beginning.
		Convenience function simply calling seek(0). """
		self.seek(0)

	def __calculate_audio_frames(self):
		""" Aligns audio with video. 
		This should be called for instance after a seeking operation or resuming 
		from a pause. """

		if self.audioformat is None:
			return
		start_frame = self.clock.current_frame
		totalsize = int(self.clip.audio.fps*self.clip.audio.duration)
		self.audio_times = list(range(0, totalsize, 
			self.audioformat['buffersize'])) + [totalsize]
		# Remove audio segments up to the starting frame
		del(self.audio_times[0:start_frame])

	def __render(self):
		""" Main render loop. 

		Checks clock if new video and audio frames need to be rendered. 
		If so, it passes the frames to functions that take care 
		of rendering these frames. """

		# Render first frame
		self.__render_videoframe()

		# Start videoclock with start of this thread
		self.clock.start()

		logger.debug("Started rendering loop.")
		# Main rendering loop
		while self.status in [PLAYING,PAUSED]:
			current_frame_no = self.clock.current_frame

			# Check if end of clip has been reached
			if self.clock.time >= self.duration:
				logger.debug("End of stream reached at {}".format(self.clock.time))
				if self.loop:
					logger.debug("Looping: restarting stream")
					# Seek to the start
					self.seek(0)
				else:
					# End of stream has been reached
					self.status = EOS
					break

			if self.last_frame_no != current_frame_no:
				# A new frame is available. Get it from the stream
				self.__render_videoframe()

			self.last_frame_no = current_frame_no

			# Sleeping is a good idea to give the other threads some breathing
			# space to do their work.
			time.sleep(0.005)

		# Stop the clock.
		self.clock.stop()
		logger.debug("Rendering stopped.")

	def __render_videoframe(self):
		""" Retrieves a new videoframe from the stream.

		Sets the frame as the __current_video_frame and passes it on to
		__videorenderfunc() if it is set. """

		new_videoframe = self.clip.get_frame(self.clock.time)
		# Pass it to the callback function if this is set
		if callable(self.__videorenderfunc):
			self.__videorenderfunc(new_videoframe)
		# Set current_frame to current frame (...)
		self.__current_videoframe = new_videoframe

	def __audiorender_thread(self):
		""" Thread that takes care of the audio rendering. Do not call directly,
		but only as the target of a thread. """
		new_audioframe = None
		logger.debug("Started audio rendering thread.")

		while self.status in [PLAYING,PAUSED]:
			# Retrieve audiochunk
			if self.status == PLAYING:
				if new_audioframe is None:
					# Get a new frame from the audiostream, skip to the next one
					# if the current one gives a problem
					try:
						start = self.audio_times.pop(0)
						stop = self.audio_times[0]
					except IndexError:
						logger.debug("Audio times could not be obtained")
						time.sleep(0.02)
						continue

					# Get the frame numbers to extract from the audio stream.
					chunk = (1.0/self.audioformat['fps'])*np.arange(start, stop)

					try:
					# Extract the frames from the audio stream. Does not always
						# succeed (e.g. with bad streams missing frames), so make
						# sure this doesn't crash the whole program.
						new_audioframe = self.clip.audio.to_soundarray(
							tt = chunk,
							buffersize = self.frame_interval*self.clip.audio.fps,
							quantize=True
						)
					except OSError as e:
						logger.warning("Sound decoding error: {}".format(e))
						new_audioframe = None
				# Put audioframe in buffer/queue for soundrenderer to pick up. If
				# the queue is full, try again after a timeout (this allows to check
				# if the status is still PLAYING after a pause.)
				if new_audioframe is not None:
					try:
						self.audioqueue.put(new_audioframe, timeout=.05)
						new_audioframe = None
					except Full:
						pass
			
			time.sleep(0.005)
		
		logger.debug("Stopped audio rendering thread.")

	def __repr__(self):
		""" Create a string representation for when print() is called. """
		return "Decoder [file loaded: {0}]".format(self.loaded_file)
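
Both Decoder variants align audio to video by slicing the audio sample range
into one chunk per video frame, so that chunk i holds exactly the samples
played during frame i. A standalone sketch of that arithmetic, with assumed
rates:

import numpy as np

video_fps = 25.0
audio_fps = 44100  # audio samples per second
buffersize = int((1.0 / video_fps) * audio_fps)  # samples per video frame

total_samples = int(audio_fps * 10.0)  # a 10-second clip, for illustration
boundaries = list(range(0, total_samples, buffersize)) + [total_samples]

# Seeking to video frame k simply drops the first k boundaries; the
# timestamps handed to to_soundarray for the next chunk are then:
start, stop = boundaries[0], boundaries[1]
timestamps = (1.0 / audio_fps) * np.arange(start, stop)
assert len(timestamps) == buffersize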
Exemple #22
0
class MovieStim3(BaseVisualStim, ContainerMixin):
    """A stimulus class for playing movies (mpeg, avi, etc...) in PsychoPy
    that does not require avbin. Instead it requires the cv2 python package
    for OpenCV. The VLC media player also needs to be installed on the
    psychopy computer.

    **Example**::

        See Movie2Stim.py for demo.
    """
    def __init__(self, win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0,0.0),
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0,1.0,1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,
                 noAudio=False,
                 vframe_callback=None,
                 fps=None,
                 interpolate = True,
        ):
        """
        :Parameters:

            filename :
                a string giving the relative or absolute path to the movie.
            flipVert : True or *False*
                If True then the movie will be top-bottom flipped
            flipHoriz : True or *False*
                If True then the movie will be right-left flipped
            volume :
                The nominal level is 100, and 0 is silence.
            loop : bool, optional
                Whether to start the movie over from the beginning if draw is
                called and the movie is done.

        """
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        super(MovieStim3, self).__init__(win, units=units, name=name,
                                         autoLog=False)

        retraceRate = win._monitorFrameRate
        if retraceRate is None:
            retraceRate = win.getActualFrameRate()
        if retraceRate is None:
            logging.warning("FrameRate could not be supplied by psychopy; defaulting to 60.0")
            retraceRate = 60.0
        self._retraceInterval = 1.0/retraceRate
        self.filename = filename
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.depth = depth
        self.opacity = float(opacity)
        self.interpolate = interpolate
        self.noAudio = noAudio
        self._audioStream = None
        self.useTexSubImage2D = True

        self._videoClock = Clock()
        self.loadMovie(self.filename)
        self.setVolume(volume)
        self.nDroppedFrames = 0

        #size
        if size is None:
            self.size = numpy.array([self._mov.w, self._mov.h],
                                   float)
        else:
            self.size = val2array(size)
        self.ori = ori
        self._updateVertices()
        #set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created %s = %s" %(self.name, str(self)))

    def reset(self):
        self._numpyFrame = None
        self._nextFrameT = None
        self._texID = None
        self.status = NOT_STARTED

    def setMovie(self, filename, log=True):
        """See `~MovieStim.loadMovie` (the functions are identical).
        This form is provided for syntactic consistency with other visual stimuli.
        """
        self.loadMovie(filename, log=log)

    def loadMovie(self, filename, log=True):
        """Load a movie from file

        :Parameters:

            filename: string
                The name of the file, including path if necessary


        After the file is loaded MovieStim.duration is updated with the movie
        duration (in seconds).
        """
        self.reset() #set status and timestamps etc

        # Create Video Stream stuff
        if os.path.isfile(filename):
            self._mov = VideoFileClip(filename, audio= (1-self.noAudio))
            if (not self.noAudio) and (self._mov.audio is not None):
                self._audioStream = sound.Sound(self._mov.audio.to_soundarray(),
                                            sampleRate = self._mov.audio.fps)
            else:  # make sure we set to None (in case prev clip did have audio)
                self._audioStream = None
        else:
            raise IOError("Movie file '%s' was not found" %filename)
        #mov has attributes:
            # size, duration, fps
        #mov.audio has attributes
            #duration, fps (aka sampleRate), to_soundarray()
        self._frameInterval = 1.0/self._mov.fps
        self.duration = self._mov.duration
        self.filename = filename
        self._updateFrameTexture()
        logAttrib(self, log, 'movie', filename)

    def play(self, log=True):
        """Continue a paused movie from current position.
        """
        status = self.status
        if self._audioStream is not None:
            self._audioStream.play()
        if status != PLAYING:
            self.status = PLAYING
            self._videoClock.reset(-self.getCurrentFrameTime())

            if status == PAUSED:
                self._audioSeek(self.getCurrentFrameTime())

            if log and self.autoLog:
                    self.win.logOnFlip("Set %s playing" %(self.name),
                                       level=logging.EXP, obj=self)
            self._updateFrameTexture()

    def pause(self, log=True):
        """
        Pause the current point in the movie (sound will stop, current frame
        will not advance).  If play() is called again both will restart.
        """
        if self.status == PLAYING:
            self.status = PAUSED
            if self._audioStream:
                self._audioStream.stop()
            if log and self.autoLog:
                self.win.logOnFlip("Set %s paused" %(self.name), level=logging.EXP, obj=self)
            return True
        if log and self.autoLog:
            self.win.logOnFlip("Failed Set %s paused" %(self.name), level=logging.EXP, obj=self)
        return False

    def stop(self, log=True):
        """
        Stop the current point in the movie (sound will stop, current frame
        will not advance). Once stopped the movie cannot be restarted - it must
        be loaded again. Use pause() if you may need to restart the movie.
        """
        if self.status != STOPPED:
            self.status = STOPPED
            self._unload()
            self.reset()
            if log and self.autoLog:
                self.win.logOnFlip("Set %s stopped" %(self.name),
                    level=logging.EXP,obj=self)

    def setVolume(self, volume):
        pass #to do

    def setFlipHoriz(self, newVal=True, log=True):
        """If set to True then the movie will be flipped horizontally (left-to-right).
        Note that this is relative to the original, not relative to the current state.
        """
        self.flipHoriz = newVal
        logAttrib(self, log, 'flipHoriz')

    def setFlipVert(self, newVal=True, log=True):
        """If set to True then the movie will be flipped vertically (top-to-bottom).
        Note that this is relative to the original, not relative to the current state.
        """
        self.flipVert = newVal
        logAttrib(self, log, 'flipVert')

    def getFPS(self):
        """
        Returns the movie frames per second playback speed.
        """
        return self._mov.fps

    def getCurrentFrameTime(self):
        """
        Get the time that the movie file specified the current video frame as
        having.
        """
        return self._nextFrameT - self._frameInterval

    def _updateFrameTexture(self):
        if self._nextFrameT is None:
            # movie has no current position, need to reset the clock
            # to zero in order to have the timing logic work
            # otherwise the video stream would skip frames until the
            # time since creating the movie object has passed
            self._videoClock.reset()
            self._nextFrameT = 0

        #only advance if next frame (half of next retrace rate)
        if self._nextFrameT > self.duration:
            self._onEos()
        elif (self._numpyFrame is not None) and \
            (self._nextFrameT > (self._videoClock.getTime()-self._retraceInterval/2.0)):
            return None
        self._numpyFrame = self._mov.get_frame(self._nextFrameT)
        useSubTex=self.useTexSubImage2D
        if self._texID is None:
            self._texID = GL.GLuint()
            GL.glGenTextures(1, ctypes.byref(self._texID))
            useSubTex=False

        #bind the texture in openGL
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)#bind that name to the target
        GL.glTexParameteri(GL.GL_TEXTURE_2D,GL.GL_TEXTURE_WRAP_S,GL.GL_REPEAT) #makes the texture map wrap (this is actually default anyway)
        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)  # data from PIL/numpy is packed, but default for GL is 4 bytes
        #important if using bits++ because GL_LINEAR
        #sometimes extrapolates to pixel vals outside range
        if self.interpolate:
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                    self._numpyFrame.shape[1],self._numpyFrame.shape[0], 0,
                    GL.GL_RGB, GL.GL_UNSIGNED_BYTE, self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                    self._numpyFrame.shape[1], self._numpyFrame.shape[0],
                    GL.GL_RGB, GL.GL_UNSIGNED_BYTE, self._numpyFrame.ctypes)

        else:
            GL.glTexParameteri(GL.GL_TEXTURE_2D,GL.GL_TEXTURE_MAG_FILTER,GL.GL_NEAREST)
            GL.glTexParameteri(GL.GL_TEXTURE_2D,GL.GL_TEXTURE_MIN_FILTER,GL.GL_NEAREST)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1],self._numpyFrame.shape[0], 0,
                                GL.GL_BGR, GL.GL_UNSIGNED_BYTE, self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                    self._numpyFrame.shape[1], self._numpyFrame.shape[0],
                    GL.GL_BGR, GL.GL_UNSIGNED_BYTE, self._numpyFrame.ctypes)
        GL.glTexEnvi(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE, GL.GL_MODULATE)#?? do we need this - think not!

        if self.status != PAUSED:
            self._nextFrameT += self._frameInterval

    def draw(self, win=None):
        """
        Draw the current frame to a particular visual.Window (or to the
        default win for this object if not specified). The current position in
        the movie will be determined automatically.

        This method should be called on every frame that the movie is meant to
        appear"""

        if self.status==NOT_STARTED or (self.status==FINISHED and self.loop):
            self.play()
        elif self.status == FINISHED and not self.loop:
            return
        if win is None:
            win = self.win
        self._selectWindow(win)
        self._updateFrameTexture() #will check if it's needed yet in the function

        #scale the drawing frame and get to centre of field
        GL.glPushMatrix()#push before drawing, pop after
        GL.glPushClientAttrib(GL.GL_CLIENT_ALL_ATTRIB_BITS)#push the data for client attributes

        self.win.setScale('pix')
        #move to centre of stimulus and rotate
        vertsPix = self.verticesPix

        #bind textures
        GL.glActiveTexture (GL.GL_TEXTURE1)
        GL.glBindTexture (GL.GL_TEXTURE_2D,0)
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glActiveTexture (GL.GL_TEXTURE0)
        GL.glBindTexture (GL.GL_TEXTURE_2D, self._texID)
        GL.glEnable(GL.GL_TEXTURE_2D)

        GL.glColor4f(1, 1, 1, self.opacity)  # sets opacity (1,1,1 = RGB placeholder)

        array = (GL.GLfloat * 32)(
             1,  1, #texture coords
             vertsPix[0,0], vertsPix[0,1],    0.,  #vertex
             0,  1,
             vertsPix[1,0], vertsPix[1,1],    0.,
             0, 0,
             vertsPix[2,0], vertsPix[2,1],    0.,
             1, 0,
             vertsPix[3,0], vertsPix[3,1],    0.,
             )

        #2D texture array, 3D vertex array
        GL.glInterleavedArrays(GL.GL_T2F_V3F, 0, array)
        GL.glDrawArrays(GL.GL_QUADS, 0, 4)
        GL.glPopClientAttrib()
        GL.glPopAttrib()
        GL.glPopMatrix()
        #unbind the textures
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        GL.glEnable(GL.GL_TEXTURE_2D)#implicitly disables 1D

    def seek(self, t):
        """Go to a specific point in time for both the audio and video streams
        """
        #video is easy: set both times to zero and update the frame texture
        self._nextFrameT = t
        self._videoClock.reset(t)
        self._audioSeek(t)

    def _audioSeek(self, t):
        #for sound we need to extract the array again and just begin at new loc
        if self._audioStream is None:
            return #do nothing
        self._audioStream.stop()
        sndArray = self._mov.audio.to_soundarray()
        startIndex = int(t*self._mov.audio.fps)
        self._audioStream = sound.Sound(sndArray[startIndex:,:], sampleRate = self._mov.audio.fps)
        self._audioStream.play()

    def _getAudioStreamTime(self):
        return self._audio_stream_clock.getTime()

    def _unload(self):
        try:
            self.clearTextures()#remove textures from graphics card to prevent crash
        except:
            pass
        self._mov = None
        self._numpyFrame = None
        self._audioStream = None
        self.status = FINISHED

    def _onEos(self):
        if self.loop:
            self.seek(0.0)
        else:
            self.status = FINISHED
            self.stop()

        if self.autoLog:
            self.win.logOnFlip("Set %s finished" %(self.name),
                level=logging.EXP,obj=self)

    def __del__(self):
        self._unload()

    def setAutoDraw(self, val, log=None):
        """Add or remove a stimulus from the list of stimuli that will be
        automatically drawn on each flip

        :parameters:
            - val: True/False
                True to add the stimulus to the draw list, False to remove it
        """
        if val:
            self.play(log=False)  # set to play in case stopped
        else:
            self.pause(log=False)
        #add to drawing list and update status
        setAttribute(self, 'autoDraw', val, log)
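
A minimal hedged sketch of driving MovieStim3 from a PsychoPy script; the
window settings and movie path are assumptions, and FINISHED is the status
constant from psychopy.constants:

from psychopy import core, visual
from psychopy.constants import FINISHED

win = visual.Window((800, 600), units='pix')
mov = MovieStim3(win, filename='movie.mp4', loop=False)  # hypothetical path

while mov.status != FINISHED:
    mov.draw()  # fetches and uploads the next frame texture when it is due
    win.flip()  # draw() is meant to run once per screen refresh

win.close()
core.quit()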
Exemple #23
0
class Player(object):
    """ This class loads a video file that can be played. It returns video and audioframes, but can also
	be passed a callback function that can take care of the rendering elsewhere. """
    def __init__(self,
                 videofile=None,
                 videorenderfunc=None,
                 audiorenderfunc=None,
                 play_audio=True):
        """
		Constructor

		Keyword arguments:
		videofile  	--  The path to the videofile to be loaded (default: None)
		videorenderfunc --  callback function that takes care of the actual
					rendering of the videoframe (default: None)

					The specified renderfunc should be able to accept the following
					arguments:
						- frame (numpy array): the videoframe to be rendered
		audiorenderfunc --  callback function that takes care of the actual
					rendering of the audioframe (default: None)

					The specified renderfunc should be able to accept the following
					arguments:
						- frame (numpy array): the audioframe to be rendered
		play_audio 	--  Whether audio of the clip should be played (default: True)
		"""
        # Create an internal timer
        self.clock = Timer()

        # Load a video file if specified, but allow users to do this later
        # by initializing all variables to None
        if not self.load_video(videofile, play_audio):
            self.reset()

        ## Set callback functions if set

        self.set_videoframerender_callback(videorenderfunc)
        self.set_audioframerender_callback(audiorenderfunc)

        self.play_audio = play_audio

    @property
    def frame_interval(self):
        """ Duration in seconds of a single frame """
        return self.clock.frame_interval

    @property
    def current_frame_no(self):
        """ Current frame_no of video """
        return self.clock.current_frame

    @property
    def current_videoframe(self):
        """ Representation of current video frame as a numpy array """
        return self.__current_videoframe

    @property
    def current_audioframe(self):
        """ Representation of current video frame as a numpy array """
        return self.__current_audioframe

    @property
    def current_playtime(self):
        """ Clocks current runtime in seconds """
        return self.clock.time

    def reset(self):
        self.clip = None
        self.loaded_file = None

        self.fps = None
        self.duration = None

        self.status = UNINITIALIZED
        self.clock.reset()

    def load_video(self, videofile, play_audio=True):
        if videofile is not None:
            if os.path.isfile(videofile):
                self.clip = VideoFileClip(videofile, audio=play_audio)

                if play_audio and self.clip.audio:
                    self.audioformat = {
                        'nbytes': 2,
                        'nchannels': self.clip.audio.nchannels,
                        'fps': self.clip.audio.fps,
                        'chunkduration': 1.0 / self.clip.fps
                    }
                else:
                    self.audioformat = None

                self.loaded_file = os.path.split(videofile)[1]

                ## Timing variables
                # Clip duration
                self.duration = self.clip.duration
                self.clock.max_duration = self.clip.duration
                # Frames per second of clip
                self.fps = self.clip.fps
                self.clock.fps = self.clip.fps

                print("Loaded {0}".format(videofile))
                self.status = READY
                return True
            else:
                raise IOError("File not found: {0}".format(videofile))
        return False

    def set_videoframerender_callback(self, func):
        # Check that the passed render function is indeed callable
        if func is not None and not callable(func):
            raise TypeError(
                "The object passed for videorenderfunc is not a function")
        self.__videorenderfunc = func

    def set_audioframerender_callback(self, func):
        # Check that the passed render function is indeed callable
        if func is not None and not callable(func):
            raise TypeError(
                "The object passed for audiorenderfunc is not a function")
        self.__audiorenderfunc = func

    def play(self):
        ### First do some status checks

        # Make sure a file is loaded
        if self.status == UNINITIALIZED or self.clip is None:
            raise RuntimeError("Player uninitialized or no file loaded")

        # Check if playback has already finished (rewind needs to be called first)
        if self.status == EOS:
            print("End of stream has been reached")
            return

        # Check if playback hasn't already been started (and thus if play()
        # has not been called before from another thread for instance)
        if self.status in [PLAYING, PAUSED]:
            print("Video already started")
            return

        ### If all is in order, start the general playback loop
        if self.status == READY:
            self.status = PLAYING

        self.last_frame_no = 0
        self.current_time = self.clock.time
        self.next_audio_refresh_t = self.current_time + self.frame_interval

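        # Playback is driven by background threads: one render loop for the
        # video frames and, when the clip has audio, a second thread that
        # waits on the new_audioframe_available event set by the render loop.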
        if not hasattr(self, "renderloop") or not self.renderloop.isAlive():
            if self.audioformat:
                # Create flag to set when new audioframe is available
                self.new_audioframe_available = threading.Event()
                # Start audiorender loop
                self.audioframe_handler = threading.Thread(
                    target=self.__audiorender_thread)
                self.audioframe_handler.start()

            self.renderloop = threading.Thread(target=self.__render)
            self.renderloop.start()
        else:
            print("Rendering thread already running!")

    def pause(self):
        """ Toggles the playback status between PLAYING and PAUSED. Only has
        an effect if the current status is PLAYING or PAUSED (not READY). """
        if self.status == PAUSED:
            self.status = PLAYING
            self.clock.pause()
        elif self.status == PLAYING:
            self.status = PAUSED
            self.clock.pause()

    def stop(self):
        # Stop the clock
        self.clock.stop()
        # Set player status to ready
        self.status = READY

    def __render(self):
        """ Main render loop. Checks the clock to determine whether new video
        and audio frames need to be rendered. If so, it passes the frames or
        signals on to the functions that take care of rendering them. """

        # Render first frame
        self.__render_videoframe()

        # Start videoclock with start of this thread
        self.clock.start()

        # Main rendering loop
        while self.status in [PLAYING, PAUSED]:
            current_frame_no = self.clock.current_frame

            # Check if end of clip has been reached
            if self.clock.time > self.duration:
                self.status = EOS
                break

            if self.last_frame_no != current_frame_no:
                # A new frame is available. Get it from the stream
                self.current_time = self.clock.time
                self.next_audio_refresh_t = self.current_time + self.frame_interval

                # Only signal the audio thread if the clip actually has audio
                if self.audioformat:
                    self.new_audioframe_available.set()
                self.__render_videoframe()

            self.last_frame_no = current_frame_no
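            # Poll the clock roughly every 10 ms; cheap relative to the
            # frame interval at typical frame rates (e.g. 33 ms at 30 fps).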
            time.sleep(0.01)

        self.clock.stop()
        print("Rendering stopped!")

        # Make sure the audio render thread exits gracefully and is not
        # left waiting on the event forever
        if self.audioformat:
            self.new_audioframe_available.set()

    def __render_videoframe(self):
        """ Handles a new video frame once it is available. Runs in a separate
        thread so that audio playback is not interrupted if the computer is
        too slow to render video frames at sufficient speed. """

        new_videoframe = self.clip.get_frame(self.clock.time)
        # Pass it to the callback function if this is set
        if self.__videorenderfunc:
            self.__videorenderfunc(new_videoframe)
        # Store it as the current video frame
        self.__current_videoframe = new_videoframe

    def __audiorender_thread(self):
        print("Starting audio render thread")
        while self.status in [PLAYING, PAUSED]:
            # Get current time boundaries for audiochunk to retrieve
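            # The 1.05 factor fetches ~5% extra samples; presumably a safety
            # margin so clock jitter between the threads does not leave gaps
            # between consecutive audio chunks.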
            interval = np.arange(
                int(self.audioformat['fps'] * self.current_time),
                int(self.audioformat['fps'] * (self.next_audio_refresh_t) *
                    1.05),
            )

            # Retrieve audiochunk
            new_audioframe = self.clip.audio.to_soundarray(
                tt=(1.0 / self.audioformat['fps']) * interval,
                buffersize=self.frame_interval * self.clip.audio.fps,
                quantize=True)

            self.new_audioframe_available.wait()
            # Clear the flag to wait for the next audioframe in next iteration
            self.new_audioframe_available.clear()

            # Check again if player still has the right status
            # after receiving the flag
            if self.status in [PLAYING, PAUSED]:
                if self.__audiorenderfunc:
                    self.__audiorenderfunc(new_audioframe)
                self.__current_audioframe = new_audioframe

        print("Stopped audio render thread")

    # Object-specific functions
    def __repr__(self):
        """ Create the string representation that is shown when, for
        instance, print(player) is called. """
        return "Player [file loaded: {0}]".format(self.loaded_file)