Example #1
class AudioPlayer:
    def __init__(self):
        self._widget = None
        self._player = None
        self._timer = None

    def toggle_playback(self, widget):
        if self._widget == widget:
            if self._player.get_pause():
                plugins.video.video_player.pause_playback()
                self._player.set_pause(False)
                self._widget.audio_state = 'play'
                self._timer = Clock.schedule_interval(self._playback_update,
                                                      .1)
            else:
                self.pause_playback()
        else:
            plugins.video.video_player.pause_playback()
            if self._widget is not None:
                self.pause_playback()
            self._widget = widget
            self._widget.audio_state = 'play'
            self._player = MediaPlayer(filename=self._widget.audio_source,
                                       ff_opts={
                                           'paused': True,
                                           'ss': self._widget.audio_pos
                                       })
            Clock.schedule_interval(self._start_playback, .1)

    def _start_playback(self, dt):
        # Metadata is filled in asynchronously; once the duration is known,
        # start playback and return False to cancel this polling interval.
        if self._player.get_metadata()['duration'] is not None:
            self._player.set_pause(False)
            self._timer = Clock.schedule_interval(self._playback_update, .1)
            return False

    def pause_playback(self):
        if self._timer is not None:
            self._timer.cancel()
        if self._player is not None and not self._player.get_pause():
            self._player.set_pause(True)
        if self._widget is not None:
            self._widget.audio_state = 'pause'

    def _playback_update(self, dt):
        pts = self._player.get_pts()
        if pts >= self._widget.audio_length:
            self._player.set_pause(True)
            self._player.seek(pts=0, relative=False, accurate=True)
            self._widget.audio_state = 'pause'
            self._widget.audio_pos = 0
            return False
        self._widget.audio_pos = pts

    def update_audio_pos(self, widget, pts):
        if self._widget == widget and self._player is not None:
            self._player.seek(pts=pts, relative=False, accurate=True)
        widget.audio_pos = pts
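The player above is opened paused with an 'ss' start offset, then polled until FFmpeg has filled in the metadata before unpausing. A minimal standalone sketch of that load-then-poll idiom, assuming a local 'clip.mp3' exists:

import time
from ffpyplayer.player import MediaPlayer

player = MediaPlayer(filename='clip.mp3',  # hypothetical file
                     ff_opts={'paused': True, 'ss': 0.0})
# Metadata is filled in asynchronously; wait for the duration to appear.
while player.get_metadata()['duration'] is None:
    time.sleep(0.1)
player.set_pause(False)  # start playback once the file is ready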
Example #2
class SoundFFPy(Sound):
    @staticmethod
    def extensions():
        return formats_in

    def __init__(self, **kwargs):
        self._ffplayer = None
        self.quitted = False
        self._log_callback_set = False
        self._state = ''
        self.state = 'stop'

        if not get_log_callback():
            set_log_callback(_log_callback)
            self._log_callback_set = True

        super(SoundFFPy, self).__init__(**kwargs)

    def __del__(self):
        self.unload()
        if self._log_callback_set:
            set_log_callback(None)

    def _player_callback(self, selector, value):
        if self._ffplayer is None:
            return
        if selector == 'quit':

            def close(*args):
                self.quitted = True
                self.unload()

            Clock.schedule_once(close, 0)
        elif selector == 'eof':
            Clock.schedule_once(self._do_eos, 0)

    def load(self):
        self.unload()
        ff_opts = {'vn': True, 'sn': True}  # only audio
        self._ffplayer = MediaPlayer(self.source,
                                     callback=self._player_callback,
                                     loglevel='info',
                                     ff_opts=ff_opts)
        player = self._ffplayer
        player.set_volume(self.volume)
        player.toggle_pause()
        self._state = 'paused'
        # wait until loaded or failed, shouldn't take long, but just to make
        # sure metadata is available.
        s = time.perf_counter()  # time.clock() was removed in Python 3.8
        while ((not player.get_metadata()['duration']) and not self.quitted
               and time.perf_counter() - s < 10.):
            time.sleep(0.005)

    def unload(self):
        if self._ffplayer:
            self._ffplayer = None
        self._state = ''
        self.state = 'stop'
        self.quitted = False

    def play(self):
        if self._state == 'playing':
            super(SoundFFPy, self).play()
            return
        if not self._ffplayer:
            self.load()
        self._ffplayer.toggle_pause()
        self._state = 'playing'
        self.state = 'play'
        super(SoundFFPy, self).play()

    def stop(self):
        if self._ffplayer and self._state == 'playing':
            self._ffplayer.toggle_pause()
            self._state = 'paused'
            self.state = 'stop'
        super(SoundFFPy, self).stop()

    def seek(self, position):
        if self._ffplayer is None:
            return
        self._ffplayer.seek(position, relative=False)

    def get_pos(self):
        if self._ffplayer is not None:
            return self._ffplayer.get_pts()
        return 0

    def on_volume(self, instance, volume):
        if self._ffplayer is not None:
            self._ffplayer.set_volume(volume)

    def _get_length(self):
        if self._ffplayer is None:
            return super(SoundFFPy, self)._get_length()
        return self._ffplayer.get_metadata()['duration']

    def _do_eos(self, *args):
        if not self.loop:
            self.stop()
        else:
            self.seek(0.)
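The busy-wait in load() is the only place this provider blocks. A hedged sketch of the same loop lifted into a helper with an explicit timeout (wait_for_duration is not part of the original code):

import time

def wait_for_duration(player, timeout=10.0, poll=0.005):
    # Poll the player's metadata until the duration is known or the
    # timeout expires; returns the duration in seconds, or None.
    start = time.perf_counter()
    while not player.get_metadata()['duration']:
        if time.perf_counter() - start > timeout:
            return None
        time.sleep(poll)
    return player.get_metadata()['duration']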
Example #3
class SoundFFPy(Sound):

    @staticmethod
    def extensions():
        return formats_in

    def __init__(self, **kwargs):
        self._ffplayer = None
        self.quitted = False
        self._log_callback_set = False
        self._state = ''
        self.state = 'stop'
        self._callback_ref = WeakMethod(self._player_callback)

        if not get_log_callback():
            set_log_callback(_log_callback)
            self._log_callback_set = True

        super(SoundFFPy, self).__init__(**kwargs)

    def __del__(self):
        self.unload()
        if self._log_callback_set:
            set_log_callback(None)

    def _player_callback(self, selector, value):
        if self._ffplayer is None:
            return
        if selector == 'quit':
            def close(*args):
                self.quitted = True
                self.unload()
            Clock.schedule_once(close, 0)
        elif selector == 'eof':
            Clock.schedule_once(self._do_eos, 0)

    def load(self):
        self.unload()
        ff_opts = {'vn': True, 'sn': True}  # only audio
        self._ffplayer = MediaPlayer(self.source,
                                     callback=self._callback_ref,
                                     loglevel='info', ff_opts=ff_opts)
        player = self._ffplayer
        player.set_volume(self.volume)
        player.toggle_pause()
        self._state = 'paused'
        # wait until loaded or failed, shouldn't take long, but just to make
        # sure metadata is available.
        s = time.perf_counter()  # time.clock() was removed in Python 3.8
        while ((not player.get_metadata()['duration'])
               and not self.quitted and time.perf_counter() - s < 10.):
            time.sleep(0.005)

    def unload(self):
        if self._ffplayer:
            self._ffplayer = None
        self._state = ''
        self.state = 'stop'
        self.quitted = False

    def play(self):
        if self._state == 'playing':
            super(SoundFFPy, self).play()
            return
        if not self._ffplayer:
            self.load()
        self._ffplayer.toggle_pause()
        self._state = 'playing'
        self.state = 'play'
        super(SoundFFPy, self).play()

    def stop(self):
        if self._ffplayer and self._state == 'playing':
            self._ffplayer.toggle_pause()
            self._state = 'paused'
            self.state = 'stop'
        super(SoundFFPy, self).stop()

    def seek(self, position):
        if self._ffplayer is None:
            return
        self._ffplayer.seek(position, relative=False)

    def get_pos(self):
        if self._ffplayer is not None:
            return self._ffplayer.get_pts()
        return 0

    def on_volume(self, instance, volume):
        if self._ffplayer is not None:
            self._ffplayer.set_volume(volume)

    def _get_length(self):
        if self._ffplayer is None:
            return super(SoundFFPy, self)._get_length()
        return self._ffplayer.get_metadata()['duration']

    def _do_eos(self, *args):
        if not self.loop:
            self.stop()
        else:
            self.seek(0.)
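The only difference from Example #2 is _callback_ref: wrapping the bound method in WeakMethod keeps the callback stored inside MediaPlayer from creating a reference cycle that would keep the Sound alive. A small sketch of the dereference semantics, assuming kivy.weakmethod:

from kivy.weakmethod import WeakMethod

class Owner:
    def on_event(self, selector, value):
        print(selector, value)

owner = Owner()
ref = WeakMethod(owner.on_event)
cb = ref()  # the bound method, or None once 'owner' has been collected
if cb is not None:
    cb('eof', None)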
Example #4
class DowGlImage(QtOpenGLWidgets.QOpenGLWidget, QtGui.QOpenGLFunctions):
    __vertex_shader = """
    #version 440 core
    layout(location = 0) in vec3 inPosition;
    layout(location = 1) in vec2 texCoord;
    layout(location = 2) uniform vec2 biasTexCoord;

    layout(location = 0) out vec3 outColor;
    layout(location = 1) out vec2 outCoord;

    void main()
    {
      outColor = vec3(1.0f, 0.5f, 1.0f);
      outCoord = texCoord;
      float pos_x = inPosition.x * biasTexCoord.x;
      float pos_y = inPosition.y * biasTexCoord.y;

      gl_Position = vec4(pos_x, pos_y, 0.0, 1.0);
    }"""

    __frag_shader = """
    #version 440 core
    layout(location = 0) in vec3 inColor;
    layout(location = 1) in vec2 texCoord;
    layout(location = 0) out vec4 outColor;
    uniform sampler2D inTexture;

    void main()
    {
      outColor = texture(inTexture, texCoord);
    }
    """

    def __init__(self, parent, tag=None):
        QtOpenGLWidgets.QOpenGLWidget.__init__(self, parent)
        QtGui.QOpenGLFunctions.__init__(self)
        # Interleaved vertex data: x, y, z, u, v for each corner of the quad.
        self.__data = np.array([
            -1.0, -1.0, 0.0, 0.0, 0.0,
            -1.0,  1.0, 0.0, 0.0, 1.0,
             1.0,  1.0, 0.0, 1.0, 1.0,
             1.0, -1.0, 0.0, 1.0, 0.0,
        ], dtype=ctypes.c_float)

        self.tag = tag
        self.__mutex = threading.Lock()
        self._is_video = False
        self._is_video_playing = False

        self.__texture_generator = None
        self.__player = None
        self.__uniform_tex_bias = -1

    def __del__(self):
        # Thread objects cannot be force-stopped in Python 3; the update
        # thread is started as a daemon, so it ends with the process.
        pass

    def initializeGL(self):
        self.initializeOpenGLFunctions()
        self.glClearColor(0, 0, 0, 1)

        self.__program = QtOpenGL.QOpenGLShaderProgram()
        self.__program.addShaderFromSourceCode(QtOpenGL.QOpenGLShader.Vertex,
                                               self.__vertex_shader)
        self.__program.addShaderFromSourceCode(QtOpenGL.QOpenGLShader.Fragment,
                                               self.__frag_shader)
        self.__program.link()

        self.__uniform_tex_bias = self.__program.uniformLocation(
            "biasTexCoord")

        self.__vao = QtOpenGL.QOpenGLVertexArrayObject()
        self.__vao.create()
        self.__vao.bind()

        self.__buffer = QtOpenGL.QOpenGLBuffer(
            QtOpenGL.QOpenGLBuffer.Type.VertexBuffer)
        self.__buffer.create()
        self.__buffer.bind()

        float_size = ctypes.sizeof(ctypes.c_float)
        null = VoidPtr(0)
        pointer = VoidPtr(3 * float_size)

        self.__buffer.allocate(self.__data.tobytes(),
                               self.__data.size * float_size)
        self.glVertexAttribPointer(0, 3, int(pygl.GL_FLOAT),
                                   int(pygl.GL_FALSE), 5 * float_size, null)
        self.glVertexAttribPointer(1, 2, int(pygl.GL_FLOAT),
                                   int(pygl.GL_FALSE), 5 * float_size, pointer)
        self.glEnableVertexAttribArray(0)
        self.glEnableVertexAttribArray(1)
        self.__vao.release()
        self.__buffer.release()

        self.__video_thread = threading.Thread(target=self.__video_play,
                                               args=(),
                                               daemon=True)
        self.__video_thread.start()

    def resizeGL(self, w, h):
        self.glViewport(0, 0, w, h)

    def paintGL(self):
        self.glClear(pygl.GL_COLOR_BUFFER_BIT)

        self.__mutex.acquire()

        if self.__texture_generator is not None:
            texture = None
            try:
                texture = next(self.__texture_generator)
            except StopIteration:
                pass

            if texture is not None:
                rate = min(self.size().width() / texture.width(),
                           self.size().height() / texture.height())
                rate_x = (texture.width() / self.size().width()) * rate
                rate_y = (texture.height() / self.size().height()) * rate
                self.__program.bind()
                if self.__uniform_tex_bias > -1:
                    self.__program.setUniformValue(self.__uniform_tex_bias,
                                                   rate_x, rate_y)

                self.__vao.bind()
                self.glActiveTexture(pygl.GL_TEXTURE0)
                texture.bind()
                # GL_POLYGON is not in the core profile the shaders request;
                # a triangle fan draws the same quad.
                self.glDrawArrays(int(pygl.GL_TRIANGLE_FAN), 0, 4)
                texture.release()
                self.__vao.release()
                self.__program.release()
                if self._is_video:
                    texture.destroy()
            else:
                self.__texture_generator = None
                self._is_video = False

        self.__mutex.release()

    def __create_texture(self, image):
        texture = QtOpenGL.QOpenGLTexture(QtOpenGL.QOpenGLTexture.Target2D)
        texture.setMinMagFilters(QtOpenGL.QOpenGLTexture.Filter.Nearest,
                                 QtOpenGL.QOpenGLTexture.Filter.Linear)
        texture.setBorderColor(0, 0, 0, 1)
        texture.setWrapMode(QtOpenGL.QOpenGLTexture.ClampToBorder)
        texture.setAutoMipMapGenerationEnabled(False)
        texture.setData(
            QtGui.QImage(image, image.shape[1], image.shape[0],
                         QtGui.QImage.Format_RGBA8888).mirrored())
        return texture

    def __video_stream(self, filename):
        video = cv2.VideoCapture(str(filename))
        if self.__player is not None:
            self.__player.close_player()
            self.__player = None

        self.__player = MediaPlayer(str(filename))
        self.__player.set_volume(1.0)
        self._is_video_playing = True
        while video.isOpened():
            ret, frame = video.read()
            self.__player.get_frame(show=False)
            if ret:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
                tex = self.__create_texture(frame)
                yield tex
            else:
                video.set(cv2.CAP_PROP_POS_FRAMES, 0)
                self.__player.seek(0, relative=False)

        self._is_video_playing = False
        return None

    def __image_stream(self, filename):
        image = cv2.imread(str(filename), cv2.IMREAD_UNCHANGED)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA)
        tex = self.__create_texture(image)
        if self.__player is not None:
            self.__player.close_player()
            self.__player = None

        while True:
            yield tex

    def SetImage(self, filename):
        self.__mutex.acquire()
        self._is_video = False
        if self.__texture_generator is not None:
            tex = next(self.__texture_generator)
            tex.destroy()
        self.__texture_generator = self.__image_stream(filename)
        self.__mutex.release()

    def SetVideo(self, filename):
        self.__mutex.acquire()
        self._is_video = True
        self.__texture_generator = self.__video_stream(filename)
        self.__mutex.release()

    def Clear(self):
        self.__mutex.acquire()
        self.__texture_generator = None
        self.__mutex.release()

    def __video_play(self):
        # Request a repaint roughly every 1/24 s until the widget is deleted.
        while True:
            try:
                self.update()
            except RuntimeError:
                # The underlying C++ widget is gone; stop the loop.
                break
            time.sleep(0.0416)
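__video_stream pairs an OpenCV capture (for the pixels) with a MediaPlayer (for the audio). Stripped of the GL texture work, the looping frame generator reduces to something like this sketch, assuming a local 'clip.mp4':

import cv2

def rgba_frames(path):
    # Yield RGBA frames forever, rewinding at end-of-file as above.
    cap = cv2.VideoCapture(path)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            cap.set(cv2.CAP_PROP_POS_FRAMES, 0)  # loop back to frame 0
            continue
        yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)

gen = rgba_frames('clip.mp4')  # hypothetical file
first = next(gen, None)        # an RGBA ndarray, or None if it won't open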
Example #5
File: test.py Project: varung/ffpyplayer
class PlayerApp(App):

    def __init__(self, **kwargs):
        super(PlayerApp, self).__init__(**kwargs)
        self.texture = None
        self.size = (0, 0)
        self.next_frame = None
        self._done = False
        self._lock = RLock()
        self._thread = Thread(target=self._next_frame, name='Next frame')
        self._trigger = Clock.create_trigger(self.redraw)
        self._force_refresh = False

    def build(self):
        self.root = Root()
        return self.root

    def on_start(self):
        self.callback_ref = WeakMethod(self.callback)
        filename = sys.argv[1]
        logging.info('ffpyplayer: Playing file "{}"'.format(filename))
        # try ff_opts = {'vf':'edgedetect'} http://ffmpeg.org/ffmpeg-filters.html
        ff_opts = {}
        self.ffplayer = MediaPlayer(filename, callback=self.callback_ref,
                                    loglevel=log_level, ff_opts=ff_opts)
        self._thread.start()
        self.keyboard = Window.request_keyboard(None, self.root)
        self.keyboard.bind(on_key_down=self.on_keyboard_down)

    def resize(self):
        if self.ffplayer:
            w, h = self.ffplayer.get_metadata()['src_vid_size']
            if not h:
                return
            lock = self._lock
            lock.acquire()
            if self.root.image.width < self.root.image.height * w / float(h):
                self.ffplayer.set_size(-1, self.root.image.height)
            else:
                self.ffplayer.set_size(self.root.image.width, -1)
            lock.release()
            logging.debug('ffpyplayer: Resized video.')

    def update_pts(self, *args):
        if self.ffplayer:
            self.root.seek.value = self.ffplayer.get_pts()

    def on_keyboard_down(self, keyboard, keycode, text, modifiers):
        if not self.ffplayer:
            return False
        lock = self._lock
        ctrl = 'ctrl' in modifiers
        if keycode[1] == 'p' or keycode[1] == 'spacebar':
            logging.info('Toggled pause.')
            self.ffplayer.toggle_pause()
        elif keycode[1] == 'r':
            logging.debug('ffpyplayer: Forcing a refresh.')
            self._force_refresh = True
        elif keycode[1] == 'v':
            logging.debug('ffpyplayer: Changing video stream.')
            lock.acquire()
            self.ffplayer.request_channel('video',
                                          'close' if ctrl else 'cycle')
            lock.release()
            Clock.unschedule(self.update_pts)
            if ctrl:    # need to continue updating pts, since video is disabled.
                Clock.schedule_interval(self.update_pts, 0.05)
        elif keycode[1] == 'a':
            logging.debug('ffpyplayer: Changing audio stream.')
            lock.acquire()
            self.ffplayer.request_channel('audio',
                                          'close' if ctrl else 'cycle')
            lock.release()
        elif keycode[1] == 't':
            logging.debug('ffpyplayer: Changing subtitle stream.')
            lock.acquire()
            self.ffplayer.request_channel('subtitle',
                                          'close' if ctrl else 'cycle')
            lock.release()
        elif keycode[1] == 'right':
            logging.debug('ffpyplayer: Seeking forward by 10s.')
            self.ffplayer.seek(10.)
        elif keycode[1] == 'left':
            logging.debug('ffpyplayer: Seeking back by 10s.')
            self.ffplayer.seek(-10.)
        elif keycode[1] == 'up':
            logging.debug('ffpyplayer: Increasing volume.')
            self.ffplayer.set_volume(self.ffplayer.get_volume() + 0.01)
            self.root.volume.value = self.ffplayer.get_volume()
        elif keycode[1] == 'down':
            logging.debug('ffpyplayer: Decreasing volume.')
            self.ffplayer.set_volume(self.ffplayer.get_volume() - 0.01)
            self.root.volume.value = self.ffplayer.get_volume()
        return True

    def touch_down(self, touch):
        if self.root.seek.collide_point(*touch.pos) and self.ffplayer:
            pts = ((touch.pos[0] - self.root.volume.width) /
                   self.root.seek.width *
                   self.ffplayer.get_metadata()['duration'])
            logging.debug('ffpyplayer: Seeking to {}.'.format(pts))
            self.ffplayer.seek(pts, relative=False)
            self._force_refresh = True
            return True
        return False

    def callback(self, selector, value):
        if self.ffplayer is None:
            return
        if selector == 'quit':
            logging.debug('ffpyplayer: Quitting.')
            def close(*args):
                self._done = True
                self.ffplayer = None
            Clock.schedule_once(close, 0)
        # called from internal thread, it typically reads forward
        elif selector == 'display_sub':
            self.display_subtitle(*value)

    def _next_frame(self):
        ffplayer = self.ffplayer
        sleep = time.sleep
        trigger = self._trigger
        while not self._done:
            force = self._force_refresh
            if force:
                self._force_refresh = False
            frame, val = ffplayer.get_frame(force_refresh=force)

            if val == 'eof':
                logging.debug('ffpyplayer: Got eof.')
                sleep(1 / 30.)
            elif val == 'paused':
                logging.debug('ffpyplayer: Got paused.')
                sleep(1 / 30.)
            else:
                if frame:
                    logging.debug('ffpyplayer: Next frame: {}.'.format(val))
                    sleep(val)
                    self.next_frame = frame
                    trigger()
                else:
                    val = val if val else (1 / 30.)
                    logging.debug('ffpyplayer: Schedule next frame check: {}.'
                                  .format(val))
                    sleep(val)

    def redraw(self, dt=0, force_refresh=False):
        if not self.ffplayer:
            return
        if self.next_frame:
            img, pts = self.next_frame
            if img.get_size() != self.size or self.texture is None:
                self.root.image.canvas.remove_group(str(self)+'_display')
                self.texture = Texture.create(size=img.get_size(),
                                              colorfmt='rgb')
                # by adding 'vf':'vflip' to the player initialization ffmpeg
                # will do the flipping
                self.texture.flip_vertical()
                self.texture.add_reload_observer(self.reload_buffer)
                self.size = img.get_size()
                logging.debug('ffpyplayer: Creating new image texture of '
                              'size: {}.'.format(self.size))
            self.texture.blit_buffer(img.to_memoryview()[0])
            self.root.image.texture = None
            self.root.image.texture = self.texture
            self.root.seek.value = pts
            logging.debug('ffpyplayer: Blitted new frame with time: {}.'
                          .format(pts))

        if self.root.seek.value:
            self.root.seek.max = self.ffplayer.get_metadata()['duration']

    def display_subtitle(self, text, fmt, pts, t_start, t_end):
        pass # fmt is text (unformatted), or ass (formatted subs)

    def reload_buffer(self, *args):
        logging.debug('ffpyplayer: Reloading buffer.')
        frame = self.next_frame
        if not frame:
            return
        self.texture.blit_buffer(frame[0].to_memoryview()[0], colorfmt='rgb',
                                 bufferfmt='ubyte')
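Stripped of the Kivy texture and trigger machinery, _next_frame follows ffpyplayer's standard get_frame() contract: 'eof' and 'paused' are sentinel values, and otherwise val is the delay until the frame is due. A minimal console version of the loop, assuming a local 'movie.mp4':

import time
from ffpyplayer.player import MediaPlayer

player = MediaPlayer('movie.mp4')  # hypothetical file
while True:
    frame, val = player.get_frame()
    if val == 'eof':
        break
    if val == 'paused' or frame is None:
        time.sleep(1 / 30.)  # nothing due yet, check again shortly
        continue
    img, pts = frame         # ffpyplayer Image and its timestamp
    time.sleep(val)          # val is the delay until display time
    print(img.get_size(), pts)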
Example #6
def process(inputDir):
    cv2.namedWindow("frame", cv2.WINDOW_NORMAL)

    cap = cv2.VideoCapture(os.path.join(inputDir, 'worldCamera.mp4'))
    t2i = Timestamp2Index(os.path.join(inputDir, 'frame_timestamps.tsv'))
    ff_opts = {'vn': False, 'volume': 1.}  #{'sync':'video', 'framedrop':True}
    player = MediaPlayer(os.path.join(inputDir, 'worldCamera.mp4'),
                         ff_opts=ff_opts)
    while player.get_metadata()['src_vid_size'] == (0, 0):
        time.sleep(0.01)
    frame_size = player.get_metadata()['src_vid_size']
    frateInfo = player.get_metadata()['frame_rate']
    frate = float(frateInfo[0]) / frateInfo[1]
    print(frateInfo, frate)
    width = int(frame_size[0])
    height = int(frame_size[1])
    val = ''
    cvImg = np.zeros((height, width, 3))
    print(np.shape(cvImg))

    # Read gaze data
    gazes = {}
    with open(os.path.join(inputDir, 'gazeData_world.tsv'), 'r') as f:
        reader = DictReader(f, delimiter='\t')
        for entry in reader:
            frame_idx = int(float(entry['frame_idx']))
            confidence = float(entry['confidence'])
            try:
                gx = float(entry['norm_pos_x']) * width
                gy = float(entry['norm_pos_y']) * height
                gaze = Gaze(gx, gy, confidence)
                if frame_idx in gazes:
                    gazes[frame_idx].append(gaze)
                else:
                    gazes[frame_idx] = [gaze]
            except Exception as e:
                sys.stderr.write(str(e) + '\n')
                sys.stderr.write('[WARNING] Problematic entry: %s\n' % (entry))

    # Read ground truth and transformation
    gt = {}
    transformation = {}
    with open(os.path.join(inputDir, 'transformations.tsv'), 'r') as f:
        reader = csv.DictReader(f, delimiter='\t')
        for entry in reader:
            frame_idx = int(entry['frame_idx'])

            # ground truth pixel position in undistorted image
            tmp = entry['gt'].split(',')
            gt[frame_idx] = (float(tmp[0]), float(tmp[1]))

    lastIdx = None
    while val != 'eof':
        frame, val = player.get_frame(True)
        if val != 'eof' and frame is not None:
            img, video_pts = frame
            #cvImg = np.reshape(np.asarray(img.to_bytearray()[0]), (height, width, 3)).copy()
            #cvImg = cv2.cvtColor(cvImg, cv2.COLOR_RGB2BGR)
            # audio pts, because the player runs in the default audio sync mode
            audio_pts = player.get_pts()

            # assumes the frame rate is constant, which is dangerous (frame drops and what not)
            #idx = math.floor(video_pts*frate)

            # the audio is my shepherd and nothing shall I lack :-)
            # From the experience, PROP_POS_MSEC is utterly broken; let's use indexes instead
            idx = t2i.find(audio_pts) - 1  # opencv starts at 0; processed data at 1
            idxOffset = cap.get(cv2.CAP_PROP_POS_FRAMES) - idx
            if abs(idxOffset) > 0:
                cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
            if lastIdx is None or lastIdx != idx:
                # print(idx,cap.get(cv2.CAP_PROP_FRAME_COUNT))
                ret, cvImg = cap.read()

                if idx in gazes:
                    for gaze in gazes[idx]:
                        gaze.draw(cvImg)

                if idx in gt:
                    x = int(round(gt[idx][0]))
                    y = int(round(gt[idx][1]))
                    cv2.line(cvImg, (x, 0), (x, int(height)), (0, 255, 0), 2)
                    cv2.line(cvImg, (0, y), (int(width), y), (0, 255, 0), 2)

                cv2.rectangle(cvImg, (0, int(height)),
                              (int(0.25 * width), int(height) - 30), (0, 0, 0),
                              -1)
                cv2.putText(cvImg, ("%8.2f [%6d]" % (audio_pts, idx)),
                            (0, int(height) - 5), cv2.FONT_HERSHEY_PLAIN, 2,
                            (0, 255, 255), 2)

                cv2.imshow("frame", cvImg)
                if width > 1280:
                    cv2.resizeWindow('frame', 1280, 720)
                lastIdx = idx

            key = cv2.waitKey(1) & 0xFF
            if key == ord('k'):
                player.seek(audio_pts + 10, relative=False)
            if key == ord('j'):
                player.seek(max(0, audio_pts - 10), relative=False)
            if key == ord('l'):
                player.seek(audio_pts + 5, relative=False)
            if key == ord('h'):
                player.seek(max(0, audio_pts - 5), relative=False)
            if key == ord('p'):
                player.toggle_pause()
            if key == ord('q'):
                break

    cap.release()
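Timestamp2Index is a project-local helper, not part of ffpyplayer. Its role above (mapping an audio pts to a 1-based frame index) can be sketched with bisect over the timestamps loaded from frame_timestamps.tsv; this is a hypothetical reconstruction, not the original class:

import bisect

class Timestamp2Index:
    # The real helper loads its timestamps (seconds, sorted) from a .tsv file.
    def __init__(self, timestamps):
        self.timestamps = sorted(timestamps)

    def find(self, pts):
        # 1-based index of the first frame at or after pts, matching the
        # "processed data starts at 1" convention used in process().
        return bisect.bisect_left(self.timestamps, pts) + 1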
Example #7
class VideoPlayer:
    def __init__(self):
        self._widget = None
        self._player = None
        self._timer = None
        self._frame = None
        self._texture = None
        self._trigger = Clock.create_trigger(self._redraw)

    def toggle_playback(self, widget):
        if self._widget == widget:
            if self._player.get_pause():
                plugins.audio.audio_player.pause_playback()
                self._player.set_pause(False)
                self._widget.video_state = 'play'
                Clock.schedule_once(self._next_frame)
            else:
                self.pause_playback()
        else:
            plugins.audio.audio_player.pause_playback()
            if self._widget is not None:
                self.pause_playback()
            self._widget = widget
            self._widget.video_state = 'play'
            self._texture = None
            self._player = MediaPlayer(filename=self._widget.video_source,
                                       ff_opts={
                                           'paused': True,
                                           'ss': self._widget.video_pos
                                       })
            Clock.schedule_interval(self._start_playback, .1)

    def _start_playback(self, dt):
        if self._player.get_metadata()['duration'] is None:
            return
        if self._player.get_pause():
            self._player.set_pause(False)
        Clock.schedule_once(self._next_frame, 0)
        return False

    def pause_playback(self):
        if self._timer is not None:
            self._timer.cancel()
        if self._player is not None:
            self._player.set_pause(True)
        self._frame = None
        self._texture = None
        if self._widget is not None:
            self._widget.video_state = 'pause'

    def update_video_pos(self, widget, pts):
        if self._widget == widget and self._player is not None:
            self._player.seek(pts=pts, relative=False, accurate=True)
        widget.video_pos = pts

    def _next_frame(self, dt):
        frame, val = self._player.get_frame()
        if val == 'eof':
            self._player.set_pause(True)
            self._player.seek(pts=0, relative=False, accurate=True)
            self._widget.video_image.texture = self._widget.video_cover_image_texture
            self._widget.video_state = 'pause'
            self._widget.video_pos = 0
        elif val == 'paused':
            return
        elif frame is None:
            Clock.schedule_once(self._next_frame, 1 / 100)
        else:
            val = val if val else 1 / 30
            self._frame = frame
            self._trigger()
            Clock.schedule_once(self._next_frame, val)

    def _redraw(self, dt):
        if self._player is None or self._frame is None:
            return
        img, pts = self._frame
        if self._texture is None:
            self._texture = Texture.create(size=img.get_size(), colorfmt='rgb')
            self._texture.flip_vertical()
        self._texture.blit_buffer(img.to_memoryview()[0])
        self._widget.video_image.texture = None
        self._widget.video_image.texture = self._texture
        self._widget.video_pos = pts
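As in Example #5, the work is split between a decode callback and a Clock trigger: Clock.create_trigger coalesces any number of calls into a single scheduled redraw on the main thread. The pattern in isolation looks roughly like this:

from kivy.clock import Clock

class FrameSink:
    def __init__(self):
        self._frame = None
        self._trigger = Clock.create_trigger(self._redraw)

    def on_frame(self, frame):
        self._frame = frame
        self._trigger()  # schedules _redraw once, however often this is called

    def _redraw(self, dt):
        print('drawing', self._frame)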
Example #8
class PlayerThread(QThread):
    image_sig = pyqtSignal(QtGui.QImage)
    status_sig = pyqtSignal(bool)
    progress_sig = pyqtSignal(float)

    def __init__(self, parent):
        super().__init__(parent)
        self.label = parent.label
        self.image_sig.connect(parent.set_image)
        self.status_sig.connect(parent.set_status)
        self.progress_sig.connect(parent.set_progress)
        self.player = None
        self.duration = None
        self.progress = 0
        self.ratio_mode = Qt.KeepAspectRatio
        self.config = {}

    def set_video_name(self, video_name):
        if self.player is not None:
            self.player.close_player()
        self.player = MediaPlayer(video_name)
        self.status_sig.emit(self.player.get_pause())
        self.start()

    def set_config(self, config):
        self.config = config

    def close(self):
        if self.player is not None:
            self.player.close_player()
        self.quit()

    def pause(self):
        if self.player is not None:
            self.player.set_pause(True)
            self.status_sig.emit(True)

    def toggle_pause(self):
        if self.player is not None:
            self.player.toggle_pause()
            self.status_sig.emit(self.player.get_pause())

    def next_prev(self, is_forward):
        if self.player is not None:
            chunk_position = self.find_chunk(self.progress)
            if is_forward:
                if chunk_position < self.config['total'] - 1:
                    chunk_position += 1
                    self.player.seek(
                        self.config['chunks'][chunk_position][0] / 1000,
                        relative=False, accurate=False)
            else:
                if chunk_position > 0:
                    chunk_position -= 1
                self.player.seek(
                    self.config['chunks'][chunk_position][0] / 1000,
                    relative=False, accurate=False)

    def find_chunk(self, pts):
        if self.config:
            pts_ms = int(1000 * pts)
            front = 0
            rear = self.config['total'] - 1
            chunks = self.config['chunks']
            while front != rear:
                middle = (front + rear) // 2
                if pts_ms > chunks[middle][0]:
                    if pts_ms < chunks[middle + 1][0]:
                        break
                    else:
                        front = middle + 1
                else:
                    rear = middle
            return (front + rear) // 2
        else:
            return 0

    def seek(self, ratio):
        if self.duration is not None:
            pts = ratio * self.duration
            self.player.seek(pts, relative=False, accurate=False)

    def image_stretch(self, is_stretch):
        if is_stretch:
            self.ratio_mode = Qt.IgnoreAspectRatio
        else:
            self.ratio_mode = Qt.KeepAspectRatio

    def run(self):
        val = ''
        while val != 'eof':
            frame, val = self.player.get_frame()
            if self.duration is None:
                self.duration = self.player.get_metadata()['duration']
            if val != 'eof' and frame is not None:
                img, t = frame
                if img is not None:
                    byte = img.to_bytearray()[0]
                    width, height = img.get_size()
                    # Pass the row stride explicitly; RGB888 rows are not
                    # 32-bit aligned for every width.
                    convert_to_qt_format = QtGui.QImage(
                        byte, width, height, width * 3,
                        QImage.Format_RGB888)
                    p = convert_to_qt_format.scaled(
                        self.label.width(), self.label.height(),
                        self.ratio_mode)
                    self.image_sig.emit(p)
                    self.progress = t
                    if self.duration is not None:
                        self.progress_sig.emit(t / self.duration)
                    time.sleep(val)
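find_chunk's binary search locates the chunk whose [start, next-start) interval contains a pts. A standalone version of the same logic over a plain list of start times in milliseconds, with a small check:

def find_chunk(chunk_starts_ms, pts):
    # Same search as PlayerThread.find_chunk, over bare start times.
    pts_ms = int(1000 * pts)
    front, rear = 0, len(chunk_starts_ms) - 1
    while front != rear:
        middle = (front + rear) // 2
        if pts_ms > chunk_starts_ms[middle]:
            if pts_ms < chunk_starts_ms[middle + 1]:
                return middle
            front = middle + 1
        else:
            rear = middle
    return front

assert find_chunk([0, 4000, 9000], 5.2) == 1  # 5.2 s falls in chunk 1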
Example #9
class CustomImage(KivyImage):
    """Custom image display widget.
    Enables editing operations, displaying them in real-time using a low resolution preview of the original image file.
    All editing variables are watched by the widget and it will automatically update the preview when they are changed.
    """

    exif = ''
    pixel_format = ''
    length = NumericProperty(0)
    framerate = ListProperty()
    video = BooleanProperty(False)
    player = ObjectProperty(None, allownone=True)
    position = NumericProperty(0.0)
    start_point = NumericProperty(0.0)
    end_point = NumericProperty(1.0)
    original_image = ObjectProperty()
    photoinfo = ListProperty()
    original_width = NumericProperty(0)
    original_height = NumericProperty(0)
    flip_horizontal = BooleanProperty(False)
    flip_vertical = BooleanProperty(False)
    mirror = BooleanProperty(False)
    angle = NumericProperty(0)
    rotate_angle = NumericProperty(0)
    fine_angle = NumericProperty(0)
    brightness = NumericProperty(0)
    shadow = NumericProperty(0)
    contrast = NumericProperty(0)
    gamma = NumericProperty(0)
    saturation = NumericProperty(0)
    temperature = NumericProperty(0)
    tint = ListProperty([1.0, 1.0, 1.0, 1.0])
    curve = ListProperty()
    crop_top = NumericProperty(0)
    crop_bottom = NumericProperty(0)
    crop_left = NumericProperty(0)
    crop_right = NumericProperty(0)
    filter = StringProperty('')
    filter_amount = NumericProperty(0)
    autocontrast = BooleanProperty(False)
    equalize = NumericProperty(0)
    histogram = ListProperty()
    edit_image = ObjectProperty()
    cropping = BooleanProperty(False)
    touch_point = ObjectProperty()
    active_cropping = BooleanProperty(False)
    crop_start = ListProperty()
    sharpen = NumericProperty(0)
    bilateral = NumericProperty(0.5)
    bilateral_amount = NumericProperty(0)
    median_blur = NumericProperty(0)
    vignette_amount = NumericProperty(0)
    vignette_size = NumericProperty(.5)
    edge_blur_amount = NumericProperty(0)
    edge_blur_size = NumericProperty(.5)
    edge_blur_intensity = NumericProperty(.5)
    cropper = ObjectProperty()  #Holder for the cropper overlay
    crop_controls = ObjectProperty()  #Holder for the cropper edit panel object
    adaptive_clip = NumericProperty(0)
    border_opacity = NumericProperty(1)
    border_image = ListProperty()
    border_tint = ListProperty([1.0, 1.0, 1.0, 1.0])
    border_x_scale = NumericProperty(.5)
    border_y_scale = NumericProperty(.5)
    crop_min = NumericProperty(100)
    size_multiple = NumericProperty(1)

    #Denoising variables
    denoise = BooleanProperty(False)
    luminance_denoise = NumericProperty(10)
    color_denoise = NumericProperty(10)
    search_window = NumericProperty(15)
    block_size = NumericProperty(5)

    frame_number = 0
    max_frames = 0
    start_seconds = 0
    first_frame = None

    def start_video_convert(self):
        self.close_video()
        self.player = MediaPlayer(self.source,
                                  ff_opts={
                                      'paused': True,
                                      'ss': 0.0,
                                      'an': True
                                  })
        self.player.set_volume(0)
        self.frame_number = 0
        if self.start_point > 0 or self.end_point < 1:
            all_frames = self.length * (self.framerate[0] / self.framerate[1])
            self.max_frames = all_frames * (self.end_point - self.start_point)
        else:
            self.max_frames = 0

        #need to wait for load so the seek routine doesn't crash python
        self.first_frame = self.wait_frame()

        if self.start_point > 0:
            self.start_seconds = self.length * self.start_point
            self.first_frame = self.seek_player(self.start_seconds)

    def wait_frame(self):
        #Ensures that a frame is gotten
        frame = None
        while not frame:
            frame, value = self.player.get_frame(force_refresh=True)
        return frame

    def start_seek(self, seek):
        #tell the player to seek to a position
        self.player.set_pause(False)
        self.player.seek(pts=seek, relative=False, accurate=True)
        self.player.set_pause(True)

    def seek_player(self, seek):
        self.start_seek(seek)

        framerate = self.framerate[0] / self.framerate[1]
        target_seek_frame = seek * framerate

        loops = 0
        total_loops = 0
        while True:
            loops += 1
            total_loops += 1
            if loops > 5:
                #seek has been stuck for a while, try to seek again
                self.start_seek(seek)
                loops = 0
            #check if seek has gotten within a couple frames yet
            frame = self.wait_frame()
            current_seek = frame[1]
            current_seek_frame = current_seek * framerate
            frame_distance = abs(target_seek_frame - current_seek_frame)
            if frame_distance < 2 or total_loops >= 30:
                #seek has finished, or give up after a lot of tries to not freeze the program...
                break
        return frame

    def get_converted_frame(self):
        if self.first_frame:
            frame = self.first_frame
            self.first_frame = None
        else:
            self.player.set_pause(False)
            frame = None
            while not frame:
                frame, value = self.player.get_frame(force_refresh=False)
                if value == 'eof':
                    return None
            self.player.set_pause(True)
        self.frame_number = self.frame_number + 1
        if self.max_frames:
            if self.frame_number > self.max_frames:
                return None
        frame_image = frame[0]
        frame_size = frame_image.get_size()
        frame_converter = SWScale(frame_size[0],
                                  frame_size[1],
                                  frame_image.get_pixel_format(),
                                  ofmt='rgb24')
        new_frame = frame_converter.scale(frame_image)
        image_data = bytes(new_frame.to_bytearray()[0])
        image = Image.frombuffer(mode='RGB',
                                 size=(frame_size[0], frame_size[1]),
                                 data=image_data,
                                 decoder_name='raw')
        #for some reason, video frames are read upside-down? fix it here...
        image = image.transpose(PIL.Image.FLIP_TOP_BOTTOM)
        if image.mode != 'RGB':
            image = image.convert('RGB')
        image = self.adjust_image(image, preview=False)
        return [image, frame[1]]

    def close_video(self):
        if self.player:
            self.player.close_player()
            self.player = None

    def open_video(self):
        self.player = MediaPlayer(self.source,
                                  ff_opts={
                                      'paused': True,
                                      'ss': 1.0,
                                      'an': True
                                  })
        frame = None
        while not frame:
            frame, value = self.player.get_frame(force_refresh=True)
        data = self.player.get_metadata()
        self.length = data['duration']
        self.framerate = data['frame_rate']
        self.pixel_format = data['src_pix_fmt']

    def set_aspect(self, aspect_x, aspect_y):
        """Adjusts the cropping of the image to be a given aspect ratio.
        Attempts to keep the image as large as possible.
        Arguments:
            aspect_x: Horizontal aspect ratio element, numerical value.
            aspect_y: Vertical aspect ratio element, numerical value.
        """

        width = self.original_width - self.crop_left - self.crop_right
        height = self.original_height - self.crop_top - self.crop_bottom
        if aspect_x != width or aspect_y != height:
            current_ratio = width / height
            target_ratio = aspect_x / aspect_y
            if target_ratio > current_ratio:
                #crop top/bottom, width is the same
                new_height = width / target_ratio
                height_difference = height - new_height
                crop_right = 0
                crop_left = 0
                crop_top = height_difference / 2
                crop_bottom = crop_top
            else:
                #crop sides, height is the same
                new_width = height * target_ratio
                width_difference = width - new_width
                crop_top = 0
                crop_bottom = 0
                crop_left = width_difference / 2
                crop_right = crop_left
        else:
            crop_top = 0
            crop_right = 0
            crop_bottom = 0
            crop_left = 0
        self.crop_top = self.crop_top + crop_top
        self.crop_right = self.crop_right + crop_right
        self.crop_bottom = self.crop_bottom + crop_bottom
        self.crop_left = self.crop_left + crop_left
        self.reset_cropper()

    def crop_percent(self, side, percent):
        texture_width = self.original_width
        texture_height = self.original_height
        crop_min = self.crop_min

        if side == 'top':
            crop_amount = texture_height * percent
            if (texture_height - crop_amount - self.crop_bottom) < crop_min:
                crop_amount = texture_height - self.crop_bottom - crop_min
            self.crop_top = crop_amount
        elif side == 'right':
            crop_amount = texture_width * percent
            if (texture_width - crop_amount - self.crop_left) < crop_min:
                crop_amount = texture_width - self.crop_left - crop_min
            self.crop_right = crop_amount
        elif side == 'bottom':
            crop_amount = texture_height * percent
            if (texture_height - crop_amount - self.crop_top) < crop_min:
                crop_amount = texture_height - self.crop_top - crop_min
            self.crop_bottom = crop_amount
        else:
            crop_amount = texture_width * percent
            if (texture_width - crop_amount - self.crop_right) < crop_min:
                crop_amount = texture_width - self.crop_right - crop_min
            self.crop_left = crop_amount
        self.reset_cropper()
        if self.crop_controls:
            self.crop_controls.update_crop()

    def get_crop_percent(self):
        width = self.original_width
        height = self.original_height
        top_percent = self.crop_top / height
        right_percent = self.crop_right / width
        bottom_percent = self.crop_bottom / height
        left_percent = self.crop_left / width
        return [top_percent, right_percent, bottom_percent, left_percent]

    def get_crop_size(self):
        new_width = self.original_width - self.crop_left - self.crop_right
        new_height = self.original_height - self.crop_top - self.crop_bottom
        new_aspect = new_width / new_height
        old_aspect = self.original_width / self.original_height
        return "Size: " + str(
            int(new_width)) + "x" + str(int(new_height)) + ", Aspect: " + str(
                round(new_aspect, 2)) + " (Original: " + str(
                    round(old_aspect, 2)) + ")"

    def reset_crop(self):
        """Sets the crop values back to 0 for all sides"""

        self.crop_top = 0
        self.crop_bottom = 0
        self.crop_left = 0
        self.crop_right = 0
        self.reset_cropper(setup=True)

    def reset_cropper(self, setup=False):
        """Updates the position and size of the cropper overlay object."""

        if self.cropper:
            texture_size = self.get_texture_size()
            texture_top_edge = texture_size[0]
            texture_right_edge = texture_size[1]
            texture_bottom_edge = texture_size[2]
            texture_left_edge = texture_size[3]

            texture_width = (texture_right_edge - texture_left_edge)
            #texture_height = (texture_top_edge - texture_bottom_edge)

            divisor = self.original_width / texture_width
            top_edge = texture_top_edge - (self.crop_top / divisor)
            bottom_edge = texture_bottom_edge + (self.crop_bottom / divisor)
            left_edge = texture_left_edge + (self.crop_left / divisor)
            right_edge = texture_right_edge - (self.crop_right / divisor)
            width = right_edge - left_edge
            height = top_edge - bottom_edge

            self.cropper.pos = [left_edge, bottom_edge]
            self.cropper.size = [width, height]
            if setup:
                self.cropper.max_resizable_width = width
                self.cropper.max_resizable_height = height

    def get_texture_size(self):
        """Returns a list of the texture size coordinates.
        Returns:
            List of numbers: [Top edge, Right edge, Bottom edge, Left edge]
        """

        left_edge = (self.size[0] / 2) - (self.norm_image_size[0] / 2)
        right_edge = left_edge + self.norm_image_size[0]
        bottom_edge = (self.size[1] / 2) - (self.norm_image_size[1] / 2)
        top_edge = bottom_edge + self.norm_image_size[1]
        return [top_edge, right_edge, bottom_edge, left_edge]

    def point_over_texture(self, pos):
        """Checks if the given pos (x,y) value is over the image texture.
        Returns False if not over texture, returns point transformed to texture coordinates if over texture.
        """

        texture_size = self.get_texture_size()
        top_edge = texture_size[0]
        right_edge = texture_size[1]
        bottom_edge = texture_size[2]
        left_edge = texture_size[3]
        if pos[0] > left_edge and pos[0] < right_edge:
            if pos[1] > bottom_edge and pos[1] < top_edge:
                texture_x = pos[0] - left_edge
                texture_y = pos[1] - bottom_edge
                return [texture_x, texture_y]
        return False

    def detect_crop_edges(self, first, second):
        """Given two points, this will detect the proper crop area for the image.
        Arguments:
            first: First crop corner.
            second: Second crop corner.
        Returns a list of cropping values:
            [crop_top, crop_bottom, crop_left, crop_right]
        """

        if first[0] < second[0]:
            left = first[0]
            right = second[0]
        else:
            left = second[0]
            right = first[0]
        if first[1] < second[1]:
            top = second[1]
            bottom = first[1]
        else:
            top = first[1]
            bottom = second[1]
        scale = self.original_width / self.norm_image_size[0]
        crop_top = (self.norm_image_size[1] - top) * scale
        crop_bottom = bottom * scale
        crop_left = left * scale
        crop_right = (self.norm_image_size[0] - right) * scale
        return [crop_top, crop_bottom, crop_left, crop_right]

    def set_crop(self, posx, posy, width, height):
        """Sets the crop values based on the cropper widget."""

        texture_size = self.get_texture_size()
        texture_top_edge = texture_size[0]
        texture_right_edge = texture_size[1]
        texture_bottom_edge = texture_size[2]
        texture_left_edge = texture_size[3]

        left_crop = posx - texture_left_edge
        bottom_crop = posy - texture_bottom_edge
        right_crop = texture_right_edge - width - posx
        top_crop = texture_top_edge - height - posy

        texture_width = (texture_right_edge - texture_left_edge)
        divisor = self.original_width / texture_width
        if left_crop < 0:
            self.crop_left = 0
        else:
            self.crop_left = left_crop * divisor
        if right_crop < 0:
            self.crop_right = 0
        else:
            self.crop_right = right_crop * divisor
        if top_crop < 0:
            self.crop_top = 0
        else:
            self.crop_top = top_crop * divisor
        if bottom_crop < 0:
            self.crop_bottom = 0
        else:
            self.crop_bottom = bottom_crop * divisor
        #self.update_preview(recrop=False)
        if self.crop_controls:
            self.crop_controls.update_crop()

    def on_sharpen(self, *_):
        self.update_preview()

    def on_bilateral(self, *_):
        self.update_preview()

    def on_bilateral_amount(self, *_):
        self.update_preview()

    def on_median_blur(self, *_):
        self.update_preview()

    def on_border_opacity(self, *_):
        self.update_preview()

    def on_border_image(self, *_):
        self.update_preview()

    def on_border_x_scale(self, *_):
        self.update_preview()

    def on_border_y_scale(self, *_):
        self.update_preview()

    def on_vignette_amount(self, *_):
        self.update_preview()

    def on_vignette_size(self, *_):
        self.update_preview()

    def on_edge_blur_amount(self, *_):
        self.update_preview()

    def on_edge_blur_size(self, *_):
        self.update_preview()

    def on_edge_blur_intensity(self, *_):
        self.update_preview()

    def on_rotate_angle(self, *_):
        self.update_preview()

    def on_fine_angle(self, *_):
        self.update_preview()

    def on_flip_horizontal(self, *_):
        self.update_preview()

    def on_flip_vertical(self, *_):
        self.update_preview()

    def on_autocontrast(self, *_):
        self.update_preview()

    def on_adaptive_clip(self, *_):
        self.update_preview()

    def on_equalize(self, *_):
        self.update_preview()

    def on_brightness(self, *_):
        self.update_preview()

    def on_shadow(self, *_):
        self.update_preview()

    def on_gamma(self, *_):
        self.update_preview()

    def on_contrast(self, *_):
        self.update_preview()

    def on_saturation(self, *_):
        self.update_preview()

    def on_temperature(self, *_):
        self.update_preview()

    def on_curve(self, *_):
        self.update_preview()

    def on_tint(self, *_):
        self.update_preview()

    def on_border_tint(self, *_):
        self.update_preview()

    def on_size(self, *_):
        pass

    def on_source(self, *_):
        """The source file has been changed, reload image and regenerate preview."""

        self.video = os.path.splitext(self.source)[1].lower() in movietypes
        if self.video:
            self.open_video()
        self.reload_edit_image()
        self.update_texture(self.edit_image)
        #self.update_preview()

    def on_position(self, *_):
        pass

    def reload_edit_image(self):
        """Regenerate the edit preview image."""
        if self.video:
            if not self.player:
                return
            location = self.length * self.position
            frame = self.seek_player(location)
            frame = frame[0]
            frame_size = frame.get_size()
            pixel_format = frame.get_pixel_format()
            frame_converter = SWScale(frame_size[0],
                                      frame_size[1],
                                      pixel_format,
                                      ofmt='rgb24')
            new_frame = frame_converter.scale(frame)
            image_data = bytes(new_frame.to_bytearray()[0])

            original_image = Image.frombuffer(mode='RGB',
                                              size=(frame_size[0],
                                                    frame_size[1]),
                                              data=image_data,
                                              decoder_name='raw')
            #for some reason, video frames are read upside-down? fix it here...
            original_image = original_image.transpose(
                PIL.Image.FLIP_TOP_BOTTOM)
            self.original_width = original_image.size[0]
            self.original_height = original_image.size[1]
            self.original_image = original_image
            image = original_image.copy()

        else:
            original_image = Image.open(self.source)
            try:
                self.exif = original_image.info.get('exif', b'')
            except Exception:
                self.exif = ''
            if self.angle != 0:
                if self.angle == 90:
                    original_image = original_image.transpose(
                        PIL.Image.ROTATE_90)
                if self.angle == 180:
                    original_image = original_image.transpose(
                        PIL.Image.ROTATE_180)
                if self.angle == 270:
                    original_image = original_image.transpose(
                        PIL.Image.ROTATE_270)
            self.original_width = original_image.size[0]
            self.original_height = original_image.size[1]
            image = original_image.copy()
            self.original_image = original_image.copy()
            original_image.close()
        image_width = Window.width * .75
        width = int(image_width)
        height = int(image_width * (image.size[1] / image.size[0]))
        if width < 10:
            width = 10
        if height < 10:
            height = 10
        image = image.resize((width, height))
        if image.mode != 'RGB':
            image = image.convert('RGB')
        self.size_multiple = self.original_width / image.size[0]
        self.edit_image = image
        # Delay this; kivy will mess up the drawing of it on first load otherwise.
        Clock.schedule_once(self.update_histogram)
        #self.histogram = image.histogram()

    def update_histogram(self, *_):
        self.histogram = self.edit_image.histogram()

    def on_texture(self, instance, value):
        if value is not None:
            self.texture_size = list(value.size)
        if self.mirror:
            self.texture.flip_horizontal()

    def denoise_preview(self, width, height, pos_x, pos_y):
        left = pos_x
        right = pos_x + width
        lower = pos_y + height
        upper = pos_y
        original_image = self.original_image
        preview = original_image.crop(box=(left, upper, right, lower))
        if preview.mode != 'RGB':
            preview = preview.convert('RGB')
        preview_cv = cv2.cvtColor(numpy.array(preview), cv2.COLOR_RGB2BGR)
        preview_cv = cv2.fastNlMeansDenoisingColored(preview_cv, None,
                                                     self.luminance_denoise,
                                                     self.color_denoise,
                                                     self.search_window,
                                                     self.block_size)
        preview_cv = cv2.cvtColor(preview_cv, cv2.COLOR_BGR2RGB)
        preview = Image.fromarray(preview_cv)
        preview_bytes = BytesIO()
        preview.save(preview_bytes, 'jpeg')
        preview_bytes.seek(0)
        return preview_bytes

    def update_preview(self, denoise=False, recrop=True):
        """Update the preview image."""

        image = self.adjust_image(self.edit_image)
        if denoise and opencv:
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2BGR)
            open_cv_image = cv2.fastNlMeansDenoisingColored(
                open_cv_image, None, self.luminance_denoise,
                self.color_denoise, self.search_window, self.block_size)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(open_cv_image)

        self.update_texture(image)
        self.histogram = image.histogram()
        if recrop:
            self.reset_cropper(setup=True)

    def adjust_image(self, image, preview=True):
        """Applies all current editing opterations to an image.
        Arguments:
            image: A PIL image.
            preview: Generate edit image in preview mode (faster)
        Returns: A PIL image.
        """

        if not preview:
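            # photoinfo[13] holds the image's EXIF orientation tag; the
            # transposes below undo it before the full-quality edits are applied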
            orientation = self.photoinfo[13]
            if orientation == 3 or orientation == 4:
                image = image.transpose(PIL.Image.ROTATE_180)
            elif orientation == 5 or orientation == 6:
                image = image.transpose(PIL.Image.ROTATE_90)
            elif orientation == 7 or orientation == 8:
                image = image.transpose(PIL.Image.ROTATE_270)
            if orientation in [2, 4, 5, 7]:
                image = image.transpose(PIL.Image.FLIP_LEFT_RIGHT)
            size_multiple = self.size_multiple
        else:
            size_multiple = 1

        if self.sharpen != 0:
            enhancer = ImageEnhance.Sharpness(image)
            image = enhancer.enhance(self.sharpen + 1)
        if self.median_blur != 0 and opencv:
            max_median = 10 * size_multiple
            median = int(self.median_blur * max_median)
            if median % 2 == 0:
                median = median + 1
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2BGR)
            open_cv_image = cv2.medianBlur(open_cv_image, median)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(open_cv_image)
        if self.bilateral != 0 and self.bilateral_amount != 0 and opencv:
            diameter = int(self.bilateral * 10 * size_multiple)
            if diameter < 1:
                diameter = 1
            sigma_color = self.bilateral_amount * 100 * size_multiple
            if sigma_color < 1:
                sigma_color = 1
            sigma_space = sigma_color
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2BGR)
            open_cv_image = cv2.bilateralFilter(open_cv_image, diameter,
                                                sigma_color, sigma_space)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(open_cv_image)
        if self.vignette_amount > 0 and self.vignette_size > 0:
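            # darken the edges by compositing the image over black through an
            # elliptical greyscale mask; blurring the mask softens the falloff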
            vignette = Image.new(mode='RGB', size=image.size, color=(0, 0, 0))
            filter_color = int((1 - self.vignette_amount) * 255)
            vignette_mixer = Image.new(mode='L',
                                       size=image.size,
                                       color=filter_color)
            draw = ImageDraw.Draw(vignette_mixer)
            shrink_x = int((self.vignette_size * (image.size[0] / 2)) -
                           (image.size[0] / 4))
            shrink_y = int((self.vignette_size * (image.size[1] / 2)) -
                           (image.size[1] / 4))
            draw.ellipse([
                0 + shrink_x, 0 + shrink_y, image.size[0] - shrink_x,
                image.size[1] - shrink_y
            ],
                         fill=255)
            vignette_mixer = vignette_mixer.filter(
                ImageFilter.GaussianBlur(radius=(self.vignette_amount * 60) +
                                         60))
            image = Image.composite(image, vignette, vignette_mixer)
        if self.edge_blur_amount > 0 and self.edge_blur_intensity > 0 and self.edge_blur_size > 0:
            blur_image = image.filter(
                ImageFilter.GaussianBlur(radius=(self.edge_blur_amount * 30)))
            filter_color = int((1 - self.edge_blur_intensity) * 255)
            blur_mixer = Image.new(mode='L',
                                   size=image.size,
                                   color=filter_color)
            draw = ImageDraw.Draw(blur_mixer)
            shrink_x = int((self.edge_blur_size * (image.size[0] / 2)) -
                           (image.size[0] / 4))
            shrink_y = int((self.edge_blur_size * (image.size[1] / 2)) -
                           (image.size[1] / 4))
            draw.ellipse([
                0 + shrink_x, 0 + shrink_y, image.size[0] - shrink_x,
                image.size[1] - shrink_y
            ],
                         fill=255)
            blur_mixer = blur_mixer.filter(
                ImageFilter.GaussianBlur(radius=(self.edge_blur_amount * 30)))
            image = Image.composite(image, blur_image, blur_mixer)
        if self.crop_top != 0 or self.crop_bottom != 0 or self.crop_left != 0 or self.crop_right != 0:
            if preview:
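                # preview mode only darkens the cropped-off margins so the
                # user can still see them; the real crop happens in the else
                # branch below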
                overlay = Image.new(mode='RGB',
                                    size=image.size,
                                    color=(0, 0, 0))
                divisor = self.original_width / image.size[0]
                draw = ImageDraw.Draw(overlay)
                draw.rectangle(
                    [0, 0, (self.crop_left / divisor), image.size[1]],
                    fill=(255, 255, 255))
                draw.rectangle(
                    [0, 0, image.size[0], (self.crop_top / divisor)],
                    fill=(255, 255, 255))
                draw.rectangle([(image.size[0] -
                                 (self.crop_right / divisor)), 0,
                                (image.size[0]), image.size[1]],
                               fill=(255, 255, 255))
                draw.rectangle([
                    0, (image.size[1] - (self.crop_bottom / divisor)),
                    image.size[0], image.size[1]
                ],
                               fill=(255, 255, 255))
                bright = ImageEnhance.Brightness(overlay)
                overlay = bright.enhance(.333)
                image = ImageChops.subtract(image, overlay)
            else:
                if self.crop_left >= image.size[0]:
                    crop_left = 0
                else:
                    crop_left = int(self.crop_left)
                if self.crop_top >= image.size[1]:
                    crop_top = 0
                else:
                    crop_top = int(self.crop_top)
                if self.crop_right >= image.size[0]:
                    crop_right = image.size[0]
                else:
                    crop_right = int(image.size[0] - self.crop_right)
                if self.crop_bottom >= image.size[1]:
                    crop_bottom = image.size[1]
                else:
                    crop_bottom = int(image.size[1] - self.crop_bottom)
                if self.video:
                    #ensure that image size is divisible by 2
                    new_width = crop_right - crop_left
                    new_height = crop_bottom - crop_top
                    if new_width % 2 == 1:
                        if crop_right < image.size[0]:
                            crop_right = crop_right + 1
                        else:
                            crop_right = crop_right - 1
                    if new_height % 2 == 1:
                        if crop_bottom < image.size[1]:
                            crop_bottom = crop_bottom + 1
                        else:
                            crop_bottom = crop_bottom - 1
                image = image.crop(
                    (crop_left, crop_top, crop_right, crop_bottom))
        if self.flip_horizontal:
            image = image.transpose(PIL.Image.FLIP_LEFT_RIGHT)
        if self.flip_vertical:
            image = image.transpose(PIL.Image.FLIP_TOP_BOTTOM)
        if self.rotate_angle != 0:
            if self.rotate_angle == 90:
                image = image.transpose(PIL.Image.ROTATE_270)
            elif self.rotate_angle == 180:
                image = image.transpose(PIL.Image.ROTATE_180)
            elif self.rotate_angle == 270:
                image = image.transpose(PIL.Image.ROTATE_90)
        if self.fine_angle != 0:
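            # fine rotation: rotate by the slider angle, then crop to the
            # largest axis-aligned rectangle that fits inside the rotated image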
            total_angle = -self.fine_angle * 10
            angle_radians = math.radians(abs(total_angle))
            width, height = rotated_rect_with_max_area(image.size[0],
                                                       image.size[1],
                                                       angle_radians)
            x = int((image.size[0] - width) / 2)
            y = int((image.size[1] - height) / 2)
            if preview:
                image = image.rotate(total_angle, expand=False)
            else:
                image = image.rotate(total_angle,
                                     resample=PIL.Image.BICUBIC,
                                     expand=False)
            image = image.crop((x, y, image.size[0] - x, image.size[1] - y))
        if self.autocontrast:
            image = ImageOps.autocontrast(image)
        if self.equalize != 0:
            equalize_image = ImageOps.equalize(image)
            image = Image.blend(image, equalize_image, self.equalize)
        temperature = int(round(abs(self.temperature) * 100))
        if temperature != 0:
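            # white balance: scale R/G/B by a blackbody (kelvin) triple using
            # a 4x3 color matrix of the form (r,0,0,0, 0,g,0,0, 0,0,b,0)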
            temperature = temperature - 1
            if self.temperature > 0:
                kelvin = negative_kelvin[99 - temperature]
            else:
                kelvin = positive_kelvin[temperature]
            matrix = ((kelvin[0] / 255.0), 0.0, 0.0, 0.0, 0.0,
                      (kelvin[1] / 255.0), 0.0, 0.0, 0.0, 0.0,
                      (kelvin[2] / 255.0), 0.0)
            image = image.convert('RGB', matrix)
        if self.brightness != 0:
            enhancer = ImageEnhance.Brightness(image)
            image = enhancer.enhance(1 + self.brightness)
        if self.shadow != 0:
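            # build a 256-entry lookup table that crushes (negative values) or
            # lifts (positive values) the shadows, repeated for all 3 channels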
            if self.shadow < 0:
                floor = int(abs(self.shadow) * 128)
                table = [0] * floor
                remaining_length = 256 - floor
                for index in range(0, remaining_length):
                    value = int(round((index / remaining_length) * 256))
                    table.append(value)
                lut = table * 3
            else:
                floor = int(abs(self.shadow) * 128)
                table = []
                for index in range(0, 256):
                    percent = 1 - (index / 255)
                    value = int(round(index + (floor * percent)))
                    table.append(value)
                lut = table * 3
            image = image.point(lut)

        if self.gamma != 0:
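            # map the -1..1 slider to a gamma exponent (-1 yields a huge
            # exponent that crushes everything below white to black) and apply
            # it as a per-channel lookup table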
            if self.gamma == -1:
                gamma = 99999999999999999
            elif self.gamma < 0:
                gamma = 1 / (self.gamma + 1)
            elif self.gamma > 0:
                gamma = 1 / ((self.gamma + 1) * (self.gamma + 1))
            else:
                gamma = 1
            lut = [pow(x / 255, gamma) * 255 for x in range(256)]
            lut = lut * 3
            image = image.point(lut)
        if self.contrast != 0:
            enhancer = ImageEnhance.Contrast(image)
            image = enhancer.enhance(1 + self.contrast)
        if self.saturation != 0:
            enhancer = ImageEnhance.Color(image)
            image = enhancer.enhance(1 + self.saturation)
        if self.tint != [1.0, 1.0, 1.0, 1.0]:
            matrix = (self.tint[0], 0.0, 0.0, 0.0, 0.0, self.tint[1], 0.0, 0.0,
                      0.0, 0.0, self.tint[2], 0.0)
            image = image.convert('RGB', matrix)
        if self.curve:
            lut = self.curve * 3
            image = image.point(lut)

        if self.denoise and not preview and opencv:
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2BGR)
            open_cv_image = cv2.fastNlMeansDenoisingColored(
                open_cv_image, None, self.luminance_denoise,
                self.color_denoise, self.search_window, self.block_size)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(open_cv_image)

        if self.adaptive_clip > 0 and opencv:
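            # CLAHE (adaptive histogram equalization) on the L channel of Lab
            # only, so local contrast improves without shifting colors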
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2Lab)
            channels = cv2.split(open_cv_image)
            clahe = cv2.createCLAHE(clipLimit=(self.adaptive_clip * 4),
                                    tileGridSize=(8, 8))
            clahe_image = clahe.apply(channels[0])
            channels[0] = clahe_image
            open_cv_image = cv2.merge(channels)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_Lab2RGB)
            image = Image.fromarray(open_cv_image)

        if self.border_image:
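            # choose the border overlay whose aspect ratio is closest to the
            # image, scale it to fit, then composite it on via its alpha mask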
            image_aspect = image.size[0] / image.size[1]
            closest_aspect = min(self.border_image[1],
                                 key=lambda x: abs(x - image_aspect))
            index = self.border_image[1].index(closest_aspect)
            image_file = os.path.join('borders', self.border_image[2][index])
            if preview:
                resample = PIL.Image.NEAREST
            else:
                resample = PIL.Image.BICUBIC
            border_image = Image.open(image_file)
            border_crop_x = int(border_image.size[0] *
                                ((self.border_x_scale + 1) / 15))
            border_crop_y = int(border_image.size[1] *
                                ((self.border_y_scale + 1) / 15))
            border_image = border_image.crop(
                (border_crop_x, border_crop_y,
                 border_image.size[0] - border_crop_x,
                 border_image.size[1] - border_crop_y))
            border_image = border_image.resize(image.size, resample)

            if os.path.splitext(image_file)[1].lower() == '.jpg':
                alpha_file = os.path.splitext(image_file)[0] + '-mask.jpg'
                if not os.path.exists(alpha_file):
                    alpha_file = image_file
                alpha = Image.open(alpha_file)
                alpha = alpha.convert('L')
                alpha = alpha.crop((border_crop_x, border_crop_y,
                                    alpha.size[0] - border_crop_x,
                                    alpha.size[1] - border_crop_y))
                alpha = alpha.resize(image.size, resample)
            else:
                alpha = border_image.split()[-1]
                border_image = border_image.convert('RGB')
            if self.border_tint != [1.0, 1.0, 1.0, 1.0]:
                matrix = (self.border_tint[0], 0.0, 0.0, 1.0, 0.0,
                          self.border_tint[1], 0.0, 1.0, 0.0, 0.0,
                          self.border_tint[2], 1.0)
                border_image = border_image.convert('RGB', matrix)

            enhancer = ImageEnhance.Brightness(alpha)
            alpha = enhancer.enhance(self.border_opacity)
            image = Image.composite(border_image, image, alpha)

        return image

    def update_texture(self, image):
        """Saves a PIL image to the visible texture.
        Argument:
            image: A PIL image
        """

        image_bytes = BytesIO()
        image.save(image_bytes, 'jpeg')
        image_bytes.seek(0)
        self._coreimage = CoreImage(image_bytes, ext='jpg')
        self._on_tex_change()

    def get_full_quality(self):
        """Generate a full sized and full quality version of the source image.
        Returns: A PIL image.
        """

        image = self.original_image.copy()
        if not self.video:
            if self.angle != 0:
                if self.angle == 90:
                    image = image.transpose(PIL.Image.ROTATE_90)
                elif self.angle == 180:
                    image = image.transpose(PIL.Image.ROTATE_180)
                elif self.angle == 270:
                    image = image.transpose(PIL.Image.ROTATE_270)
        if image.mode != 'RGB':
            image = image.convert('RGB')
        image = self.adjust_image(image, preview=False)
        return image

    def close_image(self):
        self.original_image.close()
Example #10
class VideoPlayer:
    def __init__(self, video, trackbar_name, window_name):
        self.cur_frame = 0
        self.src = video.src
        self.video = video
        self.audio = MediaPlayer(video.src)
        self.frame_max = video.frame_max
        self.trackbar = trackbar_name
        self.window = window_name
        self.ostream = self.init_ostream()
        self.queue = Queue(maxsize=_G.MaxQueueSize)
        self.FLAG_CODEC_STOP = False
        cv2.namedWindow(self.window)
        cv2.createTrackbar(self.trackbar, self.window, 0, self.frame_max,
                           self.set_current_frame)

    def init_ostream(self):
        fname = make_out_filename(self.video.src)
        fourcc = cv2.VideoWriter_fourcc(*_G.VideoCodec)
        _fps = self.video.fps
        _res = (_G.CanvasWidth, _G.CanvasHeight)
        return cv2.VideoWriter(fname, fourcc, _fps, _res)

    def set_current_frame(self, n):
        # self.cur_frame = n
        pass

    def set_audio_frame(self, n):
        t = self.video.frame2timestamp(n)
        self.audio.seek(t, False)

    def start(self):
        self.codec_t = Thread(target=self.update_codec)
        self.codec_t.daemon = True
        self.codec_t.start()
        _t = Thread(target=self.extract_audio)
        _t.start()
        return self

    def extract_audio(self):
        fname = make_audio_filename(self.src)
        if not os.path.exists(fname):
            v = mp.VideoFileClip(self.src)
            v.audio.write_audiofile(fname)

    def update_codec(self):
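        # producer thread: decode and compose frames into the bounded queue so
        # the display loop in update_frame() never blocks on decoding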
        while not self.FLAG_CODEC_STOP:
            if not self.queue.full():
                ret, frame = self.video.read()
                if not ret:
                    self.FLAG_CODEC_STOP = True
                    return
                frame = self.make_frame(frame)
                self.queue.put(frame)
        print("Codec Ended")

    def frame_available(self):
        return self.queue.qsize() > 0

    def get_frame(self):
        return self.queue.get()

    def update(self):
        self.update_frame()
        self.update_input()

    def update_frame(self):
        if self.is_ended() or _G.FLAG_PAUSE:
            return

        cv2.setTrackbarPos(self.trackbar, self.window, self.cur_frame)

        frame = self.get_frame()
        if frame is None:
            return

        cv2.imshow(self.window, frame)
        # print(f"qsize={self.queue.qsize()}")
        self.ostream.write(frame)

        if not _G.FLAG_PAUSE:
            self.cur_frame += 1

    def update_input(self):
        key = cv2.waitKey(_G.UPS)
        if key == _G.VK_ESC:
            _G.FLAG_STOP = True
        elif key == _G.VK_SPACE:
            _G.FLAG_PAUSE ^= True
            self.audio.toggle_pause()

    def is_ended(self):
        return self.cur_frame >= self.frame_max

    def make_audio_window(self):
        window, val = self.audio.get_frame()
        if window is None or val == 'eof':
            return (None, None)
        return window

    def make_frame(self, frame):
        canvas = np.zeros((_G.CanvasHeight, _G.CanvasWidth, 3), np.uint8)

        mx, my = _G.CanvasWidth // 2, _G.CanvasHeight // 2
        frame = cv2.resize(frame, (mx, my))

        frame2 = filter.greyscale(frame)
        frame3 = filter.sharpen(frame)
        frame4 = filter.inverted(frame)

        canvas[0:frame.shape[0], 0:frame.shape[1]] += frame
        canvas[0:frame.shape[0], mx:mx + frame.shape[1]] += frame2
        canvas[my:my + frame.shape[0], 0:frame.shape[1]] += frame3
        canvas[my:my + frame.shape[0], mx:mx + frame.shape[1]] += frame4
        return canvas
Example #11
def calculate_frame_diffs_wcall(video_file,
                                masks,
                                cut_ranges,
                                pixel_diff_threshold=10,
                                callback=None,
                                sec_callback=5):
    """
    Calculates frame differences for a video file.
    """
    distances = []
    column_names = (["Range", "Time", "Overall"] +
                    ["ROI{0}".format(j) for j, _ in enumerate(masks)])
    masks = [m.flatten() for m in masks]
    distances.append([-1, -1, 1] + [np.mean(m) for m in masks])

    player = MediaPlayer(video_file,
                         thread_lib="SDL",
                         ff_opts={
                             "out_fmt": "gray8",
                             "an": True,
                             "sn": True
                         })

    frame = get_frame(player)
    if frame is None:
        return pd.DataFrame(distances, columns=column_names)
    img, t = frame

    metadata = player.get_metadata()
    duration, vid_size = metadata["duration"], metadata["src_vid_size"]
    vid_size = (vid_size[1], vid_size[0])

    oframe = np.asarray(img.to_memoryview(keep_align=False)[0], dtype=np.uint8)

    range_end = [r * duration for r in cut_ranges[0]]
    range_selected = [True] + cut_ranges[1]

    t0 = 0
    last_callback = 0
    crange = 0

    while True:
        # Get next frame
        frame = get_frame(player, t0)
        if frame is None:
            break
        img, t = frame

        # Update current range
        if t >= range_end[crange]:
            nrange = update_range(crange, range_selected)
            if nrange == len(range_selected):
                break
            if nrange > crange:
                if t < range_end[nrange - 1]:
                    player.seek(range_end[nrange - 1], relative=False)
                    oframe = None
                    t0 = range_end[nrange - 1]
                    continue
                crange = nrange

        # Calculate frame difference
        cframe = np.asarray(img.to_memoryview(keep_align=False)[0],
                            dtype=np.uint8)
        if oframe is not None:
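            # uint8 subtraction wraps around, so requiring BOTH differences to
            # exceed the threshold marks pixels whose absolute difference is
            # greater than the threshold (and smaller than 256 - threshold)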
            frame_diff = ((cframe - oframe > pixel_diff_threshold) &
                          (oframe - cframe > pixel_diff_threshold))
            distances.append([crange, t, np.mean(frame_diff)] +
                             [np.mean(frame_diff & mask) for mask in masks])
            # Callback
            if callback is not None and (t - last_callback) >= sec_callback:
                last_callback = t
                callback(t / duration, frame_diff.reshape(vid_size))
        oframe = cframe
        t0 = t

    player.close_player()

    return pd.DataFrame(distances, columns=column_names)
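The helpers get_frame() and update_range() are used above but not shown in
this example. A minimal sketch of what get_frame() could look like, assuming
it wraps MediaPlayer.get_frame(), polls until a frame is ready, and discards
frames stamped before t0 (this implementation is an assumption, not part of
the original example):

import time

def get_frame(player, t0=0):
    # Poll the player until a frame with pts >= t0 arrives (an assumption
    # about the original helper). Returns (img, t), or None at end of file.
    while True:
        frame, val = player.get_frame()
        if val == 'eof':
            return None
        if frame is None:
            time.sleep(0.01)  # nothing decoded yet; try again shortly
            continue
        img, t = frame
        if t >= t0:
            return img, t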
Example #12
class VideoStream:
    def __init__(self, video_source=None):
        ff_opts = {'paused': True, 'autoexit': False}  # FFmpeg options
        self.video_source = video_source
        # Open the video source
        self.player = MediaPlayer(video_source, ff_opts=ff_opts)
        # TODO: find a cleaner way to wait for the MediaPlayer to load its data
        # get the frame rate for synchronization (self.delay)
        while self.player.get_metadata()['src_vid_size'] == (0, 0):
            time.sleep(0.01)
        data = self.player.get_metadata()
        print('data -->', data)
        self.f_rate = data['frame_rate']
        print('delay -> ', self.f_rate)
        self.w, self.h = data['src_vid_size']
        print('WxH -> ', self.w, self.h)
        self.pts = self.player.get_pts(
        )  # Returns the elapsed play time. float
        print('pts ->', self.pts)
        self.duration = data['duration']
        print('duration', self.duration)
        self.pause = self.player.get_pause(
        )  # Returns whether the player is paused.
        print('pause ->', self.pause)
        self.volume = self.player.get_volume(
        )  # Returns the audio volume. float: a value between 0.0 and 1.0
        print('volume ->', self.volume)
        self.player.toggle_pause(
        )  # Toggles the player's pause state
        # self.player.set_pause(False)  # pauses or un-pauses the file. state: bool
        cond = True
        while cond:
            self.l_frame, self.val = self.player.get_frame()
            if self.val == 'eof':
                print('cannot open source: ', video_source)
                break
            elif self.l_frame is None:
                time.sleep(0.01)
            else:
                self._imagen, self.pts = self.l_frame
                print('pts ->', self.pts)
                # arr = self._imagen.to_memoryview()[0] # array image
                # self.imagen = Image.frombytes("RGB", self.original_size, arr.memview)
                # self.imagen.show()
                cond = False

    # properties.
    @property
    def f_rate(self):
        return self.__f_rate

    @f_rate.setter
    def f_rate(self, val):
        # val is a (numerator, denominator) tuple from the player metadata
        vn, vd = val
        if vd > 1:
            self.__f_rate = int(round(vn / vd))
        else:
            self.__f_rate = vn

    # end properties.

    def get_frame(self):
        '''
        Return values:
            val : 'eof' or 'pause'
            pts : time position of the audio/image
            imagen : the frame image
        Returns (val, pts, imagen)
        '''
        self.l_frame, self.val = self.player.get_frame()
        if self.val == 'eof':
            # end of file; the caller should stop its read loop
            # self.player.toggle_pause()  # pause the player
            return self.val, None, None
        elif self.l_frame is None:
            time.sleep(0.01)
            return self.val, None, None
        else:
            # import math
            self._imagen, self.pts = self.l_frame
            return self.val, self.pts, self._imagen
            # w, h = self._imagen.get_size()
            # linesize = [int(math.ceil(w * 3 / 32.) * 32)]
            # self._imagen = pic.Image(plane_buffers=[bytes(b' ') * (h * linesize[0])],
            #             pix_fmt=self._imagen.get_pixel_format(), size=(w, h), linesize=linesize)
            # self._imagen.get_linesizes(keep_align=True)

            # if self.new_size is not None:
            #     sws = None
            #     n_w , n_h = self.new_size
            #     if n_w > n_h:
            #         sws = pic.SWScale(w, h, self._imagen.get_pixel_format(), oh=n_h)
            #     else:
            #         sws = pic.SWScale(w, h, self._imagen.get_pixel_format(), ow=n_w)
            #     self._imagen = sws.scale(self._imagen)

            # size = self._imagen.get_size()
            # arr = self._imagen.to_memoryview()[0] # array image
            # self.imagen = Image.frombytes("RGB", size, arr.memview)
            # print('>>> videostream::get_frame()::self.pts ->', self.pts)

    def toggle_pause(self):
        '''
        Toggle the player's pause state.
        '''
        try:  # this may fail if the underlying player is already gone
            self.player.toggle_pause()
            # self.player = None
        except Exception:
            pass

    def seek(self, pts=None, relative=False, accurate=False):
        # pass the arguments through (the original ignored them) and allow
        # seeking to position 0
        if pts is None:
            return
        self.player.seek(pts, relative=relative, accurate=accurate)

    def snapshot(self, road=None):
        '''
        Save the current frame to disk as a JPEG.
        '''
        img = self.l_frame[0]
        if img is not None:
            size = img.get_size()
            arr = img.to_memoryview()[0]  # raw image bytes
            img = Image.frombytes("RGB", size, arr.memview)
            # build a timestamped file name and save it
            time_str = time.strftime("%d-%m-%Y-%H-%M-%S")
            frame_name = f"frame-{time_str}.jpg"
            if not road:
                ruta = os.path.dirname(self.video_source)
                name_out = os.path.join(ruta, frame_name)
            else:
                name_out = os.path.join(road, frame_name)
            img.save(name_out)

    # Release the video source when the object is destroyed
    def __del__(self):
        self.player.close_player()
        print('__del__')