Example #1
0
 def audio_thread(self):
     """Play this object's audio track with signal-driven volume ducking.

     Intended to run on a worker thread.  Opens the file audio-only
     ('vn' disables the video stream) and polls ``self.positive_signal``
     roughly 10 times per second: on a rising edge the volume is restored
     to 1.0; on a falling edge the volume is lowered to 0.4, but only when
     more than 2 seconds have passed since the stored timestamp.

     NOTE(review): ``old_signal`` is updated even when neither volume
     branch executed, so a falling edge arriving within 2 s of the last
     rising edge is never re-examined and the volume is never lowered for
     that transition — confirm this debounce behaviour is intentional.
     """
     player = MediaPlayer(self.video_path, ff_opts={'vn': True})
     old_signal = self.positive_signal
     # Timestamp of the last volume change / signal event.
     signal_timestamp = time.time()
     try:
         player.set_volume(1.0)
         # Wall-clock time at which audio playback started.
         self.audio_start_time_sec = time.time()
         while self.player_is_playing:
             signal = self.positive_signal
             if old_signal != signal:  # react only to transitions
                 if signal:
                     player.set_volume(1.0)
                     signal_timestamp = time.time()
                 elif time.time() - signal_timestamp > 2:
                     player.set_volume(0.4)
                     signal_timestamp = time.time()
                 old_signal = signal
             time.sleep(0.1)
     except Exception as e:
         # Best-effort reporting; the finally block still cleans up.
         print(e)
     finally:
         # Always flag playback as stopped and release the player.
         self.player_is_playing = False
         player.close_player()
Example #2
0
class VideoFFPy(VideoBase):
    """Kivy video provider backed by ffpyplayer's ``MediaPlayer``.

    A daemon thread (:meth:`_next_frame_run`) pulls decoded frames from the
    player and publishes the latest one in ``self._next_frame``; a Clock
    trigger then runs :meth:`_redraw` on the main thread to upload the frame
    to a texture.  When the source is yuv420p, the three planes are uploaded
    as luminance textures and converted to RGB on the GPU by ``YUV_RGB_FS``;
    any other format is converted to rgba by ffpyplayer itself.
    """

    # Fragment shader converting the sampled Y/U/V planes to RGB.
    YUV_RGB_FS = """
    $HEADER$
    uniform sampler2D tex_y;
    uniform sampler2D tex_u;
    uniform sampler2D tex_v;

    void main(void) {
        float y = texture2D(tex_y, tex_coord0).r;
        float u = texture2D(tex_u, tex_coord0).r - 0.5;
        float v = texture2D(tex_v, tex_coord0).r - 0.5;
        float r = y +             1.402 * v;
        float g = y - 0.344 * u - 0.714 * v;
        float b = y + 1.772 * u;
        gl_FragColor = vec4(r, g, b, 1.0);
    }
    """

    # Clock trigger that schedules _redraw on the main thread.
    _trigger = None

    def __init__(self, **kwargs):
        self._ffplayer = None             # MediaPlayer instance while loaded
        self._thread = None               # frame-fetching daemon thread
        self._next_frame = None           # latest (image, pts) from the thread
        self._seek_queue = []             # pending seek fractions (0..1)
        self._ffplayer_need_quit = False  # tells the thread to exit
        self._trigger = Clock.create_trigger(self._redraw)

        super(VideoFFPy, self).__init__(**kwargs)

    def __del__(self):
        self.unload()

    def _player_callback(self, selector, value):
        """Callback invoked by MediaPlayer from its internal thread."""
        if self._ffplayer is None:
            return
        if selector == 'quit':

            def close(*args):
                self.unload()

            # unload() joins the decode thread, so defer it to the main loop
            # instead of running it on the player's thread.
            Clock.schedule_once(close, 0)

    def _get_position(self):
        """Return the current presentation timestamp in seconds."""
        if self._ffplayer is not None:
            return self._ffplayer.get_pts()
        return 0

    def _set_position(self, pos):
        self.seek(pos)

    def _set_volume(self, volume):
        self._volume = volume
        if self._ffplayer:
            self._ffplayer.set_volume(self._volume)

    def _get_duration(self):
        """Return the stream duration in seconds, or 0 when not loaded."""
        if self._ffplayer is None:
            return 0
        return self._ffplayer.get_metadata()['duration']

    @mainthread
    def _do_eos(self):
        """Apply the configured end-of-stream policy, then dispatch on_eos."""
        if self.eos == 'pause':
            self.pause()
        elif self.eos == 'stop':
            self.stop()
        elif self.eos == 'loop':
            self.position = 0

        self.dispatch('on_eos')

    @mainthread
    def _change_state(self, state):
        self._state = state

    def _redraw(self, *args):
        """Upload the latest decoded frame to the texture (main thread)."""
        if not self._ffplayer:
            return
        next_frame = self._next_frame
        if not next_frame:
            return

        img, pts = next_frame
        # (Re)create the textures whenever the frame size changes.
        if img.get_size() != self._size or self._texture is None:
            self._size = w, h = img.get_size()

            if self._out_fmt == 'yuv420p':
                # U/V planes are subsampled to half resolution in yuv420p.
                w2 = int(w / 2)
                h2 = int(h / 2)
                self._tex_y = Texture.create(size=(w, h), colorfmt='luminance')
                self._tex_u = Texture.create(size=(w2, h2),
                                             colorfmt='luminance')
                self._tex_v = Texture.create(size=(w2, h2),
                                             colorfmt='luminance')
                # Render the three planes through the YUV->RGB shader into
                # an FBO; the FBO's texture is what gets displayed.
                self._fbo = fbo = Fbo(size=self._size)
                with fbo:
                    BindTexture(texture=self._tex_u, index=1)
                    BindTexture(texture=self._tex_v, index=2)
                    Rectangle(size=fbo.size, texture=self._tex_y)
                fbo.shader.fs = VideoFFPy.YUV_RGB_FS
                fbo['tex_y'] = 0
                fbo['tex_u'] = 1
                fbo['tex_v'] = 2
                self._texture = fbo.texture
            else:
                self._texture = Texture.create(size=self._size,
                                               colorfmt='rgba')

            # XXX FIXME
            # self.texture.add_reload_observer(self.reload_buffer)
            self._texture.flip_vertical()
            self.dispatch('on_load')

        if self._texture:
            if self._out_fmt == 'yuv420p':
                dy, du, dv, _ = img.to_memoryview()
                if dy and du and dv:
                    self._tex_y.blit_buffer(dy, colorfmt='luminance')
                    self._tex_u.blit_buffer(du, colorfmt='luminance')
                    self._tex_v.blit_buffer(dv, colorfmt='luminance')
                    self._fbo.ask_update()
                    self._fbo.draw()
            else:
                self._texture.blit_buffer(img.to_memoryview()[0],
                                          colorfmt='rgba')

            self.dispatch('on_frame')

    def _next_frame_run(self):
        """Decode-thread main loop: configure output, then fetch frames."""
        ffplayer = self._ffplayer
        sleep = time.sleep
        trigger = self._trigger
        did_dispatch_eof = False
        seek_queue = self._seek_queue

        # fast path, if the source video is yuv420p, we'll use a glsl shader
        # for buffer conversion to rgba
        while not self._ffplayer_need_quit:
            src_pix_fmt = ffplayer.get_metadata().get('src_pix_fmt')
            if not src_pix_fmt:
                sleep(0.005)
                continue

            if src_pix_fmt == 'yuv420p':
                self._out_fmt = 'yuv420p'
                ffplayer.set_output_pix_fmt(self._out_fmt)
            self._ffplayer.toggle_pause()
            break

        if self._ffplayer_need_quit:
            return

        # wait until loaded or failed, shouldn't take long, but just to make
        # sure metadata is available.
        # time.clock() was removed in Python 3.8; perf_counter() is the
        # monotonic replacement for this timeout.
        s = time.perf_counter()
        while not self._ffplayer_need_quit:
            if ffplayer.get_metadata()['src_vid_size'] != (0, 0):
                break
            # XXX if will fail later then?
            if time.perf_counter() - s > 10.:
                break
            sleep(0.005)

        if self._ffplayer_need_quit:
            return

        # we got all the informations, now, get the frames :)
        self._change_state('playing')

        while not self._ffplayer_need_quit:
            seek_happened = False
            if seek_queue:
                # Collapse all queued seeks into the most recent target.
                vals = seek_queue[:]
                del seek_queue[:len(vals)]
                ffplayer.seek(vals[-1] * ffplayer.get_metadata()['duration'],
                              relative=False)
                seek_happened = True
                self._next_frame = None

            # Get next frame if paused:
            if seek_happened and ffplayer.get_pause():
                ffplayer.set_volume(0.0)  # Try to do it silently.
                ffplayer.set_pause(False)
                try:
                    # We don't know concrete number of frames to skip,
                    # this number worked fine on couple of tested videos:
                    to_skip = 6
                    while True:
                        frame, val = ffplayer.get_frame(show=False)
                        # Exit loop on invalid val:
                        if val in ('paused', 'eof'):
                            break
                        # Exit loop on seek_queue updated:
                        if seek_queue:
                            break
                        # Wait for next frame:
                        if frame is None:
                            sleep(0.005)
                            continue
                        # Wait until we skipped enough frames:
                        to_skip -= 1
                        if to_skip == 0:
                            break
                    # Assuming last frame is actual, just get it:
                    frame, val = ffplayer.get_frame(force_refresh=True)
                finally:
                    # Restore pause state and volume no matter what happened.
                    ffplayer.set_pause(bool(self._state == 'paused'))
                    ffplayer.set_volume(self._volume)
            # Get next frame regular:
            else:
                frame, val = ffplayer.get_frame()

            if val == 'eof':
                sleep(0.2)
                if not did_dispatch_eof:
                    self._do_eos()
                    did_dispatch_eof = True
            elif val == 'paused':
                did_dispatch_eof = False
                sleep(0.2)
            else:
                did_dispatch_eof = False
                if frame:
                    self._next_frame = frame
                    trigger()
                else:
                    # No frame yet: fall back to a ~30fps polling interval.
                    val = val if val else (1 / 30.)
                sleep(val)

    def seek(self, percent):
        """Queue a seek to ``percent``, a 0..1 fraction of the duration."""
        if self._ffplayer is None:
            return
        self._seek_queue.append(percent)

    def stop(self):
        self.unload()

    def pause(self):
        if self._ffplayer and self._state != 'paused':
            self._ffplayer.toggle_pause()
            self._state = 'paused'

    def play(self):
        """Resume if paused, otherwise (re)create the player and thread."""
        if self._ffplayer and self._state == 'paused':
            self._ffplayer.toggle_pause()
            self._state = 'playing'
            return

        self.load()
        self._out_fmt = 'rgba'
        ff_opts = {
            'paused': True,
            'out_fmt': self._out_fmt,
            'sn': True,
        }
        self._ffplayer = MediaPlayer(self._filename,
                                     callback=self._player_callback,
                                     thread_lib='SDL',
                                     loglevel='info',
                                     ff_opts=ff_opts)
        self._ffplayer.set_volume(self._volume)

        self._thread = Thread(target=self._next_frame_run, name='Next frame')
        self._thread.daemon = True
        self._thread.start()

    def load(self):
        self.unload()

    def unload(self):
        """Stop the decode thread and drop all player state."""
        if self._trigger is not None:
            self._trigger.cancel()
        self._ffplayer_need_quit = True
        if self._thread:
            self._thread.join()
            self._thread = None
        if self._ffplayer:
            self._ffplayer = None
        self._next_frame = None
        self._size = (0, 0)
        self._state = ''
        self._ffplayer_need_quit = False
class SoundFFPy(Sound):
    """Kivy Sound provider backed by ffpyplayer's ``MediaPlayer``.

    Opens the source audio-only and drives playback through the player's
    pause toggle; 'quit' and 'eof' player callbacks are marshalled onto the
    Kivy main loop via ``Clock.schedule_once``.
    """

    @staticmethod
    def extensions():
        """Return the file extensions ffpyplayer can decode."""
        return formats_in

    def __init__(self, **kwargs):
        self._ffplayer = None          # MediaPlayer instance while loaded
        self.quitted = False           # set once the player reported 'quit'
        self._log_callback_set = False
        self._state = ''               # internal state: '', 'paused', 'playing'
        self.state = 'stop'            # public Kivy Sound state

        # Install the ffmpeg log callback once, process-wide.
        if not get_log_callback():
            set_log_callback(_log_callback)
            self._log_callback_set = True

        super(SoundFFPy, self).__init__(**kwargs)

    def __del__(self):
        self.unload()
        if self._log_callback_set:
            set_log_callback(None)

    def _player_callback(self, selector, value):
        """Callback invoked by MediaPlayer from its internal thread."""
        if self._ffplayer is None:
            return
        if selector == 'quit':

            def close(*args):
                self.quitted = True
                self.unload()

            # Defer teardown to the main loop; don't unload on the
            # player's own thread.
            Clock.schedule_once(close, 0)
        elif selector == 'eof':
            Clock.schedule_once(self._do_eos, 0)

    def load(self):
        """Open the source and leave it paused, ready to play."""
        self.unload()
        ff_opts = {'vn': True, 'sn': True}  # only audio
        self._ffplayer = MediaPlayer(self.source,
                                     callback=self._player_callback,
                                     loglevel='info',
                                     ff_opts=ff_opts)
        player = self._ffplayer
        player.set_volume(self.volume)
        player.toggle_pause()
        self._state = 'paused'
        # wait until loaded or failed, shouldn't take long, but just to make
        # sure metadata is available.
        # time.clock() was removed in Python 3.8; perf_counter() is the
        # monotonic replacement for this timeout.
        s = time.perf_counter()
        while ((not player.get_metadata()['duration']) and not self.quitted
               and time.perf_counter() - s < 10.):
            time.sleep(0.005)

    def unload(self):
        """Release the player and reset to the stopped state."""
        if self._ffplayer:
            self._ffplayer = None
        self._state = ''
        self.state = 'stop'
        self.quitted = False

    def play(self):
        if self._state == 'playing':
            super(SoundFFPy, self).play()
            return
        if not self._ffplayer:
            self.load()
        self._ffplayer.toggle_pause()
        self._state = 'playing'
        self.state = 'play'
        super(SoundFFPy, self).play()

    def stop(self):
        # 'stop' is implemented as a pause; the player stays loaded.
        if self._ffplayer and self._state == 'playing':
            self._ffplayer.toggle_pause()
            self._state = 'paused'
            self.state = 'stop'
        super(SoundFFPy, self).stop()

    def seek(self, position):
        """Seek to an absolute ``position`` in seconds."""
        if self._ffplayer is None:
            return
        self._ffplayer.seek(position, relative=False)

    def get_pos(self):
        """Return the current playback position in seconds."""
        if self._ffplayer is not None:
            return self._ffplayer.get_pts()
        return 0

    def on_volume(self, instance, volume):
        if self._ffplayer is not None:
            self._ffplayer.set_volume(volume)

    def _get_length(self):
        """Return the duration in seconds, falling back to the base class."""
        if self._ffplayer is None:
            return super(SoundFFPy, self)._get_length()
        return self._ffplayer.get_metadata()['duration']

    def _do_eos(self, *args):
        """On end of stream: stop, or restart from 0 when looping."""
        if not self.loop:
            self.stop()
        else:
            self.seek(0.)
Example #4
0
class SoundFFPy(Sound):
    """Kivy Sound provider backed by ffpyplayer's ``MediaPlayer``.

    Variant that hands the player a ``WeakMethod`` callback so the player
    does not hold a strong reference back to this object.  Playback is
    driven through the player's pause toggle; 'quit'/'eof' callbacks are
    marshalled onto the Kivy main loop.
    """

    @staticmethod
    def extensions():
        """Return the file extensions ffpyplayer can decode."""
        return formats_in

    def __init__(self, **kwargs):
        self._ffplayer = None          # MediaPlayer instance while loaded
        self.quitted = False           # set once the player reported 'quit'
        self._log_callback_set = False
        self._state = ''               # internal state: '', 'paused', 'playing'
        self.state = 'stop'            # public Kivy Sound state
        # Weak reference so the player can't keep this object alive.
        self._callback_ref = WeakMethod(self._player_callback)

        # Install the ffmpeg log callback once, process-wide.
        if not get_log_callback():
            set_log_callback(_log_callback)
            self._log_callback_set = True

        super(SoundFFPy, self).__init__(**kwargs)

    def __del__(self):
        self.unload()
        if self._log_callback_set:
            set_log_callback(None)

    def _player_callback(self, selector, value):
        """Callback invoked by MediaPlayer from its internal thread."""
        if self._ffplayer is None:
            return
        if selector == 'quit':
            def close(*args):
                self.quitted = True
                self.unload()
            # Defer teardown to the main loop.
            Clock.schedule_once(close, 0)
        elif selector == 'eof':
            Clock.schedule_once(self._do_eos, 0)

    def load(self):
        """Open the source and leave it paused, ready to play."""
        self.unload()
        ff_opts = {'vn': True, 'sn': True}  # only audio
        self._ffplayer = MediaPlayer(self.source,
                                     callback=self._callback_ref,
                                     loglevel='info', ff_opts=ff_opts)
        player = self._ffplayer
        player.set_volume(self.volume)
        player.toggle_pause()
        self._state = 'paused'
        # wait until loaded or failed, shouldn't take long, but just to make
        # sure metadata is available.
        # time.clock() was removed in Python 3.8; perf_counter() is the
        # monotonic replacement for this timeout.
        s = time.perf_counter()
        while ((not player.get_metadata()['duration'])
               and not self.quitted and time.perf_counter() - s < 10.):
            time.sleep(0.005)

    def unload(self):
        """Release the player and reset to the stopped state."""
        if self._ffplayer:
            self._ffplayer = None
        self._state = ''
        self.state = 'stop'
        self.quitted = False

    def play(self):
        if self._state == 'playing':
            super(SoundFFPy, self).play()
            return
        if not self._ffplayer:
            self.load()
        self._ffplayer.toggle_pause()
        self._state = 'playing'
        self.state = 'play'
        super(SoundFFPy, self).play()

    def stop(self):
        # 'stop' is implemented as a pause; the player stays loaded.
        if self._ffplayer and self._state == 'playing':
            self._ffplayer.toggle_pause()
            self._state = 'paused'
            self.state = 'stop'
        super(SoundFFPy, self).stop()

    def seek(self, position):
        """Seek to an absolute ``position`` in seconds."""
        if self._ffplayer is None:
            return
        self._ffplayer.seek(position, relative=False)

    def get_pos(self):
        """Return the current playback position in seconds."""
        if self._ffplayer is not None:
            return self._ffplayer.get_pts()
        return 0

    def on_volume(self, instance, volume):
        if self._ffplayer is not None:
            self._ffplayer.set_volume(volume)

    def _get_length(self):
        """Return the duration in seconds, falling back to the base class."""
        if self._ffplayer is None:
            return super(SoundFFPy, self)._get_length()
        return self._ffplayer.get_metadata()['duration']

    def _do_eos(self, *args):
        """On end of stream: stop, or restart from 0 when looping."""
        if not self.loop:
            self.stop()
        else:
            self.seek(0.)
Example #5
0
class VideoFFPy(VideoBase):
    """Kivy video provider backed by ffpyplayer's ``MediaPlayer``.

    A daemon thread (:meth:`_next_frame_run`) pulls decoded frames and
    publishes the latest one in ``self._next_frame``; a Clock trigger then
    runs :meth:`_redraw` on the main thread to upload it to a texture.
    yuv420p sources are uploaded as three luminance planes and converted to
    RGB on the GPU by ``YUV_RGB_FS``; other formats arrive as rgba.
    """

    # Fragment shader converting the sampled Y/U/V planes to RGB.
    YUV_RGB_FS = """
    $HEADER$
    uniform sampler2D tex_y;
    uniform sampler2D tex_u;
    uniform sampler2D tex_v;

    void main(void) {
        float y = texture2D(tex_y, tex_coord0).r;
        float u = texture2D(tex_u, tex_coord0).r - 0.5;
        float v = texture2D(tex_v, tex_coord0).r - 0.5;
        float r = y +             1.402 * v;
        float g = y - 0.344 * u - 0.714 * v;
        float b = y + 1.772 * u;
        gl_FragColor = vec4(r, g, b, 1.0);
    }
    """

    # Clock trigger that schedules _redraw on the main thread.
    _trigger = None

    def __init__(self, **kwargs):
        self._ffplayer = None             # MediaPlayer instance while loaded
        self._thread = None               # frame-fetching daemon thread
        self._next_frame = None           # latest (image, pts) from the thread
        self._seek_queue = []             # pending seek fractions (0..1)
        self._ffplayer_need_quit = False  # tells the thread to exit
        self._trigger = Clock.create_trigger(self._redraw)

        super(VideoFFPy, self).__init__(**kwargs)

    def __del__(self):
        self.unload()

    def _player_callback(self, selector, value):
        """Callback invoked by MediaPlayer from its internal thread."""
        if self._ffplayer is None:
            return
        if selector == 'quit':
            def close(*args):
                self.unload()
            # unload() joins the decode thread; defer it to the main loop.
            Clock.schedule_once(close, 0)

    def _get_position(self):
        """Return the current presentation timestamp in seconds."""
        if self._ffplayer is not None:
            return self._ffplayer.get_pts()
        return 0

    def _set_position(self, pos):
        self.seek(pos)

    def _set_volume(self, volume):
        self._volume = volume
        if self._ffplayer:
            self._ffplayer.set_volume(self._volume)

    def _get_duration(self):
        """Return the stream duration in seconds, or 0 when not loaded."""
        if self._ffplayer is None:
            return 0
        return self._ffplayer.get_metadata()['duration']

    @mainthread
    def _do_eos(self):
        """Apply the configured end-of-stream policy, then dispatch on_eos."""
        if self.eos == 'pause':
            self.pause()
        elif self.eos == 'stop':
            self.stop()
        elif self.eos == 'loop':
            self.position = 0

        self.dispatch('on_eos')

    @mainthread
    def _change_state(self, state):
        self._state = state

    def _redraw(self, *args):
        """Upload the latest decoded frame to the texture (main thread)."""
        if not self._ffplayer:
            return
        next_frame = self._next_frame
        if not next_frame:
            return

        img, pts = next_frame
        # (Re)create the textures whenever the frame size changes.
        if img.get_size() != self._size or self._texture is None:
            self._size = w, h = img.get_size()

            if self._out_fmt == 'yuv420p':
                # U/V planes are subsampled to half resolution in yuv420p.
                w2 = int(w / 2)
                h2 = int(h / 2)
                self._tex_y = Texture.create(
                    size=(w, h), colorfmt='luminance')
                self._tex_u = Texture.create(
                    size=(w2, h2), colorfmt='luminance')
                self._tex_v = Texture.create(
                    size=(w2, h2), colorfmt='luminance')
                # Render the three planes through the YUV->RGB shader into
                # an FBO; the FBO's texture is what gets displayed.
                self._fbo = fbo = Fbo(size=self._size)
                with fbo:
                    BindTexture(texture=self._tex_u, index=1)
                    BindTexture(texture=self._tex_v, index=2)
                    Rectangle(size=fbo.size, texture=self._tex_y)
                fbo.shader.fs = VideoFFPy.YUV_RGB_FS
                fbo['tex_y'] = 0
                fbo['tex_u'] = 1
                fbo['tex_v'] = 2
                self._texture = fbo.texture
            else:
                self._texture = Texture.create(size=self._size, colorfmt='rgba')

            # XXX FIXME
            #self.texture.add_reload_observer(self.reload_buffer)
            self._texture.flip_vertical()
            self.dispatch('on_load')

        if self._texture:
            if self._out_fmt == 'yuv420p':
                dy, du, dv, _ = img.to_memoryview()
                self._tex_y.blit_buffer(dy, colorfmt='luminance')
                self._tex_u.blit_buffer(du, colorfmt='luminance')
                self._tex_v.blit_buffer(dv, colorfmt='luminance')
                self._fbo.ask_update()
                self._fbo.draw()
            else:
                self._texture.blit_buffer(
                    img.to_memoryview()[0], colorfmt='rgba')

            self.dispatch('on_frame')

    def _next_frame_run(self):
        """Decode-thread main loop: configure output, then fetch frames."""
        ffplayer = self._ffplayer
        sleep = time.sleep
        trigger = self._trigger
        did_dispatch_eof = False
        seek_queue = self._seek_queue

        # fast path, if the source video is yuv420p, we'll use a glsl shader for
        # buffer conversion to rgba
        while not self._ffplayer_need_quit:
            src_pix_fmt = ffplayer.get_metadata().get('src_pix_fmt')
            if not src_pix_fmt:
                sleep(0.005)
                continue

            if src_pix_fmt == 'yuv420p':
                self._out_fmt = 'yuv420p'
                ffplayer.set_output_pix_fmt(self._out_fmt)
            self._ffplayer.toggle_pause()
            break

        if self._ffplayer_need_quit:
            return

        # wait until loaded or failed, shouldn't take long, but just to make
        # sure metadata is available.
        # time.clock() was removed in Python 3.8; perf_counter() is the
        # monotonic replacement for this timeout.
        s = time.perf_counter()
        while not self._ffplayer_need_quit:
            if ffplayer.get_metadata()['src_vid_size'] != (0, 0):
                break
            # XXX if will fail later then?
            if time.perf_counter() - s > 10.:
                break
            sleep(0.005)

        if self._ffplayer_need_quit:
            return

        # we got all the informations, now, get the frames :)
        self._change_state('playing')

        while not self._ffplayer_need_quit:
            if seek_queue:
                # Collapse all queued seeks into the most recent target.
                vals = seek_queue[:]
                del seek_queue[:len(vals)]
                ffplayer.seek(
                    vals[-1] * ffplayer.get_metadata()['duration'],
                    relative=False)
                self._next_frame = None

            # Note: the timing locals (t1/t2) that used to bracket this call
            # were unused and have been removed.
            frame, val = ffplayer.get_frame()
            if val == 'eof':
                sleep(0.2)
                if not did_dispatch_eof:
                    self._do_eos()
                    did_dispatch_eof = True
            elif val == 'paused':
                did_dispatch_eof = False
                sleep(0.2)
            else:
                did_dispatch_eof = False
                if frame:
                    self._next_frame = frame
                    trigger()
                else:
                    # No frame yet: fall back to a ~30fps polling interval.
                    val = val if val else (1 / 30.)
                sleep(val)

    def seek(self, percent):
        """Queue a seek to ``percent``, a 0..1 fraction of the duration."""
        if self._ffplayer is None:
            return
        self._seek_queue.append(percent)

    def stop(self):
        self.unload()

    def pause(self):
        if self._ffplayer and self._state != 'paused':
            self._ffplayer.toggle_pause()
            self._state = 'paused'

    def play(self):
        """Resume if paused, otherwise (re)create the player and thread."""
        if self._ffplayer and self._state == 'paused':
            self._ffplayer.toggle_pause()
            self._state = 'playing'
            return

        self.load()
        self._out_fmt = 'rgba'
        ff_opts = {
            'paused': True,
            'out_fmt': self._out_fmt
        }
        self._ffplayer = MediaPlayer(
                self._filename, callback=self._player_callback,
                thread_lib='SDL',
                loglevel='info', ff_opts=ff_opts)
        self._ffplayer.set_volume(self._volume)

        self._thread = Thread(target=self._next_frame_run, name='Next frame')
        self._thread.daemon = True
        self._thread.start()

    def load(self):
        self.unload()

    def unload(self):
        """Stop the decode thread and drop all player state."""
        if self._trigger is not None:
            self._trigger.cancel()
        self._ffplayer_need_quit = True
        if self._thread:
            self._thread.join()
            self._thread = None
        if self._ffplayer:
            self._ffplayer = None
        self._next_frame = None
        self._size = (0, 0)
        self._state = ''
        self._ffplayer_need_quit = False
Example #6
0
File: test.py  Project: varung/ffpyplayer
class PlayerApp(App):
    """Kivy demo application that plays the video file given in sys.argv[1].

    A worker thread (:meth:`_next_frame`) pulls frames from the ffpyplayer
    ``MediaPlayer``; :meth:`redraw` blits the latest frame on the main
    thread via a Clock trigger.  Keyboard keys control pause, seeking,
    volume, stream switching and forced refresh.
    """

    def __init__(self, **kwargs):
        super(PlayerApp, self).__init__(**kwargs)
        self.texture = None           # display texture, rebuilt on size change
        self.size = (0, 0)            # size of the current texture
        self.next_frame = None        # latest (image, pts) from the thread
        self._done = False            # signals the frame thread to exit
        self._lock = RLock()          # guards player size/channel changes
        self._thread = Thread(target=self._next_frame, name='Next frame')
        self._trigger = Clock.create_trigger(self.redraw)
        self._force_refresh = False   # ask get_frame() to redeliver a frame

    def build(self):
        self.root = Root()
        return self.root

    def on_start(self):
        """Create the player and start the frame thread once the app runs."""
        # WeakMethod, presumably so the player doesn't keep the app alive
        # through its callback — verify against ffpyplayer docs.
        self.callback_ref = WeakMethod(self.callback)
        filename = sys.argv[1]
        logging.info('ffpyplayer: Playing file "{}"'.format(filename))
        # try ff_opts = {'vf':'edgedetect'} http://ffmpeg.org/ffmpeg-filters.html
        ff_opts = {}
        self.ffplayer = MediaPlayer(filename, callback=self.callback_ref,
                                    loglevel=log_level, ff_opts=ff_opts)
        self._thread.start()
        self.keyboard = Window.request_keyboard(None, self.root)
        self.keyboard.bind(on_key_down=self.on_keyboard_down)

    def resize(self):
        """Ask the player to scale its output to fit the image widget."""
        if self.ffplayer:
            w, h = self.ffplayer.get_metadata()['src_vid_size']
            if not h:
                return
            lock = self._lock
            lock.acquire()
            # Fit by whichever dimension binds; passing -1 lets the player
            # derive the other dimension (keeping aspect ratio).
            if self.root.image.width < self.root.image.height * w / float(h):
                self.ffplayer.set_size(-1, self.root.image.height)
            else:
                self.ffplayer.set_size(self.root.image.width, -1)
            lock.release()
            logging.debug('ffpyplayer: Resized video.')

    def update_pts(self, *args):
        """Push the player's current pts into the seek slider."""
        if self.ffplayer:
            self.root.seek.value = self.ffplayer.get_pts()

    def on_keyboard_down(self, keyboard, keycode, text, modifiers):
        """Handle playback hotkeys.

        p/space: pause  r: force refresh  v/a/t: cycle (or close, with
        ctrl) the video/audio/subtitle stream  left/right: seek +-10s
        up/down: volume +-0.01.
        """
        if not self.ffplayer:
            return False
        lock = self._lock
        ctrl = 'ctrl' in modifiers
        if keycode[1] == 'p' or keycode[1] == 'spacebar':
            logging.info('Toggled pause.')
            self.ffplayer.toggle_pause()
        elif keycode[1] == 'r':
            logging.debug('ffpyplayer: Forcing a refresh.')
            self._force_refresh = True
        elif keycode[1] == 'v':
            logging.debug('ffpyplayer: Changing video stream.')
            lock.acquire()
            self.ffplayer.request_channel('video',
                                          'close' if ctrl else 'cycle')
            lock.release()
            Clock.unschedule(self.update_pts)
            if ctrl:    # need to continue updating pts, since video is disabled.
                Clock.schedule_interval(self.update_pts, 0.05)
        elif keycode[1] == 'a':
            logging.debug('ffpyplayer: Changing audio stream.')
            lock.acquire()
            self.ffplayer.request_channel('audio',
                                          'close' if ctrl else 'cycle')
            lock.release()
        elif keycode[1] == 't':
            logging.debug('ffpyplayer: Changing subtitle stream.')
            lock.acquire()
            self.ffplayer.request_channel('subtitle',
                                          'close' if ctrl else 'cycle')
            lock.release()
        elif keycode[1] == 'right':
            logging.debug('ffpyplayer: Seeking forward by 10s.')
            self.ffplayer.seek(10.)
        elif keycode[1] == 'left':
            logging.debug('ffpyplayer: Seeking back by 10s.')
            self.ffplayer.seek(-10.)
        elif keycode[1] == 'up':
            logging.debug('ffpyplayer: Increasing volume.')
            self.ffplayer.set_volume(self.ffplayer.get_volume() + 0.01)
            self.root.volume.value = self.ffplayer.get_volume()
        elif keycode[1] == 'down':
            logging.debug('ffpyplayer: Decreasing volume.')
            self.ffplayer.set_volume(self.ffplayer.get_volume() - 0.01)
            self.root.volume.value = self.ffplayer.get_volume()
        return True

    def touch_down(self, touch):
        """Seek to the absolute pts under a touch on the seek slider."""
        if self.root.seek.collide_point(*touch.pos) and self.ffplayer:
            # Map the touch x position (offset by the volume slider width)
            # to a fraction of the duration.
            pts = ((touch.pos[0] - self.root.volume.width) /
            self.root.seek.width * self.ffplayer.get_metadata()['duration'])
            logging.debug('ffpyplayer: Seeking to {}.'.format(pts))
            self.ffplayer.seek(pts, relative=False)
            self._force_refresh = True
            return True
        return False

    def callback(self, selector, value):
        """Player callback; invoked from ffpyplayer's internal thread."""
        if self.ffplayer is None:
            return
        if selector == 'quit':
            logging.debug('ffpyplayer: Quitting.')
            def close(*args):
                self._done = True
                self.ffplayer = None
            Clock.schedule_once(close, 0)
        # called from internal thread, it typically reads forward
        elif selector == 'display_sub':
            self.display_subtitle(*value)

    def _next_frame(self):
        """Worker thread: fetch frames and trigger redraws until done."""
        ffplayer = self.ffplayer
        sleep = time.sleep
        trigger = self._trigger
        while not self._done:
            force = self._force_refresh
            if force:
                self._force_refresh = False
            frame, val = ffplayer.get_frame(force_refresh=force)

            if val == 'eof':
                logging.debug('ffpyplayer: Got eof.')
                sleep(1 / 30.)
            elif val == 'paused':
                logging.debug('ffpyplayer: Got paused.')
                sleep(1 / 30.)
            else:
                if frame:
                    logging.debug('ffpyplayer: Next frame: {}.'.format(val))
                    # Sleep val seconds before publishing the frame, then
                    # hand it to the main thread via the Clock trigger.
                    sleep(val)
                    self.next_frame = frame
                    trigger()
                else:
                    # No frame yet: fall back to a ~30fps polling interval.
                    val = val if val else (1 / 30.)
                    logging.debug('ffpyplayer: Schedule next frame check: {}.'
                                  .format(val))
                    sleep(val)

    def redraw(self, dt=0, force_refresh=False):
        """Blit the latest frame to the image widget (main thread)."""
        if not self.ffplayer:
            return
        if self.next_frame:
            img, pts = self.next_frame
            # Rebuild the texture when the frame size changed.
            if img.get_size() != self.size or self.texture is None:
                self.root.image.canvas.remove_group(str(self)+'_display')
                self.texture = Texture.create(size=img.get_size(),
                                              colorfmt='rgb')
                # by adding 'vf':'vflip' to the player initialization ffmpeg
                # will do the flipping
                self.texture.flip_vertical()
                self.texture.add_reload_observer(self.reload_buffer)
                self.size = img.get_size()
                logging.debug('ffpyplayer: Creating new image texture of '
                              'size: {}.'.format(self.size))
            self.texture.blit_buffer(img.to_memoryview()[0])
            # Reassign to force the widget to notice the new texture data.
            self.root.image.texture = None
            self.root.image.texture = self.texture
            self.root.seek.value = pts
            logging.debug('ffpyplayer: Blitted new frame with time: {}.'
                          .format(pts))

        if self.root.seek.value:
            self.root.seek.max = self.ffplayer.get_metadata()['duration']

    def display_subtitle(self, text, fmt, pts, t_start, t_end):
        pass # fmt is text (unformatted), or ass (formatted subs)

    def reload_buffer(self, *args):
        """Re-blit the last frame after a GL context reload."""
        logging.debug('ffpyplayer: Reloading buffer.')
        frame = self.next_frame
        if not frame:
            return
        self.texture.blit_buffer(frame[0].to_memoryview()[0], colorfmt='rgb',
                                 bufferfmt='ubyte')
Example #7
0
class DowGlImage(QtOpenGLWidgets.QOpenGLWidget, QtGui.QOpenGLFunctions):
    """OpenGL widget that renders either a still image or a looping video.

    A generator assigned to ``self.__texture_generator`` yields one
    ``QOpenGLTexture`` each time ``paintGL`` runs; a daemon thread triggers
    ``update()`` roughly 24 times per second so video frames keep flowing.
    """

    # Vertex shader: scales the unit quad by ``biasTexCoord`` (set per
    # frame in paintGL from the texture/widget size ratio).
    __vertex_shader = """
    #version 440 core
    layout(location = 0) in vec3 inPosition;
    layout(location = 1) in vec2 texCoord;
    layout(location = 2) uniform vec2 biasTexCoord;

    layout(location = 0) out vec3 outColor;
    layout(location = 1) out vec2 outCoord;

    void main()
    {
      outColor = vec3(1.0f, 0.5f, 1.0f);
      outCoord = texCoord;
      float pos_x = inPosition.x * biasTexCoord.x;
      float pos_y = inPosition.y * biasTexCoord.y;

      gl_Position = vec4(pos_x, pos_y, 0.0, 1.0);
    }"""

    # Fragment shader: plain textured quad.
    __frag_shader = """
    #version 440 core
    layout(location = 0) in vec3 inColor;
    layout(location = 1) in vec2 texCoord;
    layout(location = 0) out vec4 outColor;
    uniform sampler2D inTexture;

    void main()
    {
      outColor = texture(inTexture, texCoord);
    }
    """

    def __init__(self, parent, tag=None):
        QtOpenGLWidgets.QOpenGLWidget.__init__(self, parent)
        GL.__init__(self)
        # Interleaved quad data: x, y, z, u, v for each of the 4 vertices.
        self.__data = np.array([
            -1.0, -1.0, 0.0, 0.0, 0.0, -1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0,
            1.0, 1.0, 1.0, -1.0, 0.0, 1.0, 0.0
        ],
                               dtype=ctypes.c_float)

        self.tag = tag
        self.__mutex = threading.Lock()
        self._is_video = False
        self._is_video_playing = False

        self.__texture_generator = None
        self.__player = None
        self.__uniform_tex_bias = -1
        # Cooperative shutdown flag for the repaint thread.
        self.__stop_video = False

    def __del__(self):
        # The previous version called the private Thread._stop() and then
        # join()ed a thread that loops forever, which is unsupported API and
        # could hang teardown.  Signal the loop to exit instead; the thread
        # is a daemon, so a bounded join is enough.
        self.__stop_video = True
        try:
            self.__video_thread.join(timeout=1.0)
        except AttributeError:
            # initializeGL never ran, so the thread was never created.
            pass

    def initializeGL(self):
        """Compile shaders, build the quad VAO/VBO and start the repaint thread."""
        self.initializeOpenGLFunctions()
        self.glClearColor(0, 0, 0, 1)

        self.__program = QtOpenGL.QOpenGLShaderProgram()
        self.__program.addShaderFromSourceCode(QtOpenGL.QOpenGLShader.Vertex,
                                               self.__vertex_shader)
        self.__program.addShaderFromSourceCode(QtOpenGL.QOpenGLShader.Fragment,
                                               self.__frag_shader)
        self.__program.link()

        self.__uniform_tex_bias = self.__program.uniformLocation(
            "biasTexCoord")

        self.__vao = QtOpenGL.QOpenGLVertexArrayObject()
        self.__vao.create()
        self.__vao.bind()

        self.__buffer = QtOpenGL.QOpenGLBuffer(
            QtOpenGL.QOpenGLBuffer.Type.VertexBuffer)
        self.__buffer.create()
        self.__buffer.bind()

        float_size = ctypes.sizeof(ctypes.c_float)
        null = VoidPtr(0)
        pointer = VoidPtr(3 * float_size)

        self.__buffer.allocate(self.__data.tobytes(),
                               self.__data.size * float_size)
        # Attribute 0: position (3 floats); attribute 1: tex coord (2 floats);
        # stride is 5 floats per vertex.
        self.glVertexAttribPointer(0, 3, int(pygl.GL_FLOAT),
                                   int(pygl.GL_FALSE), 5 * float_size, null)
        self.glVertexAttribPointer(1, 2, int(pygl.GL_FLOAT),
                                   int(pygl.GL_FALSE), 5 * float_size, pointer)
        self.glEnableVertexAttribArray(0)
        self.glEnableVertexAttribArray(1)
        self.__vao.release()
        self.__buffer.release()

        self.__video_thread = threading.Thread(target=self.__video_play,
                                               args=(),
                                               daemon=True)
        self.__video_thread.start()

    def resizeGL(self, w, h):
        """Keep the GL viewport in sync with the widget size."""
        self.glViewport(0, 0, w, h)

    def paintGL(self):
        """Pull the next texture from the active stream and draw it."""
        self.glClear(pygl.GL_COLOR_BUFFER_BIT)

        # ``with`` guarantees the lock is released even if drawing raises
        # (the old acquire()/release() pair leaked the lock on exceptions).
        with self.__mutex:
            if self.__texture_generator is not None:
                try:
                    texture = next(self.__texture_generator)
                except StopIteration:
                    texture = None
                except Exception:
                    # Decode/upload failure: treat as "no frame this tick"
                    # (was a bare ``except:`` swallowing everything).
                    texture = None

                if texture is not None:
                    # Scale the quad from the texture/widget size ratio so
                    # the image is not stretched to the full widget.
                    rate = min(self.size().width() / texture.width(),
                               self.size().height() / texture.height())
                    rate_x = (texture.width() / self.size().width()) * rate
                    rate_y = (texture.height() / self.size().height()) * rate
                    self.__program.bind()
                    if self.__uniform_tex_bias > -1:
                        self.__program.setUniformValue(self.__uniform_tex_bias,
                                                       rate_x, rate_y)

                    self.__vao.bind()
                    self.glActiveTexture(pygl.GL_TEXTURE0)
                    texture.bind()
                    self.glDrawArrays(int(pygl.GL_POLYGON), 0, 4)
                    texture.release()
                    self.__vao.release()
                    self.__program.release()
                    if self._is_video:
                        # Video textures are one-shot; free GPU memory now.
                        texture.destroy()
                else:
                    # Stream exhausted: drop the generator.
                    self.__texture_generator = None
                    self._is_video = False

    def __create_texture(self, image):
        """Build a QOpenGLTexture from an RGBA ndarray (mirrored for GL)."""
        texture = QtOpenGL.QOpenGLTexture(QtOpenGL.QOpenGLTexture.Target2D)
        texture.setMinMagFilters(QtOpenGL.QOpenGLTexture.Filter.Nearest,
                                 QtOpenGL.QOpenGLTexture.Filter.Linear)
        texture.setBorderColor(0, 0, 0, 1)
        texture.setWrapMode(QtOpenGL.QOpenGLTexture.ClampToBorder)
        # Mipmaps are disabled here; the old extra line
        # ``texture.DontGenerateMipMaps = True`` only shadowed a class enum
        # on the instance (a no-op) and has been removed.
        texture.setAutoMipMapGenerationEnabled(False)
        texture.setData(
            QtGui.QImage(image, image.shape[1], image.shape[0],
                         QtGui.QImage.Format_RGBA8888).mirrored())
        return texture

    def __video_stream(self, filename):
        """Generator yielding one texture per decoded video frame, looping.

        Also runs an ffpyplayer MediaPlayer on the same file for audio;
        get_frame(show=False) is called each frame — presumably to keep the
        audio clock advancing without fetching video (TODO confirm).
        """
        video = cv2.VideoCapture(str(filename))
        if self.__player is not None:
            self.__player.close_player()
            self.__player = None

        self.__player = MediaPlayer(str(filename))
        self.__player.set_volume(1.0)
        self._is_video_playing = True
        while video.isOpened():
            ret, frame = video.read()
            self.__player.get_frame(show=False)
            if ret:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
                tex = self.__create_texture(frame)
                yield tex
            else:
                # End of file: rewind both the capture and the audio player.
                video.set(cv2.CAP_PROP_POS_FRAMES, 0)
                self.__player.seek(0, relative=False)

        self._is_video_playing = False
        return None

    def __image_stream(self, filename):
        """Generator yielding the same still-image texture forever."""
        image = cv2.imread(str(filename), cv2.IMREAD_UNCHANGED)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA)
        tex = self.__create_texture(image)
        if self.__player is not None:
            self.__player.close_player()
            self.__player = None

        while True:
            yield tex

    def SetImage(self, filename):
        """Display a still image, freeing the previous stream's texture."""
        with self.__mutex:
            self._is_video = False
            if self.__texture_generator is not None:
                try:
                    tex = next(self.__texture_generator)
                    tex.destroy()
                except StopIteration:
                    # Old stream already exhausted; nothing to free.
                    pass
            self.__texture_generator = self.__image_stream(filename)

    def SetVideo(self, filename):
        """Start playing a video file (decoding begins on the next paint)."""
        with self.__mutex:
            self._is_video = True
            self.__texture_generator = self.__video_stream(filename)

    def Clear(self):
        """Stop displaying any content."""
        with self.__mutex:
            self.__texture_generator = None

    def __video_play(self):
        # Repaint ticker (~24 fps).  paintGL pulls the next frame; this
        # thread only schedules repaints, and exits on shutdown or once the
        # widget can no longer be updated.
        while not self.__stop_video:
            try:
                self.update()
            except Exception:
                break
            time.sleep(0.0416)
예제 #8
0
class Window(QMainWindow, Ui_MainWindow):
    """Playlist-based media player window built on ffpyplayer.

    State: ``flag`` is True while no player instance is active, ``step`` is
    the elapsed playback time in seconds, ``loop`` toggles looping behaviour
    (see Loop/Step) and ``val`` holds the last status returned by
    ``MediaPlayer.get_frame``.
    """

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.Listadd()
        self.step = 0      # elapsed seconds of the current item
        self.loop = 1      # loop-mode toggle (see Loop/Step)
        self.flag = True   # True while no player instance is active
        # Default so Mute() can restore a volume even if Curvol() was never
        # called (previously an AttributeError path).
        self.curvol = 100

    def Listadd(self):
        """Populate the list widget from the persisted playlist file."""
        if os.path.isfile('CPlayerlist.txt'):
            with open('CPlayerlist.txt') as f:
                for filelist in f:
                    filelist = filelist.strip()
                    self.list.addItem(filelist)

    def Add(self):
        """Ask the user for media files and append them to the playlist."""
        filelists, _ = QFileDialog.getOpenFileNames(self, '添加到播放列表', '.',
                                                    '媒体文件(*)')
        self.list.addItems(filelists)
        self.Listchanged()

    def Remove(self):
        """Remove the selected playlist entry and persist the change."""
        self.list.takeItem(self.list.currentRow())
        self.Listchanged()

    def Clear(self):
        """Empty the playlist widget and delete the persisted list file."""
        self.list.clear()
        # The file may not exist yet (fresh start, or Clear pressed twice);
        # the old unconditional os.remove raised FileNotFoundError.
        if os.path.isfile('CPlayerlist.txt'):
            os.remove('CPlayerlist.txt')

    def Listchanged(self):
        """Persist the current playlist, one path per line."""
        with open('CPlayerlist.txt', 'w') as f:
            for i in range(self.list.count()):
                f.write(self.list.item(i).text() + '\n')

    def Loop(self):
        """Toggle loop mode and update the loop button icon/tooltip."""
        if self.loop == 0:
            self.loop = 1
            self.bloop.setIcon(QIcon(r'img\withloop.png'))
            self.bloop.setToolTip('循环播放')
        else:
            self.loop = 0
            self.bloop.setIcon(QIcon(r'img\withoutloop.png'))
            self.bloop.setToolTip('取消循环')

    def Play(self):
        """Start, pause/resume, or switch playback for the selected item."""
        try:
            if self.flag:
                # Nothing playing yet: create the player and the UI timers.
                self.playitem = self.list.currentItem().text()
                self.player = MediaPlayer("%s" % self.playitem)
                self.timer = QTimer()
                self.timer.start(50)
                self.timer.timeout.connect(self.Show)
                self.steptimer = QTimer()
                self.steptimer.start(1000)
                self.steptimer.timeout.connect(self.Step)
                self.flag = False
                self.bplay.setIcon(QIcon(r'img\pause.png'))
                self.bplay.setToolTip('暂停')
            else:
                if self.list.currentItem().text() == self.playitem:
                    # Same item selected again: toggle pause.
                    self.player.toggle_pause()
                    if self.player.get_pause():
                        self.timer.stop()
                        self.steptimer.stop()
                        self.bplay.setIcon(QIcon(r'img\play.png'))
                        self.bplay.setToolTip('播放')
                    else:
                        self.timer.start()
                        self.steptimer.start()
                        self.bplay.setIcon(QIcon(r'img\pause.png'))
                        self.bplay.setToolTip('暂停')
                else:
                    # Different item selected: restart with the new file.
                    self.step = 0
                    self.stime.setValue(0)
                    self.playitem = self.list.currentItem().text()
                    self.player = MediaPlayer("%s" % self.playitem)
                    self.timer.start()
                    self.steptimer.start()
        except Exception:
            # Was a bare ``except:``; narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            QMessageBox.warning(self, '错误', '找不到要播放的文件!')

    def Show(self):
        """Timer slot (50 ms): blit the next frame and refresh the time label."""
        frame, self.val = self.player.get_frame()
        self.lmedia.setPixmap(QPixmap(''))
        if self.val != 'eof' and frame is not None:
            img, t = frame
            data = img.to_bytearray()[0]
            width, height = img.get_size()
            # NOTE(review): no bytesPerLine is passed to QImage; frames whose
            # row stride differs from width*3 may render skewed — confirm
            # with odd widths.
            qimg = QImage(data, width, height, QImage.Format_RGB888)
            self.lmedia.setPixmap(QPixmap.fromImage(qimg))
        self.mediatime = self.player.get_metadata()['duration']
        self.stime.setMaximum(int(self.mediatime))
        mediamin, mediasec = divmod(self.mediatime, 60)
        mediahour, mediamin = divmod(mediamin, 60)
        playmin, playsec = divmod(self.step, 60)
        playhour, playmin = divmod(playmin, 60)
        self.ltime.setText(
            '%02d:%02d:%02d/%02d:%02d:%02d' %
            (playhour, playmin, playsec, mediahour, mediamin, mediasec))

    def Stop(self):
        """Stop playback and reset the UI to its idle state."""
        if not self.flag:
            self.player.close_player()
            self.timer.stop()
            self.steptimer.stop()
            self.step = 0
            self.loop = 1
            self.flag = True
            self.stime.setValue(0)
            self.ltime.setText('')
            self.bplay.setIcon(QIcon(r'img\play.png'))
            self.bplay.setToolTip('播放')
            self.lmedia.setPixmap(QPixmap(''))

    def Curvol(self):
        """Remember the current slider volume (restored after unmuting)."""
        self.curvol = self.svolume.value()

    def Mute(self):
        """Toggle mute, restoring the previous volume when unmuting."""
        if not self.flag:
            if self.player.get_volume() != 0:
                self.player.set_volume(0)
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('取消静音')
            else:
                if self.svolume.value() != 0:
                    self.player.set_volume(self.svolume.value())
                else:
                    self.player.set_volume(self.curvol / 100)
                    self.svolume.setValue(self.curvol)
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('静音')

    def Volume(self):
        """Slider slot: apply the new volume and update the mute icon."""
        if not self.flag:
            if self.svolume.value() == 0:
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('取消静音')
            else:
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('静音')
            self.player.set_volume(self.svolume.value() / 100)

    def Step(self):
        """One-second timer slot: advance the progress counter and slider."""
        if self.step >= int(self.mediatime):
            self.step = int(self.mediatime)
            if self.loop == 0:
                # Restart playback from the top.
                self.step = 0
                self.flag = True
                self.Play()
            else:
                if self.val == 'eof':
                    # End of media: tear down playback state.
                    self.timer.stop()
                    self.steptimer.stop()
                    self.step = 0
                    self.loop = 1
                    self.flag = True
                    self.stime.setValue(0)
                    self.player.close_player()
                    self.bplay.setIcon(QIcon(r'img\play.png'))
                    self.bplay.setToolTip('播放')
        else:
            self.step += 1
            self.stime.setValue(self.step)

    def Slidechanged(self):
        """Keep the playback counter in sync with the slider position."""
        self.step = self.stime.value()

    def Slidemoved(self):
        """Seek by recreating the player at the slider position."""
        self.timer.start()
        self.steptimer.start()
        self.player = MediaPlayer("%s" % self.playitem,
                                  ff_opts={'ss': self.step})
        self.bplay.setIcon(QIcon(r'img\pause.png'))
        self.bplay.setToolTip('暂停')

    def Fastforward(self):
        """Jump 10 seconds forward by recreating the player."""
        self.step += 10
        if self.step >= int(self.mediatime):
            self.stime.setValue(int(self.mediatime))
        self.timer.start()
        self.steptimer.start()
        self.player = MediaPlayer("%s" % self.playitem,
                                  ff_opts={'ss': self.step})
        self.bplay.setIcon(QIcon(r'img\pause.png'))
        self.bplay.setToolTip('暂停')

    def Fastback(self):
        """Jump 10 seconds backward by recreating the player."""
        self.step -= 10
        if self.step <= 0:
            self.step = 0
        self.timer.start()
        self.steptimer.start()
        self.player = MediaPlayer("%s" % self.playitem,
                                  ff_opts={'ss': self.step})
        self.bplay.setIcon(QIcon(r'img\pause.png'))
        self.bplay.setToolTip('暂停')
예제 #9
0
class Application(tk.Frame):
    def __init__(self, master=None):
        """Build the downloader/player window and initialize all state.

        ``master`` is the Tk root; all widgets are created by
        ``create_widgets`` at the end of construction.
        """
        super().__init__(master)
        self.master = master
        self.master.geometry("640x420")
        self.winfo_toplevel().title("YouTube downloader")
        #Create icon from base64 string
        icon_file = io.BytesIO(base64.b64decode(icon))
        img = Image.open(icon_file, mode='r')
        self.master.iconphoto(True, ImageTk.PhotoImage(image=img))

        self.video_link = tk.StringVar()  #Link to the youtube video
        self.download_path = tk.StringVar()  #Folder to download videos to
        self.video_folder = tk.StringVar(
        )  #Represents the folder to play videos from
        self.selected_video = 0  #Current playing video, idx
        self.playlist = []  #Array of video's to play
        self.downloadLeft = [0, 0]
        self.download_count = None  #Download count text widget
        self.video_list = None
        self.video_embed = None
        #Styles
        self.mainBgColor = "#121212"
        self.labelBgColor = "#1E1E1E"
        self.fontColor = "white"
        self.btnHighlight = "#FF00FF"

        #Video player
        self.video_player = None
        #Create blank image for video player
        img_str = io.BytesIO(base64.b64decode(blank))
        img = Image.open(img_str, mode='r')
        self.blank_img = ImageTk.PhotoImage(image=img)

        #Play/Stop video buttons
        self.playButton = None
        self.stopButton = None
        self.pauseButton = None
        self.playlistChanged = False  #Flag for seeing whether playlist has changed
        self.songChanged = False  #Flag for seeing if user pressed next button
        self.playback_buttons_frame = None  #Bind the playback buttons frame for swapping buttons inside of it
        self.isPlaying = False  #Flag used seeing if video is being streamed (Used in next/prev song, not in thread termination!)
        self.curVolume = 50
        self.curBassBoost = 0  #Maybe used one day
        self.now_playing = ""

        self.master.resizable(False, False)
        self.create_widgets()
        self.fps = 30

    def create_widgets(self):
        """Build the whole UI: download controls, playback controls,
        volume slider, video frame and the playlist/queue listbox.

        Layout is pure tkinter ``grid``; creation order matters because
        several widgets (playButton, pauseButton, video_embed, now_playing,
        video_list) are bound to instance attributes used by other methods.
        """
        self.master.config(bg=self.mainBgColor)

        ##DOWNLOADING
        download_frame = LabelFrame(self.master, bg=self.mainBgColor, width=20)
        download_frame.grid(row=0, column=0, pady=10)

        #Input/Link frame
        input_frame = LabelFrame(download_frame, bg=self.mainBgColor, bd=0)
        input_frame.grid(row=0, column=0, padx=5)

        link_lable = tk.Label(input_frame,
                              text="YouTube link: ",
                              width=10,
                              bg=self.mainBgColor,
                              fg=self.fontColor)
        link_lable.grid(row=1, column=0, pady=5, padx=20)

        self.master.linkText = tk.Entry(input_frame,
                                        width=54,
                                        textvariable=self.video_link,
                                        bg=self.labelBgColor,
                                        fg=self.fontColor)
        self.master.linkText.grid(row=1, column=1, padx=2)

        #destination frame (for better button positioning)
        destination_frame = LabelFrame(download_frame,
                                       bg=self.mainBgColor,
                                       bd=0)
        destination_frame.grid(row=2, column=0)

        destination_label = tk.Label(destination_frame,
                                     text="Destination: ",
                                     width=10,
                                     bg=self.mainBgColor,
                                     fg=self.fontColor)
        destination_label.grid(row=0, column=0, padx=20)

        self.master.destinationText = tk.Entry(destination_frame,
                                               width=40,
                                               textvariable=self.download_path,
                                               bg=self.labelBgColor,
                                               fg=self.fontColor)
        self.master.destinationText.grid(row=0, column=1, padx=(4, 0))

        browse_B = tk.Button(destination_frame,
                             text="Browse",
                             command=self.BrowseDestination,
                             width=10,
                             bg=self.labelBgColor,
                             fg=self.fontColor,
                             activebackground=self.btnHighlight,
                             activeforeground="black")
        browse_B.grid(row=0, column=2, padx=4)

        #Download frame
        download_btn_frame = LabelFrame(download_frame,
                                        bg=self.mainBgColor,
                                        bd=0)
        download_btn_frame.grid(row=3, column=0)

        download_b = tk.Button(download_btn_frame,
                               text="Download",
                               command=self.Download,
                               width=20,
                               bg=self.labelBgColor,
                               fg="white",
                               activebackground=self.btnHighlight,
                               activeforeground="black")
        download_b.grid(row=0, column=0)

        self.download_count = tk.Text(download_btn_frame,
                                      width=22,
                                      height=1,
                                      bg=self.mainBgColor,
                                      fg="white",
                                      bd=0)
        self.download_count.grid(row=0, column=1, padx=5)
        self.download_count.insert(tk.END, "Download status: ")
        self.download_count.insert(tk.END, self.downloadLeft[0])
        self.download_count.insert(tk.END, " / ")
        self.download_count.insert(tk.END, self.downloadLeft[1])

        ##VIDEO PLAYER

        #Playback buttons and volume sliders container
        playback_container_frame = LabelFrame(self.master,
                                              bg=self.mainBgColor,
                                              bd=0)
        playback_container_frame.grid(row=1, column=0, pady=5)

        #Container that has bottom and top frame for playback buttons
        buttons_container = LabelFrame(playback_container_frame,
                                       bg=self.mainBgColor,
                                       bd=0)
        buttons_container.grid(row=0, column=0)

        #Top frame (Empty)
        top_frame = LabelFrame(buttons_container,
                               bg=self.mainBgColor,
                               height=22,
                               bd=0)
        top_frame.grid(row=0, column=0)

        self.playback_buttons_frame = LabelFrame(buttons_container,
                                                 bg=self.mainBgColor,
                                                 bd=0)
        self.playback_buttons_frame.grid(row=1,
                                         column=0,
                                         padx=(50, 0),
                                         pady=(8, 0))

        prevVid = tk.Button(self.playback_buttons_frame,
                            text="Prev",
                            command=self.PreviousVideo,
                            width=10,
                            bg=self.labelBgColor,
                            fg="white",
                            activebackground=self.btnHighlight,
                            activeforeground="black")

        prevVid.grid(row=0, column=0)

        self.playButton = tk.Button(self.playback_buttons_frame,
                                    text="Play",
                                    command=self.PlayVideo,
                                    width=10,
                                    bg=self.labelBgColor,
                                    fg="white",
                                    activebackground=self.btnHighlight,
                                    activeforeground="black")
        self.playButton.grid(row=0, column=1)

        nextVid = tk.Button(self.playback_buttons_frame,
                            text="Next",
                            command=self.NextVideo,
                            width=10,
                            bg=self.labelBgColor,
                            fg="white",
                            activebackground=self.btnHighlight,
                            activeforeground="black")
        nextVid.grid(row=0, column=2)

        self.pauseButton = tk.Button(self.playback_buttons_frame,
                                     text="Pause",
                                     command=self.PauseVideo,
                                     width=10,
                                     bg=self.labelBgColor,
                                     fg="white",
                                     activebackground=self.btnHighlight,
                                     activeforeground="black")

        self.pauseButton.grid(row=0, column=3)

        #Volume slider and bass EQ
        eq_frame = LabelFrame(playback_container_frame,
                              bg=self.mainBgColor,
                              bd=0)
        eq_frame.grid(row=0, column=1, padx=50)

        volumeText = tk.Text(eq_frame,
                             width=10,
                             height=1,
                             bg=self.mainBgColor,
                             fg="white",
                             bd=0)
        volumeText.tag_configure("center", justify="center")
        volumeText.insert("1.0", "Volume")
        volumeText.tag_add("center", "1.0", "end")
        volumeText.grid(row=0, column=0, padx=(30, 0))
        volume_slider = Scale(eq_frame,
                              from_=0,
                              to=100,
                              orient=tk.HORIZONTAL,
                              bg=self.mainBgColor,
                              bd=0,
                              fg="white",
                              troughcolor=self.labelBgColor,
                              highlightbackground=self.mainBgColor,
                              activebackground="#FF00FF",
                              command=self.VolumeSlider,
                              length=150)

        volume_slider.grid(row=1, column=0, padx=(30, 0))
        volume_slider.set(self.curVolume)

        #EQ/Bass boost slider for future use
        #TODO:: Re-compile ffpyplayer module with custom sdl_audio_callback function call, which will take use DSP for changing the pitch of the audio
        #Bind this slider to callback the sdl_audio_callback custom function
        # bassText = tk.Text(eq_frame, width=10, height=1, bg=self.mainBgColor, fg="white", bd=0)
        # bassText.insert(tk.END, "Bass boost")
        # bassText.grid(row=0, column=1)
        # bass_slider = Scale(eq_frame, from_=0, to=100, orient=tk.HORIZONTAL,
        #                     bg=self.mainBgColor, bd=0, fg="white",
        #                     troughcolor=self.labelBgColor, highlightbackground=self.mainBgColor,
        #                     activebackground="#FF00FF")
        # bass_slider.grid(row=1, column=1)
        # bass_slider.set(self.curBassBoost)

        #Video stream frame
        video_player_frame = LabelFrame(self.master,
                                        bg=self.labelBgColor,
                                        bd=1)
        video_player_frame.grid(row=2, column=0, padx=17)

        self.video_embed = tk.Label(video_player_frame,
                                    text="Video",
                                    image=self.blank_img,
                                    bg=self.labelBgColor)
        self.video_embed.grid(row=0, column=0)

        self.now_playing = Text(video_player_frame,
                                width=50,
                                height=1,
                                bg=self.mainBgColor,
                                fg="white")
        self.now_playing.insert(tk.END, "Now playing: ")
        self.now_playing.grid(row=1, column=0)

        #Video queue frame
        queue_frame = LabelFrame(video_player_frame,
                                 bg=self.labelBgColor,
                                 bd=0)
        queue_frame.grid_rowconfigure(0, weight=0)
        queue_frame.grid_columnconfigure(0, weight=1)
        queue_frame.grid(row=0, column=1)

        queue_buttons = LabelFrame(queue_frame, bg=self.labelBgColor)
        queue_buttons.grid(row=0, column=0)

        browse_In = tk.Button(queue_buttons,
                              text="Browse",
                              command=self.BrowseInputFolder,
                              width=10,
                              bg=self.labelBgColor,
                              fg=self.fontColor,
                              bd=1,
                              activebackground=self.btnHighlight,
                              activeforeground="black")
        browse_In.grid(row=0, column=1)

        playAll = tk.Button(queue_buttons,
                            text="Select all",
                            command=self.SelectAll,
                            width=10,
                            bg=self.labelBgColor,
                            fg=self.fontColor,
                            bd=1,
                            activebackground=self.btnHighlight,
                            activeforeground="black")
        playAll.grid(row=0, column=2)

        self.video_list = tk.Listbox(queue_frame,
                                     font=("Helvetica", 12),
                                     selectmode=tk.EXTENDED,
                                     exportselection=0,
                                     height=9,
                                     bg=self.labelBgColor,
                                     fg=self.fontColor,
                                     bd=0,
                                     selectbackground=self.btnHighlight)
        self.video_list.grid(row=1, column=0)
        self.video_list.bind("<<ListboxSelect>>", self.listbox_sel_callback)
        #self.video_list.bind('<Double-Button>', self.PlayVideo)            #Double clicking video causes thread exceptions for some reason

        scrollbar = Scrollbar(queue_frame,
                              orient="vertical",
                              command=self.video_list.yview,
                              bg=self.labelBgColor,
                              highlightcolor=self.btnHighlight,
                              bd=0)
        self.video_list.config(yscrollcommand=scrollbar.set)
        scrollbar.grid(row=1, column=1, sticky='ns')

        self.curVolume = 50
        #Search bar for videos
        # search_bar = tk.Entry(queue_frame, bd=0, )
        # search_bar.grid(row=2,column=0)

    def listbox_sel_callback(self, event):
        """Rebuild the playlist from the listbox's current selection."""
        selection = self.video_list.curselection()
        self.playlist = [self.video_list.get(idx) for idx in selection]
        self.playlistChanged = True

    def BrowseInputFolder(self):
        """Ask for a video folder and fill the listbox with its files.

        NOTE(review): os.walk also lists files from subdirectories, but
        start_videostream later joins ``video_folder + "\\" + name``, so
        entries from subfolders would not resolve to a playable path —
        confirm whether recursion is intended.
        """
        video_dir = filedialog.askdirectory(initialdir="C:\\YoutubeVideos")
        self.video_folder.set(video_dir)
        self.video_list.delete(0, tk.END)
        for root, dirs, files in os.walk(self.video_folder.get()):
            for filename in files:
                self.video_list.insert(tk.END, filename)

    def PlayVideo(self):
        """Start streaming the selected video and swap Play -> Stop button.

        Coordinates with the frame-pump thread via the module-level
        ``stop_thread`` flag: raise it, wait briefly for the old thread to
        notice, then clear it and start a fresh daemon thread.
        """
        global stop_thread
        stop_thread = True
        time.sleep(0.05)  #Dangerous way of waiting for thread lol
        stop_thread = False
        self.isPlaying = True
        self.playlistChanged = False

        # if self.selected_video >= 0 and self.selected_video < len(self.playlist):
        self.start_videostream()
        #self.video_player.set_volume(float(self.curVolume)/100)
        thread = threading.Thread(target=self.Video_data_stream)
        thread.daemon = 1
        thread.start()
        # Swap the Play button for a Stop button in the same grid cell.
        self.playButton.grid_forget()
        self.stopButton = tk.Button(self.playback_buttons_frame,
                                    text="Stop",
                                    command=self.StopVideo,
                                    width=10,
                                    bg="#FF00FF",
                                    fg="black")
        self.stopButton.grid(row=0, column=1)

        #Change the "now playing"
        self.changeNowPlaying()

    def changeNowPlaying(self):
        """Refresh the "Now playing" label under the video frame."""
        label = "Now playing: "
        if self.isPlaying:
            label += self.playlist[self.selected_video]
        self.now_playing.delete("1.0", tk.END)
        self.now_playing.insert(tk.END, label)

    def StopVideo(self):
        """Stop streaming, pause the player and swap Stop -> Play button.

        Raises the module-level ``stop_thread``/``pause_thread`` flags so the
        frame-pump thread exits, then delegates the actual player pause to
        PauseVideo().
        """
        global stop_thread
        global pause_thread
        self.isPlaying = False
        stop_thread = True
        pause_thread = True
        self.PauseVideo()

        # Swap the Stop button back to a Play button in the same grid cell.
        self.stopButton.grid_forget()
        self.playButton = tk.Button(self.playback_buttons_frame,
                                    text="Play",
                                    command=self.PlayVideo,
                                    width=10,
                                    bg=self.labelBgColor,
                                    fg=self.fontColor)
        self.playButton.grid(row=0, column=1)
        self.changeNowPlaying()

    def PauseVideo(self):
        """Toggle pause state via the module-level ``pause_thread`` flag.

        The flag is read by Video_data_stream (which spins while paused) and
        the actual audio/video pause is forwarded to the ffpyplayer instance.
        """
        global pause_thread
        if pause_thread:
            # Currently paused -> resume.
            #Why isn't this done in play/stop aswell lol
            self.pauseButton.config(text="Pause",
                                    bg=self.labelBgColor,
                                    fg="white")
            pause_thread = False
            self.video_player.set_pause(False)
        else:
            # Currently playing -> pause.
            self.pauseButton.config(text="Unpause",
                                    bg=self.btnHighlight,
                                    fg="black")
            pause_thread = True
            self.video_player.set_pause(True)

    def start_videostream(self):
        """Create a new MediaPlayer for the selected video, replacing any old one.

        The player is created paused at a low volume, then unpaused once the
        real volume has been applied, to avoid a loud blip during startup.
        """
        #Start new instance of player
        if self.video_player:
            self.video_player.close_player()
        cVol = float(self.curVolume) / 100
        print(cVol)
        # NOTE(review): path is joined with a Windows separator — this
        # assumes a Windows host; confirm before porting.
        self.video_player = MediaPlayer(self.video_folder.get() + "\\" +
                                        self.playlist[self.selected_video],
                                        ff_opts={
                                            'paused': True,
                                            'volume': 0.03
                                        })
        self.video_player.set_size(400, 200)
        #while not self.video_player:
        #    continue
        # Presumably gives the player time to initialize before the volume
        # call — TODO confirm whether this sleep is actually required.
        time.sleep(0.1)
        if self.video_player:
            self.video_player.set_volume(cVol)
        self.video_player.set_pause(False)

    def NextVideo(self):
        """Advance to the next playlist entry (wrapping) and restart the stream."""
        if not self.isPlaying:
            return
        # Tear down the current player before switching videos.
        self.video_player.close_player()

        # Tell the streaming thread that the video was switched.
        self.songChanged = True

        if self.playlistChanged:
            # A freshly-changed playlist always starts from its first entry.
            self.selected_video = 0
            self.playlistChanged = False
        else:
            # Increment with wrap-around to index 0 at the end of the list.
            self.selected_video = (self.selected_video + 1) % len(self.playlist)

        self.start_videostream()
        self.changeNowPlaying()

    def PreviousVideo(self):
        """Step back to the previous playlist entry (wrapping) and restart the stream."""
        if not self.isPlaying:
            return

        # Tear down the current player before switching videos.
        self.video_player.close_player()

        self.songChanged = True

        if self.playlistChanged:
            # A freshly-changed playlist always starts from its first entry.
            self.selected_video = 0
            self.playlistChanged = False
        else:
            # Decrement with wrap-around to the last entry at index 0.
            self.selected_video = (self.selected_video - 1) % len(self.playlist)

        self.start_videostream()
        self.changeNowPlaying()

    def SelectAll(self):
        """Select every listbox entry and rebuild the playlist from the selection."""
        for idx in range(self.video_list.size()):
            self.video_list.selection_set(idx)
        # Programmatic selection does not fire the selection callbacks,
        # so mirror the selection into the playlist by hand.
        self.playlist = [
            self.video_list.get(i) for i in self.video_list.curselection()
        ]
        self.playlistChanged = True

    def VolumeSlider(self, value):
        """Slider callback: apply the new volume (0-100 scale) and remember it."""
        player = self.video_player
        if player:
            player.set_volume(float(value) / 100)
        self.curVolume = value

    def Video_data_stream(self):
        """Frame pump: pull frames from the player and display them in the Tk label.

        Runs on a daemon thread.  Cooperates with the GUI thread through the
        module-level ``stop_thread`` / ``pause_thread`` flags; returns when
        ``stop_thread`` is raised.  On 'eof' it advances to the next video.
        """
        global stop_thread
        global pause_thread
        stop_thread = False
        pause_thread = False

        #Start video/audio stream
        #todo:: len(self.playlist will change)
        while True:
            try:
                frame, val = self.video_player.get_frame()
                if val == 'eof':
                    self.video_player.close_player()
                    self.NextVideo()  #Increment the video index
                    self.video_player.set_volume(float(self.curVolume) / 100)
                elif frame is None:
                    # No frame ready yet; avoid a hot loop.
                    time.sleep(0.01)
                else:
                    image, t = frame
                    w, h = image.get_size()
                    img = np.asarray(image.to_bytearray()[0]).reshape(h, w, 3)
                    the_frame = ImageTk.PhotoImage(Image.fromarray(img))
                    self.video_embed.config(image=the_frame)
                    # Keep a reference on the widget so Tk doesn't GC the image.
                    self.video_embed.image = the_frame
                    if stop_thread:
                        self.video_player.close_player()
                        #Reset the embed image
                        self.video_embed.config(image=self.blank_img)
                        return
                    while pause_thread:
                        if stop_thread:
                            pause_thread = False
                            return
                        # Sleep instead of busy-spinning so the pause loop
                        # doesn't peg a CPU core.
                        time.sleep(0.05)
                    if val <= 1:
                        # 'val' is the suggested delay until the next frame.
                        time.sleep(val)
            except Exception:
                # Changes made by the GUI thread (e.g. close_player during a
                # track switch) can raise mid-frame; skip and retry.
                continue

    def BrowseDestination(self):
        """Ask the user for a download folder and store it in download_path."""
        chosen = filedialog.askdirectory(initialdir="C:\\YoutubeVideos")
        self.download_path.set(chosen)

    def Download(self):
        """Download the entered link on a worker thread.

        Playlist URLs (containing "list") are handed to Download_Playlist,
        everything else to Download_Single.  The thread creation itself is
        shared between both cases.
        """
        self.Update_Download_Status()

        link = self.video_link.get()
        download_folder = self.download_path.get()
        if "list" in link:
            target = self.Download_Playlist
            args = (Playlist(link), download_folder)
        else:
            target = self.Download_Single
            args = (link, download_folder)
        # Daemon thread: dies with the process, no join needed on exit.
        thread = threading.Thread(target=target, args=args)
        thread.daemon = True
        thread.start()

    def Download_Playlist(self, playlist, folder):
        """Download every video of a pytube Playlist into *folder*.

        Progress is tracked in ``self.downloadLeft`` as [downloaded, total];
        a video that fails to download is dropped from the total instead of
        aborting the whole playlist.
        """
        self.downloadLeft = [0, len(playlist.video_urls)]
        for url in playlist.video_urls:
            self.Update_Download_Status()
            try:
                getVideo = YouTube(url)
                video_stream_buffer = getVideo.streams.first()
                video_stream_buffer.download(folder)
                self.downloadLeft[0] += 1
            except Exception:
                # Unavailable/failed video: shrink the total and continue.
                if self.downloadLeft[1] > 0:
                    self.downloadLeft[1] -= 1
                continue
        self.Update_Download_Status()
        messagebox.showinfo("Download complete!",
                            "Downloaded videos from playlist to:\n" + folder)

    def Download_Single(self, link, folder):
        """Download one YouTube video *link* into *folder*.

        Updates ``self.downloadLeft`` and shows a message box on completion
        or failure.
        """
        self.downloadLeft = [0, 1]
        try:
            self.Update_Download_Status()
            getVideo = YouTube(link)

            video_stream_buffer = getVideo.streams.first()
            video_stream_buffer.download(folder)
            self.downloadLeft = [1, 1]
            messagebox.showinfo("Download complete!",
                                "Downloaded video to:\n" + folder)
        except Exception:
            # Bug fix: report the failing link, not the destination folder.
            messagebox.showinfo("Download failed!",
                                "Video not available:\n" + link)
        self.Update_Download_Status()

    def Update_Download_Status(self):
        """Rewrite the download-status text widget from ``self.downloadLeft``."""
        done, total = self.downloadLeft
        self.download_count.delete('1.0', tk.END)
        # Tk stringifies inserted values, so one formatted insert yields the
        # same text as the original piecewise inserts.
        self.download_count.insert(tk.END,
                                   "Download status: %s / %s" % (done, total))
# Example #10
class Window(QMainWindow, Ui_MainWindow):
    """Qt media-player window backed by ffpyplayer's MediaPlayer.

    State flags:
      step    -- current playback position in whole seconds
      loop    -- 1 = loop/continue mode, 0 = restart-at-end mode
      tag     -- False while muted (Show() then forces volume to 0)
      flag    -- True while stopped / no active player
      listtag -- playlist panel visibility
      fulltag -- False while in fullscreen
    """

    def __init__(self):
        """Set up the UI, load the saved playlist, and center the window."""
        super().__init__()
        self.setupUi(self)
        self.Listadd()
        self.step = 0
        self.loop = 1
        self.tag = self.flag = self.listtag = self.fulltag = True
        # Center the window on the screen.
        screen = QDesktopWidget().screenGeometry()
        size = self.geometry()
        self.move(int((screen.width() - size.width()) / 2),
                  int((screen.height() - size.height()) / 2))

    def keyPressEvent(self, event):
        """Keyboard shortcuts: p=playlist, t=back, l=loop, space=play/pause,
        s=stop, f=fullscreen, j=forward, m=mute, a/r=volume up/down."""
        if event.key() == Qt.Key_P:
            self.Listhide()
        if event.key() == Qt.Key_T:
            self.Fastback()
        if event.key() == Qt.Key_L:
            self.Loop()
        if event.key() == Qt.Key_Space:
            self.Play()
        if event.key() == Qt.Key_S:
            self.Stop()
        if event.key() == Qt.Key_F:
            self.Full()
        if event.key() == Qt.Key_J:
            self.Fastforward()
        if event.key() == Qt.Key_M:
            self.Mute()
        if event.key() == Qt.Key_A:
            self.svolume.setValue(self.svolume.value() + 1)
        if event.key() == Qt.Key_R:
            self.svolume.setValue(self.svolume.value() - 1)

    def eventFilter(self, sender, event):
        """Resync the backing list after a drag-reorder finishes.

        ChildRemoved fires when the list widget's drag operation completes.
        """
        if (event.type() == event.ChildRemoved):
            self.Moved()
        return False

    def Listmenu(self, position):
        """Show the right-click context menu for the playlist widget."""
        lm = QMenu()
        addact = QAction("添加到播放列表", self, triggered=self.Add)
        removeact = QAction("从播放列表移除", self, triggered=self.Remove)
        renameact = QAction('重命名', self, triggered=self.Rename)
        clearact = QAction('清空播放列表', self, triggered=self.Clear)
        saveact = QAction('保存当前播放列表', self, triggered=self.Saved)
        lm.addAction(addact)
        # Remove/rename only make sense when the click hit an item.
        if self.list.itemAt(position):
            lm.addAction(removeact)
            lm.addAction(renameact)
        lm.addAction(clearact)
        lm.addAction(saveact)
        lm.exec_(self.list.mapToGlobal(position))

    def Listadd(self):
        """Load the saved playlist from CPlayerlist.txt.

        Each line is "display name,file path"; names go into the list widget
        and paths into the parallel list ``self.l``.
        """
        self.l = []
        self.list.installEventFilter(self)
        if os.path.isfile('CPlayerlist.txt'):
            with open('CPlayerlist.txt') as f:
                for i in f:
                    i = i.strip()
                    # Split on the first comma: name before, path after.
                    name = i[0:i.find(',')]
                    filelist = i[i.find(',') + 1:len(i)]
                    self.list.addItem(name)
                    self.l.append(filelist)

    def Add(self):
        """Let the user pick media files and append them to the playlist."""
        filelists, _ = QFileDialog.getOpenFileNames(self, '添加到播放列表', '.',
                                                    '媒体文件(*)')
        for filelist in filelists:
            # Display name = file name without directory or extension.
            name = filelist[filelist.rfind('/') + 1:filelist.rfind('.')]
            self.list.addItem(name)
            self.l.append(filelist)

    def Remove(self):
        """Remove all selected rows from the playlist (widget and path list)."""
        ltmp = []
        for i in self.list.selectedIndexes():
            ltmp.append(i.row())
        # Delete from the highest index down so earlier removals don't
        # shift the remaining indices.
        ltmp.sort(reverse=True)
        for j in ltmp:
            self.list.takeItem(j)
            self.l.pop(j)

    def Rename(self):
        """Make the current playlist item editable and start in-place editing."""
        item = self.list.item(self.list.currentRow())
        item.setFlags(item.flags() | Qt.ItemIsEditable)
        self.list.editItem(item)

    def Clear(self):
        """Empty the playlist and delete the saved playlist file."""
        self.l = []
        self.list.clear()
        if os.path.isfile('CPlayerlist.txt'):
            os.remove('CPlayerlist.txt')

    def Drag(self):
        """Snapshot the current names/paths before a drag-reorder starts."""
        self.tmp1 = []
        self.tmp2 = self.l[:]
        for i in range(self.list.count()):
            self.tmp1.append(self.list.item(i).text())

    def Moved(self):
        """After a drag-reorder, realign ``self.l`` with the new widget order
        using the pre-drag snapshot taken by Drag()."""
        for i in range(self.list.count()):
            if self.list.item(i).text() == self.tmp1[i]:
                continue
            else:
                self.l[i] = self.tmp2[self.tmp1.index(
                    self.list.item(i).text())]

    def Saved(self):
        """Write the playlist to CPlayerlist.txt as "name,path" lines."""
        with open('CPlayerlist.txt', 'w') as f:
            for i in range(self.list.count()):
                f.write('%s,%s\n' % (self.list.item(i).text(), self.l[i]))
        QMessageBox.information(self, '保存', '播放列表保存成功!')

    def Listhide(self):
        """Toggle visibility of the playlist panel."""
        if self.listtag:
            self.frame.hide()
            self.listtag = False
        else:
            self.frame.show()
            self.listtag = True

    def Loop(self):
        """Toggle loop mode and update the loop button's icon/tooltip."""
        if self.loop == 0:
            self.loop = 1
            self.bloop.setIcon(QIcon(r'img\withloop.png'))
            self.bloop.setToolTip('循环播放,快捷键“l”')
        else:
            self.loop = 0
            self.bloop.setIcon(QIcon(r'img\withoutloop.png'))
            self.bloop.setToolTip('取消循环,快捷键“l”')

    def Play(self):
        """Play/pause handler.

        Three cases: no active player (start one), same item (toggle pause),
        or a different item selected (switch to it from position 0).
        Two QTimers drive playback: ``timer`` repaints frames every 50 ms and
        ``steptimer`` advances the seconds counter once per second.
        """
        if self.flag:
            # No active player: create one for the selected item.
            try:
                self.playitem = self.l[self.list.currentRow()]
                if os.path.isfile("%s" % self.playitem):
                    self.player = MediaPlayer("%s" % self.playitem)
                    self.timer = QTimer()
                    self.timer.start(50)
                    self.timer.timeout.connect(self.Show)
                    self.steptimer = QTimer()
                    self.steptimer.start(1000)
                    self.steptimer.timeout.connect(self.Step)
                    self.flag = False
                    self.bplay.setIcon(QIcon(r'img\pause.png'))
                    self.bplay.setToolTip('暂停,快捷键“Space”')
                else:
                    QMessageBox.warning(self, '错误', '找不到要播放的文件!')
            except:
                QMessageBox.warning(self, '错误', '找不到要播放的文件!')
        else:
            if self.l[self.list.currentRow()] == self.playitem:
                # Same item: toggle pause and sync timers/button state.
                self.player.toggle_pause()
                if self.player.get_pause():
                    self.timer.stop()
                    self.steptimer.stop()
                    self.bplay.setIcon(QIcon(r'img\play.png'))
                    self.bplay.setToolTip('播放,快捷键“Space”')
                else:
                    self.timer.start()
                    self.steptimer.start()
                    self.bplay.setIcon(QIcon(r'img\pause.png'))
                    self.bplay.setToolTip('暂停,快捷键“Space”')
            else:
                # Different item selected: switch to it from the beginning.
                self.playitem = self.l[self.list.currentRow()]
                if os.path.isfile("%s" % self.playitem):
                    self.step = 0
                    self.stime.setValue(0)
                    self.player = MediaPlayer("%s" % self.playitem)
                    self.timer.start()
                    self.steptimer.start()
                    self.bplay.setIcon(QIcon(r'img\pause.png'))
                    self.bplay.setToolTip('暂停,快捷键“Space”')
                else:
                    QMessageBox.warning(self, '错误', '找不到要播放的文件!')

    def Show(self):
        """Timer slot (50 ms): apply volume, paint the next frame, and
        refresh the elapsed/total time label."""
        if self.tag:
            self.player.set_volume(self.svolume.value() / 100)
        else:
            # Muted: force silence regardless of the slider.
            self.player.set_volume(0)
        frame, self.val = self.player.get_frame()
        self.lmedia.setPixmap(QPixmap(''))
        if self.val != 'eof' and frame is not None:
            img, t = frame
            data = img.to_bytearray()[0]
            width, height = img.get_size()
            qimg = QImage(data, width, height, QImage.Format_RGB888)
            self.lmedia.setPixmap(QPixmap.fromImage(qimg))
        self.mediatime = self.player.get_metadata()['duration']
        if self.mediatime:
            self.stime.setMaximum(int(self.mediatime))
            mediamin, mediasec = divmod(self.mediatime, 60)
            mediahour, mediamin = divmod(mediamin, 60)
            playmin, playsec = divmod(self.step, 60)
            playhour, playmin = divmod(playmin, 60)
            self.ltime.setText(
                '%02d:%02d:%02d/%02d:%02d:%02d' %
                (playhour, playmin, playsec, mediahour, mediamin, mediasec))

    def Stop(self):
        """Stop playback, close the player, and reset all playback state."""
        if self.flag == False:
            self.player.close_player()
            self.timer.stop()
            self.steptimer.stop()
            self.step = 0
            self.loop = 1
            self.flag = True
            self.stime.setValue(0)
            self.ltime.setText('')
            self.bplay.setIcon(QIcon(r'img\play.png'))
            self.bplay.setToolTip('播放,快捷键“Space”')
            self.lmedia.setPixmap(QPixmap(''))

    def Full(self):
        """Toggle fullscreen: hide/show the playlist and control frames."""
        if self.fulltag:
            self.frame.hide()
            self.frame_2.hide()
            self.showFullScreen()
            self.bfull.setIcon(QIcon(r'img\exitfullscreen.png'))
            self.bfull.setToolTip('退出全屏,快捷键“f”')
            self.fulltag = False
        else:
            self.frame.show()
            self.frame_2.show()
            self.showNormal()
            self.bfull.setIcon(QIcon(r'img\expandfullscreen.png'))
            self.bfull.setToolTip('全屏,快捷键“f”')
            self.fulltag = True

    def Curvol(self):
        """Remember the current slider volume (restored when unmuting)."""
        self.curvol = self.svolume.value()

    def Mute(self):
        """Toggle mute; restores the slider volume (or the remembered one
        if the slider sits at 0) when unmuting."""
        if self.flag == False:
            if self.player.get_volume() != 0:
                self.player.set_volume(0)
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('取消静音,快捷键“m”')
                self.tag = False
            else:
                if self.svolume.value() != 0:
                    self.player.set_volume(self.svolume.value() / 100)
                else:
                    self.player.set_volume(self.curvol / 100)
                    self.svolume.setValue(self.curvol)
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('静音,快捷键“m”')
                self.tag = True

    def Volume(self):
        """Volume-slider slot: apply the new volume and update the mute icon."""
        if self.flag == False:
            if self.svolume.value() == 0:
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('取消静音,快捷键“m”')
            else:
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('静音,快捷键“m”')
            self.player.set_volume(self.svolume.value() / 100)

    def Step(self):
        """Timer slot (1 s): advance the seconds counter and handle end-of-media
        (restart when loop == 0, otherwise stop at 'eof')."""
        if self.step >= int(self.mediatime):
            self.step = int(self.mediatime)
            if self.loop == 0:
                # Restart-at-end mode: rewind and replay the same item.
                self.step = 0
                self.stime.setValue(0)
                self.flag = True
                self.Play()
            else:
                if self.val == 'eof':
                    self.Stop()
        else:
            self.step += 1
            self.stime.setValue(self.step)

    def Slidechanged(self):
        """Track the seek slider's value as the new playback position."""
        self.step = self.stime.value()

    def Slidemoved(self):
        """Seek by recreating the player at the slider position (ffmpeg 'ss')."""
        if self.flag == False:
            self.timer.start()
            self.steptimer.start()
            self.player = MediaPlayer("%s" % self.playitem,
                                      ff_opts={'ss': self.step})
            self.bplay.setIcon(QIcon(r'img\pause.png'))
            self.bplay.setToolTip('暂停,快捷键“Space”')

    def Fastforward(self):
        """Jump 10 s forward by recreating the player at the new position."""
        if self.flag == False:
            self.step += 10
            if self.step >= int(self.mediatime):
                self.stime.setValue(int(self.mediatime))
            self.timer.start()
            self.steptimer.start()
            self.player = MediaPlayer("%s" % self.playitem,
                                      ff_opts={'ss': self.step})
            self.bplay.setIcon(QIcon(r'img\pause.png'))
            self.bplay.setToolTip('暂停,快捷键“Space”')

    def Fastback(self):
        """Jump 10 s backward (clamped at 0) by recreating the player."""
        if self.flag == False:
            self.step -= 10
            if self.step <= 0:
                self.step = 0
                self.stime.setValue(0)
            self.timer.start()
            self.steptimer.start()
            self.player = MediaPlayer("%s" % self.playitem,
                                      ff_opts={'ss': self.step})
            self.bplay.setIcon(QIcon(r'img\pause.png'))
            self.bplay.setToolTip('暂停,快捷键“Space”')
# Example #11
class Window(QMainWindow, Ui_MainWindow):
    """Qt media-player window (simpler variant): the playlist stores full
    file paths directly as item text, one per line in CPlayerlist.txt.

    State flags:
      step    -- current playback position in whole seconds
      loop    -- 1 = loop/continue mode, 0 = restart-at-end mode
      tag     -- False while muted (Show() then forces volume to 0)
      flag    -- True while stopped / no active player
      hidetag -- shared show/hide flag (see NOTE on Full()).
    """

    def __init__(self):
        """Set up the UI, load the saved playlist, and center the window."""
        super().__init__()
        self.setupUi(self)
        self.Listadd()
        self.step = 0
        self.loop = 1
        self.tag = True
        self.flag = True
        self.hidetag = True
        # Center the window on the screen.
        screen = QDesktopWidget().screenGeometry()
        size = self.geometry()
        self.move(int((screen.width() - size.width()) / 2),
                  int((screen.height() - size.height()) / 2))

    def keyPressEvent(self, event):
        """Keyboard shortcuts: p=playlist, t=back, l=loop, space=play/pause,
        s=stop, f=fullscreen, j=forward, m=mute, a/r=volume up/down."""
        if event.key() == Qt.Key_P:
            self.Listhide()
        if event.key() == Qt.Key_T:
            self.Fastback()
        if event.key() == Qt.Key_L:
            self.Loop()
        if event.key() == Qt.Key_Space:
            self.Play()
        if event.key() == Qt.Key_S:
            self.Stop()
        if event.key() == Qt.Key_F:
            self.Full()
        if event.key() == Qt.Key_J:
            self.Fastforward()
        if event.key() == Qt.Key_M:
            self.Mute()
        if event.key() == Qt.Key_A:
            self.svolume.setValue(self.svolume.value() + 1)
        if event.key() == Qt.Key_R:
            self.svolume.setValue(self.svolume.value() - 1)

    def Listadd(self):
        """Load saved playlist entries (one file path per line) from
        CPlayerlist.txt into the list widget."""
        if os.path.isfile('CPlayerlist.txt'):
            with open('CPlayerlist.txt') as f:
                for filelist in f:
                    filelist = filelist.strip()
                    self.list.addItem(filelist)

    def Add(self):
        """Let the user pick media files, append them, and persist the list."""
        filelists, _ = QFileDialog.getOpenFileNames(self, '添加到播放列表', '.',
                                                    '媒体文件(*)')
        self.list.addItems(filelists)
        self.Listchanged()

    def Remove(self):
        """Remove the currently selected row and persist the list."""
        self.list.takeItem(self.list.currentRow())
        self.Listchanged()

    def Clear(self):
        """Empty the playlist and delete the saved playlist file.

        NOTE(review): unlike the sibling implementation, there is no
        os.path.isfile guard, so this raises if the file is missing —
        confirm whether that is intended.
        """
        self.list.clear()
        os.remove('CPlayerlist.txt')

    def Listchanged(self):
        """Persist the playlist: one file path per line in CPlayerlist.txt."""
        with open('CPlayerlist.txt', 'w') as f:
            for i in range(self.list.count()):
                f.write(self.list.item(i).text() + '\n')

    def Listhide(self):
        """Toggle visibility of the playlist panel."""
        if self.hidetag:
            self.frame.hide()
            self.hidetag = False
        else:
            self.frame.show()
            self.hidetag = True

    def Loop(self):
        """Toggle loop mode and update the loop button's icon/tooltip."""
        if self.loop == 0:
            self.loop = 1
            self.bloop.setIcon(QIcon(r'img\withloop.png'))
            self.bloop.setToolTip('循环播放,快捷键“l”')
        else:
            self.loop = 0
            self.bloop.setIcon(QIcon(r'img\withoutloop.png'))
            self.bloop.setToolTip('取消循环,快捷键“l”')

    def Play(self):
        """Play/pause handler.

        Three cases: no active player (start one), same item (toggle pause),
        or a different item selected (switch to it from position 0).
        ``timer`` repaints frames every 50 ms; ``steptimer`` advances the
        seconds counter once per second.
        """
        if self.flag:
            # No active player: create one for the selected item.
            try:
                self.playitem = self.list.currentItem().text()
                if os.path.isfile("%s" % self.playitem):
                    self.player = MediaPlayer("%s" % self.playitem)
                    self.timer = QTimer()
                    self.timer.start(50)
                    self.timer.timeout.connect(self.Show)
                    self.steptimer = QTimer()
                    self.steptimer.start(1000)
                    self.steptimer.timeout.connect(self.Step)
                    self.flag = False
                    self.bplay.setIcon(QIcon(r'img\pause.png'))
                    self.bplay.setToolTip('暂停,快捷键“Space”')
                else:
                    QMessageBox.warning(self, '错误', '找不到要播放的文件!')
            except:
                QMessageBox.warning(self, '错误', '找不到要播放的文件!')
        else:
            if self.list.currentItem().text() == self.playitem:
                # Same item: toggle pause and sync timers/button state.
                self.player.toggle_pause()
                if self.player.get_pause():
                    self.timer.stop()
                    self.steptimer.stop()
                    self.bplay.setIcon(QIcon(r'img\play.png'))
                    self.bplay.setToolTip('播放,快捷键“Space”')
                else:
                    self.timer.start()
                    self.steptimer.start()
                    self.bplay.setIcon(QIcon(r'img\pause.png'))
                    self.bplay.setToolTip('暂停,快捷键“Space”')
            else:
                # Different item selected: switch to it from the beginning.
                self.playitem = self.list.currentItem().text()
                if os.path.isfile("%s" % self.playitem):
                    self.step = 0
                    self.stime.setValue(0)
                    self.player = MediaPlayer("%s" % self.playitem)
                    self.timer.start()
                    self.steptimer.start()
                else:
                    QMessageBox.warning(self, '错误', '找不到要播放的文件!')

    def Show(self):
        """Timer slot (50 ms): apply volume, paint the next frame, and
        refresh the elapsed/total time label."""
        if self.tag:
            self.player.set_volume(self.svolume.value() / 100)
        else:
            # Muted: force silence regardless of the slider.
            self.player.set_volume(0)
        frame, self.val = self.player.get_frame()
        self.lmedia.setPixmap(QPixmap(''))
        if self.val != 'eof' and frame is not None:
            img, t = frame
            data = img.to_bytearray()[0]
            width, height = img.get_size()
            qimg = QImage(data, width, height, QImage.Format_RGB888)
            self.lmedia.setPixmap(QPixmap.fromImage(qimg))
        self.mediatime = self.player.get_metadata()['duration']
        if self.mediatime:
            self.stime.setMaximum(int(self.mediatime))
            mediamin, mediasec = divmod(self.mediatime, 60)
            mediahour, mediamin = divmod(mediamin, 60)
            playmin, playsec = divmod(self.step, 60)
            playhour, playmin = divmod(playmin, 60)
            self.ltime.setText(
                '%02d:%02d:%02d/%02d:%02d:%02d' %
                (playhour, playmin, playsec, mediahour, mediamin, mediasec))

    def Stop(self):
        """Stop playback, close the player, and reset all playback state."""
        if self.flag == False:
            self.player.close_player()
            self.timer.stop()
            self.steptimer.stop()
            self.step = 0
            self.loop = 1
            self.flag = True
            self.stime.setValue(0)
            self.ltime.setText('')
            self.bplay.setIcon(QIcon(r'img\play.png'))
            self.bplay.setToolTip('播放,快捷键“Space”')
            self.lmedia.setPixmap(QPixmap(''))

    def Full(self):
        """Toggle a frameless fullscreen-like mode.

        NOTE(review): this reuses ``hidetag``, the same flag Listhide()
        toggles, so hiding the playlist flips what this method does next —
        confirm whether a separate flag was intended.
        """
        if self.hidetag:
            self.setWindowFlags(Qt.FramelessWindowHint)
            rect = QApplication.desktop().geometry()
            self.setGeometry(rect)
            self.frame.hide()
            self.frame_2.hide()
            self.show()
            self.bfull.setIcon(QIcon(r'img\exitfullscreen.png'))
            self.bfull.setToolTip('退出全屏,快捷键“f”')
            self.hidetag = False
        else:
            self.setWindowFlags(Qt.Widget)
            # Restore the fixed design-time size, then re-center.
            self.setGeometry(0, 0, 1144, 705)
            self.frame.show()
            self.frame_2.show()
            screen = QDesktopWidget().screenGeometry()
            size = self.geometry()
            self.move(int((screen.width() - size.width()) / 2),
                      int((screen.height() - size.height()) / 2))
            self.show()
            self.bfull.setIcon(QIcon(r'img\expandfullscreen.png'))
            self.bfull.setToolTip('全屏,快捷键“f”')
            self.hidetag = True

    def Curvol(self):
        """Remember the current slider volume (restored when unmuting)."""
        self.curvol = self.svolume.value()

    def Mute(self):
        """Toggle mute; restores the slider volume (or the remembered one
        if the slider sits at 0) when unmuting."""
        if self.flag == False:
            if self.player.get_volume() != 0:
                self.player.set_volume(0)
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('取消静音,快捷键“m”')
                self.tag = False
            else:
                if self.svolume.value() != 0:
                    self.player.set_volume(self.svolume.value() / 100)
                else:
                    self.player.set_volume(self.curvol / 100)
                    self.svolume.setValue(self.curvol)
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('静音,快捷键“m”')
                self.tag = True

    def Volume(self):
        """Volume-slider slot: apply the new volume and update the mute icon."""
        if self.flag == False:
            if self.svolume.value() == 0:
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('取消静音,快捷键“m”')
            else:
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('静音,快捷键“m”')
            self.player.set_volume(self.svolume.value() / 100)

    def Step(self):
        """Timer slot (1 s): advance the seconds counter and handle
        end-of-media (restart when loop == 0, otherwise stop at 'eof')."""
        if self.step >= int(self.mediatime):
            self.step = int(self.mediatime)
            if self.loop == 0:
                # Restart-at-end mode: rewind and replay the same item.
                self.step = 0
                self.flag = True
                self.Play()
            else:
                if self.val == 'eof':
                    # Inline stop: tear everything down at end of media.
                    self.timer.stop()
                    self.steptimer.stop()
                    self.step = 0
                    self.loop = 1
                    self.flag = True
                    self.stime.setValue(0)
                    self.player.close_player()
                    self.bplay.setIcon(QIcon(r'img\play.png'))
                    self.bplay.setToolTip('播放,快捷键“Space”')
        else:
            self.step += 1
            self.stime.setValue(self.step)

    def Slidechanged(self):
        """Track the seek slider's value as the new playback position."""
        self.step = self.stime.value()

    def Slidemoved(self):
        """Seek by recreating the player at the slider position (ffmpeg 'ss')."""
        self.timer.start()
        self.steptimer.start()
        self.player = MediaPlayer("%s" % self.playitem,
                                  ff_opts={'ss': self.step})
        self.bplay.setIcon(QIcon(r'img\pause.png'))
        self.bplay.setToolTip('暂停,快捷键“Space”')

    def Fastforward(self):
        """Jump 10 s forward by recreating the player at the new position."""
        if self.flag == False:
            self.step += 10
            if self.step >= int(self.mediatime):
                self.stime.setValue(int(self.mediatime))
            self.timer.start()
            self.steptimer.start()
            self.player = MediaPlayer("%s" % self.playitem,
                                      ff_opts={'ss': self.step})
            self.bplay.setIcon(QIcon(r'img\pause.png'))
            self.bplay.setToolTip('暂停,快捷键“Space”')

    def Fastback(self):
        """Jump 10 s backward (clamped at 0) by recreating the player."""
        if self.flag == False:
            self.step -= 10
            if self.step <= 0:
                self.step = 0
            self.timer.start()
            self.steptimer.start()
            self.player = MediaPlayer("%s" % self.playitem,
                                      ff_opts={'ss': self.step})
            self.bplay.setIcon(QIcon(r'img\pause.png'))
            self.bplay.setToolTip('暂停,快捷键“Space”')
# Example #12
class CustomImage(KivyImage):
    """Custom image display widget.
    Enables editing operations, displaying them in real-time using a low resolution preview of the original image file.
    All editing variables are watched by the widget and it will automatically update the preview when they are changed.
    """

    # --- Source/file metadata ---
    exif = ''
    pixel_format = ''
    length = NumericProperty(0)
    framerate = ListProperty()
    video = BooleanProperty(False)
    player = ObjectProperty(None, allownone=True)  # ffpyplayer MediaPlayer when video is loaded
    position = NumericProperty(0.0)
    start_point = NumericProperty(0.0)  # video trim start as a 0-1 fraction
    end_point = NumericProperty(1.0)  # video trim end as a 0-1 fraction
    original_image = ObjectProperty()  # full-resolution PIL image
    photoinfo = ListProperty()
    original_width = NumericProperty(0)
    original_height = NumericProperty(0)
    # --- Orientation edits ---
    flip_horizontal = BooleanProperty(False)
    flip_vertical = BooleanProperty(False)
    mirror = BooleanProperty(False)
    angle = NumericProperty(0)
    rotate_angle = NumericProperty(0)
    fine_angle = NumericProperty(0)
    # --- Color/tone edits ---
    brightness = NumericProperty(0)
    shadow = NumericProperty(0)
    contrast = NumericProperty(0)
    gamma = NumericProperty(0)
    saturation = NumericProperty(0)
    temperature = NumericProperty(0)
    tint = ListProperty([1.0, 1.0, 1.0, 1.0])
    curve = ListProperty()
    # --- Cropping state (amounts are in original-image pixels) ---
    crop_top = NumericProperty(0)
    crop_bottom = NumericProperty(0)
    crop_left = NumericProperty(0)
    crop_right = NumericProperty(0)
    filter = StringProperty('')
    filter_amount = NumericProperty(0)
    autocontrast = BooleanProperty(False)
    equalize = NumericProperty(0)
    histogram = ListProperty()
    edit_image = ObjectProperty()  # low-resolution preview PIL image
    cropping = BooleanProperty(False)
    touch_point = ObjectProperty()
    active_cropping = BooleanProperty(False)
    crop_start = ListProperty()
    # --- Filter edits ---
    sharpen = NumericProperty(0)
    bilateral = NumericProperty(0.5)
    bilateral_amount = NumericProperty(0)
    median_blur = NumericProperty(0)
    vignette_amount = NumericProperty(0)
    vignette_size = NumericProperty(.5)
    edge_blur_amount = NumericProperty(0)
    edge_blur_size = NumericProperty(.5)
    edge_blur_intensity = NumericProperty(.5)
    cropper = ObjectProperty()  #Holder for the cropper overlay
    crop_controls = ObjectProperty()  #Holder for the cropper edit panel object
    adaptive_clip = NumericProperty(0)
    # --- Border overlay edits ---
    border_opacity = NumericProperty(1)
    border_image = ListProperty()
    border_tint = ListProperty([1.0, 1.0, 1.0, 1.0])
    border_x_scale = NumericProperty(.5)
    border_y_scale = NumericProperty(.5)
    crop_min = NumericProperty(100)  # minimum remaining image size in pixels
    size_multiple = NumericProperty(1)  # ratio of original size to preview size

    #Denoising variables
    denoise = BooleanProperty(False)
    luminance_denoise = NumericProperty(10)
    color_denoise = NumericProperty(10)
    search_window = NumericProperty(15)
    block_size = NumericProperty(5)

    # --- Video conversion state (plain attributes, not kivy properties) ---
    frame_number = 0
    max_frames = 0
    start_seconds = 0
    first_frame = None
    def start_video_convert(self):
        """Open the source video paused at the start, ready for frame-by-frame
        conversion, seeking ahead to the configured start point if one is set.
        """
        self.close_video()
        options = {'paused': True, 'ss': 0.0, 'an': True}
        self.player = MediaPlayer(self.source, ff_opts=options)
        self.player.set_volume(0)
        self.frame_number = 0
        trimmed = self.start_point > 0 or self.end_point < 1
        if trimmed:
            total_frames = self.length * (self.framerate[0] / self.framerate[1])
            self.max_frames = total_frames * (self.end_point - self.start_point)
        else:
            self.max_frames = 0

        # Wait for the first decoded frame; seeking before the player has
        # finished loading can crash the interpreter.
        self.first_frame = self.wait_frame()

        if self.start_point > 0:
            self.start_seconds = self.length * self.start_point
            self.first_frame = self.seek_player(self.start_seconds)

    def wait_frame(self):
        """Block until the player yields a frame, then return it."""
        while True:
            frame, _value = self.player.get_frame(force_refresh=True)
            if frame:
                return frame

    def start_seek(self, seek):
        """Request an absolute, accurate seek to `seek` seconds.

        The player is briefly unpaused so it processes the seek request,
        then paused again.
        """
        player = self.player
        player.set_pause(False)
        player.seek(pts=seek, relative=False, accurate=True)
        player.set_pause(True)

    def seek_player(self, seek):
        """Seek to `seek` seconds and wait for the player to settle.

        Polls until the decoded frame lands within two frames of the target,
        re-issuing the seek if it appears stuck, and gives up after 30 polls
        so the UI can never freeze.  Returns the last frame fetched.
        """
        self.start_seek(seek)

        fps = self.framerate[0] / self.framerate[1]
        target_frame = seek * fps

        polls_since_seek = 0
        polls_total = 0
        while True:
            polls_since_seek += 1
            polls_total += 1
            if polls_since_seek > 5:
                # The seek appears stuck; issue the request again.
                self.start_seek(seek)
                polls_since_seek = 0
            # Check whether the player has gotten within a couple frames.
            frame = self.wait_frame()
            landed_frame = frame[1] * fps
            if abs(target_frame - landed_frame) < 2 or polls_total >= 30:
                # Close enough, or the retry budget is exhausted.
                return frame

    def get_converted_frame(self):
        """Fetch the next video frame, apply all edits, and return it.

        Returns [PIL image, presentation timestamp], or None when the end of
        the file (or the configured frame limit) is reached.
        """
        if self.first_frame:
            # Consume the frame pre-fetched during load/seek.
            frame, self.first_frame = self.first_frame, None
        else:
            self.player.set_pause(False)
            frame = None
            while not frame:
                frame, value = self.player.get_frame(force_refresh=False)
                if value == 'eof':
                    return None
            self.player.set_pause(True)
        self.frame_number += 1
        if self.max_frames and self.frame_number > self.max_frames:
            return None
        raw_frame = frame[0]
        width, height = raw_frame.get_size()
        converter = SWScale(width, height, raw_frame.get_pixel_format(),
                            ofmt='rgb24')
        rgb_frame = converter.scale(raw_frame)
        pixel_data = bytes(rgb_frame.to_bytearray()[0])
        image = Image.frombuffer(mode='RGB',
                                 size=(width, height),
                                 data=pixel_data,
                                 decoder_name='raw')
        # Video frames arrive upside-down; flip them back upright.
        image = image.transpose(PIL.Image.FLIP_TOP_BOTTOM)
        if image.mode != 'RGB':
            image = image.convert('RGB')
        image = self.adjust_image(image, preview=False)
        return [image, frame[1]]

    def close_video(self):
        """Shut down and discard the media player, if one is open."""
        player = self.player
        if player:
            player.close_player()
            self.player = None

    def open_video(self):
        """Open the source video and read its basic metadata.

        Leaves the player paused one second in with audio disabled, and
        fills in length, framerate and pixel format from the stream metadata.
        """
        # Release any previously opened player first, matching the behavior
        # of start_video_convert(); otherwise reopening leaks the decoder.
        self.close_video()
        self.player = MediaPlayer(self.source,
                                  ff_opts={
                                      'paused': True,
                                      'ss': 1.0,
                                      'an': True
                                  })
        # Reuse wait_frame() instead of duplicating the polling loop;
        # metadata is only reliable once a frame has been decoded.
        self.wait_frame()
        data = self.player.get_metadata()
        self.length = data['duration']
        self.framerate = data['frame_rate']
        self.pixel_format = data['src_pix_fmt']

    def set_aspect(self, aspect_x, aspect_y):
        """Adjusts the cropping of the image to be a given aspect ratio.

        Attempts to keep the image as large as possible.

        Arguments:
            aspect_x: Horizontal aspect ratio element, numerical value.
            aspect_y: Vertical aspect ratio element, numerical value.
        """
        width = self.original_width - self.crop_left - self.crop_right
        height = self.original_height - self.crop_top - self.crop_bottom
        add_top = add_right = add_bottom = add_left = 0
        if aspect_x != width or aspect_y != height:
            current_ratio = width / height
            target_ratio = aspect_x / aspect_y
            if target_ratio > current_ratio:
                # Too tall for the target ratio: trim equally top and bottom,
                # keeping the full width.
                excess = height - (width / target_ratio)
                add_top = add_bottom = excess / 2
            else:
                # Too wide: trim equally from the sides, keeping full height.
                excess = width - (height * target_ratio)
                add_left = add_right = excess / 2
        self.crop_top = self.crop_top + add_top
        self.crop_right = self.crop_right + add_right
        self.crop_bottom = self.crop_bottom + add_bottom
        self.crop_left = self.crop_left + add_left
        self.reset_cropper()

    def crop_percent(self, side, percent):
        """Set one edge's crop amount as a fraction of the image dimension.

        The value is clamped so at least `crop_min` pixels of image remain
        between this edge and the opposite edge's crop.
        """
        width = self.original_width
        height = self.original_height
        floor = self.crop_min

        if side == 'top':
            self.crop_top = min(height * percent,
                                height - self.crop_bottom - floor)
        elif side == 'right':
            self.crop_right = min(width * percent,
                                  width - self.crop_left - floor)
        elif side == 'bottom':
            self.crop_bottom = min(height * percent,
                                   height - self.crop_top - floor)
        else:
            self.crop_left = min(width * percent,
                                 width - self.crop_right - floor)
        self.reset_cropper()
        if self.crop_controls:
            self.crop_controls.update_crop()

    def get_crop_percent(self):
        """Return the crop amounts as fractions of the original image size.

        Returns: [top, right, bottom, left] percentages.
        """
        w = self.original_width
        h = self.original_height
        return [self.crop_top / h, self.crop_right / w,
                self.crop_bottom / h, self.crop_left / w]

    def get_crop_size(self):
        """Return a human-readable summary of the cropped size and aspect."""
        cropped_width = self.original_width - self.crop_left - self.crop_right
        cropped_height = self.original_height - self.crop_top - self.crop_bottom
        cropped_aspect = round(cropped_width / cropped_height, 2)
        full_aspect = round(self.original_width / self.original_height, 2)
        return ("Size: " + str(int(cropped_width)) + "x" +
                str(int(cropped_height)) + ", Aspect: " + str(cropped_aspect) +
                " (Original: " + str(full_aspect) + ")")

    def reset_crop(self):
        """Sets the crop values back to 0 for all sides"""
        for side in ('crop_top', 'crop_bottom', 'crop_left', 'crop_right'):
            setattr(self, side, 0)
        self.reset_cropper(setup=True)

    def reset_cropper(self, setup=False):
        """Updates the position and size of the cropper overlay object."""
        if not self.cropper:
            return
        top, right, bottom, left = self.get_texture_size()

        # Crop amounts are in original-image pixels; convert them into
        # on-screen texture pixels.
        scale = self.original_width / (right - left)
        overlay_top = top - (self.crop_top / scale)
        overlay_bottom = bottom + (self.crop_bottom / scale)
        overlay_left = left + (self.crop_left / scale)
        overlay_right = right - (self.crop_right / scale)
        overlay_width = overlay_right - overlay_left
        overlay_height = overlay_top - overlay_bottom

        self.cropper.pos = [overlay_left, overlay_bottom]
        self.cropper.size = [overlay_width, overlay_height]
        if setup:
            self.cropper.max_resizable_width = overlay_width
            self.cropper.max_resizable_height = overlay_height

    def get_texture_size(self):
        """Returns a list of the texture size coordinates.

        Returns:
            List of numbers: [Top edge, Right edge, Bottom edge, Left edge]
        """
        width = self.norm_image_size[0]
        height = self.norm_image_size[1]
        # The texture is centered within the widget.
        left_edge = (self.size[0] / 2) - (width / 2)
        bottom_edge = (self.size[1] / 2) - (height / 2)
        return [bottom_edge + height, left_edge + width, bottom_edge,
                left_edge]

    def point_over_texture(self, pos):
        """Checks if the given pos (x,y) value is over the image texture.

        Returns False if not over the texture, otherwise the point
        transformed into texture coordinates.
        """
        top, right, bottom, left = self.get_texture_size()
        x, y = pos[0], pos[1]
        if not (left < x < right and bottom < y < top):
            return False
        return [x - left, y - bottom]

    def detect_crop_edges(self, first, second):
        """Given two points, this will detect the proper crop area for the image.

        Arguments:
            first: First crop corner.
            second: Second crop corner.

        Returns a list of cropping values:
            [crop_top, crop_bottom, crop_left, crop_right]
        """
        left = min(first[0], second[0])
        right = max(first[0], second[0])
        bottom = min(first[1], second[1])
        top = max(first[1], second[1])
        # Convert from display coordinates to original-image pixels.
        scale = self.original_width / self.norm_image_size[0]
        return [(self.norm_image_size[1] - top) * scale,
                bottom * scale,
                left * scale,
                (self.norm_image_size[0] - right) * scale]

    def set_crop(self, posx, posy, width, height):
        """Sets the crop values based on the cropper widget."""
        top, right, bottom, left = self.get_texture_size()

        # Convert on-screen overhang into original-image pixels; negative
        # overhang means the cropper extends past the texture, so clamp to 0.
        divisor = self.original_width / (right - left)
        for attribute, overhang in (('crop_left', posx - left),
                                    ('crop_right', right - width - posx),
                                    ('crop_top', top - height - posy),
                                    ('crop_bottom', posy - bottom)):
            setattr(self, attribute,
                    0 if overhang < 0 else overhang * divisor)
        if self.crop_controls:
            self.crop_controls.update_crop()

    # --- Kivy property observers -------------------------------------------
    # Each on_<property> callback below fires automatically when the matching
    # editing property changes, regenerating the preview so edits show up
    # immediately.
    def on_sharpen(self, *_):
        self.update_preview()

    def on_bilateral(self, *_):
        self.update_preview()

    def on_bilateral_amount(self, *_):
        self.update_preview()

    def on_median_blur(self, *_):
        self.update_preview()

    def on_border_opacity(self, *_):
        self.update_preview()

    def on_border_image(self, *_):
        self.update_preview()

    def on_border_x_scale(self, *_):
        self.update_preview()

    def on_border_y_scale(self, *_):
        self.update_preview()

    def on_vignette_amount(self, *_):
        self.update_preview()

    def on_vignette_size(self, *_):
        self.update_preview()

    def on_edge_blur_amount(self, *_):
        self.update_preview()

    def on_edge_blur_size(self, *_):
        self.update_preview()

    def on_edge_blur_intensity(self, *_):
        self.update_preview()

    def on_rotate_angle(self, *_):
        self.update_preview()

    def on_fine_angle(self, *_):
        self.update_preview()

    def on_flip_horizontal(self, *_):
        self.update_preview()

    def on_flip_vertical(self, *_):
        self.update_preview()

    def on_autocontrast(self, *_):
        self.update_preview()

    def on_adaptive_clip(self, *_):
        self.update_preview()

    def on_equalize(self, *_):
        self.update_preview()

    def on_brightness(self, *_):
        self.update_preview()

    def on_shadow(self, *_):
        self.update_preview()

    def on_gamma(self, *_):
        self.update_preview()

    def on_contrast(self, *_):
        self.update_preview()

    def on_saturation(self, *_):
        self.update_preview()

    def on_temperature(self, *_):
        self.update_preview()

    def on_curve(self, *_):
        self.update_preview()

    def on_tint(self, *_):
        self.update_preview()

    def on_border_tint(self, *_):
        self.update_preview()

    # Size changes do not require regenerating the preview image.
    def on_size(self, *_):
        pass

    def on_source(self, *_):
        """The source file has been changed, reload image and regenerate preview."""
        extension = os.path.splitext(self.source)[1].lower()
        self.video = extension in movietypes
        if self.video:
            self.open_video()
        self.reload_edit_image()
        self.update_texture(self.edit_image)

    def on_position(self, *_):
        # Intentional no-op: reload_edit_image() reads self.position directly
        # when a new video frame is needed.
        pass

    def reload_edit_image(self):
        """Regenerate the edit preview image.

        For video sources, grabs the frame at the current position and
        converts it to a PIL image; for still images, loads the file and
        applies the base rotation.  In both cases a low-resolution preview
        (75% of the window width) is stored in self.edit_image and the
        histogram update is scheduled.
        """
        if self.video:
            if not self.player:
                return
            # self.position is a 0-1 fraction of the video length.
            location = self.length * self.position
            frame = self.seek_player(location)
            frame = frame[0]
            frame_size = frame.get_size()
            pixel_format = frame.get_pixel_format()
            # Convert whatever the decoder produced into plain RGB bytes.
            frame_converter = SWScale(frame_size[0],
                                      frame_size[1],
                                      pixel_format,
                                      ofmt='rgb24')
            new_frame = frame_converter.scale(frame)
            image_data = bytes(new_frame.to_bytearray()[0])

            original_image = Image.frombuffer(mode='RGB',
                                              size=(frame_size[0],
                                                    frame_size[1]),
                                              data=image_data,
                                              decoder_name='raw')
            #for some reason, video frames are read upside-down? fix it here...
            original_image = original_image.transpose(
                PIL.Image.FLIP_TOP_BOTTOM)
            self.original_width = original_image.size[0]
            self.original_height = original_image.size[1]
            self.original_image = original_image
            image = original_image.copy()

        else:
            original_image = Image.open(self.source)
            try:
                self.exif = original_image.info.get('exif', b'')
            except:
                self.exif = ''
            # Apply the base orientation stored in self.angle before sizing.
            if self.angle != 0:
                if self.angle == 90:
                    original_image = original_image.transpose(
                        PIL.Image.ROTATE_90)
                if self.angle == 180:
                    original_image = original_image.transpose(
                        PIL.Image.ROTATE_180)
                if self.angle == 270:
                    original_image = original_image.transpose(
                        PIL.Image.ROTATE_270)
            self.original_width = original_image.size[0]
            self.original_height = original_image.size[1]
            image = original_image.copy()
            self.original_image = original_image.copy()
            original_image.close()
        # Build the low-resolution preview: 75% of the window width, keeping
        # the aspect ratio, with a 10px floor on each dimension.
        image_width = Window.width * .75
        width = int(image_width)
        height = int(image_width * (image.size[1] / image.size[0]))
        if width < 10:
            width = 10
        if height < 10:
            height = 10
        image = image.resize((width, height))
        if image.mode != 'RGB':
            image = image.convert('RGB')
        self.size_multiple = self.original_width / image.size[0]
        self.edit_image = image
        Clock.schedule_once(
            self.update_histogram
        )  #Need to delay this because kivy will mess up the drawing of it on first load.

    def update_histogram(self, *_):
        """Recompute the histogram from the current edit preview image."""
        self.histogram = self.edit_image.histogram()

    def on_texture(self, instance, value):
        """Track texture changes: update the stored size and apply mirroring.

        Arguments:
            instance: The widget whose texture changed (unused).
            value: The new texture, or None when the texture is cleared.
        """
        if value is not None:
            self.texture_size = list(value.size)
            # Only flip when a texture actually exists; previously the flip
            # ran unconditionally and raised AttributeError when the texture
            # was cleared (value is None) with mirror enabled.
            if self.mirror:
                self.texture.flip_horizontal()

    def denoise_preview(self, width, height, pos_x, pos_y):
        """Return a JPEG preview (BytesIO) of the denoise filter applied to a
        region of the original image.

        Arguments:
            width: Width of the preview region in pixels.
            height: Height of the preview region in pixels.
            pos_x: Left edge of the region in image coordinates.
            pos_y: Top edge of the region in image coordinates.
        """
        left = pos_x
        right = pos_x + width
        upper = pos_y
        # Fixed: the lower edge previously added `width`, which ignored the
        # requested height and always produced a square region.
        lower = pos_y + height
        original_image = self.original_image
        preview = original_image.crop(box=(left, upper, right, lower))
        if preview.mode != 'RGB':
            preview = preview.convert('RGB')
        preview_cv = cv2.cvtColor(numpy.array(preview), cv2.COLOR_RGB2BGR)
        preview_cv = cv2.fastNlMeansDenoisingColored(preview_cv, None,
                                                     self.luminance_denoise,
                                                     self.color_denoise,
                                                     self.search_window,
                                                     self.block_size)
        preview_cv = cv2.cvtColor(preview_cv, cv2.COLOR_BGR2RGB)
        preview = Image.fromarray(preview_cv)
        preview_bytes = BytesIO()
        preview.save(preview_bytes, 'jpeg')
        preview_bytes.seek(0)
        return preview_bytes

    def update_preview(self, denoise=False, recrop=True):
        """Update the preview image."""
        image = self.adjust_image(self.edit_image)
        if denoise and opencv:
            # Round-trip through OpenCV's BGR space for fast NLM denoising.
            bgr = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2BGR)
            bgr = cv2.fastNlMeansDenoisingColored(
                bgr, None, self.luminance_denoise,
                self.color_denoise, self.search_window, self.block_size)
            image = Image.fromarray(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))

        self.update_texture(image)
        self.histogram = image.histogram()
        if recrop:
            self.reset_cropper(setup=True)

    def adjust_image(self, image, preview=True):
        """Applies all current editing opterations to an image.
        Arguments:
            image: A PIL image.
            preview: Generate edit image in preview mode (faster)
        Returns: A PIL image.
        """

        if not preview:
            orientation = self.photoinfo[13]
            if orientation == 3 or orientation == 4:
                image = image.transpose(PIL.Image.ROTATE_180)
            elif orientation == 5 or orientation == 6:
                image = image.transpose(PIL.Image.ROTATE_90)
            elif orientation == 7 or orientation == 8:
                image = image.transpose(PIL.Image.ROTATE_270)
            if orientation in [2, 4, 5, 7]:
                image = image.transpose(PIL.Image.FLIP_LEFT_RIGHT)
            size_multiple = self.size_multiple
        else:
            size_multiple = 1

        if self.sharpen != 0:
            enhancer = ImageEnhance.Sharpness(image)
            image = enhancer.enhance(self.sharpen + 1)
        if self.median_blur != 0 and opencv:
            max_median = 10 * size_multiple
            median = int(self.median_blur * max_median)
            if median % 2 == 0:
                median = median + 1
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2BGR)
            open_cv_image = cv2.medianBlur(open_cv_image, median)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(open_cv_image)
        if self.bilateral != 0 and self.bilateral_amount != 0 and opencv:
            diameter = int(self.bilateral * 10 * size_multiple)
            if diameter < 1:
                diameter = 1
            sigma_color = self.bilateral_amount * 100 * size_multiple
            if sigma_color < 1:
                sigma_color = 1
            sigma_space = sigma_color
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2BGR)
            open_cv_image = cv2.bilateralFilter(open_cv_image, diameter,
                                                sigma_color, sigma_space)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(open_cv_image)
        if self.vignette_amount > 0 and self.vignette_size > 0:
            vignette = Image.new(mode='RGB', size=image.size, color=(0, 0, 0))
            filter_color = int((1 - self.vignette_amount) * 255)
            vignette_mixer = Image.new(mode='L',
                                       size=image.size,
                                       color=filter_color)
            draw = ImageDraw.Draw(vignette_mixer)
            shrink_x = int((self.vignette_size * (image.size[0] / 2)) -
                           (image.size[0] / 4))
            shrink_y = int((self.vignette_size * (image.size[1] / 2)) -
                           (image.size[1] / 4))
            draw.ellipse([
                0 + shrink_x, 0 + shrink_y, image.size[0] - shrink_x,
                image.size[1] - shrink_y
            ],
                         fill=255)
            vignette_mixer = vignette_mixer.filter(
                ImageFilter.GaussianBlur(radius=(self.vignette_amount * 60) +
                                         60))
            image = Image.composite(image, vignette, vignette_mixer)
        if self.edge_blur_amount > 0 and self.edge_blur_intensity > 0 and self.edge_blur_size > 0:
            blur_image = image.filter(
                ImageFilter.GaussianBlur(radius=(self.edge_blur_amount * 30)))
            filter_color = int((1 - self.edge_blur_intensity) * 255)
            blur_mixer = Image.new(mode='L',
                                   size=image.size,
                                   color=filter_color)
            draw = ImageDraw.Draw(blur_mixer)
            shrink_x = int((self.edge_blur_size * (image.size[0] / 2)) -
                           (image.size[0] / 4))
            shrink_y = int((self.edge_blur_size * (image.size[1] / 2)) -
                           (image.size[1] / 4))
            draw.ellipse([
                0 + shrink_x, 0 + shrink_y, image.size[0] - shrink_x,
                image.size[1] - shrink_y
            ],
                         fill=255)
            blur_mixer = blur_mixer.filter(
                ImageFilter.GaussianBlur(radius=(self.edge_blur_amount * 30)))
            image = Image.composite(image, blur_image, blur_mixer)
        if self.crop_top != 0 or self.crop_bottom != 0 or self.crop_left != 0 or self.crop_right != 0:
            if preview:
                overlay = Image.new(mode='RGB',
                                    size=image.size,
                                    color=(0, 0, 0))
                divisor = self.original_width / image.size[0]
                draw = ImageDraw.Draw(overlay)
                draw.rectangle(
                    [0, 0, (self.crop_left / divisor), image.size[1]],
                    fill=(255, 255, 255))
                draw.rectangle(
                    [0, 0, image.size[0], (self.crop_top / divisor)],
                    fill=(255, 255, 255))
                draw.rectangle([(image.size[0] -
                                 (self.crop_right / divisor)), 0,
                                (image.size[0]), image.size[1]],
                               fill=(255, 255, 255))
                draw.rectangle([
                    0, (image.size[1] - (self.crop_bottom / divisor)),
                    image.size[0], image.size[1]
                ],
                               fill=(255, 255, 255))
                bright = ImageEnhance.Brightness(overlay)
                overlay = bright.enhance(.333)
                image = ImageChops.subtract(image, overlay)
            else:
                if self.crop_left >= image.size[0]:
                    crop_left = 0
                else:
                    crop_left = int(self.crop_left)
                if self.crop_top >= image.size[1]:
                    crop_top = 0
                else:
                    crop_top = int(self.crop_top)
                if self.crop_right >= image.size[0]:
                    crop_right = image.size[0]
                else:
                    crop_right = int(image.size[0] - self.crop_right)
                if self.crop_bottom >= image.size[1]:
                    crop_bottom = image.size[1]
                else:
                    crop_bottom = int(image.size[1] - self.crop_bottom)
                if self.video:
                    #ensure that image size is divisible by 2
                    new_width = crop_right - crop_left
                    new_height = crop_bottom - crop_top
                    if new_width % 2 == 1:
                        if crop_right < image.size[0]:
                            crop_right = crop_right + 1
                        else:
                            crop_right = crop_right - 1
                    if new_height % 2 == 1:
                        if crop_bottom < image.size[1]:
                            crop_bottom = crop_bottom + 1
                        else:
                            crop_bottom = crop_bottom - 1
                image = image.crop(
                    (crop_left, crop_top, crop_right, crop_bottom))
        if self.flip_horizontal:
            image = image.transpose(PIL.Image.FLIP_LEFT_RIGHT)
        if self.flip_vertical:
            image = image.transpose(PIL.Image.FLIP_TOP_BOTTOM)
        if self.rotate_angle != 0:
            if self.rotate_angle == 90:
                image = image.transpose(PIL.Image.ROTATE_270)
            if self.rotate_angle == 180:
                image = image.transpose(PIL.Image.ROTATE_180)
            if self.rotate_angle == 270:
                image = image.transpose(PIL.Image.ROTATE_90)
        if self.fine_angle != 0:
            total_angle = -self.fine_angle * 10
            angle_radians = math.radians(abs(total_angle))
            width, height = rotated_rect_with_max_area(image.size[0],
                                                       image.size[1],
                                                       angle_radians)
            x = int((image.size[0] - width) / 2)
            y = int((image.size[1] - height) / 2)
            if preview:
                image = image.rotate(total_angle, expand=False)
            else:
                image = image.rotate(total_angle,
                                     resample=PIL.Image.BICUBIC,
                                     expand=False)
            image = image.crop((x, y, image.size[0] - x, image.size[1] - y))
        if self.autocontrast:
            image = ImageOps.autocontrast(image)
        if self.equalize != 0:
            equalize_image = ImageOps.equalize(image)
            image = Image.blend(image, equalize_image, self.equalize)
        temperature = int(round(abs(self.temperature) * 100))
        if temperature != 0:
            temperature = temperature - 1
            if self.temperature > 0:
                kelvin = negative_kelvin[99 - temperature]
            else:
                kelvin = positive_kelvin[temperature]
            matrix = ((kelvin[0] / 255.0), 0.0, 0.0, 0.0, 0.0,
                      (kelvin[1] / 255.0), 0.0, 0.0, 0.0, 0.0,
                      (kelvin[2] / 255.0), 0.0)
            image = image.convert('RGB', matrix)
        if self.brightness != 0:
            enhancer = ImageEnhance.Brightness(image)
            image = enhancer.enhance(1 + self.brightness)
        if self.shadow != 0:
            if self.shadow < 0:
                floor = int(abs(self.shadow) * 128)
                table = [0] * floor
                remaining_length = 256 - floor
                for index in range(0, remaining_length):
                    value = int(round((index / remaining_length) * 256))
                    table.append(value)
                lut = table * 3
            else:
                floor = int(abs(self.shadow) * 128)
                table = []
                for index in range(0, 256):
                    percent = 1 - (index / 255)
                    value = int(round(index + (floor * percent)))
                    table.append(value)
                lut = table * 3
            image = image.point(lut)

        if self.gamma != 0:
            if self.gamma == -1:
                gamma = 99999999999999999
            elif self.gamma < 0:
                gamma = 1 / (self.gamma + 1)
            elif self.gamma > 0:
                gamma = 1 / ((self.gamma + 1) * (self.gamma + 1))
            else:
                gamma = 1
            lut = [pow(x / 255, gamma) * 255 for x in range(256)]
            lut = lut * 3
            image = image.point(lut)
        if self.contrast != 0:
            enhancer = ImageEnhance.Contrast(image)
            image = enhancer.enhance(1 + self.contrast)
        if self.saturation != 0:
            enhancer = ImageEnhance.Color(image)
            image = enhancer.enhance(1 + self.saturation)
        if self.tint != [1.0, 1.0, 1.0, 1.0]:
            matrix = (self.tint[0], 0.0, 0.0, 0.0, 0.0, self.tint[1], 0.0, 0.0,
                      0.0, 0.0, self.tint[2], 0.0)
            image = image.convert('RGB', matrix)
        if self.curve:
            lut = self.curve * 3
            image = image.point(lut)

        if self.denoise and not preview and opencv:
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2BGR)
            open_cv_image = cv2.fastNlMeansDenoisingColored(
                open_cv_image, None, self.luminance_denoise,
                self.color_denoise, self.search_window, self.block_size)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(open_cv_image)

        if self.adaptive_clip > 0 and opencv:
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2Lab)
            channels = cv2.split(open_cv_image)
            clahe = cv2.createCLAHE(clipLimit=(self.adaptive_clip * 4),
                                    tileGridSize=(8, 8))
            clahe_image = clahe.apply(channels[0])
            channels[0] = clahe_image
            open_cv_image = cv2.merge(channels)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_Lab2RGB)
            image = Image.fromarray(open_cv_image)

        if self.border_image:
            image_aspect = image.size[0] / image.size[1]
            closest_aspect = min(self.border_image[1],
                                 key=lambda x: abs(x - image_aspect))
            index = self.border_image[1].index(closest_aspect)
            image_file = os.path.join('borders', self.border_image[2][index])
            if preview:
                resample = PIL.Image.NEAREST
            else:
                resample = PIL.Image.BICUBIC
            border_image = Image.open(image_file)
            border_crop_x = int(border_image.size[0] *
                                ((self.border_x_scale + 1) / 15))
            border_crop_y = int(border_image.size[1] *
                                ((self.border_y_scale + 1) / 15))
            border_image = border_image.crop(
                (border_crop_x, border_crop_y,
                 border_image.size[0] - border_crop_x,
                 border_image.size[1] - border_crop_y))
            border_image = border_image.resize(image.size, resample)

            if os.path.splitext(image_file)[1].lower() == '.jpg':
                alpha_file = os.path.splitext(image_file)[0] + '-mask.jpg'
                if not os.path.exists(alpha_file):
                    alpha_file = image_file
                alpha = Image.open(alpha_file)
                alpha = alpha.convert('L')
                alpha = alpha.crop((border_crop_x, border_crop_y,
                                    alpha.size[0] - border_crop_x,
                                    alpha.size[1] - border_crop_y))
                alpha = alpha.resize(image.size, resample)
            else:
                alpha = border_image.split()[-1]
                border_image = border_image.convert('RGB')
            if self.border_tint != [1.0, 1.0, 1.0, 1.0]:
                matrix = (self.border_tint[0], 0.0, 0.0, 1.0, 0.0,
                          self.border_tint[1], 0.0, 1.0, 0.0, 0.0,
                          self.border_tint[2], 1.0)
                border_image = border_image.convert('RGB', matrix)

            enhancer = ImageEnhance.Brightness(alpha)
            alpha = enhancer.enhance(self.border_opacity)
            image = Image.composite(border_image, image, alpha)

        return image

    def update_texture(self, image):
        """Push a PIL image into the widget's visible texture.

        The image is encoded to an in-memory JPEG, wrapped in a Kivy
        CoreImage, stored on ``self._coreimage``, and the texture-change
        callback is fired so the display refreshes.

        Argument:
            image: A PIL image
        """

        buffer = BytesIO()
        # Encode to JPEG in memory; CoreImage reads it back from the stream.
        image.save(buffer, 'jpeg')
        buffer.seek(0)
        self._coreimage = CoreImage(buffer, ext='jpg')
        self._on_tex_change()

    def get_full_quality(self):
        """Generate a full sized and full quality version of the source image.

        Returns: A PIL image.
        """

        image = self.original_image.copy()
        # Still images honor the stored orientation; video frames are
        # already oriented, so the transpose step is skipped for them.
        rotations = {
            90: PIL.Image.ROTATE_90,
            180: PIL.Image.ROTATE_180,
            270: PIL.Image.ROTATE_270,
        }
        if not self.video and self.angle in rotations:
            image = image.transpose(rotations[self.angle])
        if image.mode != 'RGB':
            image = image.convert('RGB')
        # Apply the full (non-preview) edit pipeline at full resolution.
        return self.adjust_image(image, preview=False)

    def close_image(self):
        """Release the file handle held by the loaded source image."""
        self.original_image.close()