예제 #1
0
def ffplay(path):
    """Play the media file at *path*, blocking until playback finishes.

    The player is stored in the module-level ``player`` global so other
    code can pause or inspect it while this function runs.
    """
    global player
    player = MediaPlayer(path)
    time.sleep(0.5)  # give the decoder a moment to open the stream
    duration = player.get_metadata()['duration']
    while True:
        # Original compared string-truncated timestamps with a redundant
        # "- 2" on both sides; a direct pts >= duration check is equivalent
        # and robust for any timestamp magnitude.
        pts = player.get_pts()
        if duration is not None and pts is not None and pts >= duration:
            time.sleep(0.5)
            player.toggle_pause()
            player.close_player()
            break
        time.sleep(0.1)  # avoid a 100%-CPU busy wait
    time.sleep(1)
예제 #2
0
파일: player.py 프로젝트: truongsofm/ycl
def create_player(url):
    """Return an audio-only MediaPlayer for the stream behind *url*.

    Exits the process if no audio stream can be resolved. Blocks for up
    to ~10 seconds until stream metadata becomes available.
    """
    stream_uri = extract_video_url(url)[0]
    if not stream_uri:
        print("Failed to get audio")
        sys.exit(1)

    # Disable video and subtitle streams: audio playback only.
    opts = {"vn": True, "sn": True}
    player = MediaPlayer(stream_uri, ff_opts=opts, loglevel='debug')

    # refer : https://github.com/kivy/kivy/blob/52d12ebf33e410c9f4798674a93cbd0db8038bf1/kivy/core/audio/audio_ffpyplayer.py#L116
    # Waiting for metadata prevents a crash when the stream has not been
    # downloaded sufficiently yet.
    player.toggle_pause()
    started = time.perf_counter()
    while player.get_metadata()['duration'] is None:
        if time.perf_counter() - started >= 10.:
            break
        time.sleep(0.005)

    return player
class SoundFFPy(Sound):
    """Kivy Sound provider implemented on top of ffpyplayer's MediaPlayer.

    The player is created lazily in load() with video/subtitle streams
    disabled, and playback is started/paused via toggle_pause() so the
    decoder keeps its state between play()/stop() calls.
    """

    @staticmethod
    def extensions():
        # Formats supported by the compiled ffpyplayer library.
        return formats_in

    def __init__(self, **kwargs):
        self._ffplayer = None           # MediaPlayer instance, set in load()
        self.quitted = False            # True once the player reported 'quit'
        self._log_callback_set = False  # whether we installed the global log cb
        self._state = ''                # internal state: '', 'paused', 'playing'
        self.state = 'stop'             # public Kivy Sound state

        # Install the global ffpyplayer log callback exactly once.
        if not get_log_callback():
            set_log_callback(_log_callback)
            self._log_callback_set = True

        super(SoundFFPy, self).__init__(**kwargs)

    def __del__(self):
        self.unload()
        if self._log_callback_set:
            set_log_callback(None)

    def _player_callback(self, selector, value):
        """Callback invoked by ffpyplayer from its internal thread.

        Work is rescheduled onto the Kivy main thread via Clock.
        """
        if self._ffplayer is None:
            return
        if selector == 'quit':

            def close(*args):
                self.quitted = True
                self.unload()

            Clock.schedule_once(close, 0)
        elif selector == 'eof':
            Clock.schedule_once(self._do_eos, 0)

    def load(self):
        """Create the audio-only player, paused, and wait for metadata."""
        self.unload()
        ff_opts = {'vn': True, 'sn': True}  # only audio
        self._ffplayer = MediaPlayer(self.source,
                                     callback=self._player_callback,
                                     loglevel='info',
                                     ff_opts=ff_opts)
        player = self._ffplayer
        player.set_volume(self.volume)
        player.toggle_pause()
        self._state = 'paused'
        # wait until loaded or failed, shouldn't take long, but just to make
        # sure metadata is available.
        # FIX: time.clock() was removed in Python 3.8; perf_counter() is the
        # documented replacement for measuring elapsed time.
        s = time.perf_counter()
        while ((not player.get_metadata()['duration']) and not self.quitted
               and time.perf_counter() - s < 10.):
            time.sleep(0.005)

    def unload(self):
        """Drop the player reference and reset all state flags.

        NOTE(review): the MediaPlayer is only dereferenced, never
        close_player()'d explicitly -- cleanup relies on its finalizer;
        confirm this is intended.
        """
        if self._ffplayer:
            self._ffplayer = None
        self._state = ''
        self.state = 'stop'
        self.quitted = False

    def play(self):
        if self._state == 'playing':
            super(SoundFFPy, self).play()
            return
        if not self._ffplayer:
            self.load()
        # The player is created paused; toggling starts playback.
        self._ffplayer.toggle_pause()
        self._state = 'playing'
        self.state = 'play'
        super(SoundFFPy, self).play()

    def stop(self):
        # Pause rather than tear down, so play() can resume quickly.
        if self._ffplayer and self._state == 'playing':
            self._ffplayer.toggle_pause()
            self._state = 'paused'
            self.state = 'stop'
        super(SoundFFPy, self).stop()

    def seek(self, position):
        if self._ffplayer is None:
            return
        self._ffplayer.seek(position, relative=False)

    def get_pos(self):
        if self._ffplayer is not None:
            return self._ffplayer.get_pts()
        return 0

    def on_volume(self, instance, volume):
        if self._ffplayer is not None:
            self._ffplayer.set_volume(volume)

    def _get_length(self):
        if self._ffplayer is None:
            return super(SoundFFPy, self)._get_length()
        return self._ffplayer.get_metadata()['duration']

    def _do_eos(self, *args):
        # On end-of-stream: either stop, or rewind to loop.
        if not self.loop:
            self.stop()
        else:
            self.seek(0.)
예제 #4
0
class SoundFFPy(Sound):
    """Kivy Sound provider implemented on top of ffpyplayer's MediaPlayer.

    Variant that hands the player a WeakMethod callback, presumably so the
    MediaPlayer cannot keep this Sound object alive (confirm against the
    ffpyplayer callback contract).
    """

    @staticmethod
    def extensions():
        # Formats supported by the compiled ffpyplayer library.
        return formats_in

    def __init__(self, **kwargs):
        self._ffplayer = None           # MediaPlayer instance, set in load()
        self.quitted = False            # True once the player reported 'quit'
        self._log_callback_set = False  # whether we installed the global log cb
        self._state = ''                # internal state: '', 'paused', 'playing'
        self.state = 'stop'             # public Kivy Sound state
        self._callback_ref = WeakMethod(self._player_callback)

        # Install the global ffpyplayer log callback exactly once.
        if not get_log_callback():
            set_log_callback(_log_callback)
            self._log_callback_set = True

        super(SoundFFPy, self).__init__(**kwargs)

    def __del__(self):
        self.unload()
        if self._log_callback_set:
            set_log_callback(None)

    def _player_callback(self, selector, value):
        """Callback invoked by ffpyplayer from its internal thread.

        Work is rescheduled onto the Kivy main thread via Clock.
        """
        if self._ffplayer is None:
            return
        if selector == 'quit':
            def close(*args):
                self.quitted = True
                self.unload()
            Clock.schedule_once(close, 0)
        elif selector == 'eof':
            Clock.schedule_once(self._do_eos, 0)

    def load(self):
        """Create the audio-only player, paused, and wait for metadata."""
        self.unload()
        ff_opts = {'vn': True, 'sn': True}  # only audio
        self._ffplayer = MediaPlayer(self.source,
                                     callback=self._callback_ref,
                                     loglevel='info', ff_opts=ff_opts)
        player = self._ffplayer
        player.set_volume(self.volume)
        player.toggle_pause()
        self._state = 'paused'
        # wait until loaded or failed, shouldn't take long, but just to make
        # sure metadata is available.
        # FIX: time.clock() was removed in Python 3.8; perf_counter() is the
        # documented replacement for measuring elapsed time.
        s = time.perf_counter()
        while ((not player.get_metadata()['duration'])
               and not self.quitted and time.perf_counter() - s < 10.):
            time.sleep(0.005)

    def unload(self):
        """Drop the player reference and reset all state flags."""
        if self._ffplayer:
            self._ffplayer = None
        self._state = ''
        self.state = 'stop'
        self.quitted = False

    def play(self):
        if self._state == 'playing':
            super(SoundFFPy, self).play()
            return
        if not self._ffplayer:
            self.load()
        # The player is created paused; toggling starts playback.
        self._ffplayer.toggle_pause()
        self._state = 'playing'
        self.state = 'play'
        super(SoundFFPy, self).play()

    def stop(self):
        # Pause rather than tear down, so play() can resume quickly.
        if self._ffplayer and self._state == 'playing':
            self._ffplayer.toggle_pause()
            self._state = 'paused'
            self.state = 'stop'
        super(SoundFFPy, self).stop()

    def seek(self, position):
        if self._ffplayer is None:
            return
        self._ffplayer.seek(position, relative=False)

    def get_pos(self):
        if self._ffplayer is not None:
            return self._ffplayer.get_pts()
        return 0

    def on_volume(self, instance, volume):
        if self._ffplayer is not None:
            self._ffplayer.set_volume(volume)

    def _get_length(self):
        if self._ffplayer is None:
            return super(SoundFFPy, self)._get_length()
        return self._ffplayer.get_metadata()['duration']

    def _do_eos(self, *args):
        # On end-of-stream: either stop, or rewind to loop.
        if not self.loop:
            self.stop()
        else:
            self.seek(0.)
예제 #5
0
파일: test.py 프로젝트: varung/ffpyplayer
class PlayerApp(App):
    """Kivy demo app that plays the video file given as ``sys.argv[1]``.

    A worker thread (:meth:`_next_frame`) pulls decoded frames from the
    ffpyplayer MediaPlayer; a Clock trigger then blits each frame into a
    Texture on the Kivy main thread (:meth:`redraw`).
    """

    def __init__(self, **kwargs):
        super(PlayerApp, self).__init__(**kwargs)
        self.texture = None        # Texture frames are blitted into
        self.size = (0, 0)         # pixel size of the current texture
        self.next_frame = None     # most recent (image, pts) from the reader
        self._done = False         # signals the reader thread to exit
        self._lock = RLock()       # serializes player reconfiguration
        self._thread = Thread(target=self._next_frame, name='Next frame')
        self._trigger = Clock.create_trigger(self.redraw)
        self._force_refresh = False  # ask get_frame() to redraw the same pts

    def build(self):
        self.root = Root()
        return self.root

    def on_start(self):
        """Create the MediaPlayer and start the frame-reader thread."""
        # Weak reference so the player callback cannot keep the app alive.
        self.callback_ref = WeakMethod(self.callback)
        filename = sys.argv[1]
        logging.info('ffpyplayer: Playing file "{}"'.format(filename))
        # try ff_opts = {'vf':'edgedetect'} http://ffmpeg.org/ffmpeg-filters.html
        ff_opts = {}
        self.ffplayer = MediaPlayer(filename, callback=self.callback_ref,
                                    loglevel=log_level, ff_opts=ff_opts)
        self._thread.start()
        self.keyboard = Window.request_keyboard(None, self.root)
        self.keyboard.bind(on_key_down=self.on_keyboard_down)

    def resize(self):
        """Fit the video output size to the image widget, keeping aspect."""
        if self.ffplayer:
            w, h = self.ffplayer.get_metadata()['src_vid_size']
            if not h:
                return
            lock = self._lock
            lock.acquire()
            # Scale by whichever dimension is the binding one (letterbox).
            if self.root.image.width < self.root.image.height * w / float(h):
                self.ffplayer.set_size(-1, self.root.image.height)
            else:
                self.ffplayer.set_size(self.root.image.width, -1)
            lock.release()
            logging.debug('ffpyplayer: Resized video.')

    def update_pts(self, *args):
        # Keeps the seek slider moving while video display is disabled.
        if self.ffplayer:
            self.root.seek.value = self.ffplayer.get_pts()

    def on_keyboard_down(self, keyboard, keycode, text, modifiers):
        """Keyboard controls: pause, refresh, stream cycling, seek, volume."""
        if not self.ffplayer:
            return False
        lock = self._lock
        ctrl = 'ctrl' in modifiers
        if keycode[1] == 'p' or keycode[1] == 'spacebar':
            logging.info('Toggled pause.')
            self.ffplayer.toggle_pause()
        elif keycode[1] == 'r':
            logging.debug('ffpyplayer: Forcing a refresh.')
            self._force_refresh = True
        elif keycode[1] == 'v':
            logging.debug('ffpyplayer: Changing video stream.')
            lock.acquire()
            self.ffplayer.request_channel('video',
                                          'close' if ctrl else 'cycle')
            lock.release()
            Clock.unschedule(self.update_pts)
            if ctrl:    # need to continue updating pts, since video is disabled.
                Clock.schedule_interval(self.update_pts, 0.05)
        elif keycode[1] == 'a':
            logging.debug('ffpyplayer: Changing audio stream.')
            lock.acquire()
            self.ffplayer.request_channel('audio',
                                          'close' if ctrl else 'cycle')
            lock.release()
        elif keycode[1] == 't':
            logging.debug('ffpyplayer: Changing subtitle stream.')
            lock.acquire()
            self.ffplayer.request_channel('subtitle',
                                          'close' if ctrl else 'cycle')
            lock.release()
        elif keycode[1] == 'right':
            logging.debug('ffpyplayer: Seeking forward by 10s.')
            self.ffplayer.seek(10.)
        elif keycode[1] == 'left':
            logging.debug('ffpyplayer: Seeking back by 10s.')
            self.ffplayer.seek(-10.)
        elif keycode[1] == 'up':
            logging.debug('ffpyplayer: Increasing volume.')
            self.ffplayer.set_volume(self.ffplayer.get_volume() + 0.01)
            self.root.volume.value = self.ffplayer.get_volume()
        elif keycode[1] == 'down':
            logging.debug('ffpyplayer: Decreasing volume.')
            self.ffplayer.set_volume(self.ffplayer.get_volume() - 0.01)
            self.root.volume.value = self.ffplayer.get_volume()
        return True

    def touch_down(self, touch):
        """Seek to the position clicked on the seek slider."""
        if self.root.seek.collide_point(*touch.pos) and self.ffplayer:
            # Map the touch x-position (offset by the volume slider's width)
            # to a timestamp within the file's duration.
            pts = ((touch.pos[0] - self.root.volume.width) /
            self.root.seek.width * self.ffplayer.get_metadata()['duration'])
            logging.debug('ffpyplayer: Seeking to {}.'.format(pts))
            self.ffplayer.seek(pts, relative=False)
            self._force_refresh = True
            return True
        return False

    def callback(self, selector, value):
        """ffpyplayer callback; invoked from the player's internal thread."""
        if self.ffplayer is None:
            return
        if selector == 'quit':
            logging.debug('ffpyplayer: Quitting.')
            def close(*args):
                self._done = True
                self.ffplayer = None
            Clock.schedule_once(close, 0)
        # called from internal thread, it typically reads forward
        elif selector == 'display_sub':
            self.display_subtitle(*value)

    def _next_frame(self):
        """Reader-thread loop: fetch frames and schedule redraws.

        NOTE(review): ``self.ffplayer`` is captured once here; after
        callback() sets self.ffplayer = None this loop still uses the old
        reference until ``self._done`` is observed -- confirm intended.
        """
        ffplayer = self.ffplayer
        sleep = time.sleep
        trigger = self._trigger
        while not self._done:
            force = self._force_refresh
            if force:
                self._force_refresh = False
            frame, val = ffplayer.get_frame(force_refresh=force)

            if val == 'eof':
                logging.debug('ffpyplayer: Got eof.')
                sleep(1 / 30.)
            elif val == 'paused':
                logging.debug('ffpyplayer: Got paused.')
                sleep(1 / 30.)
            else:
                if frame:
                    logging.debug('ffpyplayer: Next frame: {}.'.format(val))
                    # val is the delay until this frame should be shown.
                    sleep(val)
                    self.next_frame = frame
                    trigger()
                else:
                    val = val if val else (1 / 30.)
                    logging.debug('ffpyplayer: Schedule next frame check: {}.'
                                  .format(val))
                    sleep(val)

    def redraw(self, dt=0, force_refresh=False):
        """Main-thread handler: blit the latest frame into the texture."""
        if not self.ffplayer:
            return
        if self.next_frame:
            img, pts = self.next_frame
            # Recreate the texture whenever the frame size changes.
            if img.get_size() != self.size or self.texture is None:
                self.root.image.canvas.remove_group(str(self)+'_display')
                self.texture = Texture.create(size=img.get_size(),
                                              colorfmt='rgb')
                # by adding 'vf':'vflip' to the player initialization ffmpeg
                # will do the flipping
                self.texture.flip_vertical()
                self.texture.add_reload_observer(self.reload_buffer)
                self.size = img.get_size()
                logging.debug('ffpyplayer: Creating new image texture of '
                              'size: {}.'.format(self.size))
            self.texture.blit_buffer(img.to_memoryview()[0])
            # Reassigning forces the Image widget to refresh its display.
            self.root.image.texture = None
            self.root.image.texture = self.texture
            self.root.seek.value = pts
            logging.debug('ffpyplayer: Blitted new frame with time: {}.'
                          .format(pts))

        if self.root.seek.value:
            self.root.seek.max = self.ffplayer.get_metadata()['duration']

    def display_subtitle(self, text, fmt, pts, t_start, t_end):
        pass # fmt is text (unformatted), or ass (formatted subs)

    def reload_buffer(self, *args):
        """Re-blit the last frame after an OpenGL context reload."""
        logging.debug('ffpyplayer: Reloading buffer.')
        frame = self.next_frame
        if not frame:
            return
        self.texture.blit_buffer(frame[0].to_memoryview()[0], colorfmt='rgb',
                                 bufferfmt='ubyte')
class Screen(Frame):
    '''
        Screen widget: embedded video player from local or youtube.

        Video frames are read with OpenCV and drawn onto a Tk Canvas while
        the audio track plays in parallel through an ffpyplayer MediaPlayer.
    '''
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent, bg='black')
        self.settings = {  # Initializing dictionary settings
            "width": 1024,
            "height": 576
        }
        self.settings.update(kwargs)  # Changing the default settings
        # Open the video source |temporary
        # FIX: raw string -- previously '\a' in the literal was parsed as
        # the ASCII bell character, silently corrupting the path.
        self.video_source = r'D:\jjh\project\project1_moviesite\avergers.mp4'

        # Canvas where to draw video output
        self.canvas = Canvas(self,
                             width=self.settings['width'],
                             height=self.settings['height'],
                             bg="black",
                             highlightthickness=0)
        self.canvas.pack()

        # Creating VLC player
        # NOTE(review): the VLC instance is not used by any method shown
        # here -- presumably used elsewhere; confirm before removing.
        self.instance = vlc.Instance()
        self.player = self.instance.media_player_new()

    def update(self):
        '''
            Function: Start the player and keeps drawing the canvas
        '''
        if not self.vid or not self.aux:  # If Audio or Video is missing stop everything
            self.stop()
            return None

        # Get the frames and if video and audio are running
        ret, frame = self.get_frame()
        audio_frame, val = self.aux.get_frame()

        # Drawing frames on canvas
        if self.fb == 1:  # Check if it's the first cycle, trying to make the audio start with the video
            self.photo = ImageTk.PhotoImage(
                image=Image.fromarray(frame).resize((self.settings['width'],
                                                     self.settings['height'])))
            self.canvas.create_image(0, 0, image=self.photo, anchor='nw')
            self.fb = 0
            self.aux.set_pause(False)  # Starting the audio
        elif ret and val != 'eof':
            self.photo = ImageTk.PhotoImage(
                image=Image.fromarray(frame).resize((self.settings['width'],
                                                     self.settings['height'])))
            self.canvas.create_image(0, 0, image=self.photo, anchor='nw')

        # NOTE(review): self.delay is never assigned in this class --
        # presumably set by a subclass or caller; confirm.
        self.after(self.delay,
                   self.update)  # Update for single frame, need to sync

    def get_frame(self):
        '''
            Function: Reads one frame from the capture.

            Always returns a (success, frame) tuple; the frame is converted
            to RGB on success. Previously this returned None when the
            capture was closed, crashing the tuple unpack in update().
        '''
        if self.vid.isOpened():
            ret, frame = self.vid.read()
            if ret:
                # Return a boolean success flag and the current frame converted to BGR
                return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            else:
                return (ret, None)
        return (False, None)

    def youTube(self, ID):
        '''
            Function: Gets the youtube video and starts it
        '''
        print("(TO REPLACE) : Downloading")
        yt = YouTube("https://www.youtube.com/watch?v=" + ID)
        stream = yt.streams.filter(
            progressive=True).first()  # SEE THE POSSIBLE THINGS TO DOWNLOAD
        stream.download(_path_, 'test')
        print("(TO REPLACE) : Finished")
        self.start(_path_ + '\\test.mp4')

    def start(self, _source):
        '''
            Function: Starts the player when gets input from keyboard(temporal) or Telegram
        '''
        try:  # Stopping player if is already playing for a new video
            self.stop()
        except Exception:  # best effort: nothing may be playing yet
            pass

        ff_opts = {'paused': True}  # Audio starts paused; update() unpauses it
        self.fb = 1  # Setting first cycle

        if _source == 'local':  # Checking which source use
            self.vid = cv2.VideoCapture(self.video_source)
            self.aux = MediaPlayer(self.video_source, ff_opts=ff_opts)
        else:
            self.vid = cv2.VideoCapture(_source)
            self.aux = MediaPlayer(_source, ff_opts=ff_opts)

        if not self.vid.isOpened():
            raise ValueError("Unable to open video source")

        self.update()  # Starting the player

    def stop(self):
        '''
            Function: Release and stop Video and Audio
        '''
        try:  # Stopping video
            self.vid.release()
            self.vid = None
        except Exception:
            pass
        try:  # Stopping audio
            self.aux.toggle_pause()
            self.aux = None
        except Exception:
            pass
        self.canvas.delete('all')  # Resetting canvas

    def __del__(self):
        '''
            Function: Release the video source when the object is destroyed
        '''
        # Guard: start() may never have run, or stop() may have set vid=None;
        # the original raised AttributeError/TypeError in those cases.
        vid = getattr(self, 'vid', None)
        if vid is not None and vid.isOpened():
            vid.release()
예제 #7
0
class Window(QMainWindow, Ui_MainWindow):
    """Qt media-player main window driven by ffpyplayer.

    Two QTimers drive playback: ``timer`` (50 ms) pulls and draws frames
    via Show(), and ``steptimer`` (1 s) advances the position counter via
    Step(). ``flag`` is True while no player is active; ``step`` is the
    current position in seconds.
    """
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.Listadd()
        self.step = 0   # current playback position, in seconds
        self.loop = 1   # loop-mode flag, toggled by Loop()
        self.flag = True  # True while stopped / no active player

    def Listadd(self):
        """Populate the playlist widget from CPlayerlist.txt, if present."""
        if os.path.isfile('CPlayerlist.txt'):
            with open('CPlayerlist.txt') as f:
                for filelist in f:
                    filelist = filelist.strip()
                    self.list.addItem(filelist)

    def Add(self):
        """Pick files via a dialog, append them, and persist the playlist."""
        filelists, _ = QFileDialog.getOpenFileNames(self, '添加到播放列表', '.',
                                                    '媒体文件(*)')
        self.list.addItems(filelists)
        self.Listchanged()

    def Remove(self):
        """Remove the selected playlist entry and persist the playlist."""
        self.list.takeItem(self.list.currentRow())
        self.Listchanged()

    def Clear(self):
        """Empty the playlist and delete its save file.

        NOTE(review): os.remove raises FileNotFoundError if the file does
        not exist -- confirm that is acceptable here.
        """
        self.list.clear()
        os.remove('CPlayerlist.txt')

    def Listchanged(self):
        # Persist the current playlist, one path per line.
        with open('CPlayerlist.txt', 'w') as f:
            for i in range(self.list.count()):
                f.write(self.list.item(i).text() + '\n')

    def Loop(self):
        """Toggle the loop flag and update the button icon/tooltip.

        NOTE(review): Step() restarts the track when loop == 0 and stops at
        eof when loop == 1, which seems inverted relative to the icon names
        here -- verify the intended semantics.
        """
        if self.loop == 0:
            self.loop = 1
            self.bloop.setIcon(QIcon(r'img\withloop.png'))
            self.bloop.setToolTip('循环播放')
        else:
            self.loop = 0
            self.bloop.setIcon(QIcon(r'img\withoutloop.png'))
            self.bloop.setToolTip('取消循环')

    def Play(self):
        """Start, pause/resume, or switch playback of the selected item.

        NOTE(review): the bare except hides all errors (including bugs in
        this method) behind the file-not-found message box -- consider
        narrowing it.
        """
        try:
            if self.flag:
                # No active player: create one plus the two driving timers.
                self.playitem = self.list.currentItem().text()
                self.player = MediaPlayer("%s" % self.playitem)
                self.timer = QTimer()
                self.timer.start(50)
                self.timer.timeout.connect(self.Show)
                self.steptimer = QTimer()
                self.steptimer.start(1000)
                self.steptimer.timeout.connect(self.Step)
                self.flag = False
                self.bplay.setIcon(QIcon(r'img\pause.png'))
                self.bplay.setToolTip('暂停')
            else:
                if self.list.currentItem().text() == self.playitem:
                    # Same item selected: toggle pause/resume.
                    self.player.toggle_pause()
                    if self.player.get_pause():
                        self.timer.stop()
                        self.steptimer.stop()
                        self.bplay.setIcon(QIcon(r'img\play.png'))
                        self.bplay.setToolTip('播放')
                    else:
                        self.timer.start()
                        self.steptimer.start()
                        self.bplay.setIcon(QIcon(r'img\pause.png'))
                        self.bplay.setToolTip('暂停')
                else:
                    # A different item was selected: restart with it.
                    self.step = 0
                    self.stime.setValue(0)
                    self.playitem = self.list.currentItem().text()
                    self.player = MediaPlayer("%s" % self.playitem)
                    self.timer.start()
                    self.steptimer.start()
        except:
            QMessageBox.warning(self, '错误', '找不到要播放的文件!')

    def Show(self):
        """Timer slot (50 ms): draw the next frame and update time labels.

        NOTE(review): get_metadata()['duration'] can be None right after
        the player opens, in which case int() below would raise -- confirm
        the 50 ms delay is always enough.
        """
        frame, self.val = self.player.get_frame()
        self.lmedia.setPixmap(QPixmap(''))
        if self.val != 'eof' and frame is not None:
            img, t = frame
            data = img.to_bytearray()[0]
            width, height = img.get_size()
            qimg = QImage(data, width, height, QImage.Format_RGB888)
            self.lmedia.setPixmap(QPixmap.fromImage(qimg))
        self.mediatime = self.player.get_metadata()['duration']
        self.stime.setMaximum(int(self.mediatime))
        # Render elapsed/total as HH:MM:SS/HH:MM:SS.
        mediamin, mediasec = divmod(self.mediatime, 60)
        mediahour, mediamin = divmod(mediamin, 60)
        playmin, playsec = divmod(self.step, 60)
        playhour, playmin = divmod(playmin, 60)
        self.ltime.setText(
            '%02d:%02d:%02d/%02d:%02d:%02d' %
            (playhour, playmin, playsec, mediahour, mediamin, mediasec))

    def Stop(self):
        """Close the player, stop both timers, and reset the UI."""
        if self.flag == False:
            self.player.close_player()
            self.timer.stop()
            self.steptimer.stop()
            self.step = 0
            self.loop = 1
            self.flag = True
            self.stime.setValue(0)
            self.ltime.setText('')
            self.bplay.setIcon(QIcon(r'img\play.png'))
            self.bplay.setToolTip('播放')
            self.lmedia.setPixmap(QPixmap(''))

    def Curvol(self):
        # Remember the slider volume so Mute() can restore it.
        self.curvol = self.svolume.value()

    def Mute(self):
        """Toggle between muted and the remembered volume."""
        if self.flag == False:
            if self.player.get_volume() != 0:
                self.player.set_volume(0)
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('取消静音')
            else:
                if self.svolume.value() != 0:
                    # NOTE(review): other call sites divide the slider value
                    # by 100 before set_volume -- confirm this should not be
                    # self.svolume.value() / 100 as well.
                    self.player.set_volume(self.svolume.value())
                else:
                    self.player.set_volume(self.curvol / 100)
                    self.svolume.setValue(self.curvol)
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('静音')

    def Volume(self):
        """Slider slot: apply the new volume and update the mute icon."""
        if self.flag == False:
            if self.svolume.value() == 0:
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('取消静音')
            else:
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('静音')
            self.player.set_volume(self.svolume.value() / 100)

    def Step(self):
        """Timer slot (1 s): advance the position and handle end-of-track."""
        if self.step >= int(self.mediatime):
            self.step = int(self.mediatime)
            if self.loop == 0:
                # Restart the same item from the beginning.
                self.step = 0
                self.flag = True
                self.Play()
            else:
                if self.val == 'eof':
                    # Playback finished: tear down and reset the UI.
                    self.timer.stop()
                    self.steptimer.stop()
                    self.step = 0
                    self.loop = 1
                    self.flag = True
                    self.stime.setValue(0)
                    self.player.close_player()
                    self.bplay.setIcon(QIcon(r'img\play.png'))
                    self.bplay.setToolTip('播放')
        else:
            self.step += 1
            self.stime.setValue(self.step)

    def Slidechanged(self):
        # Keep step in sync with manual slider movement.
        self.step = self.stime.value()

    def Slidemoved(self):
        """Recreate the player at the slider position (ffmpeg 'ss' seek)."""
        self.timer.start()
        self.steptimer.start()
        self.player = MediaPlayer("%s" % self.playitem,
                                  ff_opts={'ss': self.step})
        self.bplay.setIcon(QIcon(r'img\pause.png'))
        self.bplay.setToolTip('暂停')

    def Fastforward(self):
        """Jump 10 s forward by recreating the player at the new offset."""
        self.step += 10
        if self.step >= int(self.mediatime):
            self.stime.setValue(int(self.mediatime))
        self.timer.start()
        self.steptimer.start()
        self.player = MediaPlayer("%s" % self.playitem,
                                  ff_opts={'ss': self.step})
        self.bplay.setIcon(QIcon(r'img\pause.png'))
        self.bplay.setToolTip('暂停')

    def Fastback(self):
        """Jump 10 s backward by recreating the player at the new offset."""
        self.step -= 10
        if self.step <= 0:
            self.step = 0
        self.timer.start()
        self.steptimer.start()
        self.player = MediaPlayer("%s" % self.playitem,
                                  ff_opts={'ss': self.step})
        self.bplay.setIcon(QIcon(r'img\pause.png'))
        self.bplay.setToolTip('暂停')
예제 #8
0
class Track:
    """A playable track resolved from a search name or a Spotify track id.

    On construction the matching YouTube audio stream is located and, when
    possible, Spotify metadata (artists, album, genres) is fetched.
    ``fetch_type`` records how much metadata was obtained: 'full' or
    'minimal'.
    """

    def __init__(self, name=None, track_id=None):
        # Determine which attribute drives construction (search term vs id)
        # by matching the supplied value back to its local-variable name.
        # NOTE(review): this locals() introspection is fragile (it can also
        # match 'attribute' or 'self' on value collisions) -- confirm.
        self.__type = None
        for attribute in [name, track_id]:
            if attribute is not None:
                self.__type = [
                    key for key, value in locals().items()
                    if value == attribute
                ][0]
        if self.__type is None:
            raise NoAttributesSupplied

        self.track_id = track_id
        self.title = name
        self.fetch_type = None

        # Locate a YouTube stream for the title via a search-results scrape.
        # NOTE(review): with only track_id supplied, self.title is None and
        # .replace() below raises -- confirm callers always pass a name.
        print(':::fetching url')
        html = requests.get("https://www.youtube.com/results?search_query=" +
                            self.title.replace(' ', '+'))
        video = pafy.new(
            re.findall(r"watch\?v=(\S{11})", str(html.content))[0])
        best_stream = video.getbestaudio(preftype="wav", ftypestrict=False)

        self.ext = '.' + best_stream.extension
        self.title = filter_search_term(video.title).strip()
        self.url = best_stream.url
        self.filename = self.title + self.ext
        print('video title:::', filter_search_term(video.title))

        # dont repeat if all the data's been already fetched
        # NOTE(review): fetch_type was set to None just above, so this guard
        # can never trigger here -- looks copied from a fetch_metadata method.
        if not self.fetch_type is None:
            return

        # Resolve the Spotify track either directly by id or via search.
        if self.__type == 'track_id':
            track = spotify.track(self.track_id)
        elif self.__type == 'name':
            track_search = spotify.search(self.title, type='track', limit=1)
            if len(track_search['tracks']['items']) <= 0:
                print(
                    ':::track not available from spotify, doing a minimal fetch'
                )
                # Best-effort artist guess from an "Artist - Title" pattern.
                if '-' in self.title:
                    self.artists = [self.title.split('-')[0].strip()]
                else:
                    self.artists = None
                self.__artists_names = None
                self.album = None
                self.track_id = None
                self.genres = None
                self.fetch_type = 'minimal'
                return
            else:
                track = track_search['tracks']['items'][0]

        self.track_id = track['id']

        self.artists = []
        self.__artists_names = []
        for artist in track['artists']:
            self.artists.append(Artist(artist_id=artist['id']))
            self.__artists_names.append(artist['name'])

        # Genres are aggregated from every contributing artist.
        self.genres = []
        for artist in self.artists:
            for genre in artist.genres:
                self.genres.append(genre)

        self.album = Album(album_id=track['album']['id'])

        self.fetch_type = 'full'

        print(':::fetched')

    def send_notification(self):
        """Show a desktop notification with artist/album info."""
        print(":::sending notif")
        # send notification

        # fetch metadata for the track if it has'nt already been fetched
        if self.fetch_type is None:
            self.fetch_metadata()
        message = ''
        if not self.__artists_names is None:
            # Join artist names with ' and ' separators.
            message = self.__artists_names
            if len(message) == 1: pass
            elif len(message) == 2: message.insert(1, ' and ')
            else:
                increment = 1
                for i in range(len(message)):
                    if not i == len(message) - 1:
                        insert_index = i + increment
                        message.insert(insert_index, ' and ')
                        increment += insert_index + 1
            message = ''.join(message)
        if not self.album is None:
            message += f'\nfrom album {self.album.name}'
        notification.notify(
            title=self.title,
            message=message,
            app_icon=r'C:\users\gadit\downloads\music_icon0.ico',
            app_name='M E L O D I N E',
            timeout=10,
            toast=False)

    def download(self, custom=None, no_part=True):
        """Download the located stream to ``self.filename`` via youtube-dl."""
        global audio_downloader
        audio_downloader = YoutubeDL({
            #'buffersize': 512,
            #'http_chunk_size': 256,
            'audioformat': 'wav',
            'format': 'bestaudio',
            'outtmpl': self.title + self.ext,
            'extractaudio': True,
            'retries': 5,
            'continuedl': True,
            'nopart': no_part,
            'hls_prefer_native': True,
            'quiet': True
        })
        audio_downloader.extract_info(self.url)

    def play(self):
        """Download the track and play it to completion, blocking."""
        self.fetch_metadata()
        self.download(no_part=True)
        print('::: downloaded')
        # NOTE(review): threading._start_new_thread is a private API;
        # threading.Thread(...).start() is the supported equivalent.
        threading._start_new_thread(self.send_notification, ())

        self.player = MediaPlayer(self.filename)
        time.sleep(0.5)
        print('::: playing')

        last_pts = 0
        updated_pts = 0
        while True:
            updated_pts = int(float(str(self.player.get_pts())[:3])) - 3
            print(':::updated', updated_pts)
            # print(player.get_pts())

            while self.player.get_pause():
                time.sleep(0.5)
            # No pts progress since last tick: assume the stream buffered
            # out and nudge it with a pause/unpause cycle.
            if updated_pts == last_pts:
                self.player.toggle_pause()
                print("---buffered out, pausing")
                time.sleep(1)
                self.player.toggle_pause()
            if int(float(str(self.player.get_pts())[:3])) - 3 == int(
                    float(str(
                        self.player.get_metadata()['duration'])[:3])) - 3:
                print(':::breaking')
                self.player.toggle_pause()
                self.player.close_player()
                # FIX: without this break the loop kept running against a
                # closed player and the message below was unreachable.
                break

            last_pts = updated_pts
            time.sleep(1)
        print(':::finished playing')
예제 #9
0
class Window(QMainWindow, Ui_MainWindow):
    """Media-player main window (Qt UI + ffpyplayer MediaPlayer backend).

    The playlist widget ``self.list`` shows display names while the
    parallel list ``self.l`` holds the matching file paths; both are
    persisted to ``CPlayerlist.txt`` as ``name,path`` lines.

    Boolean state flags (all start True):
      tag     -- audio not muted
      flag    -- no media currently loaded/playing
      listtag -- playlist panel visible
      fulltag -- windowed (not fullscreen)
    """

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.Listadd()
        self.step = 0  # playback position in whole seconds
        self.loop = 1
        self.tag = self.flag = self.listtag = self.fulltag = True
        screen = QDesktopWidget().screenGeometry()
        size = self.geometry()
        # Center the window on the screen.
        self.move(int((screen.width() - size.width()) / 2),
                  int((screen.height() - size.height()) / 2))

    def keyPressEvent(self, event):
        """Keyboard shortcuts: p playlist, t back 10s, l loop toggle,
        space play/pause, s stop, f fullscreen, j forward 10s, m mute,
        a/r volume up/down."""
        if event.key() == Qt.Key_P:
            self.Listhide()
        if event.key() == Qt.Key_T:
            self.Fastback()
        if event.key() == Qt.Key_L:
            self.Loop()
        if event.key() == Qt.Key_Space:
            self.Play()
        if event.key() == Qt.Key_S:
            self.Stop()
        if event.key() == Qt.Key_F:
            self.Full()
        if event.key() == Qt.Key_J:
            self.Fastforward()
        if event.key() == Qt.Key_M:
            self.Mute()
        if event.key() == Qt.Key_A:
            self.svolume.setValue(self.svolume.value() + 1)
        if event.key() == Qt.Key_R:
            self.svolume.setValue(self.svolume.value() - 1)

    def eventFilter(self, sender, event):
        """Detect internal drag-and-drop reorders of the playlist widget.

        ChildRemoved fires when a drag completes, at which point Moved()
        re-syncs ``self.l`` with the new visual order.  Returns False so
        the event still propagates normally.
        """
        if (event.type() == event.ChildRemoved):
            self.Moved()
        return False

    def Listmenu(self, position):
        """Show the playlist context menu at *position* (widget coords).

        Remove/rename entries appear only when an item is under the
        cursor.
        """
        lm = QMenu()
        addact = QAction("添加到播放列表", self, triggered=self.Add)
        removeact = QAction("从播放列表移除", self, triggered=self.Remove)
        renameact = QAction('重命名', self, triggered=self.Rename)
        clearact = QAction('清空播放列表', self, triggered=self.Clear)
        saveact = QAction('保存当前播放列表', self, triggered=self.Saved)
        lm.addAction(addact)
        if self.list.itemAt(position):
            lm.addAction(removeact)
            lm.addAction(renameact)
        lm.addAction(clearact)
        lm.addAction(saveact)
        lm.exec_(self.list.mapToGlobal(position))

    def Listadd(self):
        """Load the persisted playlist (if any) into the widget and self.l.

        Each line is ``name,path`` — split at the FIRST comma so paths
        containing commas survive the round trip.
        """
        self.l = []
        self.list.installEventFilter(self)
        if os.path.isfile('CPlayerlist.txt'):
            with open('CPlayerlist.txt') as f:
                for i in f:
                    i = i.strip()
                    name = i[0:i.find(',')]
                    filelist = i[i.find(',') + 1:len(i)]
                    self.list.addItem(name)
                    self.l.append(filelist)

    def Add(self):
        """Let the user pick media files and append them to the playlist."""
        filelists, _ = QFileDialog.getOpenFileNames(self, '添加到播放列表', '.',
                                                    '媒体文件(*)')
        for filelist in filelists:
            # Display name is the basename without its extension.
            name = filelist[filelist.rfind('/') + 1:filelist.rfind('.')]
            self.list.addItem(name)
            self.l.append(filelist)

    def Remove(self):
        """Remove all selected rows (highest index first to keep indices valid)."""
        ltmp = []
        for i in self.list.selectedIndexes():
            ltmp.append(i.row())
        ltmp.sort(reverse=True)
        for j in ltmp:
            self.list.takeItem(j)
            self.l.pop(j)

    def Rename(self):
        """Make the current playlist item editable and start editing it."""
        item = self.list.item(self.list.currentRow())
        item.setFlags(item.flags() | Qt.ItemIsEditable)
        self.list.editItem(item)

    def Clear(self):
        """Empty the playlist and delete its on-disk persistence file."""
        self.l = []
        self.list.clear()
        if os.path.isfile('CPlayerlist.txt'):
            os.remove('CPlayerlist.txt')

    def Drag(self):
        """Snapshot names and paths before a drag reorder (used by Moved)."""
        self.tmp1 = []
        self.tmp2 = self.l[:]
        for i in range(self.list.count()):
            self.tmp1.append(self.list.item(i).text())

    def Moved(self):
        """Re-sync self.l with the widget order after a drag-and-drop.

        Relies on the tmp1/tmp2 snapshots taken by Drag(); assumes
        display names are unique (index() finds the first match).
        """
        for i in range(self.list.count()):
            if self.list.item(i).text() == self.tmp1[i]:
                continue
            else:
                self.l[i] = self.tmp2[self.tmp1.index(
                    self.list.item(i).text())]

    def Saved(self):
        """Persist the playlist to CPlayerlist.txt as ``name,path`` lines."""
        with open('CPlayerlist.txt', 'w') as f:
            for i in range(self.list.count()):
                f.write('%s,%s\n' % (self.list.item(i).text(), self.l[i]))
        QMessageBox.information(self, '保存', '播放列表保存成功!')

    def Listhide(self):
        """Toggle visibility of the playlist panel."""
        if self.listtag:
            self.frame.hide()
            self.listtag = False
        else:
            self.frame.show()
            self.listtag = True

    def Loop(self):
        """Toggle the loop flag and update the button icon/tooltip.

        NOTE(review): Step() replays the item when loop == 0 and stops at
        EOF when loop == 1, yet loop == 1 shows the "withloop" icon —
        the flag semantics look inverted relative to the UI; verify.
        """
        if self.loop == 0:
            self.loop = 1
            self.bloop.setIcon(QIcon(r'img\withloop.png'))
            self.bloop.setToolTip('循环播放,快捷键“l”')
        else:
            self.loop = 0
            self.bloop.setIcon(QIcon(r'img\withoutloop.png'))
            self.bloop.setToolTip('取消循环,快捷键“l”')

    def Play(self):
        """Play/pause the current playlist item.

        Three cases: nothing loaded yet (create player + timers);
        same item (toggle pause); different item (restart with new
        player, reusing the existing timers).
        """
        if self.flag:
            try:
                self.playitem = self.l[self.list.currentRow()]
                if os.path.isfile("%s" % self.playitem):
                    self.player = MediaPlayer("%s" % self.playitem)
                    self.timer = QTimer()  # ~20 fps frame/UI refresh
                    self.timer.start(50)
                    self.timer.timeout.connect(self.Show)
                    self.steptimer = QTimer()  # 1 s position counter
                    self.steptimer.start(1000)
                    self.steptimer.timeout.connect(self.Step)
                    self.flag = False
                    self.bplay.setIcon(QIcon(r'img\pause.png'))
                    self.bplay.setToolTip('暂停,快捷键“Space”')
                else:
                    QMessageBox.warning(self, '错误', '找不到要播放的文件!')
            # NOTE(review): bare except also swallows programming errors
            # (e.g. currentRow() == -1 with an empty list); consider
            # narrowing to IndexError.
            except:
                QMessageBox.warning(self, '错误', '找不到要播放的文件!')
        else:
            if self.l[self.list.currentRow()] == self.playitem:
                self.player.toggle_pause()
                if self.player.get_pause():
                    self.timer.stop()
                    self.steptimer.stop()
                    self.bplay.setIcon(QIcon(r'img\play.png'))
                    self.bplay.setToolTip('播放,快捷键“Space”')
                else:
                    self.timer.start()
                    self.steptimer.start()
                    self.bplay.setIcon(QIcon(r'img\pause.png'))
                    self.bplay.setToolTip('暂停,快捷键“Space”')
            else:
                self.playitem = self.l[self.list.currentRow()]
                if os.path.isfile("%s" % self.playitem):
                    self.step = 0
                    self.stime.setValue(0)
                    self.player = MediaPlayer("%s" % self.playitem)
                    self.timer.start()
                    self.steptimer.start()
                    self.bplay.setIcon(QIcon(r'img\pause.png'))
                    self.bplay.setToolTip('暂停,快捷键“Space”')
                else:
                    QMessageBox.warning(self, '错误', '找不到要播放的文件!')

    def Show(self):
        """Timer slot: apply volume, blit the next frame, update the clock.

        Also caches the media duration in self.mediatime (consumed by
        Step/Fastforward/Fastback) and sizes the seek slider from it.
        """
        if self.tag:
            self.player.set_volume(self.svolume.value() / 100)
        else:
            self.player.set_volume(0)
        frame, self.val = self.player.get_frame()
        self.lmedia.setPixmap(QPixmap(''))
        if self.val != 'eof' and frame is not None:
            img, t = frame
            data = img.to_bytearray()[0]
            width, height = img.get_size()
            qimg = QImage(data, width, height, QImage.Format_RGB888)
            self.lmedia.setPixmap(QPixmap.fromImage(qimg))
        self.mediatime = self.player.get_metadata()['duration']
        if self.mediatime:
            self.stime.setMaximum(int(self.mediatime))
            mediamin, mediasec = divmod(self.mediatime, 60)
            mediahour, mediamin = divmod(mediamin, 60)
            playmin, playsec = divmod(self.step, 60)
            playhour, playmin = divmod(playmin, 60)
            self.ltime.setText(
                '%02d:%02d:%02d/%02d:%02d:%02d' %
                (playhour, playmin, playsec, mediahour, mediamin, mediasec))

    def Stop(self):
        """Stop playback, release the player and reset all UI state."""
        if self.flag == False:
            self.player.close_player()
            self.timer.stop()
            self.steptimer.stop()
            self.step = 0
            self.loop = 1
            self.flag = True
            self.stime.setValue(0)
            self.ltime.setText('')
            self.bplay.setIcon(QIcon(r'img\play.png'))
            self.bplay.setToolTip('播放,快捷键“Space”')
            self.lmedia.setPixmap(QPixmap(''))

    def Full(self):
        """Toggle fullscreen, hiding/restoring the playlist and controls."""
        if self.fulltag:
            self.frame.hide()
            self.frame_2.hide()
            self.showFullScreen()
            self.bfull.setIcon(QIcon(r'img\exitfullscreen.png'))
            self.bfull.setToolTip('退出全屏,快捷键“f”')
            self.fulltag = False
        else:
            self.frame.show()
            self.frame_2.show()
            self.showNormal()
            self.bfull.setIcon(QIcon(r'img\expandfullscreen.png'))
            self.bfull.setToolTip('全屏,快捷键“f”')
            self.fulltag = True

    def Curvol(self):
        """Remember the slider volume so Mute() can restore it later."""
        self.curvol = self.svolume.value()

    def Mute(self):
        """Toggle mute; on unmute restore the slider (or remembered) volume."""
        if self.flag == False:
            if self.player.get_volume() != 0:
                self.player.set_volume(0)
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('取消静音,快捷键“m”')
                self.tag = False
            else:
                if self.svolume.value() != 0:
                    self.player.set_volume(self.svolume.value() / 100)
                else:
                    self.player.set_volume(self.curvol / 100)
                    self.svolume.setValue(self.curvol)
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('静音,快捷键“m”')
                self.tag = True

    def Volume(self):
        """Slider slot: push the new volume to the player, update mute icon."""
        if self.flag == False:
            if self.svolume.value() == 0:
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('取消静音,快捷键“m”')
            else:
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('静音,快捷键“m”')
            self.player.set_volume(self.svolume.value() / 100)

    def Step(self):
        """1-second timer slot: advance the position counter and slider.

        NOTE(review): self.mediatime is only assigned in Show(); this
        assumes Show() ran at least once before the first Step() tick.
        """
        if self.step >= int(self.mediatime):
            self.step = int(self.mediatime)
            if self.loop == 0:
                # Replay the same item from the start.
                self.step = 0
                self.stime.setValue(0)
                self.flag = True
                self.Play()
            else:
                if self.val == 'eof':
                    self.Stop()
        else:
            self.step += 1
            self.stime.setValue(self.step)

    def Slidechanged(self):
        """Keep self.step in sync with the seek slider's value."""
        self.step = self.stime.value()

    def Slidemoved(self):
        """Seek by recreating the player with ffmpeg's 'ss' start offset."""
        if self.flag == False:
            self.timer.start()
            self.steptimer.start()
            self.player = MediaPlayer("%s" % self.playitem,
                                      ff_opts={'ss': self.step})
            self.bplay.setIcon(QIcon(r'img\pause.png'))
            self.bplay.setToolTip('暂停,快捷键“Space”')

    def Fastforward(self):
        """Jump 10 s forward by recreating the player at the new offset."""
        if self.flag == False:
            self.step += 10
            if self.step >= int(self.mediatime):
                self.stime.setValue(int(self.mediatime))
            self.timer.start()
            self.steptimer.start()
            self.player = MediaPlayer("%s" % self.playitem,
                                      ff_opts={'ss': self.step})
            self.bplay.setIcon(QIcon(r'img\pause.png'))
            self.bplay.setToolTip('暂停,快捷键“Space”')

    def Fastback(self):
        """Jump 10 s backward (clamped at 0) by recreating the player."""
        if self.flag == False:
            self.step -= 10
            if self.step <= 0:
                self.step = 0
                self.stime.setValue(0)
            self.timer.start()
            self.steptimer.start()
            self.player = MediaPlayer("%s" % self.playitem,
                                      ff_opts={'ss': self.step})
            self.bplay.setIcon(QIcon(r'img\pause.png'))
            self.bplay.setToolTip('暂停,快捷键“Space”')
예제 #10
0
class Window(QMainWindow, Ui_MainWindow):
    """Media-player main window — variant that stores full file paths
    directly as the playlist item text (no separate name/path mapping).

    State flags:
      tag     -- audio not muted
      flag    -- no media currently loaded/playing
      hidetag -- NOTE(review): shared by BOTH Listhide() (playlist
                 visibility) and Full() (fullscreen toggle); toggling
                 one silently flips the other's notion of state.
    """

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.Listadd()
        self.step = 0  # playback position in whole seconds
        self.loop = 1
        self.tag = True
        self.flag = True
        self.hidetag = True
        screen = QDesktopWidget().screenGeometry()
        size = self.geometry()
        # Center the window on the screen.
        self.move(int((screen.width() - size.width()) / 2),
                  int((screen.height() - size.height()) / 2))

    def keyPressEvent(self, event):
        """Keyboard shortcuts: p playlist, t back 10s, l loop toggle,
        space play/pause, s stop, f fullscreen, j forward 10s, m mute,
        a/r volume up/down."""
        if event.key() == Qt.Key_P:
            self.Listhide()
        if event.key() == Qt.Key_T:
            self.Fastback()
        if event.key() == Qt.Key_L:
            self.Loop()
        if event.key() == Qt.Key_Space:
            self.Play()
        if event.key() == Qt.Key_S:
            self.Stop()
        if event.key() == Qt.Key_F:
            self.Full()
        if event.key() == Qt.Key_J:
            self.Fastforward()
        if event.key() == Qt.Key_M:
            self.Mute()
        if event.key() == Qt.Key_A:
            self.svolume.setValue(self.svolume.value() + 1)
        if event.key() == Qt.Key_R:
            self.svolume.setValue(self.svolume.value() - 1)

    def Listadd(self):
        """Load the persisted playlist (one path per line) into the widget."""
        if os.path.isfile('CPlayerlist.txt'):
            with open('CPlayerlist.txt') as f:
                for filelist in f:
                    filelist = filelist.strip()
                    self.list.addItem(filelist)

    def Add(self):
        """Let the user pick media files, append them and persist the list."""
        filelists, _ = QFileDialog.getOpenFileNames(self, '添加到播放列表', '.',
                                                    '媒体文件(*)')
        self.list.addItems(filelists)
        self.Listchanged()

    def Remove(self):
        """Remove the currently selected item and persist the list."""
        self.list.takeItem(self.list.currentRow())
        self.Listchanged()

    def Clear(self):
        """Empty the playlist and delete the persistence file.

        NOTE(review): unlike the Listadd() guard, this os.remove() is
        unconditional and raises FileNotFoundError if the file is absent.
        """
        self.list.clear()
        os.remove('CPlayerlist.txt')

    def Listchanged(self):
        """Rewrite CPlayerlist.txt from the current widget contents."""
        with open('CPlayerlist.txt', 'w') as f:
            for i in range(self.list.count()):
                f.write(self.list.item(i).text() + '\n')

    def Listhide(self):
        """Toggle visibility of the playlist panel (shares hidetag with Full)."""
        if self.hidetag:
            self.frame.hide()
            self.hidetag = False
        else:
            self.frame.show()
            self.hidetag = True

    def Loop(self):
        """Toggle the loop flag and update the button icon/tooltip.

        NOTE(review): Step() replays when loop == 0 and stops at EOF when
        loop == 1 — flag semantics look inverted relative to the icons.
        """
        if self.loop == 0:
            self.loop = 1
            self.bloop.setIcon(QIcon(r'img\withloop.png'))
            self.bloop.setToolTip('循环播放,快捷键“l”')
        else:
            self.loop = 0
            self.bloop.setIcon(QIcon(r'img\withoutloop.png'))
            self.bloop.setToolTip('取消循环,快捷键“l”')

    def Play(self):
        """Play/pause the current playlist item.

        Three cases: nothing loaded yet (create player + timers);
        same item (toggle pause); different item (restart with a new
        player, reusing the existing timers).
        """
        if self.flag:
            try:
                self.playitem = self.list.currentItem().text()
                if os.path.isfile("%s" % self.playitem):
                    self.player = MediaPlayer("%s" % self.playitem)
                    self.timer = QTimer()  # ~20 fps frame/UI refresh
                    self.timer.start(50)
                    self.timer.timeout.connect(self.Show)
                    self.steptimer = QTimer()  # 1 s position counter
                    self.steptimer.start(1000)
                    self.steptimer.timeout.connect(self.Step)
                    self.flag = False
                    self.bplay.setIcon(QIcon(r'img\pause.png'))
                    self.bplay.setToolTip('暂停,快捷键“Space”')
                else:
                    QMessageBox.warning(self, '错误', '找不到要播放的文件!')
            # NOTE(review): bare except also hides AttributeError when no
            # item is selected (currentItem() is None).
            except:
                QMessageBox.warning(self, '错误', '找不到要播放的文件!')
        else:
            if self.list.currentItem().text() == self.playitem:
                self.player.toggle_pause()
                if self.player.get_pause():
                    self.timer.stop()
                    self.steptimer.stop()
                    self.bplay.setIcon(QIcon(r'img\play.png'))
                    self.bplay.setToolTip('播放,快捷键“Space”')
                else:
                    self.timer.start()
                    self.steptimer.start()
                    self.bplay.setIcon(QIcon(r'img\pause.png'))
                    self.bplay.setToolTip('暂停,快捷键“Space”')
            else:
                self.playitem = self.list.currentItem().text()
                if os.path.isfile("%s" % self.playitem):
                    self.step = 0
                    self.stime.setValue(0)
                    self.player = MediaPlayer("%s" % self.playitem)
                    self.timer.start()
                    self.steptimer.start()
                else:
                    QMessageBox.warning(self, '错误', '找不到要播放的文件!')

    def Show(self):
        """Timer slot: apply volume, blit the next frame, update the clock.

        Also caches the media duration in self.mediatime (consumed by
        Step/Fastforward/Fastback) and sizes the seek slider from it.
        """
        if self.tag:
            self.player.set_volume(self.svolume.value() / 100)
        else:
            self.player.set_volume(0)
        frame, self.val = self.player.get_frame()
        self.lmedia.setPixmap(QPixmap(''))
        if self.val != 'eof' and frame is not None:
            img, t = frame
            data = img.to_bytearray()[0]
            width, height = img.get_size()
            qimg = QImage(data, width, height, QImage.Format_RGB888)
            self.lmedia.setPixmap(QPixmap.fromImage(qimg))
        self.mediatime = self.player.get_metadata()['duration']
        if self.mediatime:
            self.stime.setMaximum(int(self.mediatime))
            mediamin, mediasec = divmod(self.mediatime, 60)
            mediahour, mediamin = divmod(mediamin, 60)
            playmin, playsec = divmod(self.step, 60)
            playhour, playmin = divmod(playmin, 60)
            self.ltime.setText(
                '%02d:%02d:%02d/%02d:%02d:%02d' %
                (playhour, playmin, playsec, mediahour, mediamin, mediasec))

    def Stop(self):
        """Stop playback, release the player and reset all UI state."""
        if self.flag == False:
            self.player.close_player()
            self.timer.stop()
            self.steptimer.stop()
            self.step = 0
            self.loop = 1
            self.flag = True
            self.stime.setValue(0)
            self.ltime.setText('')
            self.bplay.setIcon(QIcon(r'img\play.png'))
            self.bplay.setToolTip('播放,快捷键“Space”')
            self.lmedia.setPixmap(QPixmap(''))

    def Full(self):
        """Toggle fullscreen via a frameless desktop-sized window.

        NOTE(review): reuses self.hidetag (also used by Listhide) and a
        hard-coded 1144x705 restore geometry.
        """
        if self.hidetag:
            self.setWindowFlags(Qt.FramelessWindowHint)
            rect = QApplication.desktop().geometry()
            self.setGeometry(rect)
            self.frame.hide()
            self.frame_2.hide()
            self.show()
            self.bfull.setIcon(QIcon(r'img\exitfullscreen.png'))
            self.bfull.setToolTip('退出全屏,快捷键“f”')
            self.hidetag = False
        else:
            self.setWindowFlags(Qt.Widget)
            self.setGeometry(0, 0, 1144, 705)
            self.frame.show()
            self.frame_2.show()
            screen = QDesktopWidget().screenGeometry()
            size = self.geometry()
            self.move(int((screen.width() - size.width()) / 2),
                      int((screen.height() - size.height()) / 2))
            self.show()
            self.bfull.setIcon(QIcon(r'img\expandfullscreen.png'))
            self.bfull.setToolTip('全屏,快捷键“f”')
            self.hidetag = True

    def Curvol(self):
        """Remember the slider volume so Mute() can restore it later."""
        self.curvol = self.svolume.value()

    def Mute(self):
        """Toggle mute; on unmute restore the slider (or remembered) volume."""
        if self.flag == False:
            if self.player.get_volume() != 0:
                self.player.set_volume(0)
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('取消静音,快捷键“m”')
                self.tag = False
            else:
                if self.svolume.value() != 0:
                    self.player.set_volume(self.svolume.value() / 100)
                else:
                    self.player.set_volume(self.curvol / 100)
                    self.svolume.setValue(self.curvol)
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('静音,快捷键“m”')
                self.tag = True

    def Volume(self):
        """Slider slot: push the new volume to the player, update mute icon."""
        if self.flag == False:
            if self.svolume.value() == 0:
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('取消静音,快捷键“m”')
            else:
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('静音,快捷键“m”')
            self.player.set_volume(self.svolume.value() / 100)

    def Step(self):
        """1-second timer slot: advance the position counter and slider.

        NOTE(review): self.mediatime is only assigned in Show(); this
        assumes Show() ran at least once before the first Step() tick.
        """
        if self.step >= int(self.mediatime):
            self.step = int(self.mediatime)
            if self.loop == 0:
                # Replay the same item from the start.
                self.step = 0
                self.flag = True
                self.Play()
            else:
                if self.val == 'eof':
                    # Inline stop/reset (mirrors Stop()).
                    self.timer.stop()
                    self.steptimer.stop()
                    self.step = 0
                    self.loop = 1
                    self.flag = True
                    self.stime.setValue(0)
                    self.player.close_player()
                    self.bplay.setIcon(QIcon(r'img\play.png'))
                    self.bplay.setToolTip('播放,快捷键“Space”')
        else:
            self.step += 1
            self.stime.setValue(self.step)

    def Slidechanged(self):
        """Keep self.step in sync with the seek slider's value."""
        self.step = self.stime.value()

    def Slidemoved(self):
        """Seek by recreating the player with ffmpeg's 'ss' start offset.

        NOTE(review): unlike Fastforward/Fastback, this is not guarded by
        ``self.flag`` and will fail if nothing was ever played.
        """
        self.timer.start()
        self.steptimer.start()
        self.player = MediaPlayer("%s" % self.playitem,
                                  ff_opts={'ss': self.step})
        self.bplay.setIcon(QIcon(r'img\pause.png'))
        self.bplay.setToolTip('暂停,快捷键“Space”')

    def Fastforward(self):
        """Jump 10 s forward by recreating the player at the new offset."""
        if self.flag == False:
            self.step += 10
            if self.step >= int(self.mediatime):
                self.stime.setValue(int(self.mediatime))
            self.timer.start()
            self.steptimer.start()
            self.player = MediaPlayer("%s" % self.playitem,
                                      ff_opts={'ss': self.step})
            self.bplay.setIcon(QIcon(r'img\pause.png'))
            self.bplay.setToolTip('暂停,快捷键“Space”')

    def Fastback(self):
        """Jump 10 s backward (clamped at 0) by recreating the player."""
        if self.flag == False:
            self.step -= 10
            if self.step <= 0:
                self.step = 0
            self.timer.start()
            self.steptimer.start()
            self.player = MediaPlayer("%s" % self.playitem,
                                      ff_opts={'ss': self.step})
            self.bplay.setIcon(QIcon(r'img\pause.png'))
            self.bplay.setToolTip('暂停,快捷键“Space”')
예제 #11
0
def process(inputDir):
    """Replay worldCamera.mp4 with gaze and ground-truth overlays.

    Decodes the video with ffpyplayer (audio is the master clock in the
    default sync mode), maps the audio pts to a recorded frame index via
    the frame-timestamps table, and displays the matching OpenCV frame
    with gaze points and ground-truth cross-hairs drawn on top.

    Keys: k/j seek +/-10 s, l/h seek +/-5 s, p pause, q quit.
    """
    cv2.namedWindow("frame", cv2.WINDOW_NORMAL)

    cap = cv2.VideoCapture(os.path.join(inputDir, 'worldCamera.mp4'))
    t2i = Timestamp2Index(os.path.join(inputDir, 'frame_timestamps.tsv'))
    ff_opts = {'vn': False, 'volume': 1.}  #{'sync':'video', 'framedrop':True}
    player = MediaPlayer(os.path.join(inputDir, 'worldCamera.mp4'),
                         ff_opts=ff_opts)
    # Metadata is filled asynchronously; wait until the frame size is known.
    while player.get_metadata()['src_vid_size'] == (0, 0):
        time.sleep(0.01)
    frame_size = player.get_metadata()['src_vid_size']
    frateInfo = player.get_metadata()['frame_rate']
    frate = float(frateInfo[0]) / frateInfo[1]  # rate is a (num, den) tuple
    print(frateInfo, frate)
    width = int(frame_size[0])
    height = int(frame_size[1])
    val = ''
    cvImg = np.zeros((height, width, 3))
    print(np.shape(cvImg))

    # Read gaze data
    gazes = {}
    with open(os.path.join(inputDir, 'gazeData_world.tsv'), 'r') as f:
        reader = DictReader(f, delimiter='\t')
        for entry in reader:
            frame_idx = int(float(entry['frame_idx']))
            confidence = float(entry['confidence'])
            try:
                # Gaze positions are normalized; scale to pixel coordinates.
                gx = float(entry['norm_pos_x']) * width
                gy = float(entry['norm_pos_y']) * height
                gaze = Gaze(gx, gy, confidence)
                if frame_idx in gazes:
                    gazes[frame_idx].append(gaze)
                else:
                    gazes[frame_idx] = [gaze]
            except Exception as e:
                sys.stderr.write(str(e) + '\n')
                sys.stderr.write('[WARNING] Problematic entry: %s\n' % (entry))

    # Read ground truth and transformation
    gt = {}
    # NOTE(review): 'transformation' is declared but never populated or
    # read below — apparently dead.
    transformation = {}
    with open(os.path.join(inputDir, 'transformations.tsv'), 'r') as f:
        reader = csv.DictReader(f, delimiter='\t')
        for entry in reader:
            frame_idx = int(entry['frame_idx'])

            # ground truth pixel position in undistorted image
            tmp = entry['gt'].split(',')
            gt[frame_idx] = (float(tmp[0]), float(tmp[1]))

    lastIdx = None
    while val != 'eof':
        frame, val = player.get_frame(True)
        if val != 'eof' and frame is not None:
            img, video_pts = frame
            #cvImg = np.reshape(np.asarray(img.to_bytearray()[0]), (height, width, 3)).copy()
            #cvImg = cv2.cvtColor(cvImg, cv2.COLOR_RGB2BGR)
            audio_pts = player.get_pts(
            )  # this is audio_pts because we're in default audio sync mode

            # assumes the frame rate is constant, which is dangerous (frame drops and what not)
            #idx = math.floor(video_pts*frate)

            # the audio is my shepherd and nothing shall I lack :-)
            # From the experience, PROP_POS_MSEC is utterly broken; let's use indexes instead
            idx = t2i.find(
                audio_pts) - 1  # opencv starts at 0; processed data at 1
            idxOffset = cap.get(cv2.CAP_PROP_POS_FRAMES) - idx
            if abs(idxOffset) > 0:
                cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
            if lastIdx is None or lastIdx != idx:
                # print(idx,cap.get(cv2.CAP_PROP_FRAME_COUNT))
                # NOTE(review): 'ret' is not checked; a failed read would
                # pass cvImg = None to the drawing calls below.
                ret, cvImg = cap.read()

                if idx in gazes:
                    for gaze in gazes[idx]:
                        gaze.draw(cvImg)

                if idx in gt:
                    # Cross-hair through the ground-truth point.
                    x = int(round(gt[idx][0]))
                    y = int(round(gt[idx][1]))
                    cv2.line(cvImg, (x, 0), (x, int(height)), (0, 255, 0), 2)
                    cv2.line(cvImg, (0, y), (int(width), y), (0, 255, 0), 2)

                # Black status bar with timestamp and frame index.
                cv2.rectangle(cvImg, (0, int(height)),
                              (int(0.25 * width), int(height) - 30), (0, 0, 0),
                              -1)
                cv2.putText(cvImg, ("%8.2f [%6d]" % (audio_pts, idx)),
                            (0, int(height) - 5), cv2.FONT_HERSHEY_PLAIN, 2,
                            (0, 255, 255), 2)

                cv2.imshow("frame", cvImg)
                if width > 1280:
                    cv2.resizeWindow('frame', 1280, 720)
                lastIdx = idx

            key = cv2.waitKey(1) & 0xFF
            if key == ord('k'):
                player.seek(audio_pts + 10, relative=False)
            if key == ord('j'):
                player.seek(max(0, audio_pts - 10), relative=False)
            if key == ord('l'):
                player.seek(audio_pts + 5, relative=False)
            if key == ord('h'):
                player.seek(max(0, audio_pts - 5), relative=False)
            if key == ord('p'):
                player.toggle_pause()
            if key == ord('q'):
                break

    cap.release()
예제 #12
0
class PlayerThread(QThread):
    """Qt worker thread that wraps an ffpyplayer MediaPlayer.

    Decodes frames in run() and pushes them to the owning widget via
    signals; navigation works over a chunk table supplied in
    ``self.config`` ({'total': n, 'chunks': [(start_ms, ...), ...]},
    chunks sorted by start time).
    """

    image_sig = pyqtSignal(QtGui.QImage)  # next frame, scaled for the label
    status_sig = pyqtSignal(bool)         # True when paused
    progress_sig = pyqtSignal(float)      # position as a fraction of duration

    def __init__(self, parent):
        super().__init__(parent)
        self.label = parent.label  # target widget; its size drives scaling
        self.image_sig.connect(parent.set_image)
        self.status_sig.connect(parent.set_status)
        self.progress_sig.connect(parent.set_progress)
        self.player = None
        self.duration = None  # seconds; fetched lazily in run()
        self.progress = 0     # last frame pts in seconds
        self.ratio_mode = Qt.KeepAspectRatio
        self.config = {}

    def set_video_name(self, video_name):
        """Open *video_name* (closing any current player) and start decoding."""
        if self.player is not None:
            self.player.close_player()
        self.player = MediaPlayer(video_name)
        self.status_sig.emit(self.player.get_pause())
        self.start()

    def set_config(self, config):
        """Install the chunk-navigation table used by next_prev/find_chunk."""
        self.config = config

    def close(self):
        """Release the player and stop the thread's event loop."""
        if self.player is not None:
            self.player.close_player()
        self.quit()

    def pause(self):
        """Force-pause playback and notify the UI."""
        if self.player is not None:
            self.player.set_pause(True)
            self.status_sig.emit(True)

    def toggle_pause(self):
        """Flip the pause state and report the new state to the UI."""
        if self.player is not None:
            self.player.toggle_pause()
            self.status_sig.emit(self.player.get_pause())

    def next_prev(self, is_forward):
        """Seek to the start of the next or previous chunk.

        NOTE(review): in the backward branch the seek is issued even when
        already at chunk 0 (restarting the current chunk), whereas the
        forward branch only seeks when a next chunk exists — presumably
        intentional ("previous" restarts), but verify.
        """
        if self.player is not None:
            chunk_position = self.find_chunk(self.progress)
            if is_forward:
                if chunk_position < self.config['total'] - 1:
                    chunk_position += 1
                    self.player.seek(self.config['chunks'][chunk_position][0] / 1000, relative=False, accurate=False)
            else:
                if chunk_position > 0:
                    chunk_position -= 1
                self.player.seek(self.config['chunks'][chunk_position][0] / 1000, relative=False, accurate=False)

    def find_chunk(self, pts):
        """Binary-search the chunk table for the chunk containing *pts*.

        *pts* is in seconds; chunk start times are in milliseconds.
        Returns 0 when no config is installed.
        """
        if self.config:
            pts_ms = int(1000 * pts)
            front = 0
            rear = self.config['total'] - 1
            chunks = self.config['chunks']
            while front != rear:
                middle = (front + rear) // 2
                if pts_ms > chunks[middle][0]:
                    if pts_ms < chunks[middle + 1][0]:
                        # pts falls inside chunk 'middle'
                        break
                    else:
                        front = middle + 1
                else:
                    rear = middle
            return (front + rear) // 2
        else:
            return 0

    def seek(self, ratio):
        """Seek to *ratio* (0..1) of the media duration, if known."""
        if self.duration is not None:
            pts = ratio * self.duration
            self.player.seek(pts, relative=False, accurate=False)

    def image_stretch(self, is_stretch):
        """Select whether frames are stretched or keep their aspect ratio."""
        if is_stretch:
            self.ratio_mode = Qt.IgnoreAspectRatio
        else:
            self.ratio_mode = Qt.KeepAspectRatio

    def run(self):
        """Decode loop: emit scaled frames and progress until EOF.

        ``val`` from get_frame() is the suggested delay before the next
        frame; sleeping for it paces playback.
        """
        val = ''
        while val != 'eof':
            frame, val = self.player.get_frame()
            if self.duration is None:
                # Duration may not be in the metadata immediately.
                self.duration = self.player.get_metadata()['duration']
            if val != 'eof' and frame is not None:
                img, t = frame
                if img is not None:
                    byte = img.to_bytearray()[0]
                    width, height = img.get_size()
                    convert_to_qt_format = QtGui.QImage(byte, width, height, QImage.Format_RGB888)
                    p = convert_to_qt_format.scaled(self.label.width(), self.label.height(), self.ratio_mode)
                    self.image_sig.emit(p)
                    self.progress = t
                    if self.duration is not None:
                        self.progress_sig.emit(t / self.duration)
                    time.sleep(val)
예제 #13
0
    def _play_thread_run(self):
        """Decode/display loop executed on the playback thread.

        Configures ffpyplayer from the instance's capture settings,
        waits for the source pixel format and the first frame, publishes
        rate/size/format metadata to the Kivy thread, then feeds every
        decoded frame to ``self.process_frame`` until ``play_state``
        becomes 'stopping' or the stream ends.

        Raises ValueError if the player cannot report a pixel format,
        frame rate, or first frame within the startup timeouts.
        """
        process_frame = self.process_frame
        # Video-clock sync, no audio/subtitles, start paused until configured.
        ff_opts = {'sync': 'video', 'an': True, 'sn': True, 'paused': True}

        ifmt, icodec = self.file_fmt, self.icodec
        use_dshow = self.use_dshow
        if ifmt:
            ff_opts['f'] = ifmt
        if use_dshow:
            ff_opts['f'] = 'dshow'
        if icodec:
            ff_opts['vcodec'] = icodec

        ipix_fmt, iw, ih, _ = self.metadata_play
        # Requested output frame size.
        ff_opts['x'] = iw
        ff_opts['y'] = ih

        lib_opts = {}
        if use_dshow:
            # DirectShow capture: pass pixel format / size / framerate
            # through libavdevice options, clamping the rate to the
            # device-supported range when known.
            rate = self.dshow_rate
            if self.dshow_opt:
                fmt, size, (rmin, rmax) = self.parse_dshow_opt(self.dshow_opt)
                lib_opts['pixel_format'] = fmt
                lib_opts['video_size'] = '{}x{}'.format(*size)
                if rate:
                    rate = min(max(rate, rmin), rmax)
                    lib_opts['framerate'] = '{}'.format(rate)
            elif rate:
                lib_opts['framerate'] = '{}'.format(rate)

        fname = self.play_filename
        if use_dshow:
            # dshow sources are addressed as 'video=<device name>'.
            fname = 'video={}'.format(self.dshow_true_filename)

        ffplayer = MediaPlayer(fname,
                               callback=self.player_callback,
                               ff_opts=ff_opts,
                               lib_opts=lib_opts)

        # wait for media to init pixel fmt
        src_fmt = ''
        s = clock()
        while self.play_state == 'starting' and clock() - s < 5.:
            src_fmt = ffplayer.get_metadata().get('src_pix_fmt')
            if src_fmt:
                break
            time.sleep(0.01)

        if not src_fmt:
            raise ValueError("Player failed, couldn't get pixel type")

        if ipix_fmt:
            # Caller-specified pixel format overrides what was detected.
            src_fmt = ipix_fmt
        # Map the source format to a supported output format
        # (unknown formats fall back to yuv420p).
        fmt = {
            'gray': 'gray',
            'rgb24': 'rgb24',
            'bgr24': 'rgb24',
            'rgba': 'rgba',
            'bgra': 'rgba'
        }.get(src_fmt, 'yuv420p')
        ffplayer.set_output_pix_fmt(fmt)

        # Player was created paused; unpause now that it's configured.
        ffplayer.toggle_pause()
        Logger.info('FFmpeg Player: input, output formats are: {}, {}'.format(
            src_fmt, fmt))

        # wait for first frame
        img = None
        s = clock()
        ivl_start = None
        while self.play_state == 'starting' and clock() - s < 5.:
            img, val = ffplayer.get_frame()
            if val == 'eof':
                raise ValueError("Player failed, reached eof")

            if img:
                ivl_start = clock()
                break
            time.sleep(0.01)

        rate = ffplayer.get_metadata().get('frame_rate')
        if rate == (0, 0) or not rate or not rate[1]:
            raise ValueError("Player failed, couldn't read frame rate")

        if not img:
            raise ValueError("Player failed, couldn't read frame")

        # ready to start
        rate = rate[0] / float(rate[1])  # (num, den) -> frames per second
        w, h = img[0].get_size()
        fmt = img[0].get_pixel_format()
        use_rt = self.use_real_time

        Clock.schedule_once(
            partial(eat_first,
                    self.update_metadata,
                    rate=rate,
                    w=w,
                    h=h,
                    fmt=fmt), 0)
        Clock.schedule_once(self.complete_start)

        # started
        process_frame(img[0], {'t': ivl_start if use_rt else img[1]})

        # Sleep granularity: 1/8 of a frame interval.
        min_sleep = 1 / (rate * 8.)
        self.setattr_in_kivy_thread('ts_play', ivl_start)
        self.setattr_in_kivy_thread('frames_played', 1)
        count = 1

        while self.play_state != 'stopping':
            img, val = ffplayer.get_frame()
            ivl_end = clock()

            # Once per second, publish the measured (real) frame rate.
            if ivl_end - ivl_start >= 1.:
                real_rate = count / (ivl_end - ivl_start)
                self.setattr_in_kivy_thread('real_rate', real_rate)
                count = 0
                ivl_start = ivl_end

            if val == 'paused':
                # Should never happen: pause was cleared above.
                raise ValueError("Player {} got {}".format(self, val))
            if val == 'eof':
                break

            if not img:
                # No frame ready; val (when set) is the suggested wait.
                time.sleep(min(val, min_sleep) if val else min_sleep)
                continue
            elif val:
                # Frame arrived early: sleep in small slices so a stop
                # request is noticed promptly.
                ts = clock()
                leftover = val
                while leftover > min_sleep and \
                        self.play_state != 'stopping':
                    time.sleep(min_sleep)
                    leftover = max(val - (clock() - ts), 0)

            count += 1
            self.increment_in_kivy_thread('frames_played')
            process_frame(img[0], {'t': ivl_end if use_rt else img[1]})
예제 #14
0
class VideoPlayer:
    """Composite video player.

    Decodes frames on a worker thread, renders a 2x2 grid of filtered
    variants of each frame, writes the composite to an output stream, and
    keeps an ffpyplayer audio channel alongside the OpenCV display window.
    """

    def __init__(self, video, trackbar_name, window_name):
        self.cur_frame = 0                    # index of the next frame to display
        self.src = video.src
        self.video = video
        self.audio = MediaPlayer(video.src)   # audio played straight from the source
        self.frame_max = video.frame_max
        self.trackbar = trackbar_name
        self.window = window_name
        self.ostream = self.init_ostream()
        self.queue = Queue(maxsize=_G.MaxQueueSize)  # decoded-frame buffer
        self.FLAG_CODEC_STOP = False          # set to stop the decoder thread
        cv2.namedWindow(self.window)
        cv2.createTrackbar(self.trackbar, self.window, 0, self.frame_max,
                           self.set_current_frame)

    def init_ostream(self):
        """Open the cv2.VideoWriter that receives the composited frames."""
        fname = make_out_filename(self.video.src)
        fourcc = cv2.VideoWriter_fourcc(*_G.VideoCodec)
        _fps = self.video.fps
        _res = (_G.CanvasWidth, _G.CanvasHeight)
        return cv2.VideoWriter(fname, fourcc, _fps, _res)

    def set_current_frame(self, n):
        """Trackbar callback; seeking is intentionally a no-op here."""
        pass

    def set_audio_frame(self, n):
        """Seek the audio channel to the timestamp of video frame ``n``."""
        t = self.video.frame2timestamp(n)
        self.audio.seek(t, False)

    def start(self):
        """Launch the decoder and audio-extraction threads; return self."""
        self.codec_t = Thread(target=self.update_codec)
        self.codec_t.daemon = True
        self.codec_t.start()
        # Deliberately NOT a daemon thread: the audio-file write should be
        # allowed to finish even if the UI loop exits first.
        _t = Thread(target=self.extract_audio)
        _t.start()
        return self

    def extract_audio(self):
        """Dump the source's audio track to disk once (skipped if cached)."""
        fname = make_audio_filename(self.src)
        if not os.path.exists(fname):
            v = mp.VideoFileClip(self.src)
            v.audio.write_audiofile(fname)

    def update_codec(self):
        """Decoder-thread body: read, composite and enqueue until EOF/stop."""
        while not self.FLAG_CODEC_STOP:
            if self.queue.full():
                # Back off briefly instead of busy-spinning at 100% CPU
                # while the consumer drains the queue.
                time.sleep(0.001)
                continue
            ret, frame = self.video.read()
            if not ret:
                self.FLAG_CODEC_STOP = True
                break  # was `return`, which skipped the end-of-stream message
            self.queue.put(self.make_frame(frame))
        print("Codec Ended")

    def frame_available(self):
        """Return True when at least one composited frame is buffered."""
        return self.queue.qsize() > 0

    def get_frame(self):
        """Pop the next composited frame, or None when the buffer is empty.

        Non-blocking (matching the other player implementations in this
        file) so the UI loop cannot hang on a stalled decoder.
        """
        if self.queue.empty():
            return None
        return self.queue.get()

    def update(self):
        """Per-tick entry point: advance one frame, then poll the keyboard."""
        self.update_frame()
        self.update_input()

    def update_frame(self):
        """Display/encode the next buffered frame unless ended or paused."""
        if self.is_ended() or _G.FLAG_PAUSE:
            return

        cv2.setTrackbarPos(self.trackbar, self.window, self.cur_frame)

        frame = self.get_frame()
        if frame is None:
            return

        cv2.imshow(self.window, frame)
        self.ostream.write(frame)

        if not _G.FLAG_PAUSE:
            self.cur_frame += 1

    def update_input(self):
        """Handle ESC (stop) and SPACE (pause toggle, mirrored to audio)."""
        key = cv2.waitKey(_G.UPS)
        if key == _G.VK_ESC:
            _G.FLAG_STOP = True
        elif key == _G.VK_SPACE:
            _G.FLAG_PAUSE ^= True
            self.audio.toggle_pause()

    def is_ended(self):
        """True once every source frame has been displayed."""
        return self.cur_frame >= self.frame_max

    def make_audio_window(self):
        """Fetch the next audio frame; ``(None, None)`` on EOF or no data."""
        window, val = self.audio.get_frame()
        if window is None or val == 'eof':
            return (None, None)
        return window

    def make_frame(self, frame):
        """Compose a 2x2 canvas: original, greyscale, sharpened, inverted."""
        canvas = np.zeros((_G.CanvasHeight, _G.CanvasWidth, 3), np.uint8)

        mx, my = _G.CanvasWidth // 2, _G.CanvasHeight // 2
        frame = cv2.resize(frame, (mx, my))

        frame2 = filter.greyscale(frame)
        frame3 = filter.sharpen(frame)
        frame4 = filter.inverted(frame)

        # Quadrants start from zeros, so += is effectively a copy (no
        # uint8 overflow possible on the first write).
        canvas[0:frame.shape[0], 0:frame.shape[1]] += frame
        canvas[0:frame.shape[0], mx:mx + frame.shape[1]] += frame2
        canvas[my:my + frame.shape[0], 0:frame.shape[1]] += frame3
        canvas[my:my + frame.shape[0], mx:mx + frame.shape[1]] += frame4
        return canvas
예제 #15
0
class VideoPlayer:
    """Threaded video player with separate decode/encode queues.

    The audio track is extracted to a side file and replayed through
    ffpyplayer; a sync thread corrects A/V drift. Worker threads honour a
    soft lock (``FLAG_LOCK``) plus a single-slot message box
    (``thread_msg``/``thread_args``) for cross-thread queue-clear and seek
    requests issued by :meth:`jump_to_frame`.
    """

    AudioSyncInterval = 60
    THREAD_MSG_CLEAR = "_th_clr_"
    THREAD_MSG_VSEEK = "_th_vseek_"

    def __init__(self, video, trackbar_name, window_name, **kwargs):
        self.cur_frame = 0           # next frame index to display
        self.dframe_cnt = 0          # decoder-side frame counter
        self.audio_frame = 0         # frame index derived from the audio clock
        self.last_vframe = -1        # last video frame shown
        self.last_aframe = -1        # last audio frame observed
        self.video = video
        self.audio = None            # created lazily by extract_audio()
        self.audio_sync_interval = VideoPlayer.AudioSyncInterval
        if video:
            self.src = video.src
            self.frame_max = video.frame_max
        else:
            self.frame_max = kwargs.get('fmax')
        self.trackbar = trackbar_name
        self.window = window_name
        self.dqueue = Queue(maxsize=_G.MaxQueueSize)  # decoded frames
        self.equeue = Queue()                         # frames awaiting encode
        self.FLAG_ENCODE_STOP = not kwargs.get('output')
        self.FLAG_DECODE_STOP = False
        self.FLAG_LOCK = False       # soft lock telling worker threads to idle
        self.thread_msg = None       # pending cross-thread message (one slot)

        if self.FLAG_ENCODE_STOP:
            print(f"Ostream closed for {window_name}")

        # Frame post-processor; defaults to identity.
        mkframe = kwargs.get('make_frame')
        if not mkframe:
            mkframe = lambda f, t: f
        self.make_frame = mkframe
        cv2.namedWindow(self.window)
        cv2.createTrackbar(self.trackbar, self.window, 0, self.frame_max,
                           self.set_next_frame)

    def init_ostream(self):
        """Open the cv2.VideoWriter for encoded output (None if no video)."""
        if not self.video:
            # BUG FIX: the message was missing its f-prefix and printed
            # the literal text "{self}".
            print(f"No video loaded for {self}")
            return
        fname = out_filename(_G.StreamFileIndex)
        fourcc = cv2.VideoWriter_fourcc(*_G.VideoCodec)
        _fps = self.video.fps
        _res = (_G.CanvasWidth, _G.CanvasHeight)
        return cv2.VideoWriter(fname, fourcc, _fps, _res)

    def set_next_frame(self, n):
        """Trackbar callback: jump when dragged past the sync threshold."""
        if not self.video:
            return
        if not self.FLAG_ENCODE_STOP:
            print("Cannot jump a encoding video")
            return
        if abs(n - self.cur_frame) >= _G.AutoSyncThreshold:
            self.jump_to_frame(n)

    def set_audio_frame(self, n):
        """Seek the audio channel to the timestamp of video frame ``n``."""
        t = self.video.frame2timestamp(n)
        print(f"Sync f={n}, t={t}")
        self.audio.seek(t, False, accurate=True)

    def jump_to_frame(self, n):
        """Seek both streams to frame ``n`` under the thread soft lock."""
        print(f"Jumping to frame {n}")
        self.lock_threads()
        self.cur_frame = n
        self.dframe_cnt = n
        self.clear_queue()
        self.seek_video(n)
        self.sync_audio_channel()
        self.unlock_threads()
        ori_pause_stat = _G.FLAG_PAUSE
        self.pause(ori_pause_stat)

    def sync_audio_channel(self):
        """Re-seek the audio to the current video frame, if audio exists."""
        if self.audio:
            self.set_audio_frame(self.cur_frame)

    def start(self):
        """Spawn decode/encode/audio/sync threads as needed; return self."""
        dth = Thread(target=self.update_decode, daemon=True)
        dth.start()
        if not self.FLAG_ENCODE_STOP:
            eth = Thread(target=self.update_encode, daemon=True)
            eth.start()
        if self.video:
            ath = Thread(target=self.extract_audio, daemon=True)
            ath.start()
            # Sync thread needs self.audio; wait until extraction sets it.
            while not self.audio:
                time.sleep(_G.UPS)
            sth = Thread(target=self.update_synchronzation, daemon=True)
            sth.start()
        return self

    def extract_audio(self):
        """Extract the audio track (cached on disk) and open it paused."""
        fname = _G.FullAudioFilename
        if not os.path.exists(fname):
            v = mp.VideoFileClip(self.src)
            v.audio.write_audiofile(fname)
        self.audio = MediaPlayer(_G.FullAudioFilename)
        self.audio.toggle_pause()
        print("Audio loaded")

    def process_thread_message(self):
        """Execute the pending cross-thread request (decode thread only)."""
        if self.thread_msg == VideoPlayer.THREAD_MSG_CLEAR:
            print("Clear queue")
            self.dqueue.queue.clear()
        elif self.thread_msg == VideoPlayer.THREAD_MSG_VSEEK:
            # BUG FIX: this used to compare THREAD_MSG_VSEEK with itself
            # (always true), so any non-CLEAR message triggered a seek.
            print("Seek to", self.thread_args[0])
            if self.video:
                self.video.set(cv2.CAP_PROP_POS_FRAMES, self.thread_args[0])

    def update_decode(self):
        """Decode-thread body: keep the display queue topped up."""
        # current frame count in decoding
        self.dframe_cnt = 0
        while not self.FLAG_DECODE_STOP:
            time.sleep(_G.UPS)

            if self.thread_msg:
                self.process_thread_message()
                self.thread_msg = None
                self.thread_args = None

            if self.FLAG_LOCK:
                # Honour the soft lock: idle (while still servicing
                # messages above) while the main thread rearranges queues.
                # This was a no-op `pass` before, defeating lock_threads().
                continue

            if not self.dqueue.full():
                frame = None
                if self.video:
                    ret, frame = self.video.read()
                    if not ret:
                        self.FLAG_DECODE_STOP = True
                        break  # was `return`; now "Decode Ended" prints
                frame = self.make_frame(frame, self.dframe_cnt)
                self.dqueue.put(frame)
                self.dframe_cnt += 1
        print("Decode Ended")

    def update_encode(self):
        """Encode-thread body: drain the encode queue into the ostream."""
        ostream = self.init_ostream()
        while not self.FLAG_ENCODE_STOP:
            if not self.equeue.empty():
                ostream.write(self.equeue.get())
            time.sleep(_G.SubThreadUPS)

    def update_synchronzation(self):
        """Sync-thread body: re-seek audio when A/V drift grows too large.

        NOTE: the method name's spelling is kept for caller compatibility.
        """
        while not _G.FLAG_STOP:
            time.sleep(_G.SubThreadUPS)
            if self.FLAG_LOCK:
                # Don't fight jump_to_frame's own sync while locked
                # (was a no-op `pass` before).
                continue
            aframe = self.audio.get_pts() * self.video.fps
            if abs(aframe - self.cur_frame) > _G.AutoSyncThreshold:
                self.sync_audio_channel()
                print(f"Auto Synced: {aframe} > {self.cur_frame}")

    def frame_available(self):
        """True when a decoded frame can be fetched (and not locked)."""
        if self.FLAG_LOCK:
            return False
        return self.dqueue.qsize() > 0

    def get_frame(self):
        """Pop the next decoded frame, or None when locked/empty."""
        if self.FLAG_LOCK:
            return None
        if self.dqueue.empty():
            return None
        return self.dqueue.get()

    def get_audio_frame(self):
        """Current audio position expressed as a video-frame index."""
        return int(self.audio.get_pts() * self.video.fps)

    def write_async_ostream(self, frame):
        """Hand a frame to the encode thread via the encode queue."""
        self.equeue.put(frame)

    def update(self):
        """Per-tick entry point; returns whether a new frame was shown."""
        frame_synced = self.update_frame()
        self.update_input()
        self.update_flags()
        return frame_synced

    def update_frame(self):
        """Advance one frame, paced by the audio clock when audio exists.

        Returns True if a new frame was displayed, False otherwise.
        """
        if self.audio:
            self.audio_frame = self.get_audio_frame()
            # Audio clock hasn't advanced; don't show a new video frame.
            if self.audio_frame == self.last_aframe:
                return False
            self.last_aframe = self.audio_frame

        if self.is_ended() or _G.FLAG_PAUSE:
            return False

        cv2.setTrackbarPos(self.trackbar, self.window, self.cur_frame)

        frame = self.get_frame()
        if frame is None:
            return False

        cv2.imshow(self.window, frame)
        if not self.FLAG_ENCODE_STOP:
            self.write_async_ostream(frame)

        if not _G.FLAG_PAUSE:
            self.last_vframe = self.cur_frame
            self.cur_frame += 1
        return True

    def update_flags(self):
        """Stop the encoder once playback has ended.

        NOTE(review): `not self.equeue.empty()` stops the encoder while
        frames are still queued, dropping them; `self.equeue.empty()` looks
        like the intent — confirm before changing.
        """
        if self.is_ended() and not self.equeue.empty():
            self.FLAG_ENCODE_STOP = True

    def update_input(self):
        """Handle ESC (stop), SPACE (pause) and S (debug queue sizes)."""
        key = cv2.waitKey(_G.UPMS)
        if key == _G.VK_ESC:
            self.stop()
        elif key == _G.VK_SPACE:
            self.pause(_G.FLAG_PAUSE ^ True)
        elif key == _G.VK_S or key == _G.VK_s:
            print(
                f"e/d queue size={self.equeue.qsize()}/{self.dqueue.qsize()}")

    def stop(self):
        """Request global shutdown."""
        _G.FLAG_STOP = True

    def pause(self, flg):
        """Set the global pause state and mirror it to the audio channel."""
        # Guard: audio is None until extract_audio() completes (or when the
        # player was constructed without a video source).
        if self.audio and _G.FLAG_PAUSE != flg:
            self.audio.toggle_pause()
        _G.FLAG_PAUSE = flg
        self.sync_audio_channel()

    def is_ended(self):
        """True once every source frame has been displayed."""
        return self.cur_frame >= self.frame_max

    def make_audio_window(self):
        """Fetch the next audio frame; ``(None, None)`` on EOF or no data."""
        window, val = self.audio.get_frame()
        if window is None or val == 'eof':
            return (None, None)
        return window

    # Should be called from main thread to wait
    def wait_until_safe2play(self):
        """Generator that keeps the player paused until the queue refills."""
        while self.dqueue.qsize() < _G.MaxQueueSize // 2:
            self.pause(True)
            yield

    def clear_queue(self):
        """Ask the decode thread to clear the display queue."""
        self.send_thread_message(VideoPlayer.THREAD_MSG_CLEAR)

    def lock_threads(self):
        """Raise the soft lock and give worker threads time to notice."""
        self.FLAG_LOCK = True
        time.sleep(_G.UPS * 2)

    def unlock_threads(self):
        """Release the soft lock and give worker threads time to resume."""
        self.FLAG_LOCK = False
        time.sleep(_G.UPS * 2)

    def seek_video(self, n):
        """Ask the decode thread to reposition the video to frame ``n``."""
        self.send_thread_message(VideoPlayer.THREAD_MSG_VSEEK, n)

    def send_thread_message(self, msg, *args):
        """Post a message to the single-slot box, waiting until it's free."""
        while self.thread_msg:
            time.sleep(_G.UPS)
        self.thread_msg = msg
        self.thread_args = args
예제 #16
0
class VideoStream:
    """Thin wrapper around ffpyplayer's MediaPlayer for frame-by-frame use.

    Opens the source paused, waits for metadata (frame size/rate, duration),
    then un-pauses and reads the first frame so ``get_frame`` can be polled.
    """

    def __init__(self, video_source=None):
        ff_opts = {'paused': True, 'autoexit': False}  # Audio options
        # NOTE(review): attribute name is misspelled ("surce") but kept
        # because snapshot() and possibly external code read it.
        self.video_surce = video_source
        # Open the video source.
        self.player = MediaPlayer(video_source, ff_opts=ff_opts)
        # Wait until the player has probed the stream: metadata (frame rate,
        # size, duration) is only valid once src_vid_size is non-zero.
        while self.player.get_metadata()['src_vid_size'] == (0, 0):
            time.sleep(0.01)
        data = self.player.get_metadata()
        print('data -->', data)
        self.f_rate = data['frame_rate']
        print('delay -> ', self.f_rate)
        self.w, self.h = data['src_vid_size']
        print('WxH -> ', self.w, self.h)
        self.pts = self.player.get_pts()  # elapsed play time (float, seconds)
        print('pts ->', self.pts)
        self.duration = data['duration']
        print('duration', self.duration)
        self.pause = self.player.get_pause()  # whether the player is paused
        print('pause ->', self.pause)
        self.volume = self.player.get_volume()  # audio volume, 0.0 - 1.0
        print('volume ->', self.volume)
        self.player.toggle_pause()  # start playback (was opened paused)
        # Poll until the first frame is decoded so l_frame/pts are valid.
        cond = True
        while cond:
            self.l_frame, self.val = self.player.get_frame()
            if self.val == 'eof':
                print('can not open source: ', video_source)
                break
            elif self.l_frame is None:
                time.sleep(0.01)
            else:
                self._imagen, self.pts = self.l_frame
                print('pts ->', self.pts)
                cond = False

    # properties.
    @property
    def f_rate(self):
        """Frame rate in frames per second (int)."""
        return self.__f_rate

    @f_rate.setter
    def f_rate(self, val):
        """Normalise a (numerator, denominator) rational to an int fps.

        Falls back to 30 fps when the metadata is missing/zero — the
        original code's default of 30 was unreachable because its branches
        already covered every denominator.
        """
        vn, vd = val
        if vd == 0:
            self.__f_rate = 30
        elif vd == 1:
            self.__f_rate = vn
        else:
            self.__f_rate = int(round(vn / vd))

    # end properties.

    def get_frame(self):
        '''
        Fetch the next frame from the player.

        Return values:
            val : '' normally, or 'eof' / 'paused'
            pts : presentation timestamp of the frame (audio/image clock)
            imagen : the frame image (None on eof or when no frame is ready)
        Return (val, pts, imagen)
        '''
        self.l_frame, self.val = self.player.get_frame()
        if self.val == 'eof':
            # End of file: caller should stop polling.
            return self.val, None, None
        elif self.l_frame is None:
            # No frame decoded yet; brief sleep to avoid a hot poll loop.
            time.sleep(0.01)
            return self.val, None, None
        else:
            self._imagen, self.pts = self.l_frame
            return self.val, self.pts, self._imagen

    def toggle_pause(self):
        '''Toggle the underlying player's pause state (best effort).'''
        try:
            self.player.toggle_pause()
        except Exception:
            # Best-effort: the player may already be closed.
            pass

    def seek(self, pts=None, relative=False, accurate=False):
        """Seek to ``pts`` seconds; forwards relative/accurate to the player.

        BUG FIX: the original hard-coded relative=False, accurate=False,
        silently ignoring the caller's arguments, and also rejected pts=0.
        """
        if pts is None:
            return
        self.player.seek(pts, relative=relative, accurate=accurate)

    def snapshot(self, road=None):
        '''Save the most recently fetched frame as a timestamped JPEG.

        ``road`` optionally overrides the output directory (defaults to the
        source file's directory).
        '''
        img = self.l_frame[0]
        if img is not None:
            size = img.get_size()
            arr = img.to_memoryview()[0]  # raw RGB bytes of the frame
            img = Image.frombytes("RGB", size, arr.memview)
            time_str = time.strftime("%d-%m-%Y-%H-%M-%S")
            frame_name = f"frame-{time_str}.jpg"
            if not road:
                ruta = os.path.dirname(self.video_surce)
                name_out = os.path.join(ruta, frame_name)
            else:
                name_out = os.path.join(road, frame_name)
            img.save(name_out)

    # Release the video source when the object is destroyed.
    def __del__(self):
        # Guard: if __init__ failed before creating the player, attribute
        # access here would raise a (suppressed but noisy) AttributeError.
        player = getattr(self, 'player', None)
        if player is not None:
            player.close_player()
        print('__del__')