def test_play(self):
    """Decode a video-only test file to completion, failing on any error
    reported by the player callback."""
    from .common import get_media
    from ffpyplayer.player import MediaPlayer
    import time

    err_box = [None, ]

    def on_report(selector, value):
        # remember the first error reported by the player thread
        if selector.endswith('error'):
            err_box[0] = selector, value

    # only video: drop audio and sync on the video clock
    opts = {'an': True, 'sync': 'video'}
    player = MediaPlayer(
        get_media('dw11222.mp4'), callback=on_report, ff_opts=opts)

    while not err_box[0]:
        frame, val = player.get_frame()
        if val == 'eof':
            break
        if frame is None:
            time.sleep(0.01)
        else:
            img, t = frame

    if err_box[0]:
        raise Exception('{}: {}'.format(*err_box[0]))
def PlayVideo(video_path):
    """Play a video file with OpenCV while ffpyplayer handles the audio."""
    capture = cv2.VideoCapture(video_path)
    audio = MediaPlayer(video_path)
    while True:
        ok, image = capture.read()
        sound, val = audio.get_frame()
        if not ok:
            print("End of video")
            break
        if cv2.waitKey(28) & 0xFF == ord("q"):
            break
        cv2.imshow("Video", image)
        # consume the matching audio frame so playback stays in sync
        if val != 'eof' and sound is not None:
            img, t = sound
    capture.release()
    cv2.destroyAllWindows()
class MainWindow(QMainWindow):
    """Minimal Qt window that decodes a file with ffpyplayer and blits each
    frame into a QLabel.

    Decoding runs in a blocking loop inside ``timerEvent`` and pumps the Qt
    event loop manually via ``QApplication.processEvents()``.  The stray
    ``pass`` statement the original carried in the class body was dead code
    and has been removed.
    """

    def __init__(self):
        super().__init__()
        self.player = None
        self.setWindowTitle("FFPyPlayer Test")

    def showEvent(self, e):
        # one-shot timer defers player construction until the window exists
        self.timer_id = self.startTimer(1)
        self.lbl = QLabel(self)
        self.lbl.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
        self.setCentralWidget(self.lbl)

    def timerEvent(self, event) -> None:
        self.killTimer(self.timer_id)
        ff_opts = {'paused': False, 'autoexit': True}
        self.player = MediaPlayer('../example_data/sample.mp4',
                                  ff_opts=ff_opts, lib_opts={})
        # self.player = MediaPlayer('http://localhost:1441/sample_stream.mp4', ff_opts=ff_opts, lib_opts={})
        self.running = True
        while self.running:
            time.sleep(0.01)
            frame, val = self.player.get_frame()
            if val == 'eof':
                break
            if frame is None:
                time.sleep(0.01)
            else:
                img, t = frame
                data = img.to_bytearray()[0]
                width, height = img.get_size()
                # the technical name for the 'rgb24' default pixel format is
                # RGB888, which is QImage.Format_RGB888 in the QImage enum
                qimage = QImage(data, width, height, QImage.Format_RGB888)
                pixmap = QPixmap.fromImage(qimage)
                pixmap = pixmap.scaled(self.lbl.width(), self.lbl.height(),
                                       Qt.KeepAspectRatio,
                                       Qt.SmoothTransformation)
                self.lbl.setPixmap(pixmap)
                # ``val`` is the delay until the next frame is due
                time.sleep(val)
            QApplication.processEvents()

    def closeEvent(self, event) -> None:
        self.running = False
        if self.player is not None:
            self.player.set_pause(True)
            self.player.close_player()
def verify_frames(filename, timestamps, frame_vals=None):
    """Decode ``filename`` and check each frame's timestamp (and optionally
    its first byte value) against the expected lists."""
    from ffpyplayer.player import MediaPlayer

    err_box = [None, ]

    def on_report(selector, value):
        if selector.endswith('error'):
            err_box[0] = selector, value

    player = MediaPlayer(filename, callback=on_report)
    seen = set()
    try:
        i = -1
        while not err_box[0]:
            frame, val = player.get_frame()
            if val == 'eof':
                break
            if val == 'paused':
                raise ValueError('Got paused')
            if frame is None:
                time.sleep(0.01)
                continue
            img, t = frame
            print(i, t)
            # the very first decoded frame is skipped, not verified
            if i < 0:
                i += 1
                continue
            print(i, t, timestamps[i])
            seen.add(t)
            assert math.isclose(t, timestamps[i], rel_tol=.1)
            if frame_vals:
                assert frame_vals[i] == img.to_bytearray()[0][0]
            i += 1
    finally:
        player.close_player()

    if err_box[0] is not None:
        raise Exception('{}: {}'.format(*err_box[0]))
    # all expected frames were produced, and every timestamp was distinct
    assert len(timestamps) - 1 == i
    assert len(seen) == i
def open_file1(self):
    """Open the 'ems.mp4' file next to the working directory and size the
    canvas for playback."""
    self.pause = False
    self.filename = str(Path().absolute()) + '/ems.mp4'
    print(self.filename)
    # open the video file and its audio player
    self.cap = cv2.VideoCapture(self.filename)
    self.player = MediaPlayer(self.filename)
    # self.width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    # self.height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    self.width, self.height = 1000, 500
    self.canvas.config(width=self.width, height=self.height)
def connect(self):
    """(Re)open the media stream over the forwarded adb TCP port."""
    if self.needStop:
        return
    if self.url is None:
        print('need url for connect')
        return
    ecall('adb forward tcp:50000 tcp:50000')
    # options chosen for low-latency decoding
    lib_opts = {'analyzeduration': '32', 'flags': 'low_delay'}
    if self.player:
        self.player.close_player()
        self.player = None
    self.player = MediaPlayer(self.url, callback=self._mediaPlayerCallback,
                              lib_opts=lib_opts)
    # poll shortly afterwards to see whether the connection succeeded
    self.connectedTimer = Timer(0.1, self._processConnectResult)
    self.connectedTimer.start()
def connect(self):
    """(Re)open the media stream over the forwarded adb TCP port
    (configurable-port variant)."""
    if self.needStop:
        return
    if self.url is None:
        print('need url for connect')
        return
    ecall(adb_path() + ' forward tcp:' + self.port + ' tcp:' + self.port)
    # options chosen for low-latency decoding
    lib_opts = {'analyzeduration': '32', 'flags': 'low_delay'}
    if self.player:
        print("monitor try reconnect!")
        self.player.close_player()
        self.player = None
    self.player = MediaPlayer(self.url, callback=self._mediaPlayerCallback,
                              lib_opts=lib_opts)
    # poll shortly afterwards to see whether the connection succeeded
    self.connectedTimer = Timer(0.1, self._processConnectResult)
    self.connectedTimer.start()
def PlayVideo(video_path, H, W):
    """Play ``video_path`` resized to W x H with audio via ffpyplayer.

    Fixes: the original computed ``sleep_ms`` from the file's FPS but never
    used it, hard-coding ``waitKey(30)`` instead, and would divide by zero
    when OpenCV cannot determine the FPS.  The per-frame wait is now derived
    from the real frame rate (falling back to ~30 ms).
    """
    video = cv2.VideoCapture(video_path)
    player = MediaPlayer(video_path)
    fps = video.get(cv2.CAP_PROP_FPS)
    # frame period in ms; fall back to ~30 ms when FPS is unavailable (0)
    sleep_ms = int(np.round((1 / fps) * 1000)) if fps else 30
    while True:
        grabbed, frame = video.read()
        audio_frame, val = player.get_frame()
        if not grabbed:
            break
        if cv2.waitKey(sleep_ms) & 0xFF == ord("q"):
            break
        cv2.imshow("Game", cv2.resize(frame, (W, H)))
        if val != 'eof' and audio_frame is not None:  # audio
            img, t = audio_frame
    video.release()
    cv2.destroyAllWindows()
def __init__(self):
    """Open 'frozen.mp4' (looping forever) and take the frame size from the
    first decoded frame before allocating a GL texture."""
    self.filename = 'frozen.mp4'
    self.player = MediaPlayer(self.filename, loop=0)
    # spin until the first frame arrives so we can learn the frame size
    size = None
    while size is None:
        frame, val = self.player.get_frame()
        if frame:
            img, t = frame
            size = img.get_size()
    self.w, self.h = size
    self.tex = gl.glGenTextures(1)
    self.n = 0
    super(video, self).__init__()
def play_next_step():
    """Pop and run the next queued step, showing feedback and playing the
    confirmation sound first."""
    if not NEXT_STEPS:
        print("Trying to call a next step but there is none",
              file=sys.stderr, flush=True)
        return
    # FIFO: take the oldest queued callable
    step = NEXT_STEPS.pop(0)
    scenario.display_good_feedback()
    song = MediaPlayer(CORRECT_SOUND)
    sleep(1)
    step()
def Play(self):
    """Start, restart or pause/resume playback of the selected list item.

    Fix: the original used a bare ``except:``, which also swallows
    ``SystemExit``/``KeyboardInterrupt``; narrowed to ``except Exception``.
    """
    if self.flag:
        # first start: create the player plus the refresh and step timers
        try:
            self.playitem = self.l[self.list.currentRow()]
            if os.path.isfile("%s" % self.playitem):
                self.player = MediaPlayer("%s" % self.playitem)
                self.timer = QTimer()
                self.timer.start(50)
                self.timer.timeout.connect(self.Show)
                self.steptimer = QTimer()
                self.steptimer.start(1000)
                self.steptimer.timeout.connect(self.Step)
                self.flag = False
                self.bplay.setIcon(QIcon(r'img\pause.png'))
                self.bplay.setToolTip('暂停,快捷键“Space”')
            else:
                QMessageBox.warning(self, '错误', '找不到要播放的文件!')
        except Exception:
            QMessageBox.warning(self, '错误', '找不到要播放的文件!')
    else:
        if self.l[self.list.currentRow()] == self.playitem:
            # same item: toggle pause and the timers/icon accordingly
            self.player.toggle_pause()
            if self.player.get_pause():
                self.timer.stop()
                self.steptimer.stop()
                self.bplay.setIcon(QIcon(r'img\play.png'))
                self.bplay.setToolTip('播放,快捷键“Space”')
            else:
                self.timer.start()
                self.steptimer.start()
                self.bplay.setIcon(QIcon(r'img\pause.png'))
                self.bplay.setToolTip('暂停,快捷键“Space”')
        else:
            # a different item was selected: restart from the beginning
            self.playitem = self.l[self.list.currentRow()]
            if os.path.isfile("%s" % self.playitem):
                self.step = 0
                self.stime.setValue(0)
                self.player = MediaPlayer("%s" % self.playitem)
                self.timer.start()
                self.steptimer.start()
                self.bplay.setIcon(QIcon(r'img\pause.png'))
                self.bplay.setToolTip('暂停,快捷键“Space”')
            else:
                QMessageBox.warning(self, '错误', '找不到要播放的文件!')
def __update(self):
    """Producer thread: read frames from ``self.stream`` into ``self.Q``."""
    # audio playback runs alongside only when requested
    player = MediaPlayer(self.video_path) if self.play_audio else None
    # keep looping infinitely
    while True:
        # if the thread indicator variable is set, stop the
        # thread
        if self.stopped:
            break
        # otherwise, ensure the queue has room in it
        if not self.Q.full():
            # read the next frame from the file
            (grabbed, frame) = self.stream.read()
            # if the `grabbed` boolean is `False`, then we have
            # reached the end of the video file
            if not grabbed:
                self.stopped = True
                # NOTE(review): execution deliberately falls through here,
                # so one final ``None`` frame is transformed/queued below —
                # confirm the consumer (and ``self.transform``) tolerate a
                # None frame as an end-of-stream sentinel
            # if there are transforms to be done, might as well
            # do them on producer thread before handing back to
            # consumer thread. ie. Usually the producer is so far
            # ahead of consumer that we have time to spare.
            #
            # Python is not parallel but the transform operations
            # are usually OpenCV native so release the GIL.
            #
            # Really just trying to avoid spinning up additional
            # native threads and overheads of additional
            # producer/consumer queues since this one was generally
            # idle grabbing frames.
            if self.transform:
                frame = self.transform(frame)
            # add the frame to the queue
            self.Q.put(frame)
        else:
            time.sleep(0.1)  # rest for 100 ms, we have a full queue
    # release the audio player and the video source on the way out
    if player is not None:
        player.close_player()
    self.stream.release()
def PlayVideo(self):
    """Play the video while overlaying live word counts built from two text
    files ('temp1.txt' and 'temp2.txt') that are re-read every frame."""
    video = cv2.VideoCapture(self.video_path)
    player = MediaPlayer(self.video_path)
    while True:
        grabbed, frame = video.read()
        audio_frame, val = player.get_frame()
        with open('temp1.txt') as fp1:
            data = fp1.read()
        try:
            # Reading data from file2
            with open('temp2.txt') as fp2:
                data2 = fp2.read()
        except Exception as ex:
            # print(ex)
            # temp2.txt missing/unreadable: skip this frame entirely
            continue
        # Merging 2 files
        # To add the data of file2
        # from next line
        data += "\n"
        data += data2
        word_count, word_count1 = self.processText(data)
        # draw each "word count" line onto the frame, 30 px apart
        for indx, (key, value) in enumerate(word_count):
            content = key + ' ' + str(value)
            indx = indx + 1
            frame = cv2.putText(frame, content, (50, indx * 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2,
                                cv2.LINE_AA)
        if not grabbed:
            print("End of video")
            break
        if cv2.waitKey(28) & 0xFF == ord("q"):
            break
        cv2.imshow("Video", frame)
        if val != 'eof' and audio_frame is not None:  # audio
            img, t = audio_frame
            # print(img, t)
    # fileText.close()
    # NOTE(review): redundant — the ``with`` blocks above already closed
    # these handles, and this relies on loop variables surviving the loop
    fp1.close()
    fp2.close()
    video.release()
    cv2.destroyAllWindows()
def __init__(self, video_source=None):
    """Open ``video_source`` paused, read its metadata, then decode until
    the first frame is available."""
    ff_opts = {'paused': True, 'autoexit': False}  # audio options
    self.video_surce = video_source
    # open the video source
    self.player = MediaPlayer(video_source, ff_opts=ff_opts)
    # TODO: add a load delay for MediaPlayer so the data below is valid;
    # obtain the frame rate for synchronization (self.dalay)
    # block until the source's video size is known (metadata loaded)
    while self.player.get_metadata()['src_vid_size'] == (0, 0):
        time.sleep(0.01)
    data = self.player.get_metadata()
    print('data -->', data)
    self.f_rate = data['frame_rate']  # frame rate used for syncing
    print('delay -> ', self.f_rate)
    self.w, self.h = data['src_vid_size']
    print('WxH -> ', self.w, self.h)
    self.pts = self.player.get_pts()  # elapsed play time (float)
    print('pts ->', self.pts)
    self.duration = data['duration']
    print('duration', self.duration)
    self.pause = self.player.get_pause()  # whether the player is paused
    print('pause ->', self.pause)
    self.volume = self.player.get_volume()  # audio volume, 0.0 - 1.0
    print('volume ->', self.volume)
    self.player.toggle_pause()  # start playing (player opened paused)
    # self.player.set_pause(False)
    # spin until the first frame has been decoded
    waiting = True
    while waiting:
        self.l_frame, self.val = self.player.get_frame()
        if self.val == 'eof':
            print('can not open source: ', video_source)
            break
        elif self.l_frame is None:
            time.sleep(0.01)
        else:
            self._imagen, self.pts = self.l_frame
            print('pts ->', self.pts)
            # arr = self._imagen.to_memoryview()[0]  # array image
            # self.imagen = Image.frombytes("RGB", self.original_size, arr.memview)
            # self.imagen.show()
            waiting = False
def ffplay(path):
    """Play ``path`` to completion, then pause and close the player.

    Fix: the original compared ``str(get_pts())[:3]`` against
    ``str(duration)[:3]`` (each minus 2), which breaks for any media whose
    position/duration needs more than three characters (e.g. >= 100 s, or
    values like '9.55'); the floats are now compared directly.
    """
    global player
    player = MediaPlayer(path)
    time.sleep(0.5)  # give the player a moment to load metadata
    while True:
        duration = player.get_metadata()['duration']
        # finished when the play position reaches the stream duration
        if duration is not None and player.get_pts() >= duration:
            time.sleep(0.5)
            player.toggle_pause()
            player.close_player()
            break
        time.sleep(1)
def play(self):
    """Resume if paused; otherwise load the file and start a decode thread."""
    if self._ffplayer and self._state == 'paused':
        # player already exists: just unpause it
        self._ffplayer.toggle_pause()
        self._state = 'playing'
        return
    self.load()
    self._out_fmt = 'rgba'
    opts = {'paused': True, 'out_fmt': self._out_fmt}
    self._ffplayer = MediaPlayer(
        self._filename, callback=self._player_callback, thread_lib='SDL',
        loglevel='info', ff_opts=opts)
    self._ffplayer.set_volume(self._volume)
    worker = Thread(target=self._next_frame_run, name='Next frame')
    worker.daemon = True
    self._thread = worker
    worker.start()
def start_videostream(self):
    """Start a fresh (initially paused) player for the selected playlist
    entry, then apply the current volume and unpause."""
    if self.video_player:
        self.video_player.close_player()
    target_volume = float(self.curVolume) / 100
    print(target_volume)
    path = self.video_folder.get() + "\\" + self.playlist[self.selected_video]
    self.video_player = MediaPlayer(
        path, ff_opts={'paused': True, 'volume': 0.03})
    self.video_player.set_size(400, 200)
    # while not self.video_player:
    #     continue
    time.sleep(0.1)  # short grace period for the player to come up
    if self.video_player:
        self.video_player.set_volume(target_volume)
        self.video_player.set_pause(False)
def create_player(url):
    """Build an audio-only MediaPlayer for the stream behind ``url``."""
    music_stream_uri = extract_video_url(url)[0]
    if not music_stream_uri:
        print("Failed to get audio")
        sys.exit(1)
    player = MediaPlayer(music_stream_uri,
                         ff_opts={"vn": True, "sn": True},  # only audio
                         loglevel='debug')
    # refer : https://github.com/kivy/kivy/blob/52d12ebf33e410c9f4798674a93cbd0db8038bf1/kivy/core/audio/audio_ffpyplayer.py#L116
    # start paused to avoid crashing before the stream has buffered enough
    player.toggle_pause()
    # wait (up to 10 s) for the duration metadata to become available
    started = time.perf_counter()
    while (player.get_metadata()['duration'] is None and
            time.perf_counter() - started < 10.):
        time.sleep(0.005)
    return player
def playVideo():
    """Play the constructed video together with its separate audio track."""
    video_path = '/home/ubuntu/FobiaPhilter/ActionFiles/videoConstruct1.mp4'
    audio_path = "/home/ubuntu/FobiaPhilter/audio.mp3"
    video = cv2.VideoCapture(video_path)
    player = MediaPlayer(audio_path)
    while True:
        ok, picture = video.read()
        sound, val = player.get_frame()
        if not ok:
            print("End of video")
            break
        if cv2.waitKey(5) & 0xFF == ord("q"):
            break
        cv2.imshow("Video", picture)
        # consume the matching audio frame so playback stays in sync
        if val != 'eof' and sound is not None:
            img, t = sound
    video.release()
    cv2.destroyAllWindows()
def on_start(self):
    """Kick off playback of the file named on the command line and hook up
    keyboard control."""
    self.callback_ref = WeakMethod(self.callback)
    filename = sys.argv[1]
    logging.info('ffpyplayer: Playing file "{}"'.format(filename))
    # try ff_opts = {'vf':'edgedetect'} http://ffmpeg.org/ffmpeg-filters.html
    opts = {}
    self.ffplayer = MediaPlayer(filename, callback=self.callback_ref,
                                loglevel=log_level, ff_opts=opts)
    self._thread.start()
    self.keyboard = Window.request_keyboard(None, self.root)
    self.keyboard.bind(on_key_down=self.on_keyboard_down)
def load(self):
    """Create a paused, audio-only player for ``self.source``."""
    self.unload()
    self._ffplayer = MediaPlayer(
        self.source, callback=self._player_callback, loglevel='info',
        ff_opts={'vn': True, 'sn': True})  # only audio
    self._ffplayer.set_volume(self.volume)
    # the player starts unpaused internally; flip it to paused
    self._ffplayer.toggle_pause()
    self._state = 'paused'
def play_video(video):
    """Show ``video`` at 800x600 with synchronized audio; Esc quits."""
    cap = cv2.VideoCapture(video)
    audio = MediaPlayer(video)
    while True:
        sound, val = audio.get_frame()
        ok, image = cap.read()
        if not ok:
            break
        if cv2.waitKey(25) & 0xFF == 27:  # Esc key
            break
        # consume the matching audio frame so playback stays in sync
        if val != 'eof' and sound is not None:
            img, t = sound
        cv2.imshow('frame', cv2.resize(image, (800, 600)))
    cap.release()
    cv2.destroyAllWindows()
def PlayVideo(video_path):
    """Play ``video_path`` with synchronized audio.

    Fix: the original primed ``ret, frame`` before the loop and then
    immediately re-read at the top of the loop body, so the very first
    decoded frame was never displayed; the frame is now read exactly once
    per iteration.
    """
    video = cv2.VideoCapture(video_path)
    player = MediaPlayer(video_path)
    while True:
        ret, frame = video.read()
        audio_frame, val = player.get_frame()
        if not ret:
            print("End of video")
            break
        if cv2.waitKey(25) & 0xFF == ord("q"):
            break
        if val != 'eof' and audio_frame is not None:  # audio
            # img, t = audio_frame
            pass
        cv2.imshow("Video", frame)
    video.release()
    cv2.destroyAllWindows()
def CaptureVideo(video_path):
    """Play ``video_path`` in a window titled after the file name, with a
    'Press Q To Quit' overlay and synchronized audio."""
    video = cv2.VideoCapture(video_path)
    player = MediaPlayer(video_path)
    # window title: file name without directory or extension
    window_title = video_path.split("\\")[-1].split(".")[0]
    while True:
        grabbed, frame = video.read()
        audio_frame, val = player.get_frame()
        if not grabbed:
            # print("End of video")
            break
        if cv2.waitKey(28) & 0xFF == ord("q"):
            break
        cv2.putText(frame, 'Press Q To Quit', (50, 50),
                    cv2.FONT_HERSHEY_DUPLEX, 0.8, 255)
        cv2.imshow(window_title, frame)
        if val != 'eof' and audio_frame is not None:  # audio
            img, t = audio_frame
    video.release()
    cv2.destroyAllWindows()
def run(self):
    """Loop forever, playing a randomly chosen file from .\\music.

    Fix: the original used ``random.randint(1, len(music) - 1)``, which can
    never pick the first file and raises ``ValueError`` when the folder
    holds a single track; ``random.randrange(len(music))`` covers every
    index uniformly.
    """
    music = []
    path = r".\\music"
    for i in os.listdir(path):
        music.append(i)
    while True:
        random_music = music[random.randrange(len(music))]
        # the track length drives how long we sleep while it plays
        audio = MP3(path + "\\" + str(random_music))
        timelength = audio.info.length
        player = MediaPlayer(path + "\\" + str(random_music))
        time.sleep(timelength)
def start_video_convert(self):
    """Open ``self.source`` paused and muted for conversion, compute the
    frame budget for the selected range, and seek to the start point."""
    self.close_video()
    self.player = MediaPlayer(self.source,
                              ff_opts={'paused': True, 'ss': 0.0,
                                       'an': True})
    self.player.set_volume(0)
    self.frame_number = 0
    if self.start_point > 0 or self.end_point < 1:
        # only a sub-range is converted: scale the total frame count down
        fps = self.framerate[0] / self.framerate[1]
        self.max_frames = (self.length * fps *
                           (self.end_point - self.start_point))
    else:
        self.max_frames = 0
    # need to wait for load so the seek routine doesnt crash python
    self.first_frame = self.wait_frame()
    if self.start_point > 0:
        self.start_seconds = self.length * self.start_point
        self.first_frame = self.seek_player(self.start_seconds)
def __video_stream(self, filename):
    """Yield GL textures for each frame of ``filename``, rewinding both the
    video and the audio whenever the file ends."""
    video = cv2.VideoCapture(str(filename))
    if self.__player is not None:
        self.__player.close_player()
        self.__player = None
    self.__player = MediaPlayer(str(filename))
    self.__player.set_volume(1.0)
    self._is_video_playing = True
    while video.isOpened():
        ret, frame = video.read()
        # keep the audio clock ticking without rendering its frame
        self.__player.get_frame(show=False)
        if not ret:
            # end of file: rewind both the video and the audio
            video.set(cv2.CAP_PROP_POS_FRAMES, 0)
            self.__player.seek(0, relative=False)
            continue
        rgba = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
        yield self.__create_texture(rgba)
    self._is_video_playing = False
    return None
def play_btn():
    """Play the class video with audio; when it ends, report the elapsed
    viewing time by mail.

    Fixes two defects: ``except: ValueError`` was a bare except whose body
    merely evaluated the ``ValueError`` class (silently swallowing every
    failure), and ``play_time`` was computed as ``start - end``, i.e.
    always negative.
    """
    try:
        cap = cv2.VideoCapture(self.master.video_source)
        sound_player = MediaPlayer(self.master.video_source)
    except ValueError:
        return 0
    start_time = time()
    fps = cap.get(cv2.CAP_PROP_FPS)
    delay = round(1000 / fps)
    cap.set(cv2.CAP_PROP_FPS, round(fps))
    ret = False
    while (cap.isOpened()):
        ret, frame = cap.read()
        audio_frame, val = sound_player.get_frame()
        if ret:
            cv2.imshow('FPGA Player', frame)
            if cv2.waitKey(delay - 4) & 0xFF == ord(
                    'q'):  # fps - 4 frame will be sync with audio
                break
            if val != 'eof' and audio_frame is not None:
                img, t = audio_frame
        else:
            break
    cap.release()
    messagebox.showinfo("info", "학습이 끝났습니다.")
    cv2.destroyAllWindows()
    end_time = time()
    play_time = end_time - start_time  # play time check
    mailing(self.master.class_id, self.master.std_id, self.master.name,
            play_time)
def toggle_playback(self, widget):
    """Play/pause audio for ``widget``, switching players when a different
    widget's audio is requested."""
    if self._widget == widget:
        if not self._player.get_pause():
            # currently playing: pause it
            self.pause_playback()
            return
        # paused: make sure video isn't playing, then resume
        plugins.video.video_player.pause_playback()
        self._player.set_pause(False)
        self._widget.audio_state = 'play'
        self._timer = Clock.schedule_interval(self._playback_update, .1)
        return
    # switching to a new widget: stop video and any previous audio first
    plugins.video.video_player.pause_playback()
    if self._widget is not None:
        self.pause_playback()
    self._widget = widget
    self._widget.audio_state = 'play'
    self._player = MediaPlayer(
        filename=self._widget.audio_source,
        ff_opts={'paused': True, 'ss': self._widget.audio_pos})
    Clock.schedule_interval(self._start_playback, .1)
def play(self):
    """Start or resume playback.

    Handles three situations: a fully set-up player (just unpause), a
    "limbo" state where the worker thread is still constructing the player,
    and a cold start (load the file and spawn the frame thread).
    """
    # _state starts empty and is empty again after unloading
    if self._ffplayer:
        # player is already setup, just handle unpausing
        assert self._state in ('paused', 'playing')
        if self._state == 'paused':
            self._ffplayer.set_pause(False)
            self._state = 'playing'
            self._wakeup_thread()
        return

    # we're now either in limbo state waiting for thread to setup,
    # or no thread has been started
    if self._state == 'playing':
        # in limbo, just wait for thread to setup player
        return
    elif self._state == 'paused':
        # in limbo, still unpause for when player becomes ready
        self._state = 'playing'
        self._wakeup_thread()
        return

    # load first unloads
    self.load()
    self._out_fmt = 'rgba'
    # it starts internally paused, but unpauses itself
    ff_opts = {
        'paused': True,
        'out_fmt': self._out_fmt,
        'sn': True,
        'volume': self._volume,
    }
    ffplayer = MediaPlayer(self._filename,
                           callback=self._player_callback,
                           thread_lib='SDL', loglevel='info',
                           ff_opts=ff_opts)

    # Disabled as an attempt to fix kivy issue #6210
    # self._ffplayer.set_volume(self._volume)

    # the thread takes ownership of the player instance
    self._thread = Thread(target=self._next_frame_run, name='Next frame',
                          args=(ffplayer, ))  # todo: remove
    self._thread.daemon = True

    # start in playing mode, but _ffplayer isn't set until ready. We're
    # now in a limbo state
    self._state = 'playing'
    self._thread.start()
def play(screen, asciiToNum, videoPath):
    """Render ``videoPath`` as ASCII art in a curses ``screen``.

    Each frame is downscaled 8x to yuv420p via SWScale, mapped through
    ``asciiToNum`` and drawn; pressing 'q' quits, and the loop sleeps
    whatever remains of each frame interval after rendering.
    """
    player = MediaPlayer(videoPath)
    screen.nodelay(True)  # make getch() non-blocking
    while True:
        frame, val = player.get_frame()
        if val == 'eof':
            break
        if frame is None:
            time.sleep(0.01)
            continue
        render_start = time.time()
        if screen.getch() == ord('q'):
            break
        img, t = frame
        w, h = img.get_size()
        scaler = SWScale(w, h, img.get_pixel_format(), ofmt='yuv420p',
                         ow=w // 8, oh=h // 8)
        small = scaler.scale(img)
        pixels = np.uint8(
            np.array(list(small.to_bytearray()[0]))).reshape(h // 8, w // 8)
        text = arrayToString(transform(pixels, asciiToNum))
        render_end = time.time()
        screen.erase()
        screen.addstr(text)
        screen.addstr(str(t))
        screen.refresh()
        # sleep only the portion of the frame delay not spent rendering
        remaining = val - (render_end - render_start)
        time.sleep(remaining if remaining > 0 else 0)
    player.close_player()
def load(self):
    """Create a paused, audio-only player for ``self.source`` and wait (up
    to 10 s) for its metadata to load.

    Fix: ``time.clock()`` was removed in Python 3.8; ``time.perf_counter()``
    is the documented replacement (this matches the corrected variant of
    this method used elsewhere in the codebase).
    """
    self.unload()
    ff_opts = {'vn': True, 'sn': True}  # only audio
    self._ffplayer = MediaPlayer(self.source, callback=self._callback_ref,
                                 loglevel='info', ff_opts=ff_opts)
    player = self._ffplayer
    player.set_volume(self.volume)
    player.toggle_pause()
    self._state = 'paused'
    # wait until loaded or failed, shouldn't take long, but just to make
    # sure metadata is available.
    s = time.perf_counter()
    while ((not player.get_metadata()['duration']) and
            not self.quitted and time.perf_counter() - s < 10.):
        time.sleep(0.005)
def play(self):
    """Unpause if already loaded; otherwise load and spawn the frame thread."""
    if self._ffplayer and self._state == "paused":
        self._ffplayer.toggle_pause()
        self._state = "playing"
        return
    self.load()
    self._out_fmt = "rgba"
    player = MediaPlayer(self._filename, callback=self._callback_ref,
                         thread_lib="SDL", loglevel="info",
                         ff_opts={"paused": True, "out_fmt": self._out_fmt})
    self._ffplayer = player
    player.set_volume(self._volume)
    self._thread = Thread(target=self._next_frame_run, name="Next frame")
    self._thread.daemon = True
    self._thread.start()
def play(self):
    """Resume a paused player, or build a new one and start the decode
    thread."""
    if self._ffplayer and self._state == 'paused':
        self._ffplayer.toggle_pause()
        self._state = 'playing'
        return
    self.load()
    self._out_fmt = 'rgba'
    player = MediaPlayer(self._filename, callback=self._player_callback,
                         thread_lib='SDL', loglevel='info',
                         ff_opts={'paused': True, 'out_fmt': self._out_fmt})
    self._ffplayer = player
    player.set_volume(self._volume)
    worker = Thread(target=self._next_frame_run, name='Next frame')
    worker.daemon = True
    self._thread = worker
    worker.start()
class SoundFFPy(Sound):
    """Kivy Sound provider backed by ffpyplayer's MediaPlayer (audio only).

    Fix: ``time.clock()`` was removed in Python 3.8; the metadata wait loop
    in :meth:`load` now uses ``time.perf_counter()``.
    """

    @staticmethod
    def extensions():
        return formats_in

    def __init__(self, **kwargs):
        self._ffplayer = None
        self.quitted = False
        self._log_callback_set = False
        self._state = ''
        self.state = 'stop'
        self._callback_ref = WeakMethod(self._player_callback)

        # install the ffmpeg log bridge once, globally
        if not get_log_callback():
            set_log_callback(_log_callback)
            self._log_callback_set = True

        super(SoundFFPy, self).__init__(**kwargs)

    def __del__(self):
        self.unload()
        if self._log_callback_set:
            set_log_callback(None)

    def _player_callback(self, selector, value):
        # called from the player's internal thread; defer work to Clock
        if self._ffplayer is None:
            return
        if selector == 'quit':
            def close(*args):
                self.quitted = True
                self.unload()
            Clock.schedule_once(close, 0)
        elif selector == 'eof':
            Clock.schedule_once(self._do_eos, 0)

    def load(self):
        self.unload()
        ff_opts = {'vn': True, 'sn': True}  # only audio
        self._ffplayer = MediaPlayer(self.source,
                                     callback=self._callback_ref,
                                     loglevel='info', ff_opts=ff_opts)
        player = self._ffplayer
        player.set_volume(self.volume)
        player.toggle_pause()
        self._state = 'paused'
        # wait until loaded or failed, shouldn't take long, but just to
        # make sure metadata is available.
        s = time.perf_counter()
        while ((not player.get_metadata()['duration']) and
                not self.quitted and time.perf_counter() - s < 10.):
            time.sleep(0.005)

    def unload(self):
        if self._ffplayer:
            self._ffplayer = None
        self._state = ''
        self.state = 'stop'
        self.quitted = False

    def play(self):
        if self._state == 'playing':
            super(SoundFFPy, self).play()
            return
        if not self._ffplayer:
            self.load()
        self._ffplayer.toggle_pause()
        self._state = 'playing'
        self.state = 'play'
        super(SoundFFPy, self).play()

    def stop(self):
        if self._ffplayer and self._state == 'playing':
            self._ffplayer.toggle_pause()
            self._state = 'paused'
            self.state = 'stop'
        super(SoundFFPy, self).stop()

    def seek(self, position):
        if self._ffplayer is None:
            return
        self._ffplayer.seek(position, relative=False)

    def get_pos(self):
        if self._ffplayer is not None:
            return self._ffplayer.get_pts()
        return 0

    def on_volume(self, instance, volume):
        if self._ffplayer is not None:
            self._ffplayer.set_volume(volume)

    def _get_length(self):
        if self._ffplayer is None:
            return super(SoundFFPy, self)._get_length()
        return self._ffplayer.get_metadata()['duration']

    def _do_eos(self, *args):
        # loop back to the start if requested, otherwise stop
        if not self.loop:
            self.stop()
        else:
            self.seek(0.)
class VideoFFPy(VideoBase):
    """Kivy video provider backed by ffpyplayer's MediaPlayer.

    yuv420p sources are uploaded as three luminance textures and converted
    to RGB in a fragment shader; everything else is decoded to rgba.

    Fix: both ``time.clock()`` call sites in :meth:`_next_frame_run` were
    replaced with ``time.perf_counter()`` — ``time.clock`` was removed in
    Python 3.8.
    """

    YUV_RGB_FS = """
    $HEADER$
    uniform sampler2D tex_y;
    uniform sampler2D tex_u;
    uniform sampler2D tex_v;

    void main(void) {
        float y = texture2D(tex_y, tex_coord0).r;
        float u = texture2D(tex_u, tex_coord0).r - 0.5;
        float v = texture2D(tex_v, tex_coord0).r - 0.5;
        float r = y + 1.402 * v;
        float g = y - 0.344 * u - 0.714 * v;
        float b = y + 1.772 * u;
        gl_FragColor = vec4(r, g, b, 1.0);
    }
    """

    _trigger = None

    def __init__(self, **kwargs):
        self._ffplayer = None
        self._thread = None
        self._next_frame = None
        self._seek_queue = []
        self._ffplayer_need_quit = False
        self._trigger = Clock.create_trigger(self._redraw)
        super(VideoFFPy, self).__init__(**kwargs)

    def __del__(self):
        self.unload()

    def _player_callback(self, selector, value):
        # called from the player's internal thread; defer work to Clock
        if self._ffplayer is None:
            return
        if selector == 'quit':
            def close(*args):
                self.unload()
            Clock.schedule_once(close, 0)

    def _get_position(self):
        if self._ffplayer is not None:
            return self._ffplayer.get_pts()
        return 0

    def _set_position(self, pos):
        self.seek(pos)

    def _set_volume(self, volume):
        self._volume = volume
        if self._ffplayer:
            self._ffplayer.set_volume(self._volume)

    def _get_duration(self):
        if self._ffplayer is None:
            return 0
        return self._ffplayer.get_metadata()['duration']

    @mainthread
    def _do_eos(self):
        if self.eos == 'pause':
            self.pause()
        elif self.eos == 'stop':
            self.stop()
        elif self.eos == 'loop':
            self.position = 0
        self.dispatch('on_eos')

    @mainthread
    def _change_state(self, state):
        self._state = state

    def _redraw(self, *args):
        # runs on the main thread via the Clock trigger
        if not self._ffplayer:
            return
        next_frame = self._next_frame
        if not next_frame:
            return

        img, pts = next_frame
        if img.get_size() != self._size or self._texture is None:
            self._size = w, h = img.get_size()

            if self._out_fmt == 'yuv420p':
                # three luminance planes; chroma is quarter-resolution
                w2 = int(w / 2)
                h2 = int(h / 2)
                self._tex_y = Texture.create(
                    size=(w, h), colorfmt='luminance')
                self._tex_u = Texture.create(
                    size=(w2, h2), colorfmt='luminance')
                self._tex_v = Texture.create(
                    size=(w2, h2), colorfmt='luminance')
                self._fbo = fbo = Fbo(size=self._size)
                with fbo:
                    BindTexture(texture=self._tex_u, index=1)
                    BindTexture(texture=self._tex_v, index=2)
                    Rectangle(size=fbo.size, texture=self._tex_y)
                fbo.shader.fs = VideoFFPy.YUV_RGB_FS
                fbo['tex_y'] = 0
                fbo['tex_u'] = 1
                fbo['tex_v'] = 2
                self._texture = fbo.texture
            else:
                self._texture = Texture.create(size=self._size,
                                               colorfmt='rgba')

            # XXX FIXME
            # self.texture.add_reload_observer(self.reload_buffer)
            self._texture.flip_vertical()
            self.dispatch('on_load')

        if self._texture:
            if self._out_fmt == 'yuv420p':
                dy, du, dv, _ = img.to_memoryview()
                self._tex_y.blit_buffer(dy, colorfmt='luminance')
                self._tex_u.blit_buffer(du, colorfmt='luminance')
                self._tex_v.blit_buffer(dv, colorfmt='luminance')
                self._fbo.ask_update()
                self._fbo.draw()
            else:
                self._texture.blit_buffer(
                    img.to_memoryview()[0], colorfmt='rgba')

            self.dispatch('on_frame')

    def _next_frame_run(self):
        ffplayer = self._ffplayer
        sleep = time.sleep
        trigger = self._trigger
        did_dispatch_eof = False
        seek_queue = self._seek_queue

        # fast path, if the source video is yuv420p, we'll use a glsl
        # shader for buffer conversion to rgba
        while not self._ffplayer_need_quit:
            src_pix_fmt = ffplayer.get_metadata().get('src_pix_fmt')
            if not src_pix_fmt:
                sleep(0.005)
                continue

            if src_pix_fmt == 'yuv420p':
                self._out_fmt = 'yuv420p'
                ffplayer.set_output_pix_fmt(self._out_fmt)
            self._ffplayer.toggle_pause()
            break

        if self._ffplayer_need_quit:
            return

        # wait until loaded or failed, shouldn't take long, but just to
        # make sure metadata is available.
        s = time.perf_counter()
        while not self._ffplayer_need_quit:
            if ffplayer.get_metadata()['src_vid_size'] != (0, 0):
                break
            # XXX if will fail later then?
            if time.perf_counter() - s > 10.:
                break
            sleep(0.005)

        if self._ffplayer_need_quit:
            return

        # we got all the informations, now, get the frames :)
        self._change_state('playing')

        while not self._ffplayer_need_quit:
            if seek_queue:
                # collapse queued seeks into the most recent one
                vals = seek_queue[:]
                del seek_queue[:len(vals)]
                ffplayer.seek(
                    vals[-1] * ffplayer.get_metadata()['duration'],
                    relative=False)
                self._next_frame = None

            t1 = time.time()
            frame, val = ffplayer.get_frame()
            t2 = time.time()

            if val == 'eof':
                sleep(0.2)
                if not did_dispatch_eof:
                    self._do_eos()
                    did_dispatch_eof = True
            elif val == 'paused':
                did_dispatch_eof = False
                sleep(0.2)
            else:
                did_dispatch_eof = False
                if frame:
                    self._next_frame = frame
                    trigger()
                else:
                    val = val if val else (1 / 30.)
                sleep(val)

    def seek(self, percent):
        if self._ffplayer is None:
            return
        self._seek_queue.append(percent)

    def stop(self):
        self.unload()

    def pause(self):
        if self._ffplayer and self._state != 'paused':
            self._ffplayer.toggle_pause()
            self._state = 'paused'

    def play(self):
        if self._ffplayer and self._state == 'paused':
            self._ffplayer.toggle_pause()
            self._state = 'playing'
            return

        self.load()
        self._out_fmt = 'rgba'
        ff_opts = {
            'paused': True,
            'out_fmt': self._out_fmt
        }
        self._ffplayer = MediaPlayer(
            self._filename, callback=self._player_callback,
            thread_lib='SDL', loglevel='info', ff_opts=ff_opts)
        self._ffplayer.set_volume(self._volume)

        self._thread = Thread(target=self._next_frame_run,
                              name='Next frame')
        self._thread.daemon = True
        self._thread.start()

    def load(self):
        self.unload()

    def unload(self):
        if self._trigger is not None:
            self._trigger.cancel()
        self._ffplayer_need_quit = True
        if self._thread:
            self._thread.join()
            self._thread = None
        if self._ffplayer:
            self._ffplayer = None
        self._next_frame = None
        self._size = (0, 0)
        self._state = ''
        self._ffplayer_need_quit = False
class PlayerApp(App):
    """Kivy demo app that plays the file named on the command line with
    ffpyplayer, pulling frames on a worker thread and blitting them to a
    texture on the main thread via a Clock trigger."""

    def __init__(self, **kwargs):
        super(PlayerApp, self).__init__(**kwargs)
        self.texture = None
        self.size = (0, 0)
        self.next_frame = None
        self._done = False
        self._lock = RLock()
        # worker thread is created here but only started in on_start, after
        # self.ffplayer exists
        self._thread = Thread(target=self._next_frame, name='Next frame')
        self._trigger = Clock.create_trigger(self.redraw)
        self._force_refresh = False

    def build(self):
        self.root = Root()
        return self.root

    def on_start(self):
        """Create the player for sys.argv[1] and start the frame thread."""
        self.callback_ref = WeakMethod(self.callback)
        filename = sys.argv[1]
        logging.info('ffpyplayer: Playing file "{}"'.format(filename))
        # try ff_opts = {'vf':'edgedetect'} http://ffmpeg.org/ffmpeg-filters.html
        ff_opts = {}
        self.ffplayer = MediaPlayer(filename, callback=self.callback_ref,
                                    loglevel=log_level, ff_opts=ff_opts)
        self._thread.start()
        self.keyboard = Window.request_keyboard(None, self.root)
        self.keyboard.bind(on_key_down=self.on_keyboard_down)

    def resize(self):
        """Scale player output to fit the image widget, keeping aspect."""
        if self.ffplayer:
            w, h = self.ffplayer.get_metadata()['src_vid_size']
            if not h:
                return
            lock = self._lock
            lock.acquire()
            if self.root.image.width < self.root.image.height * w / float(h):
                self.ffplayer.set_size(-1, self.root.image.height)
            else:
                self.ffplayer.set_size(self.root.image.width, -1)
            lock.release()
            logging.debug('ffpyplayer: Resized video.')

    def update_pts(self, *args):
        # keep the seek slider tracking the play position
        if self.ffplayer:
            self.root.seek.value = self.ffplayer.get_pts()

    def on_keyboard_down(self, keyboard, keycode, text, modifiers):
        """Keyboard control: pause, refresh, stream cycling, seeking and
        volume. Ctrl closes a stream instead of cycling it."""
        if not self.ffplayer:
            return False
        lock = self._lock
        ctrl = 'ctrl' in modifiers
        if keycode[1] == 'p' or keycode[1] == 'spacebar':
            logging.info('Toggled pause.')
            self.ffplayer.toggle_pause()
        elif keycode[1] == 'r':
            logging.debug('ffpyplayer: Forcing a refresh.')
            self._force_refresh = True
        elif keycode[1] == 'v':
            logging.debug('ffpyplayer: Changing video stream.')
            lock.acquire()
            self.ffplayer.request_channel(
                'video', 'close' if ctrl else 'cycle')
            lock.release()
            Clock.unschedule(self.update_pts)
            if ctrl:
                # need to continue updating pts, since video is disabled.
                Clock.schedule_interval(self.update_pts, 0.05)
        elif keycode[1] == 'a':
            logging.debug('ffpyplayer: Changing audio stream.')
            lock.acquire()
            self.ffplayer.request_channel(
                'audio', 'close' if ctrl else 'cycle')
            lock.release()
        elif keycode[1] == 't':
            logging.debug('ffpyplayer: Changing subtitle stream.')
            lock.acquire()
            self.ffplayer.request_channel(
                'subtitle', 'close' if ctrl else 'cycle')
            lock.release()
        elif keycode[1] == 'right':
            logging.debug('ffpyplayer: Seeking forward by 10s.')
            self.ffplayer.seek(10.)
        elif keycode[1] == 'left':
            logging.debug('ffpyplayer: Seeking back by 10s.')
            self.ffplayer.seek(-10.)
        elif keycode[1] == 'up':
            logging.debug('ffpyplayer: Increasing volume.')
            self.ffplayer.set_volume(self.ffplayer.get_volume() + 0.01)
            self.root.volume.value = self.ffplayer.get_volume()
        elif keycode[1] == 'down':
            logging.debug('ffpyplayer: Decreasing volume.')
            self.ffplayer.set_volume(self.ffplayer.get_volume() - 0.01)
            self.root.volume.value = self.ffplayer.get_volume()
        return True

    def touch_down(self, touch):
        """Clicking the seek bar jumps to the corresponding position."""
        if self.root.seek.collide_point(*touch.pos) and self.ffplayer:
            pts = ((touch.pos[0] - self.root.volume.width) /
                   self.root.seek.width *
                   self.ffplayer.get_metadata()['duration'])
            logging.debug('ffpyplayer: Seeking to {}.'.format(pts))
            self.ffplayer.seek(pts, relative=False)
            self._force_refresh = True
            return True
        return False

    def callback(self, selector, value):
        if self.ffplayer is None:
            return
        if selector == 'quit':
            logging.debug('ffpyplayer: Quitting.')

            def close(*args):
                self._done = True
                self.ffplayer = None
            Clock.schedule_once(close, 0)
        # called from internal thread, it typically reads forward
        elif selector == 'display_sub':
            self.display_subtitle(*value)

    def _next_frame(self):
        """Worker thread: pull frames, stash them and fire the redraw
        trigger; sleeps per the delay the player reports."""
        ffplayer = self.ffplayer
        sleep = time.sleep
        trigger = self._trigger
        while not self._done:
            force = self._force_refresh
            if force:
                self._force_refresh = False
            frame, val = ffplayer.get_frame(force_refresh=force)

            if val == 'eof':
                logging.debug('ffpyplayer: Got eof.')
                sleep(1 / 30.)
            elif val == 'paused':
                logging.debug('ffpyplayer: Got paused.')
                sleep(1 / 30.)
            else:
                if frame:
                    logging.debug('ffpyplayer: Next frame: {}.'.format(val))
                    sleep(val)
                    self.next_frame = frame
                    trigger()
                else:
                    val = val if val else (1 / 30.)
                    logging.debug('ffpyplayer: Schedule next frame check: {}.'
                                  .format(val))
                    sleep(val)

    def redraw(self, dt=0, force_refresh=False):
        """Main-thread blit of the most recent frame; re-creates the
        texture when the frame size changes."""
        if not self.ffplayer:
            return
        if self.next_frame:
            img, pts = self.next_frame
            if img.get_size() != self.size or self.texture is None:
                self.root.image.canvas.remove_group(str(self) + '_display')
                self.texture = Texture.create(size=img.get_size(),
                                              colorfmt='rgb')
                # by adding 'vf':'vflip' to the player initialization ffmpeg
                # will do the flipping
                self.texture.flip_vertical()
                self.texture.add_reload_observer(self.reload_buffer)
                self.size = img.get_size()
                logging.debug('ffpyplayer: Creating new image texture of '
                              'size: {}.'.format(self.size))
            self.texture.blit_buffer(img.to_memoryview()[0])
            self.root.image.texture = None
            self.root.image.texture = self.texture
            self.root.seek.value = pts
            logging.debug('ffpyplayer: Blitted new frame with time: {}.'
                          .format(pts))

        if self.root.seek.value:
            self.root.seek.max = self.ffplayer.get_metadata()['duration']

    def display_subtitle(self, text, fmt, pts, t_start, t_end):
        # fmt is text (unformatted), or ass (formatted subs)
        pass

    def reload_buffer(self, *args):
        """Re-blit the last frame after a GL context reload."""
        logging.debug('ffpyplayer: Reloading buffer.')
        frame = self.next_frame
        if not frame:
            return
        self.texture.blit_buffer(frame[0].to_memoryview()[0],
                                 colorfmt='rgb', bufferfmt='ubyte')
def load(self):
    """Create a paused, audio-only player for ``self.source`` and wait (up
    to 10 s) for its duration metadata to become available."""
    self.unload()
    self._ffplayer = MediaPlayer(
        self.source, callback=self._player_callback, loglevel='info',
        ff_opts={'vn': True, 'sn': True})  # only audio
    player = self._ffplayer
    player.set_volume(self.volume)
    player.toggle_pause()
    self._state = 'paused'
    # wait until loaded or failed, shouldn't take long, but just to make
    # sure metadata is available.
    deadline_start = time.perf_counter()
    while (player.get_metadata()['duration'] is None and
            not self.quitted and
            time.perf_counter() - deadline_start < 10.):
        time.sleep(0.005)