def test_play(self):
    """Play a video-only stream to completion, failing on any player error."""
    from .common import get_media
    from ffpyplayer.player import MediaPlayer
    import time

    err_holder = [None]

    def callback(selector, value):
        # Remember the first error the player reports.
        if selector.endswith('error'):
            err_holder[0] = selector, value

    # Disable audio and drive timing off the video clock.
    opts = {'an': True, 'sync': 'video'}
    player = MediaPlayer(
        get_media('dw11222.mp4'), callback=callback, ff_opts=opts)

    while err_holder[0] is None:
        frame, val = player.get_frame()
        if val == 'eof':
            break
        if frame is None:
            time.sleep(0.01)
        else:
            img, t = frame

    if err_holder[0]:
        raise Exception('{}: {}'.format(*err_holder[0]))
def playVideoFromFile(self, fileName='data/rick', fullscreen=False):
    """Play a video file, showing frames with OpenCV while ffpyplayer
    plays the audio track.

    Press 'q' to stop early.

    :param fileName: path of the video file to play.
    :param fullscreen: show the output window full screen when True.
    """
    # load video file
    video = cv2.VideoCapture(fileName)
    # Delay between frames derived from the frame rate; the +1 also
    # guards against division by zero when the rate is unavailable.
    fps = video.get(cv2.CAP_PROP_FPS)
    delay = int(1000 / (fps + 1))
    # load audio from the video file
    audio = MediaPlayer(fileName)
    try:
        while video.isOpened():
            ret, frame = video.read()
            # Advance the audio stream alongside the video.
            audio.get_frame()
            # If still have frame to retrieve
            if ret:
                if fullscreen:
                    cv2.namedWindow('output', cv2.WND_PROP_FULLSCREEN)
                    cv2.setWindowProperty('output', cv2.WND_PROP_FULLSCREEN,
                                          cv2.WINDOW_FULLSCREEN)
                cv2.imshow('output', frame)
                if cv2.waitKey(delay) == ord('q'):
                    break
            else:
                break
    finally:
        # Fix: the original leaked the audio player, and skipped release/
        # destroy entirely if an exception escaped the loop.
        audio.close_player()
        video.release()
        cv2.destroyAllWindows()
def test_play(self):
    """Decode a video stream with audio disabled, asserting no decode errors."""
    from .common import get_media
    from ffpyplayer.player import MediaPlayer
    import time

    failure = [None]

    def callback(selector, value):
        if selector.endswith('error'):
            failure[0] = selector, value

    # Video only: drop the audio stream, sync on the video clock.
    player = MediaPlayer(
        get_media('dw11222.mp4'),
        callback=callback,
        ff_opts={'an': True, 'sync': 'video'},
    )

    while failure[0] is None:
        frame, val = player.get_frame()
        if val == 'eof':
            break
        elif frame is None:
            time.sleep(0.01)
        else:
            img, t = frame

    if failure[0]:
        raise Exception('{}: {}'.format(*failure[0]))
def PlayVideo(video_path):
    """Play *video_path* with audio in a resizable OpenCV window (Q quits)."""
    title = ('Movie Player with Audio | Python 3.0, OpenCV and FFPyPlayer. '
             '(Press Q to Quit)')
    # Define the stuff that will happen.
    video = cv2.VideoCapture(video_path)
    player = MediaPlayer(video_path)
    # Fix: create and size the window ONCE. The original re-ran
    # namedWindow/resizeWindow on every single frame, which also reset any
    # resizing the user did while the video played.
    cv2.namedWindow(title, flags=cv2.WINDOW_GUI_NORMAL)  # Define the GUI mode.
    cv2.resizeWindow(title, 768, 432)  # Define the Window Resolution.
    # Loop the content while the video plays.
    while True:
        grabbed, frame = video.read()
        audio_frame, val = player.get_frame()
        if not grabbed:
            # Do something after the video ends.
            print()
            break
        if cv2.waitKey(28) & 0xFF == ord("q"):
            break
        cv2.imshow(title, frame)  # Show the frame.
        if val != 'eof' and audio_frame is not None:
            # Attempt to sync the movie with the audio.
            img, t = audio_frame
    # Finish the task if the video ends.
    video.release()
    cv2.destroyAllWindows()
def save_video_thumbnail(source, output):
    """Saves thumbnail of the given video under the given name.

    Seeks ~1 s into *source*, grabs one frame, scales it to 256 px wide
    (aspect ratio preserved) and writes it to *output* as an
    LZW-compressed TIFF.  Returns None when no usable frame is available.
    """
    player = MediaPlayer(source, ff_opts={'ss': 1.0})
    frame, val = None, None
    while not frame:
        frame, val = player.get_frame(force_refresh=True)
        # Fix: the original spun forever when the stream hit EOF before a
        # frame was decoded (get_frame keeps returning (None, 'eof')).
        if val == 'eof':
            break
    player.close_player()
    if val == 'eof':
        return None
    elif frame is None:
        return None
    else:
        img = frame[0]
        pixel_format = img.get_pixel_format()
        img_size = img.get_size()
        # 256 px wide; height scaled to keep the aspect ratio.
        thumb_size = 256, int(img_size[1] * 256 / img_size[0])
        codec = 'tiff'
        output_format = get_supported_pixfmts(codec, pixel_format)[0]
        # resize and convert into the best pixel format
        sws = SWScale(img_size[0], img_size[1], pixel_format,
                      thumb_size[0], thumb_size[1], output_format)
        thumbnail = sws.scale(img)
        streams = [{
            'pix_fmt_in': output_format,
            'width_in': thumb_size[0],
            'height_in': thumb_size[1],
            'codec': codec,
            'frame_rate': (30, 1)
        }]
        writer = MediaWriter(output, streams,
                             lib_opts={'compression_algo': 'lzw'})
        writer.write_frame(img=thumbnail, pts=0, stream=0)
        writer.close()
def PlayVideo(video_path):
    """Play *video_path* with audio while re-encoding a 480x320 copy.

    Shows each frame, writes a resized copy to ``temp_output.mp4`` and
    tracks progress with tqdm.  Press 'q' to stop early.
    """
    cap = cv2.VideoCapture(video_path)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    print("Frame Count:", frame_count)
    print('Width: ', cap.get(3))
    print("Height: ", cap.get(4))
    FPS = cap.get(cv2.CAP_PROP_FPS)
    print('FPS:', FPS)
    player = MediaPlayer(video_path)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter('temp_output.mp4', fourcc, FPS, (480, 320))
    pbar = tqdm(total=frame_count)
    while cap.isOpened():
        # One frame processed per iteration (the original passed a
        # never-incremented `i = 1`, which is the same as update(1)).
        pbar.update(1)
        ret, frame = cap.read()
        audio_frame, val = player.get_frame()
        if not ret:
            print("End of video")
            break
        if cv2.waitKey(28) & 0xFF == ord("q"):
            break
        frame = cv2.resize(frame, dsize=(480, 320))
        out.write(frame)
        cv2.imshow("Video", frame)
        if val != 'eof' and audio_frame is not None:
            # audio
            img, t = audio_frame
    # Fix: close the progress bar and the audio player; the original
    # leaked both.
    pbar.close()
    player.close_player()
    cap.release()
    out.release()
    cv2.destroyAllWindows()
def outro(self):
    """Congratulate the bingo winner by name, then play the outro video.

    Speaks a prompt, insists on a non-empty name, and finally plays
    'udidit2-lofi.mp4' frame-by-frame with OpenCV ('q' skips it).
    """
    toread = "Congratulations, bingo winner! What's your name?"
    comeon = "Very wise. Never reveal personal information to a stranger, no matter how beautiful. But can you just make up a name?"
    self.speak.Speak(toread)
    while True:
        userin = input('Enter name: ')
        if userin != "":
            bye = "Well done, " + userin + ". Please be nice to your newly-established inferiors. I've been DEBRA, the Definitely Effective Bingo Reciting Application. Bye bye!"
            self.speak.Speak(bye)
            break
        else:
            self.speak.Speak(comeon)
    player = MediaPlayer('udidit2-lofi.mp4')
    val = ''
    while val != 'eof':
        frame, val = player.get_frame()
        if val != 'eof' and frame is not None:
            img, t = frame
            # assumes the clip decodes as 960x540 RGB — TODO confirm
            arr = np.uint8(
                np.asarray(list(img.to_bytearray()[0])).reshape(540, 960, 3))
            cv2.imshow('u did it', arr)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
    # Fix: always tear the window down. The original destroyed it only in
    # the 'q' branch, leaving it open when the video ended naturally.
    cv2.destroyAllWindows()
def video_loop(self):
    """Play self.videoPath with audio, mirroring frames into a window."""
    self.updating_aguments()
    self.getVideoSource()
    player = MediaPlayer(self.videoPath)
    while True:
        ret, frame = self.cap.read()
        audio_frame, val = player.get_frame()
        if ret == 0:
            print("End of video")
            break
        # Scale to the configured display size before showing.
        shown = cv2.resize(frame, (self.width, self.height))
        cv2.imshow('Camera', shown)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if val != 'eof' and audio_frame is not None:
            frame, t = audio_frame
            print("Frame:" + str(frame) + " T: " + str(t))
    self.cap.release()
    cv2.destroyAllWindows()
def main():
    """Prompt for a stream URL and play it with synchronized audio."""
    url = input('\nEnter your URL: \n ==> ')
    sourcePath = url
    camera = getVideoSource(sourcePath, 720, 480)
    player = MediaPlayer(sourcePath)
    while True:
        ret, frame = camera.read()
        audio_frame, val = player.get_frame()
        if ret == 0:
            print("End of video")
            break
        cv2.imshow('Camera', cv2.resize(frame, (720, 480)))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if val != 'eof' and audio_frame is not None:
            frame, t = audio_frame
            print("Frame:" + str(frame) + " T: " + str(t))
    camera.release()
    cv2.destroyAllWindows()
def playVideo(video_path):
    """Play *video_path* full screen with audio; press 'q' to quit."""
    winname = "Video"
    cap = cv2.VideoCapture(video_path)
    cv2.namedWindow(winname, cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty(winname, cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)
    if not cap.isOpened():
        print("Error opening video stream or file")
    else:
        player = MediaPlayer(video_path)
        while cap.isOpened():
            grabbed, frame = cap.read()
            audio_frame, val = player.get_frame()
            if not grabbed:
                print("End of video")
                break
            if cv2.waitKey(28) & 0xFF == ord("q"):
                break
            # resized_frame = cv2.resize(frame, (2880, 1800))
            cv2.imshow(winname, frame)
            if val != 'eof' and audio_frame is not None:
                # audio
                img, t = audio_frame
    cap.release()
    cv2.destroyAllWindows()
def PlayVideo(video_path):
    """Play *video_path* with audio, OCR-ing every 15th frame in a thread.

    Every 15th frame is handed to ``extract_text_frame`` on a background
    thread; that same thread is joined later in the iteration so at most
    one OCR worker runs at a time.
    """
    video = cv2.VideoCapture(video_path)
    player = MediaPlayer(video_path)
    count = 0
    while True:
        count = count + 1
        grabbed, frame = video.read()
        audio_frame, val = player.get_frame()
        if count % 15 == 0:
            # Start OCR on this frame in the background.
            # NOTE(review): on the read that fails (grabbed False) this can
            # start a worker with frame=None before the break below — verify
            # extract_text_frame tolerates None.
            th = threading.Thread(target=extract_text_frame, args=(frame, ))
            th.start()
        if not grabbed:
            print("End of video")
            break
        if cv2.waitKey(45) & 0xFF == ord("q"):
            break
        cv2.imshow("Video", frame)
        if val != 'eof' and audio_frame is not None:
            # audio
            img, t = audio_frame
        if count % 15 == 0:
            # Wait for the OCR worker started earlier this iteration, so
            # threads never pile up.
            th.join()
    video.release()
    cv2.destroyAllWindows()
def main():
    """Play a hard-coded local video at 720x480 with synchronized audio."""
    sourcePath = "/home/dream/Videos/4K Video Downloader/Around the World in 80 Clips.mp4"
    camera = getVideoSource(sourcePath, 720, 480)
    player = MediaPlayer(sourcePath)
    while True:
        ret, frame = camera.read()
        audio_frame, val = player.get_frame()
        if ret == 0:
            print("End of video")
            break
        display = cv2.resize(frame, (720, 480))
        cv2.imshow('Camera', display)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if val != 'eof' and audio_frame is not None:
            frame, t = audio_frame
            print("Frame:" + str(frame) + " T: " + str(t))
    camera.release()
    cv2.destroyAllWindows()
class MyApp(QWidget):
    """Minimal Qt video widget: polls ffpyplayer on a timer and blits frames."""

    def __init__(self, name, parent=None):
        super(MyApp, self).__init__(parent)
        self.label = QLabel()
        self.qimg = QImage()
        self.val = ''
        self.player = MediaPlayer(name)
        # Poll for a freshly decoded frame every 50 ms.
        self.timer = QTimer()
        self.timer.setInterval(50)
        self.timer.start()
        self.timer.timeout.connect(self.showFrame)
        vbox = QVBoxLayout()
        vbox.addWidget(self.label)
        self.setLayout(vbox)
        self.setWindowTitle(name)
        # self.showFullScreen()

    def showFrame(self):
        """Fetch the next decoded frame, if any, and display it on the label."""
        frame, self.val = self.player.get_frame()
        if frame is None:
            return
        img, t = frame
        w, h = img.get_size()
        self.qimg = QImage(bytes(img.to_bytearray()[0]), w, h,
                           QImage.Format_RGB888)
        self.label.setPixmap(QPixmap.fromImage(self.qimg))
def play_av(self, video_path, pause=0, audio_enable=0, video_delay=3, audio_length=0):
    """Play *video_path*, optionally with audio, repeating the clip.

    The clip is replayed ``int(audio_length / 15) + 1`` times — presumably
    so a ~15 s clip loops until the audio runs out; TODO confirm.

    :param video_path: file to play.
    :param pause: non-zero shows only the first frame (no playback loop).
    :param audio_enable: 1 enables the ffpyplayer audio stream.
    :param video_delay: waitKey delay (ms) between frames.
    :param audio_length: audio duration used to size the repeat count.
    """
    cnt = 0
    cnt_th = int(audio_length / 15) + 1
    while cnt < cnt_th:
        cnt += 1
        #video_path = './animation/csy02.mov'
        video = cv2.VideoCapture(video_path)
        if(audio_enable == 1):
            player = MediaPlayer(video_path)
        # Show the first frame immediately so the window appears even in
        # pause mode.
        grabbed, play_frame = video.read()
        winname = "Video"
        cv2.namedWindow(winname)
        cv2.moveWindow(winname, 0, 10)
        cv2.imshow(winname, play_frame)
        if pause == 0:
            while True:
                grabbed, play_frame = video.read()
                if (audio_enable == 1):
                    audio_frame, val = player.get_frame()
                if not grabbed:
                    print("End of video")
                    break
                if cv2.waitKey(video_delay) & 0xFF == ord("q"):
                    break
                cv2.imshow("Video", play_frame)
                # `val`/`audio_frame` exist only when audio_enable == 1;
                # both uses are guarded by the same flag.
                if (audio_enable == 1):
                    if val != 'eof' and audio_frame is not None:
                        # audio
                        img, t = audio_frame
        # Released every repetition; the capture is re-opened at the top
        # of the outer loop.
        video.release()
def music(name):
    """Play the audio file *name* to completion (blocking).

    :param name: path of the audio file to play.
    """
    musicfile = name
    player = MediaPlayer(musicfile)
    val = ''
    while val != 'eof':
        frame, val = player.get_frame()
        if val != 'eof' and frame is not None:
            img, t = frame
    # Fix: release decoder/output resources once playback finishes; the
    # original left the player open.
    player.close_player()
def make_song(self):
    """Record beat timestamps for a song while showing the webcam feed.

    Plays ``<self.path>/song/doubt.mp3`` through ffpyplayer, mirrors the
    webcam full screen, and appends the elapsed time to ``time_list``
    each time the user presses 'a' (after a 10 s countdown).  Esc ends
    the session; the collected times are saved via ``self.save_data``.
    """
    video_path = self.path + '/song/doubt.mp3'
    player = MediaPlayer(video_path)
    window_name = 'Just Dance'
    start = time.time()
    capture = cv2.VideoCapture(0)
    capture.set(cv2.CAP_PROP_FPS, 60)
    width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    # NOTE(review): 197 is presumably the song length in seconds, stored
    # as a sentinel first entry — confirm against save_data's consumer.
    time_list = [197]
    while capture.isOpened():
        # Keep the audio stream advancing in step with the display loop.
        audio_frame, val = player.get_frame()
        if val != 'eof' and audio_frame is not None:
            # audio
            img, t = audio_frame
        k = cv2.waitKey(33)
        if k == 27:
            # Esc quits.
            break
        ret, frame = capture.read()
        # Mirror the camera so movement matches the player's view.
        frame = cv2.flip(frame, 1)
        frame = cv2.resize(frame, None, fx=1, fy=1, interpolation=cv2.INTER_AREA)
        current = time.time() - start
        if current > 10:
            # Recording phase: 'a' marks a beat at the elapsed time.
            if k == ord('a'):
                time_list.append(current)
            cv2.putText(frame, str(current), (width // 2, 605), cv2.FONT_ITALIC, 1, (0, 0, 0), 2, cv2.LINE_AA)
        else:
            # Countdown phase: flash the remaining seconds in random colors.
            random_color = random.choice(list(self.color_palates.keys()))
            cv2.putText(frame, str(10 - int(current)), (540, height // 2 + 50), cv2.FONT_ITALIC, 10, self.color_palates[random_color], 10, cv2.LINE_AA)
        cv2.imshow(window_name, frame)
    capture.release()
    cv2.destroyAllWindows()
    self.save_data(time_list, self.filename)
def video_famoso(nome):
    """Play ``video_famosos/<nome>.mp4`` with audio until it ends or 'q'.

    :param nome: base name of the clip inside the video_famosos folder.
    """
    cap = cv2.VideoCapture("video_famosos/" + nome + ".mp4")
    player = MediaPlayer("video_famosos/" + nome + ".mp4")
    ret, frame = cap.read()
    frameAudio, val = player.get_frame()
    while (1):
        ret, frame = cap.read()
        # Fix: check ret BEFORE imshow. The original displayed first and
        # checked afterwards, so cv2.imshow crashed on the None frame
        # returned at end of stream (and a duplicate imshow followed).
        if ret == False:
            cap.release()
            cv2.destroyAllWindows()
            break
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cap.release()
            cv2.destroyAllWindows()
            break
def PlayVideo(videoPath):
    """Show *videoPath* in a window while ffpyplayer plays the audio."""
    video = cv2.VideoCapture(videoPath)
    player = MediaPlayer(videoPath)
    while True:
        grabbed, frame = video.read()
        audio_frame, val = player.get_frame()
        if not grabbed:
            break
        if cv2.waitKey(28) & 0xFF == ord("q"):
            break
        cv2.imshow("Video", frame)
        if val != 'eof' and audio_frame is not None:
            img, t = audio_frame  # Audio
    video.release()
    cv2.destroyAllWindows()
def PlayVideo():
    """Locate a video file by name and play it with audio.

    Asks the user for a file name and a directory to search, finds the
    file, switches the working directory to its parent, then plays it
    ('q' quits).
    """
    video_name = input(
        "Please enter the name of the video file that you want to play: "
    )  ## User input for the name of the image file.
    video_directory_guess = input(
        "Please enter the directory that may contain the video: "
    )  ## User input for the path of the image file.

    ## This function looks for and finds the desired file. You can specify a
    ## parent directory to search; if you have no idea where a file is, just
    ## type "/" and it will find it for you, only slower.
    # Fix: the original body referenced `file_name`/`directory_name` and
    # contained a bare `return`, but no `def` line — the nested helper's
    # header was evidently lost.  Reconstructed from its call sites below.
    def find_the_video(file_name, directory_name):
        files_found = []
        for path, subdirs, files in os.walk(directory_name):
            for name in files:
                if file_name == name:
                    file_path = os.path.join(path, name)
                    files_found.append(file_path)
        print(files_found)
        return files_found[0]  ## Return the path.

    video_directory = Path(find_the_video(
        video_name, video_directory_guess))  ## Initialize the path of the video file.
    new_working_directory = video_directory.parent  ## Parent directory of the video path.
    os.chdir(
        new_working_directory
    )  ## Change the working directory to the parent directory of the video path.
    video_path = find_the_video(video_name, video_directory_guess)
    video = cv2.VideoCapture(video_path)
    player = MediaPlayer(video_path)
    while True:
        grabbed, frame = video.read()
        audio_frame, val = player.get_frame()
        if not grabbed:
            print("End of video")
            break
        if cv2.waitKey(28) & 0xFF == ord("q"):
            break
        cv2.imshow("Psi Video Player", frame)
        if val != 'eof' and audio_frame is not None:
            # audio
            img, t = audio_frame
    video.release()
    cv2.destroyAllWindows()
def PlayVideo(video_path):
    """Play *video_path* with audio in a "Video" window; 'q' quits."""
    video = cv2.VideoCapture(video_path)
    player = MediaPlayer(video_path)
    while True:
        grabbed, frame = video.read()
        audio_frame, val = player.get_frame()
        if not grabbed:
            print("End of video")
            break
        if cv2.waitKey(28) & 0xFF == ord("q"):
            break
        cv2.imshow("Video", frame)
        if val != 'eof' and audio_frame is not None:
            # audio
            img, t = audio_frame
    video.release()
    # Fix: the original released the capture but never destroyed the
    # window, leaving it open after playback.
    cv2.destroyAllWindows()
def running_video():
    """Play 'live_video.ts' with audio until it ends or 'q' is pressed."""
    cap = cv2.VideoCapture('live_video.ts')
    player = MediaPlayer('live_video.ts')
    if (cap.isOpened() == False):
        print("Error opening video file")
    while (cap.isOpened()):
        ret, frame = cap.read()
        # Advance the audio stream; get_frame returns (frame, val).
        audio_frame = player.get_frame()
        if ret == True:
            cv2.imshow('Frame', frame)
            if cv2.waitKey(40) & 0xFF == ord('q'):
                break
        else:
            # Fix: the original had `pass` here, so a finished stream
            # (isOpened still True, read failing) spun forever. Stop.
            break
    cap.release()
    cv2.destroyAllWindows()
def main():
    """Parse -videoPath/-width/-height=value CLI args, then play the video."""
    known = ["-videoPath", "-width", "-height"]
    videoPath = "/home/dream/Videos/4K Video Downloader/Around the World in 80 Clips.mp4"
    width = 720
    height = 480
    print("Len of arguments are: " + str(len(sys.argv)))
    print("Arguments are: " + str(sys.argv))
    # Accept arguments of the form -name=value; unknown or malformed
    # arguments are silently ignored, as before.
    for raw in sys.argv[1:]:
        pieces = raw.split("=")
        if len(pieces) != 2:
            continue
        key, value = pieces
        if key == known[0]:
            videoPath = value
        elif key == known[1]:
            width = int(value)
        elif key == known[2]:
            height = int(value)
    camera = getVideoSource(videoPath, width, height)
    player = MediaPlayer(videoPath)
    while True:
        ret, frame = camera.read()
        audio_frame, val = player.get_frame()
        if ret == 0:
            print("End of video")
            break
        cv2.imshow('Camera', cv2.resize(frame, (width, height)))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if val != 'eof' and audio_frame is not None:
            frame, t = audio_frame
            print("Frame:" + str(frame) + " T: " + str(t))
    camera.release()
    cv2.destroyAllWindows()
def PlayVideo(self):
    """Play ``self.video_path`` with audio, overlaying live word counts.

    Each frame is fed to ``self.extract_text_frame`` (which presumably
    writes OCR output to temp1.txt/temp2.txt — TODO confirm); the merged
    text is run through ``self.processText`` and the resulting word
    counts are drawn onto the frame before display.
    """
    video = cv2.VideoCapture(self.video_path)
    player = MediaPlayer(self.video_path)
    while True:
        grabbed, frame = video.read()
        audio_frame, val = player.get_frame()
        self.extract_text_frame(frame)
        with open('temp1.txt') as fp1:
            data = fp1.read()
        try:
            # Reading data from file2
            with open('temp2.txt') as fp2:
                data2 = fp2.read()
        except Exception as ex:
            #print(ex)
            # temp2.txt not ready yet: skip this frame entirely (note this
            # also skips the grabbed/eof checks below until it appears).
            continue
        # Merging 2 files
        # To add the data of file2
        # from next line
        data += "\n"
        data += data2
        word_count, word_count1 = self.processText(data)
        # Draw each "word count" pair on its own 30-px line.
        for indx, (key, value) in enumerate(word_count):
            content = key + ' ' + str(value)
            indx = indx + 1
            frame = cv2.putText(frame, content, (50, indx * 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
        if not grabbed:
            print("End of video")
            break
        if cv2.waitKey(28) & 0xFF == ord("q"):
            break
        cv2.imshow("Video", frame)
        if val != 'eof' and audio_frame is not None:
            #audio
            img, t = audio_frame
            #print(img, t)
    #fileText.close()
    # NOTE(review): fp1/fp2 are already closed by their `with` blocks;
    # these calls are redundant (and fp2 may be unbound if the except path
    # always ran) — verify before removing.
    fp1.close()
    fp2.close()
    video.release()
    cv2.destroyAllWindows()
class MainWindow(QMainWindow):
    """Qt window that plays a video with ffpyplayer, painting frames on a label.

    The whole playback loop runs inside ``timerEvent``; the UI stays
    responsive only because ``QApplication.processEvents()`` is pumped
    each iteration.
    """
    pass

    def __init__(self):
        super().__init__()
        # Created lazily in timerEvent; closeEvent checks for None.
        self.player = None
        self.setWindowTitle("FFPyPlayer Test")

    def showEvent(self, e):
        """Build the central label and arm a one-shot timer to start playback."""
        self.timer_id = self.startTimer(1)
        self.lbl = QLabel(self)
        self.lbl.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
        self.setCentralWidget(self.lbl)

    def timerEvent(self, event) -> None:
        """Run the blocking decode/display loop (timer fires exactly once)."""
        # One-shot: kill the timer so this handler never re-enters.
        self.killTimer(self.timer_id)
        ff_opts = {'paused': False, 'autoexit': True}
        self.player = MediaPlayer('../example_data/sample.mp4', ff_opts=ff_opts, lib_opts={})
        # self.player = MediaPlayer('http://localhost:1441/sample_stream.mp4', ff_opts=ff_opts, lib_opts={})
        self.running = True
        while self.running:
            time.sleep(0.01)
            frame, val = self.player.get_frame()
            if val == 'eof':
                break
            if frame is None:
                time.sleep(0.01)
            else:
                img, t = frame
                data = img.to_bytearray()[0]
                width, height = img.get_size()
                # the technical name for the 'rgb24' default pixel format is RGB888,
                # which is QImage.Format_RGB888 in the QImage format enum
                qimage = QImage(data, width, height, QImage.Format_RGB888)
                pixmap = QPixmap.fromImage(qimage)
                pixmap = pixmap.scaled(self.lbl.width(), self.lbl.height(), Qt.KeepAspectRatio, Qt.SmoothTransformation)
                self.lbl.setPixmap(pixmap)
                # NOTE(review): here `val` is the player's suggested delay
                # (a float) — but get_frame can also return 'paused', which
                # would make this sleep raise; confirm the ff_opts rule that
                # out.
                time.sleep(val)
            # Keep the Qt event loop alive while we block in this loop.
            QApplication.processEvents()

    def closeEvent(self, event) -> None:
        """Stop the playback loop and release the player on window close."""
        self.running = False
        if self.player is not None:
            self.player.set_pause(True)
            self.player.close_player()
def verify_frames(filename, timestamps, frame_vals=None):
    """Decode *filename* and assert its frames match expected timestamps.

    The first decoded frame is skipped; every subsequent frame's timestamp
    must be close (rel_tol=.1) to the corresponding entry of *timestamps*,
    and, when *frame_vals* is given, the first byte of the frame data must
    equal the corresponding expected value.  Raises on any player error.
    """
    from ffpyplayer.player import MediaPlayer
    error = [ None, ]
    def callback(selector, value):
        # Capture the first error the player reports.
        if selector.endswith('error'):
            error[0] = selector, value
    player = MediaPlayer(filename, callback=callback)
    read_timestamps = set()
    try:
        # i == -1 marks "first frame not yet skipped".
        i = -1
        while not error[0]:
            frame, val = player.get_frame()
            if val == 'eof':
                break
            if val == 'paused':
                raise ValueError('Got paused')
            elif frame is None:
                time.sleep(0.01)
            else:
                img, t = frame
                print(i, t)
                if i < 0:
                    # Skip the very first frame; timestamps[0] corresponds
                    # to the second decoded frame.
                    i += 1
                    continue
                print(i, t, timestamps[i])
                # Track distinct timestamps so duplicates are detected by
                # the final set-size assertion.
                read_timestamps.add(t)
                assert math.isclose(t, timestamps[i], rel_tol=.1)
                if frame_vals:
                    assert frame_vals[i] == img.to_bytearray()[0][0]
                i += 1
    finally:
        player.close_player()
    if error[0] is not None:
        raise Exception('{}: {}'.format(*error[0]))
    # One frame was skipped, so i should equal len(timestamps) - 1, and
    # every counted timestamp must have been unique.
    assert len(timestamps) - 1 == i
    assert len(read_timestamps) == i
def PlayVideo(video_path, name, x, y):
    """Play *video_path* with audio in a window titled *name* at (x, y)."""
    video = cv2.VideoCapture(video_path)
    player = MediaPlayer(video_path)
    while True:
        grabbed, frame = video.read()
        audio_frame, status = player.get_frame()
        if not grabbed:
            print("End")
            break
        quit_pressed = cv2.waitKey(28) & 0xFF == ord("q")
        if quit_pressed:
            break
        # Reposition each frame; imshow creates the window on first use.
        cv2.moveWindow(name, x, y)
        cv2.imshow(name, frame)
        has_audio = status != 'eof' and audio_frame is not None
        if has_audio:
            img, t = audio_frame
    video.release()
    cv2.destroyAllWindows()
def PlayVideo(video_path, H, W):
    """Play *video_path* resized to W x H with audio; 'q' quits.

    :param video_path: file to play.
    :param H: display height in pixels.
    :param W: display width in pixels.
    """
    video = cv2.VideoCapture(video_path)
    player = MediaPlayer(video_path)
    fps = video.get(cv2.CAP_PROP_FPS)
    # Per-frame wait derived from the source frame rate, guarding against
    # a zero FPS report from the container.
    sleep_ms = int(np.round((1 / fps) * 1000)) if fps else 1
    while True:
        grabbed, frame = video.read()
        audio_frame, val = player.get_frame()
        if not grabbed:
            break
        # Fix: the original computed sleep_ms and then ignored it, using a
        # hard-coded 30 ms here.
        if cv2.waitKey(sleep_ms) & 0xFF == ord("q"):
            break
        cv2.imshow("Game", cv2.resize(frame, (W, H)))
        if val != 'eof' and audio_frame is not None:
            # audio
            img, t = audio_frame
    video.release()
    cv2.destroyAllWindows()
def PlayVideo(video_path):
    """Play *video_path* at its native frame rate with audio; 'q' quits."""
    video = cv2.VideoCapture(video_path)
    player = MediaPlayer(video_path)
    fps = video.get(cv2.CAP_PROP_FPS)
    print(fps)
    # Fix: hoist the loop-invariant delay computation out of the loop
    # (the original recomputed np.round(1/fps*1000) every frame) and guard
    # against a zero FPS report, which crashed with ZeroDivisionError.
    delay = int(np.round((1 / fps) * 1000)) if fps else 1
    while True:
        grabbed, frame = video.read()
        audio_frame, val = player.get_frame()
        if not grabbed:
            print("End of video")
            break
        if cv2.waitKey(delay) & 0xFF == ord("q"):
            break
        cv2.imshow("Video", frame)
        if val != 'eof' and audio_frame is not None:
            # audio
            img, t = audio_frame
    video.release()
    cv2.destroyAllWindows()
def playDemo(path):
    """Play <path>/videoConstruct1.mp4 while streaming <path>/audio.mp3."""
    video_path = path + 'videoConstruct1.mp4'
    audio_path = path + "audio.mp3"
    video = cv2.VideoCapture(video_path)
    player = MediaPlayer(audio_path)
    finished = False
    while not finished:
        grabbed, frame = video.read()
        audio_frame, val = player.get_frame()
        if not grabbed:
            print("End of video")
            finished = True
        elif cv2.waitKey(5) & 0xFF == ord("q"):
            finished = True
        else:
            cv2.imshow("Video", frame)
            if val != 'eof' and audio_frame is not None:
                img, t = audio_frame  # audio
    video.release()
    cv2.destroyAllWindows()
def PlayVideo(video_path):
    """Play *video_path* with its audio track; press 'q' to quit.

    Fix: the original primed ``frame`` before the loop and immediately
    re-read at the top of it, so the first frame was decoded but never
    displayed.  Each read frame is now shown exactly once.
    """
    video = cv2.VideoCapture(video_path)
    player = MediaPlayer(video_path)
    ret, frame = video.read()
    while ret:
        audio_frame, val = player.get_frame()
        if cv2.waitKey(25) & 0xFF == ord("q"):
            break
        if val != 'eof' and audio_frame is not None:
            # audio frame available; not needed for rendering
            # img, t = audio_frame
            pass
        cv2.imshow("Video", frame)
        # Read the next frame; announce the end of the stream.
        ret, frame = video.read()
        if not ret:
            print("End of video")
    video.release()
    cv2.destroyAllWindows()
def play_video(video):
    """Play *video* scaled to 800x600 with its audio; Esc stops playback."""
    capture = cv2.VideoCapture(video)
    audio = MediaPlayer(video)
    while True:
        sound_frame, status = audio.get_frame()
        ok, image = capture.read()
        if not ok:
            break
        if cv2.waitKey(25) & 0xFF == 27:
            break
        if status != 'eof' and sound_frame is not None:
            img, t = sound_frame
        cv2.imshow('frame', cv2.resize(image, (800, 600)))
    capture.release()
    cv2.destroyAllWindows()