def PlayVideo(video_path):
    cap = cv2.VideoCapture(video_path)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    print("Frame Count:", frame_count)
    print('Width: ', cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    print("Height: ", cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    FPS = cap.get(cv2.CAP_PROP_FPS)
    print('FPS:', FPS)
    player = MediaPlayer(video_path)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter('temp_output.mp4', fourcc, FPS, (480, 320))
    pbar = tqdm(total=frame_count)
    while cap.isOpened():
        pbar.update(1)
        ret, frame = cap.read()
        audio_frame, val = player.get_frame()
        if not ret:
            print("End of video")
            break
        if cv2.waitKey(28) & 0xFF == ord("q"):
            break
        frame = cv2.resize(frame, dsize=(480, 320))
        out.write(frame)
        cv2.imshow("Video", frame)
        if val != 'eof' and audio_frame is not None:
            # audio
            img, t = audio_frame
    cap.release()
    out.release()
    cv2.destroyAllWindows()
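Note that the loop above hardcodes a 28 ms waitKey() delay even though it already queried the FPS. A minimal sketch, not part of the original example, of deriving the delay from the capture's reported frame rate:

import cv2

def frame_delay_ms(cap, fallback_ms=33):
    # Hypothetical helper: per-frame display delay from the stream's FPS,
    # falling back to ~30 fps when the backend reports no rate.
    fps = cap.get(cv2.CAP_PROP_FPS)
    return max(1, int(1000 / fps)) if fps and fps > 0 else fallback_ms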
def play_videoFile(filename, mirror=False):
    cap = cv2.VideoCapture(filename)
    fps = cap.get(cv2.CAP_PROP_FPS)
    # modify:
    width = 1920
    height = 1080
    # cv2.namedWindow('VideoHDR', cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow('VideoHDR', cv2.WINDOW_NORMAL)
    audioplayer = MediaPlayer(filename)
    while True:
        ret_val, frame = cap.read()
        if not ret_val:  # stop at end of stream instead of passing None to imshow
            break
        if mirror:
            frame = cv2.flip(frame, 1)
        cv2.imshow('VideoHDR', frame)
        k = cv2.waitKey(int(1 / fps * 1000))
        if k == 27:  # Esc to quit
            break
        if k == 32:  # space to pause
            while cv2.waitKey(1) != 32:
                pass
    cv2.destroyAllWindows()
def timerEvent(self, event) -> None:
    self.killTimer(self.timer_id)
    ff_opts = {'paused': False, 'autoexit': True}
    self.player = MediaPlayer('../example_data/sample.mp4',
                              ff_opts=ff_opts, lib_opts={})
    # self.player = MediaPlayer('http://localhost:1441/sample_stream.mp4',
    #                           ff_opts=ff_opts, lib_opts={})
    self.running = True
    while self.running:
        time.sleep(0.01)
        frame, val = self.player.get_frame()
        if val == 'eof':
            break
        if frame is None:
            time.sleep(0.01)
        else:
            img, t = frame
            data = img.to_bytearray()[0]
            width, height = img.get_size()
            # The technical name for the 'rgb24' default pixel format is RGB888,
            # which is QImage.Format_RGB888 in the QImage format enum.
            qimage = QImage(data, width, height, QImage.Format_RGB888)
            pixmap = QPixmap.fromImage(qimage)
            pixmap = pixmap.scaled(self.lbl.width(), self.lbl.height(),
                                   Qt.KeepAspectRatio, Qt.SmoothTransformation)
            self.lbl.setPixmap(pixmap)
            time.sleep(val)
        QApplication.processEvents()
def main():
    url = input('\nEnter your URL: \n ==> ')
    sourcePath = url
    camera = getVideoSource(sourcePath, 720, 480)
    player = MediaPlayer(sourcePath)
    while True:
        ret, frame = camera.read()
        audio_frame, val = player.get_frame()
        if ret == 0:
            print("End of video")
            break
        frame = cv2.resize(frame, (720, 480))
        cv2.imshow('Camera', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if val != 'eof' and audio_frame is not None:
            frame, t = audio_frame
            print("Frame:" + str(frame) + " T: " + str(t))
    camera.release()
    cv2.destroyAllWindows()
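This example (and the similar ones below) calls getVideoSource(), which is defined elsewhere in its project. A minimal sketch, assuming the helper simply opens the source and hints a capture size (the original may differ):

import cv2

def getVideoSource(source, width, height):
    # Open a file path, URL, or device index and request a frame size.
    # The set() calls are hints; not every backend honors them.
    cap = cv2.VideoCapture(source)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    return cap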
def start(self, log=True):
    """Initialize and start the decoder. This method will return when a
    valid frame is made available.
    """
    # clear queued data from previous streams
    self._lastFrame = None
    self._frameIndex = -1

    # open the media player
    self._handle = MediaPlayer(self._filename, ff_opts=self._lastPlayerOpts)
    self._handle.set_pause(True)

    # Pull the first frame to get metadata. NB - `_enqueueFrame` should be
    # able to do this but the logic in there depends on having access to
    # metadata first. That may be rewritten at some point to reduce all of
    # this to just a single `_enqueueFrame` call.
    # self._status = NOT_STARTED

    # hand off the player interface to the thread
    self._tStream = MovieStreamThreadFFPyPlayer(self._handle)
    self._tStream.begin()

    # make sure we have metadata
    self.update()
def PlayVideo(video_path):
    # Set up the video capture and the matching audio player.
    video = cv2.VideoCapture(video_path)
    player = MediaPlayer(video_path)
    winname = 'Movie Player with Audio | Python 3.0, OpenCV and FFPyPlayer. (Press Q to Quit)'
    # Create and size the window once, before the playback loop.
    cv2.namedWindow(winname, flags=cv2.WINDOW_GUI_NORMAL)  # Define the GUI mode.
    cv2.resizeWindow(winname, 768, 432)  # Define the window resolution.
    # Loop over frames while the video plays.
    while True:
        grabbed, frame = video.read()
        audio_frame, val = player.get_frame()
        if not grabbed:
            # Do something after the video ends.
            print()
            break
        if cv2.waitKey(28) & 0xFF == ord("q"):
            break
        cv2.imshow(winname, frame)  # Show the frame.
        if val != 'eof' and audio_frame is not None:
            # Attempt to sync the movie with the audio.
            img, t = audio_frame
    # Clean up once the video ends.
    video.release()
    cv2.destroyAllWindows()
def play_av(self, video_path, pause=0, audio_enable=0, video_delay=3, audio_length=0):
    cnt = 0
    cnt_th = int(audio_length / 15) + 1
    while cnt < cnt_th:
        cnt += 1
        # video_path = './animation/csy02.mov'
        video = cv2.VideoCapture(video_path)
        if audio_enable == 1:
            player = MediaPlayer(video_path)
        grabbed, play_frame = video.read()
        winname = "Video"
        cv2.namedWindow(winname)
        cv2.moveWindow(winname, 0, 10)
        cv2.imshow(winname, play_frame)
        if pause == 0:
            while True:
                grabbed, play_frame = video.read()
                if audio_enable == 1:
                    audio_frame, val = player.get_frame()
                if not grabbed:
                    print("End of video")
                    break
                if cv2.waitKey(video_delay) & 0xFF == ord("q"):
                    break
                cv2.imshow("Video", play_frame)
                if audio_enable == 1:
                    if val != 'eof' and audio_frame is not None:
                        # audio
                        img, t = audio_frame
        video.release()
def play(self):
    if self._ffplayer and self._state == 'paused':
        self._ffplayer.toggle_pause()
        self._state = 'playing'
        return

    self.load()
    self._out_fmt = 'rgba'
    ff_opts = {
        'paused': True,
        'out_fmt': self._out_fmt,
        'sn': True,
        'volume': self._volume,
    }
    self._ffplayer = MediaPlayer(
        self._filename, callback=self._player_callback,
        thread_lib='SDL', loglevel='info', ff_opts=ff_opts)

    # Disabled as an attempt to fix kivy issue #6210
    # self._ffplayer.set_volume(self._volume)

    self._thread = Thread(target=self._next_frame_run, name='Next frame')
    self._thread.daemon = True
    self._thread.start()
def play(self):
    self.fetch_metadata()
    self.download(no_part=True)
    print('::: downloaded')
    threading._start_new_thread(self.send_notification, ())
    self.player = MediaPlayer(self.filename)
    time.sleep(0.5)
    print('::: playing')
    last_pts = 0
    updated_pts = 0
    while True:
        # get_pts() reports the current playback position in seconds; the
        # string slicing keeps only the leading digits as a coarse position.
        updated_pts = int(float(str(self.player.get_pts())[:3])) - 3
        print(':::updated', updated_pts)
        # print(player.get_pts())
        while self.player.get_pause():
            time.sleep(0.5)
        if updated_pts == last_pts:
            self.player.toggle_pause()
            print("---buffered out, pausing")
            time.sleep(1)
            self.player.toggle_pause()
        if int(float(str(self.player.get_pts())[:3])) - 3 == int(
                float(str(self.player.get_metadata()['duration'])[:3])) - 3:
            print(':::breaking')
            self.player.toggle_pause()
            self.player.close_player()
            break  # the player is closed; leave the loop
        last_pts = updated_pts
        time.sleep(1)
    print(':::finished playing')
def main():
    sourcePath = "/home/dream/Videos/4K Video Downloader/Around the World in 80 Clips.mp4"
    camera = getVideoSource(sourcePath, 720, 480)
    player = MediaPlayer(sourcePath)
    while True:
        ret, frame = camera.read()
        audio_frame, val = player.get_frame()
        if ret == 0:
            print("End of video")
            break
        frame = cv2.resize(frame, (720, 480))
        cv2.imshow('Camera', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if val != 'eof' and audio_frame is not None:
            frame, t = audio_frame
            print("Frame:" + str(frame) + " T: " + str(t))
    camera.release()
    cv2.destroyAllWindows()
def Slidemoved(self):
    self.timer.start()
    self.steptimer.start()
    self.player = MediaPlayer("%s" % self.playitem, ff_opts={'ss': self.step})
    self.bplay.setIcon(QIcon(r'img\pause.png'))
    self.bplay.setToolTip('暂停')  # tooltip text: "Pause"
def controlTimer(self):
    # if timer is stopped
    # create video capture
    self.cap = cv2.VideoCapture(0)
    fps = self.cap.get(cv2.CAP_PROP_FPS)
    print("fps", fps)
    self.video = cv2.VideoCapture(
        "C:/Users/brahm/OneDrive/Desktop/రాష్ట్రపతి పాలన ఎప్పుడు, ఎందుకు విధిస్తారు_ ఆర్టికల్ 356 ఏం చెబుతోంది_ - BBC New.mp4"
    )
    self.video.set(cv2.CAP_PROP_FPS, 30)
    fpsdata = self.cap.get(cv2.CAP_PROP_FPS)
    print("fpsdata", fpsdata)
    self.player = MediaPlayer(
        "C:/Users/brahm/OneDrive/Desktop/రాష్ట్రపతి పాలన ఎప్పుడు, ఎందుకు విధిస్తారు_ ఆర్టికల్ 356 ఏం చెబుతోంది_ - BBC New.mp4"
    )
    # start timer
    self.timer.start()
def PlayVideo(video_path):
    video = cv2.VideoCapture(video_path)
    player = MediaPlayer(video_path)
    count = 0
    while True:
        count = count + 1
        grabbed, frame = video.read()
        audio_frame, val = player.get_frame()
        if not grabbed:
            print("End of video")
            break
        if count % 15 == 0:
            # Extract text from every 15th frame on a worker thread; the
            # grabbed check above guarantees frame is not None here.
            th = threading.Thread(target=extract_text_frame, args=(frame,))
            th.start()
        if cv2.waitKey(45) & 0xFF == ord("q"):
            break
        cv2.imshow("Video", frame)
        if val != 'eof' and audio_frame is not None:
            # audio
            img, t = audio_frame
        if count % 15 == 0:
            th.join()
    video.release()
    cv2.destroyAllWindows()
def outro(self):
    toread = "Congratulations, bingo winner! What's your name?"
    comeon = "Very wise. Never reveal personal information to a stranger, no matter how beautiful. But can you just make up a name?"
    self.speak.Speak(toread)
    while True:
        userin = input('Enter name: ')
        if userin != "":
            bye = "Well done, " + userin + ". Please be nice to your newly-established inferiors. I've been DEBRA, the Definitely Effective Bingo Reciting Application. Bye bye!"
            self.speak.Speak(bye)
            break
        else:
            self.speak.Speak(comeon)
    player = MediaPlayer('udidit2-lofi.mp4')
    val = ''
    while val != 'eof':
        frame, val = player.get_frame()
        if val != 'eof' and frame is not None:
            img, t = frame
            arr = np.uint8(np.asarray(list(img.to_bytearray()[0])).reshape(540, 960, 3))
            cv2.imshow('u did it', arr)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
def test_play(self):
    from .common import get_media
    from ffpyplayer.player import MediaPlayer
    import time

    error = [None, ]

    def callback(selector, value):
        if selector.endswith('error'):
            error[0] = selector, value

    # only video
    ff_opts = {'an': True, 'sync': 'video'}
    player = MediaPlayer(get_media('dw11222.mp4'),
                         callback=callback, ff_opts=ff_opts)

    while not error[0]:
        frame, val = player.get_frame()
        if val == 'eof':
            break
        elif frame is None:
            time.sleep(0.01)
        else:
            img, t = frame

    if error[0]:
        raise Exception('{}: {}'.format(*error[0]))
def playVideo(video_path):
    cap = cv2.VideoCapture(video_path)
    winname = "Video"
    cv2.namedWindow(winname, cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty(winname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    if not cap.isOpened():
        print("Error opening video stream or file")
    else:
        player = MediaPlayer(video_path)
        while cap.isOpened():
            grabbed, frame = cap.read()
            audio_frame, val = player.get_frame()
            if not grabbed:
                print("End of video")
                break
            if cv2.waitKey(28) & 0xFF == ord("q"):
                break
            # resized_frame = cv2.resize(frame, (2880, 1800))
            cv2.imshow(winname, frame)
            if val != 'eof' and audio_frame is not None:
                # audio
                img, t = audio_frame
    cap.release()
    cv2.destroyAllWindows()
def save_video_thumbnail(source, output):
    """Saves a thumbnail of the given video under the given name."""
    player = MediaPlayer(source, ff_opts={'ss': 1.0})
    frame, val = None, None
    # Stop polling at eof as well, otherwise a video with no decodable
    # frame would leave this loop spinning forever.
    while not frame and val != 'eof':
        frame, val = player.get_frame(force_refresh=True)
    player.close_player()
    if val == 'eof':
        return None
    elif frame is None:
        return None
    else:
        img = frame[0]
        pixel_format = img.get_pixel_format()
        img_size = img.get_size()
        thumb_size = 256, int(img_size[1] * 256 / img_size[0])
        codec = 'tiff'
        output_format = get_supported_pixfmts(codec, pixel_format)[0]
        # resize and convert into the best pixel format
        sws = SWScale(img_size[0], img_size[1], pixel_format,
                      thumb_size[0], thumb_size[1], output_format)
        thumbnail = sws.scale(img)
        streams = [{
            'pix_fmt_in': output_format,
            'width_in': thumb_size[0],
            'height_in': thumb_size[1],
            'codec': codec,
            'frame_rate': (30, 1),
        }]
        writer = MediaWriter(output, streams, lib_opts={'compression_algo': 'lzw'})
        writer.write_frame(img=thumbnail, pts=0, stream=0)
        writer.close()
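A quick, hypothetical usage sketch for save_video_thumbnail(); both file names are placeholders, not paths from the original project:

# Grab a frame about 1 s into the video and write a 256-px-wide,
# LZW-compressed TIFF thumbnail next to it.
save_video_thumbnail('input.mp4', 'thumb.tiff')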
def clicked():
    bps = k1.get()
    b = np.around(np.linspace(0, 60, bps), decimals=2)
    y_beats = librosa.clicks(times=b)
    librosa.output.write_wav('metro.wav', y_beats, 22050)
    tt = TapTester()
    c = 0
    player = MediaPlayer('metro.wav')
    for i in range(1000):
        # if tt.listen() == 1:
        #     if ((time.time() - start) % (2 / 3)) < 0.3:
        #         c += 1
        #         print("on beat")
        #         if c > 1:
        #             print("streak x" + str(c))
        #     else:
        #         print("off beat")
        #         c = 0
        if tt.listen() == 1:
            if c == 0:
                start = time.time()
            if b[c] - 0.1 <= round(time.time() - start, 2) <= b[c] + 0.1:
                print("on beat x", str(c))
                lbl_streak.configure(text=str(c) + "x Streak")
                root.update()
                c += 1
            else:
                c = 0
def video_loop(self):
    self.updating_aguments()
    self.getVideoSource()
    player = MediaPlayer(self.videoPath)
    while True:
        ret, frame = self.cap.read()
        audio_frame, val = player.get_frame()
        if ret == 0:
            print("End of video")
            break
        frame = cv2.resize(frame, (self.width, self.height))
        cv2.imshow('Camera', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if val != 'eof' and audio_frame is not None:
            frame, t = audio_frame
            print("Frame:" + str(frame) + " T: " + str(t))
    self.cap.release()
    cv2.destroyAllWindows()
def run(self):
    while True:
        a = time.time()
        b = time.localtime(a)
        if b[3] == 13 and b[4] == 55 and b[5] == 0:
            music = []
            path = r".\\music"
            for i in os.listdir(path):
                music.append(i)
            rmusic = random.randint(1, len(music) - 1)
            random_music = music[rmusic]
            audio = MP3(path + "\\" + str(random_music))
            timelength = audio.info.length
            player = MediaPlayer(path + "\\" + str(random_music))
            time.sleep(timelength)
            continue
        elif b[3] == 20 and b[4] == 30 and b[5] == 0:
            time.sleep(3)
            ssl._create_default_https_context = ssl._create_unverified_context
            url = r"http://t.weather.sojson.com/api/weather/city/101010100"
            page = urllib.request.urlopen(url)
            html = page.read().decode("utf-8")
            res = json.loads(html)
            a = json.dumps(res, ensure_ascii=False, indent=4)
            res = json.loads(a)
            today = res['data']
            tomorrow = res['data']['forecast'][1]
            # Chinese broadcast text, roughly: "Today is <date>, temperature
            # <wendu>, humidity <shidu>, air quality <quality>; tomorrow's
            # high <high>, low <low>, conditions <type>, tip <notice>.
            # That's all for today's report, thanks for listening."
            text = ("今天是%s年%s月%s日,温度为%s度,当前湿度%s,空气质量%s,"
                    "明天温度为%s,气温%s,天气状况%s,温馨提示%s,"
                    "今天的播报就到这里,感谢大家的收听") % \
                (res['date'][0:4], res['date'][5], res['date'][6:8],
                 today['wendu'], today['shidu'], today['quality'],
                 # tomorrow's weather report
                 tomorrow['high'], tomorrow['low'],
                 tomorrow['type'], tomorrow['notice'])
            APP_ID = "11687967"
            API_KEY = "GEjlCy7qc2yl9quEeVlscuPk"
            SECRET_KEY = "4u6nQpIDBVSVtqC4X2rb4IH8K1mQlxaB"
            client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)
            result = client.synthesis(text, options={
                'vol': 15,
                "per": 0,
                "spd": 5,
            })
            if not isinstance(result, dict):
                with open(r'D://audio.mp3', 'wb') as f:
                    f.write(result)
            else:
                print(result)
            playsound.playsound("D://audio.mp3")
            continue
def playVideoFromFile(self, fileName='data/rick', fullscreen=False):
    # load video file
    video = cv2.VideoCapture(fileName)
    # calculate the delay based on the video's frame rate
    fps = video.get(cv2.CAP_PROP_FPS)
    delay = int(1000 / (fps + 1))
    # load audio from the video file
    audio = MediaPlayer(fileName)
    while video.isOpened():
        ret, frame = video.read()
        # play sound
        audio.get_frame()
        # if there is still a frame to retrieve
        if ret:
            if fullscreen:
                cv2.namedWindow('output', cv2.WND_PROP_FULLSCREEN)
                cv2.setWindowProperty('output', cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_FULLSCREEN)
            cv2.imshow('output', frame)
            if cv2.waitKey(delay) == ord('q'):
                break
        else:
            break
    video.release()
    cv2.destroyAllWindows()
def Slidemoved(self):
    if not self.flag:
        self.timer.start()
        self.steptimer.start()
        self.player = MediaPlayer("%s" % self.playitem, ff_opts={'ss': self.step})
        self.bplay.setIcon(QIcon(r'img\pause.png'))
        self.bplay.setToolTip('暂停,快捷键“Space”')  # tooltip: "Pause, shortcut: Space"
def music(name):
    musicfile = name
    player = MediaPlayer(musicfile)
    val = ''
    while val != 'eof':
        frame, val = player.get_frame()
        if val != 'eof' and frame is not None:
            img, t = frame
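The loop above polls get_frame() in a tight loop while the decoder buffers. A minimal sketch of a gentler variant (not from the original project; 'song.mp3' is a placeholder filename):

import time
from ffpyplayer.player import MediaPlayer

def music_polite(name):
    # Same termination condition as music(), but sleep briefly between polls
    # so the loop does not spin at 100% CPU while no frame is ready.
    player = MediaPlayer(name)
    val = ''
    while val != 'eof':
        frame, val = player.get_frame()
        if frame is None:
            time.sleep(0.01)

music_polite('song.mp3')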
def extract_audio(self):
    fname = _G.FullAudioFilename
    if not os.path.exists(fname):
        v = mp.VideoFileClip(self.src)
        v.audio.write_audiofile(fname)
    self.audio = MediaPlayer(_G.FullAudioFilename)
    self.audio.toggle_pause()
    print("Audio loaded")
def play_next_step():
    if NEXT_STEPS:
        f = NEXT_STEPS.pop(0)  # remove the first function from the FIFO; it is called below
        scenario.display_good_feedback()
        song = MediaPlayer(CORRECT_SOUND)
        sleep(1)
        f()
    else:
        print("Trying to call a next step but there is none",
              file=sys.stderr, flush=True)
def Play(self):
    if self.flag:
        try:
            self.playitem = self.l[self.list.currentRow()]
            if os.path.isfile("%s" % self.playitem):
                self.player = MediaPlayer("%s" % self.playitem)
                self.timer = QTimer()
                self.timer.start(50)
                self.timer.timeout.connect(self.Show)
                self.steptimer = QTimer()
                self.steptimer.start(1000)
                self.steptimer.timeout.connect(self.Step)
                self.flag = False
                self.bplay.setIcon(QIcon(r'img\pause.png'))
                self.bplay.setToolTip('暂停,快捷键“Space”')  # "Pause, shortcut: Space"
            else:
                # message box: "Error" / "Cannot find the file to play!"
                QMessageBox.warning(self, '错误', '找不到要播放的文件!')
        except:
            QMessageBox.warning(self, '错误', '找不到要播放的文件!')
    else:
        if self.l[self.list.currentRow()] == self.playitem:
            self.player.toggle_pause()
            if self.player.get_pause():
                self.timer.stop()
                self.steptimer.stop()
                self.bplay.setIcon(QIcon(r'img\play.png'))
                self.bplay.setToolTip('播放,快捷键“Space”')  # "Play, shortcut: Space"
            else:
                self.timer.start()
                self.steptimer.start()
                self.bplay.setIcon(QIcon(r'img\pause.png'))
                self.bplay.setToolTip('暂停,快捷键“Space”')
        else:
            self.playitem = self.l[self.list.currentRow()]
            if os.path.isfile("%s" % self.playitem):
                self.step = 0
                self.stime.setValue(0)
                self.player = MediaPlayer("%s" % self.playitem)
                self.timer.start()
                self.steptimer.start()
                self.bplay.setIcon(QIcon(r'img\pause.png'))
                self.bplay.setToolTip('暂停,快捷键“Space”')
            else:
                QMessageBox.warning(self, '错误', '找不到要播放的文件!')
def Fastforward(self):
    self.step += 10
    if self.step >= int(self.mediatime):
        self.stime.setValue(int(self.mediatime))
    self.timer.start()
    self.steptimer.start()
    self.player = MediaPlayer("%s" % self.playitem, ff_opts={'ss': self.step})
    self.bplay.setIcon(QIcon(r'img\pause.png'))
    self.bplay.setToolTip('暂停')  # tooltip text: "Pause"
def Fastback(self):
    self.step -= 10
    if self.step <= 0:
        self.step = 0
    self.timer.start()
    self.steptimer.start()
    self.player = MediaPlayer("%s" % self.playitem, ff_opts={'ss': self.step})
    self.bplay.setIcon(QIcon(r'img\pause.png'))
    self.bplay.setToolTip('暂停')  # tooltip text: "Pause"
def viewCam(self):
    start_time = datetime.datetime.now()
    num_frames = 0
    im_width, im_height = (self.cap.get(3), self.cap.get(4))
    # max number of hands we want to detect/track
    num_hands_detect = 2

    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    ret, image_np = self.cap.read()
    # image_np = cv2.flip(image_np, 1)
    try:
        image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
    except:
        print("Error converting to RGB")

    # Actual detection. Variable boxes contains the bounding box coordinates for hands detected,
    # while scores contains the confidence for each of these boxes.
    # Hint: If len(boxes) > 1, you may assume you have found at least one hand (within your score threshold)
    boxes, scores = detector_utils.detect_objects(image_np, detection_graph, sess)

    # draw bounding boxes on frame
    detector_utils.draw_box_on_image(num_hands_detect, args.score_thresh,
                                     scores, boxes, im_width, im_height, image_np)

    # Calculate frames per second (FPS)
    num_frames += 1
    elapsed_time = (datetime.datetime.now() - start_time).total_seconds()
    fps = num_frames / elapsed_time
    detector_utils.draw_fps_on_image("FPS : " + str(int(fps)), image_np)

    # cv2.imshow('Single-Threaded Detection',
    #            cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR))

    # convert image to RGB format
    image = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
    # get image info
    height, width, channel = image_np.shape
    step = channel * width
    # create QImage from image
    qImg = QImage(image_np.data, width, height, step, QImage.Format_RGB888)
    # show image in img_label

    # audio
    # audio_frame, val = self.player.get_frame()
    if self.i:
        self.player = MediaPlayer("vid1.MP4")
        self.i = False
    self.player.get_frame()

    self.ui.image_label.setPixmap(QPixmap.fromImage(qImg))
def on_start(self):
    self.callback_ref = WeakMethod(self.callback)
    filename = sys.argv[1]
    logging.info('ffpyplayer: Playing file "{}"'.format(filename))
    # try ff_opts = {'vf': 'edgedetect'}  http://ffmpeg.org/ffmpeg-filters.html
    ff_opts = {}
    self.ffplayer = MediaPlayer(filename, callback=self.callback_ref,
                                loglevel=log_level, ff_opts=ff_opts)
    self._thread.start()
    self.keyboard = Window.request_keyboard(None, self.root)
    self.keyboard.bind(on_key_down=self.on_keyboard_down)