def __init__(self):
    """Create the 640x480 pygame window, load assets, and run the main
    game loop until the window is closed or Escape is released."""
    pygame.init()
    self.screen = pygame.display.set_mode([640, 480])
    self.sysfont = pygame.font.SysFont(None, 60)
    self.life_image = pygame.image.load("image/heart.png")
    self.start()
    self.running = True
    frame_timer = FPS()
    while self.running:
        frame_timer.start()
        # Quit on window close or on releasing the Escape key.
        for event in pygame.event.get():
            closed = event.type == pygame.QUIT
            escaped = (event.type == pygame.KEYUP
                       and event.key == pygame.K_ESCAPE)
            if closed or escaped:
                self.running = False
        self.update()
        pygame.display.update()
        frame_timer.end()
        # Sleep for whatever is left of the frame budget (60 FPS target);
        # never pass a negative delay when the frame ran long.
        remaining = int(FRAME_RATE - frame_timer.get_time() * 1000)
        pygame.time.delay(max(remaining, 0))
    pygame.quit()
class MainWindow(QMainWindow):
    """Two-webcam recorder window.

    Shows a live preview of two cameras in side-by-side QLabels and, while
    recording, writes each feed to its own XVID-encoded .avi file from a
    Worker running on a QThreadPool.
    """

    ###########################################################
    # Constructor
    ###########################################################
    def __init__(self, vid_path, src=0, src2=1, *args, **kwargs):
        """Open both capture devices, create the video writers, build the GUI.

        vid_path -- path prefix under which outputVid.avi / outputVid2.avi
                    are created
        src      -- OpenCV device index of the first camera
        src2     -- OpenCV device index of the second camera
        """
        super(MainWindow, self).__init__(*args, **kwargs)

        ###########################################################
        # Camera params
        ###########################################################
        self.counter = 0                 # ticks of the 1 s QTimer (recurring_timer)
        self.fps = FPS()                 # elapsed-time / FPS meter for a recording
        self.sp = vid_path               # save-path prefix for output files
        self.stream = cv2.VideoCapture(src)
        self.stream2 = cv2.VideoCapture(src2)
        self.default_fps = self.stream.get(cv2.CAP_PROP_FPS)
        self.default_fps2 = self.stream2.get(cv2.CAP_PROP_FPS)
        # Native frame sizes; each writer is created with the exact size of
        # the frames that will be written to it.
        self.cam_size = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),
                         int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        self.cam_size2 = (int(self.stream2.get(cv2.CAP_PROP_FRAME_WIDTH)),
                          int(self.stream2.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        self.writer = cv2.VideoWriter(self.sp + 'outputVid.avi',
                                      cv2.VideoWriter_fourcc(*'XVID'),
                                      self.default_fps, self.cam_size)
        self.writer2 = cv2.VideoWriter(self.sp + 'outputVid2.avi',
                                       cv2.VideoWriter_fourcc(*'XVID'),
                                       self.default_fps2, self.cam_size2)
        self.cam_ind = src               # kept so start_record can reopen the device
        self.cam_ind2 = src2
        self.stopped = False             # set True by stop_record; polled by thread_fc

        ###########################################################
        # GUI configuration
        ###########################################################
        self.disply_width = 500          # preview label width (sic: "disply")
        self.display_height = 350
        self.setWindowTitle("Qt live label demo")
        self.layout_base = QVBoxLayout()
        self.layout_h = QHBoxLayout()
        self.camera1 = QLabel("webcam1")
        self.camera2 = QLabel("webcam2")
        start_b = QPushButton("Start Video")
        stop_b = QPushButton("Stop Video")
        start_b.pressed.connect(self.start_record)
        stop_b.pressed.connect(self.stop_record)
        self.layout_h.addWidget(self.camera1)
        self.layout_h.addWidget(self.camera2)
        self.layout_base.addWidget(start_b)
        self.layout_base.addWidget(stop_b)
        self.layout_base.addLayout(self.layout_h)
        self.layout_h.setSpacing(15)
        self.widget = QWidget()
        self.widget.setLayout(self.layout_base)
        self.setCentralWidget(self.widget)
        self.camera1.resize(self.disply_width, self.display_height)
        self.camera2.resize(self.disply_width, self.display_height)
        self.show()

        self.threadpool = QThreadPool()
        print("Multithreading with maximum %d threads" % self.threadpool.maxThreadCount())
        print("Cam 1 fps&size:", self.default_fps, self.cam_size)
        print("Cam 2 fps&size:", self.default_fps2, self.cam_size2)
        self.worker = Worker()           # QRunnable that will drive thread_fc (project class)
        # 1 s heartbeat timer; its only effect is incrementing self.counter.
        self.timer = QTimer()
        self.timer.setInterval(1000)
        self.timer.timeout.connect(self.recurring_timer)
        self.timer.start()

    def progress_fn(self, n):
        """Log worker progress as a percentage."""
        print("%d%% done" % n)

    def thread_fc(self):
        """Capture loop executed on a pool thread by the Worker.

        Each iteration reads one frame from each camera, writes any
        successfully grabbed frame to its .avi writer, and emits both frames
        for the GUI preview.  Runs until self.stopped is set, then releases
        all capture/writer handles and returns "Done".
        """
        while True:
            # If the stop-flag was set (by stop_record), print the session
            # stats, release everything and exit the loop.
            if self.stopped == True:
                print("[Record-Info] elasped time: {:.2f}".format(
                    self.fps.elapsed()))
                print("[Record-Info] approx. FPS: {:.2f}".format(
                    self.fps.fps()))
                self.stream.release()
                self.stream2.release()
                self.writer.release()
                self.writer2.release()
                cv2.destroyAllWindows()
                return "Done"
            # Otherwise, read the next frame from each stream.
            (self.success, self.frame) = self.stream.read()
            (self.success2, self.frame2) = self.stream2.read()
            self.fps.update()
            if self.success:
                self.writer.write(self.frame)
            if self.success2:
                self.writer2.write(self.frame2)
            # NOTE(review): emitted even when a read failed, so the slot may
            # receive a stale or None frame -- confirm this is intended.
            self.worker.signals.change_pixmap_signal.emit(
                [self.frame, self.frame2])

    def print_output(self, s):
        """Log the worker's result value ("Done" from thread_fc)."""
        print("output", s)

    def thread_complete(self):
        """Log that the worker job finished."""
        print("Thread complete")

    def convert_cv_qt(self, cv_img):
        """Convert from an opencv image to QPixmap.

        BGR->RGB convert, wrap in a QImage, and scale to the preview label
        size while keeping the aspect ratio.
        """
        rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        convert_to_Qt_format = QImage(rgb_image.data, w, h, bytes_per_line,
                                      QImage.Format_RGB888)
        p = convert_to_Qt_format.scaled(self.disply_width,
                                        self.display_height,
                                        Qt.KeepAspectRatio)
        return QPixmap.fromImage(p)

    # NOTE(review): the slot is declared for np.ndarray but thread_fc emits a
    # two-element list of frames -- confirm the signal's declared type.
    @pyqtSlot(np.ndarray)
    def update_image(self, cv_img):
        """Updates both camera labels from a [frame1, frame2] pair."""
        qt_img = self.convert_cv_qt(cv_img[0])
        qt_img2 = self.convert_cv_qt(cv_img[1])
        self.camera1.setPixmap(qt_img)
        self.camera2.setPixmap(qt_img2)

    def execute_thread(self):
        """Wire the worker's signals to the GUI slots and start it on the pool."""
        # Pass the capture loop for the worker to execute.
        self.worker.begin_job(self.thread_fc)
        self.worker.signals.change_pixmap_signal.connect(self.update_image)
        self.worker.signals.result.connect(self.print_output)
        self.worker.signals.finished.connect(self.thread_complete)
        self.worker.signals.progress.connect(self.progress_fn)
        # Execute on a pool thread.
        self.threadpool.start(self.worker)
        return self

    def start_record(self):
        """Start (or, after a stop, restart) recording from both cameras."""
        print('start recording!', self.stopped)
        self.fps.start()
        # After a previous stop the streams/writers were released by
        # thread_fc; recreate them along with a fresh Worker before starting.
        if self.stopped == True:
            self.worker = Worker()
            self.stream = cv2.VideoCapture(self.cam_ind)
            self.stream2 = cv2.VideoCapture(self.cam_ind2)
            self.writer = cv2.VideoWriter(self.sp + 'outputVid.avi',
                                          cv2.VideoWriter_fourcc(*'XVID'),
                                          self.default_fps, self.cam_size)
            self.writer2 = cv2.VideoWriter(self.sp + 'outputVid2.avi',
                                           cv2.VideoWriter_fourcc(*'XVID'),
                                           self.default_fps2, self.cam_size2)
            self.stopped = False
        self.worker.open_record_loop()
        self.execute_thread()

    def stop_record(self):
        """Request the capture loop to stop and record the session stats."""
        # Indicate that the thread should be stopped; thread_fc performs the
        # actual release of the streams and writers.
        print("Stop recording!")
        self.stopped = True
        self.worker.close_record_loop()
        self.fps.stop()
        # NOTE(review): re-running execute_thread here re-connects all signals
        # and resubmits the worker to the pool -- confirm this is intended.
        self.execute_thread()

    def recurring_timer(self):
        """1 Hz QTimer tick; counts seconds since the window was created."""
        self.counter += 1
# start the capture (on camera channel 0) thread cap = WebcamVideoStream(src=0).start() # wait one second for everything to settle before reading first frame time.sleep(1) # create a video of the tracking timestamp = "{:%Y_%m_%d_%H_%M}".format(datetime.now()) fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v') out = cv2.VideoWriter('recordings/video' + timestamp + '.mov', fourcc, 30, (612, 425), True) # start FPS counter fps = FPS() fps.start() # record time timerStart = datetime.now() while True: # capture a frame frame = cap.read() # undistort and crop the frame # cv2.undistort() is slow so we use a remapping # undistorted = cv2.undistort(frame, mtx, dist, None, newcameramtx) undistorted = cv2.remap(frame, map1, map2, cv2.INTER_LINEAR) x, y, w, h = roi cropped = undistorted[y:y + h, x:x + w]