def main():
    """Capture frames, estimate ArUco-board pose, publish each pose dict over
    ZeroMQ PUB, and append it to a timestamped JSON-lines log file.

    Runs until Esc is pressed in the preview window.
    """
    ctx = zmq.Context()
    sock = ctx.socket(zmq.PUB)
    # NOTE(review): connect() on a PUB socket implies the subscriber side
    # binds tcp://127.0.0.1:3002 — confirm against the consumer.
    sock.connect("tcp://127.0.0.1:3002")
    vs = VideoStream(src=1).start()
    pe = RealAntPoseEstimator()
    last_time = None
    # `with` guarantees the log file is flushed and closed even if the loop
    # raises (the original leaked the open file handle).
    with open(
            'showarucoboard.py_log_%s.json' %
            datetime.datetime.utcnow().strftime("%Y_%m_%d_%H_%M_%S"),
            'w') as logfile:
        try:
            while True:
                # Read latest image (frame plus its capture time, epoch ms).
                img, utcnowms = vs.read()

                # Compute fps from the spacing of the frame timestamps.
                fps = 0
                if last_time is not None:
                    if last_time == utcnowms:
                        continue  # same frame as last iteration — skip
                    fps = 1000 / (utcnowms - last_time)
                last_time = utcnowms

                # Estimate pose
                d, img = pe.get_pose(img, utcnowms)
                if d is not None:
                    sock.send_json(d)
                    d["sent"] = True
                    print("fps %5.1f " % fps, end='')
                    print(
                        "- %d dist %2.1f x %1.3fm y %1.3fm z %1.3fm roll %3.0f pitch %3.0f yaw %3.0f"
                        % tuple([
                            d[x] for x in [
                                "server_epoch_ms", "dist", "x", "y", "z",
                                "roll", "pitch", "yaw"
                            ]
                        ]),
                        end='')
                    # Velocity fields are only present once enough history
                    # has accumulated in the estimator.
                    if "xvel" in d:
                        print(" xvel%6.3f %6.3f y%6.3f z%6.3f" %
                              (d["xvel"], d["xvel_raw"], d["yvel"], d["zvel"]))
                    else:
                        print()
                    logfile.write(json.dumps(d) + '\n')
                cv2.imshow('ant', img)
                if cv2.waitKey(1) == 27:
                    break  # esc to quit
        finally:
            # Release the camera and windows even when the loop exits via an
            # exception (the original only cleaned up on the happy path).
            vs.stop()
            cv2.destroyAllWindows()
from face_decorator import FaceDecorator
from eye_aspect_ratio import EyeAspectRatio
from config import args

# Open the input video/camera and build the processing pipeline; each stage
# reads the frame and reads/writes shared results into the `info` dict.
vs = VideoStream(args["path"]).start()
workers = [
    GrayConverter(),
    FaceDetector(),
    FaceMarker(args["landmark_model"]),
    EyeAspectRatio(),
    FaceDecorator(),
]

# Two preview windows: the decorated output frame and the EAR graph.
winname = args["display"]
graphWin = "Graph"
cv2.namedWindow(winname)
cv2.moveWindow(winname, 200, 300)
cv2.namedWindow(graphWin)
cv2.moveWindow(graphWin, 900, 300)

while True:
    frame = vs.read()
    if frame is None:
        break  # end of stream

    info = {}
    for worker in workers:
        worker.workon(frame, info)

    cv2.imshow(winname, info["output"])
    cv2.imshow(graphWin, info["graph"])

    key = cv2.waitKey(10) & 0xFF
    if key in (ord('q'), 27):
        break  # 'q' or Esc quits
    if key == ord(' '):
        cv2.waitKey(0)  # space pauses until any key is pressed

cv2.destroyAllWindows()
vs.stop()
vs.t.join()  # wait for the reader thread to finish
        # --- Fragment: tail of the capture loop plus its `finally` cleanup.
        # NOTE(review): the matching `try:` and the loop header are outside
        # this chunk; indentation below is reconstructed — confirm upstream.
        t2 = cv2.getTickCount()
        # Seconds elapsed this iteration; `freq` is presumably the tick
        # frequency (cv2.getTickFrequency()) — TODO confirm where it is set.
        time1 = (t2 - t1) / freq
        frame_rate_calc = 1 / time1
        # Record wall-clock time and instantaneous FPS for the final plot.
        time_arr.append(time.time() - start_time)
        fps_arr.append(frame_rate_calc)
        if SHOW_OUTPUT:
            videostream.show_frame(bboxes)
        if SAVE_MODE:
            videostream.save_frame(bboxes)
        # Press 'q' to quit
        if cv2.waitKey(1) == ord('q'):
            break
finally:
    # Clean up
    videostream.stop()
    servo.stop()
    print(f"Average fps = {sum(fps_arr)/len(fps_arr)}")
    # Plot framerate over time and save it alongside the script.
    import matplotlib.pyplot as plt
    plt.rcParams["figure.figsize"] = (20, 10)
    plt.plot(time_arr, fps_arr)
    plt.xlabel("Time, s")
    plt.ylabel("Framerate, frame/s")
    plt.savefig("fps.png")
def recognize_video(detector, embedder: Embedder, recognizer: Recognizer,
                    detector_params='default', source=0):
    """Run live face detection + recognition on a video stream.

    Args:
        detector: one of the project detectors (SSD, VJ, LBP, HOG, MMOD).
        embedder: converts a face ROI into an embedding vector.
        recognizer: maps an embedding to a person's name.
        detector_params: 'default' to use the detector's defaults, otherwise a
            sequence of detector-specific parameters (see branches below).
        source: camera index / source passed to VideoStream.

    Raises:
        TypeError: if `detector` is not a recognized detector instance.
        ValueError: if `detector_params` is not 'default' and no parameter
            branch matches the detector type.
    """
    # Validate before opening the camera so a bad detector doesn't leak a
    # started stream (the original validated after VideoStream().start()).
    if not is_detector(detector):
        raise TypeError('Incorrect type of detector')

    # Initialize the video stream.
    print('Starting video stream...')
    vs = VideoStream(src=source).start()
    time.sleep(0.5)  # camera warm-up

    # Start the FPS throughput estimator.
    fps = FPS().start()

    # Loop over frames from the stream.
    while True:
        frame = vs.read()

        # Dispatch to the detector call matching the supplied parameters.
        if detector_params == 'default':
            faces_roi, boxes = detector.calc_image(frame, return_mode='both')
        elif isinstance(detector, DetectorSSD):
            confidence = detector_params[0]
            faces_roi, boxes = detector.calc_image(frame,
                                                   confidence=confidence,
                                                   return_mode='both')
        elif isinstance(detector, (DetectorVJ, DetectorLBP)):
            scale_factor, min_neighbors = detector_params
            faces_roi, boxes = detector.calc_image(
                frame,
                scale_factor=scale_factor,
                min_neighbors=min_neighbors,
                return_mode='both')
        elif isinstance(detector, (DetectorHOG, DetectorMMOD)):
            upsampling_times = detector_params[0]
            faces_roi, boxes = detector.calc_image(
                frame, upsampling_times=upsampling_times, return_mode='both')
        else:
            # Originally this fell through and crashed later with NameError
            # on `faces_roi`; fail fast with a clear message instead.
            raise ValueError('Unsupported detector/detector_params combination')

        # Recognize each face and draw a labelled box.
        for face_roi, (start_x, start_y, end_x, end_y) in zip(faces_roi, boxes):
            embeddings = embedder.calc_face(face_roi)
            name = recognizer.recognize(embeddings)
            text = '{}'.format(name)
            # Keep the label inside the frame when the box touches the top.
            y = start_y - 10 if start_y - 10 > 10 else start_y + 10
            cv2.rectangle(frame, (start_x, start_y), (end_x, end_y),
                          (0, 0, 255), 2)
            cv2.putText(frame, text, (start_x, y), cv2.FONT_HERSHEY_SIMPLEX,
                        0.45, (0, 0, 255), 2)

        # Update the FPS counter.
        fps.update()

        # Show the output frame; 'q' quits.
        cv2.imshow('Frame', frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

    fps.stop()
    print('Elapsed time: {:.2f}'.format(fps.elapsed()))
    print('Approx. FPS: {:.2f}'.format(fps.fps()))
    cv2.destroyAllWindows()
    vs.stop()
def main():
    """Main client loop: stream camera frames to the server over a TCP socket
    and forward the joystick values it sends back to the Arduino over serial.

    Runs until the server sends COMMAND_QUIT (or RUNNING is cleared elsewhere).
    """
    global RUNNING

    # Create a socket and connect to the server.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((HOST, PORT))
    PRINT('Connected to ' + ENC_VALUE(HOST + ':' + str(PORT)) + '.', SUCCESS)

    # Connect to the Arduino via serial.
    ser = serial.Serial('/dev/ttyACM0', 9600, timeout=1)
    ser.flush()
    cr = '\n'  # command terminator expected by the Arduino sketch

    # Start the camera video thread.
    stream = VideoStream().start()
    last_serial_time = time.time()  # last time a serial value was sent

    while RUNNING:
        # Grab the current frame and ship it to the server as JPEG;
        # best-effort: a failed read/encode/send is logged, not fatal.
        try:
            stream_frame = stream.read()
            _, frame = cv2.imencode('.jpg', stream_frame, ENCODE_PARAM)
            send(s, [frame])
        except Exception as e:
            PRINT(str(e), ERROR)

        # Receive data from the server.
        recv_data = recv(s)
        print(recv_data[1])

        # Check if a command was sent.
        if recv_data[DATA_IDX_COMMAND] == COMMAND_QUIT:
            # Quit: send a neutral joystick value to the Arduino, then stop.
            # BUGFIX: the original assigned the undefined name `false` here,
            # which raised NameError before the loop could terminate cleanly;
            # the single `RUNNING = False` at the end of the branch suffices.
            PRINT('Recieved command ' + ENC_VALUE(COMMAND_QUIT) + '.', INFO)
            joy_vrt = round(4 * (1 - 0))
            joy_fwd = round(4 * (1 - 0))
            joy_rot = round(4 * (1 + 0))
            # Pack the three axes into one 3-digit decimal command.
            submit = str(joy_vrt * 100 + joy_fwd * 10 + joy_rot)
            ser.write(submit.encode('utf-8'))
            ser.write(cr.encode('utf-8'))
            RUNNING = False
        elif time.time() - last_serial_time >= 1:
            # Throttle serial writes to at most once per second.
            if recv_data and len(recv_data[1]) == 3:  # checks if recv data is empty
                print('running' + str(recv_data))
                joy_vrt = round(4 * (1 - recv_data[1][1][3]))
                joy_fwd = round(4 * (1 - recv_data[1][1][1]))
                joy_rot = round(4 * (1 + recv_data[1][1][2]))
                submit = str(joy_vrt * 100 + joy_fwd * 10 + joy_rot)
                print(submit)
                ser.write(submit.encode('utf-8'))
                ser.write(cr.encode('utf-8'))
                # Echo the Arduino's response for debugging.
                line = ser.readline().decode('utf-8').rstrip()
                print(line)
                last_serial_time = time.time()

    s.close()  # Closes socket
    stream.stop()  # Stops stream
    PRINT('Quit.', SUCCESS)
class VideoWidget(QtGui.QWidget):
    """Composite widget: a video screen plus an input (play/pause/stop) toolbar
    and an output (record/process) toolbar, all backed by one VideoStream.

    The stream's signals are re-emitted here so parent widgets can subscribe
    to the widget instead of reaching into the stream.
    """

    # Signals forwarded from the underlying VideoStream.
    newFrame = QtCore.pyqtSignal(np.ndarray)
    sourceChanged = QtCore.pyqtSignal()
    stateChanged = QtCore.pyqtSignal(int)

    def __init__(self, mainWindow, parent=None):
        # Wire stream signals to forwarding slots, build the UI, then default
        # to the webcam source and start playback immediately.
        super(VideoWidget, self).__init__(parent)
        self.mainWindow = mainWindow
        self.videoStream = VideoStream(0)  # 0 = webcam source
        self.videoStream.newFrame.connect(self.onNewFrame)
        self.videoStream.sourceChanged.connect(self.onSourceChanged)
        self.videoStream.stateChanged.connect(self.onStateChanged)
        self.inputFilename = None   # set via onInputChanged
        self.outputFilename = None  # set via onOutputChanged
        self.setupUi()
        self.setRecordingControlsState()
        self.switchToWebCamBtn.setChecked(True)
        self.play()

    def setupUi(self):
        """Create all child widgets, actions, toolbars, and the layout."""
        self.videoScreen = VideoScreen(self.videoStream, self, self)
        self.fileWidget = FileWidget(self.mainWindow, self, 'Set input')
        self.fileWidget.setObjectName(_fromUtf8("fileWidget"))
        self.fileWidget.fileChanged.connect(self.onInputChanged)
        self.switchToWebCamBtn = QtGui.QPushButton(QtGui.QIcon("webcam.png"),
                                                   "", self)
        self.switchToWebCamBtn.setStyleSheet("margin-right: 7px; padding: 6px")
        self.switchToWebCamBtn.clicked.connect(self.onSwitchToWebCamBtnClicked)
        self.switchToWebCamBtn.setFlat(True)
        self.switchToWebCamBtn.setCheckable(True)
        # Expanding spacer pushes the transport actions to the right edge.
        self.spacer1 = QtGui.QWidget()
        self.spacer1.setSizePolicy(QtGui.QSizePolicy.Expanding,
                                   QtGui.QSizePolicy.Expanding)
        self.playAction = QtGui.QAction(
            self.style().standardIcon(QtGui.QStyle.SP_MediaPlay),
            "Play", self, shortcut="Ctrl+P", enabled=True)
        self.playAction.triggered.connect(self.play)
        self.pauseAction = QtGui.QAction(
            self.style().standardIcon(QtGui.QStyle.SP_MediaPause),
            "Pause", self, shortcut="Ctrl+A", enabled=True)
        self.pauseAction.triggered.connect(self.pause)
        self.stopAction = QtGui.QAction(
            self.style().standardIcon(QtGui.QStyle.SP_MediaStop),
            "Stop", self, shortcut="Ctrl+S", enabled=True)
        self.stopAction.triggered.connect(self.stop)
        self.inputBar = QtGui.QToolBar(self)
        self.inputBar.addWidget(self.fileWidget)
        self.inputBar.addWidget(self.spacer1)
        self.inputBar.addAction(self.playAction)
        self.inputBar.addAction(self.pauseAction)
        self.inputBar.addAction(self.stopAction)
        self.inputBar.addWidget(self.switchToWebCamBtn)
        self.saveAsWidget = FileWidget(self.mainWindow, self, 'Set output',
                                       True)
        self.saveAsWidget.fileChanged.connect(self.onOutputChanged)
        # Record / stop-recording share the Ctrl+R shortcut; only one of them
        # is visible at a time (toggled in record()/recordStop()).
        self.recordAction = QtGui.QAction(
            QtGui.QIcon("record.png"), "Record", self,
            shortcut="Ctrl+R", enabled=True)
        self.recordAction.triggered.connect(self.record)
        self.recordAction.setVisible(True)
        self.recordStopAction = QtGui.QAction(
            QtGui.QIcon("recording.png"), "Stop recording", self,
            shortcut="Ctrl+R", enabled=True)
        self.recordStopAction.triggered.connect(self.recordStop)
        self.recordStopAction.setVisible(False)
        self.spacer2 = QtGui.QWidget()
        self.spacer2.setSizePolicy(QtGui.QSizePolicy.Expanding,
                                   QtGui.QSizePolicy.Expanding)
        self.processBtn = QtGui.QPushButton("Process", self)
        self.processBtn.setStyleSheet(
            "margin-right: 7px; padding: 5px; width: 140px")
        self.processBtn.clicked.connect(self.onProcessBtnClicked)
        self.outputBar = QtGui.QToolBar(self)
        self.outputBar.addWidget(self.saveAsWidget)
        self.outputBar.addWidget(self.spacer2)
        self.outputBar.addAction(self.recordAction)
        self.outputBar.addAction(self.recordStopAction)
        self.outputBar.addWidget(self.processBtn)
        # Vertical layout: screen on top, then the two toolbars.
        hbox = QtGui.QVBoxLayout(self)
        hbox.addWidget(self.videoScreen)
        hbox.addStretch()
        hbox.addWidget(self.inputBar)
        hbox.addWidget(self.outputBar)
        hbox.setContentsMargins(-1, -1, -1, 2)

    def onProcessBtnClicked(self):
        # "Process" = play the current input while recording it to the output.
        self.play()
        self.videoStream.record()

    def onSwitchToWebCamBtnClicked(self):
        # Checked -> switch back to webcam (source 0); unchecked -> reset
        # the source (no argument).
        if self.switchToWebCamBtn.isChecked():
            self.videoStream.resetSource(0)
            self.play()
        else:
            self.videoStream.resetSource()

    def onInputChanged(self, filename):
        # A file input replaces the webcam as the active source.
        self.inputFilename = filename
        self.switchToWebCamBtn.setChecked(False)
        self.setRecordingControlsState()
        self.videoStream.resetSource(str(filename))

    def onOutputChanged(self, filename):
        self.outputFilename = filename
        self.setRecordingControlsState()
        self.videoStream.resetOutput(str(filename))

    def pause(self):
        self.videoStream.pause()

    def play(self):
        self.videoStream.play()

    def stop(self):
        self.videoStream.stop()

    def record(self):
        # Swap the visible action to "stop recording" and start recording.
        self.recordAction.setVisible(False)
        self.recordStopAction.setVisible(True)
        self.videoStream.record()

    def recordStop(self):
        self.recordAction.setVisible(True)
        self.recordStopAction.setVisible(False)
        self.videoStream.recordStop()

    @QtCore.pyqtSlot(np.ndarray)
    def onNewFrame(self, frame):
        # Re-emit so listeners can connect to this widget, not the stream.
        self.newFrame.emit(frame)

    def onSourceChanged(self):
        self.sourceChanged.emit()

    @QtCore.pyqtSlot(int)
    def onStateChanged(self, state):
        # Map each stream state to the matching enable/disable preset,
        # then forward the state to listeners.
        if state == VideoStream.State.CLOSED:
            self.setClosedState()
        elif state == VideoStream.State.STOPPED:
            self.setStoppedState()
        elif state == VideoStream.State.PAUSED:
            self.setPausedState()
        elif state == VideoStream.State.PLAYING:
            self.setPlayingState()
        self.stateChanged.emit(state)

    def setClosedState(self):
        # No source: nothing can be played.
        self.playAction.setEnabled(False)
        self.pauseAction.setEnabled(False)
        self.stopAction.setEnabled(False)

    def setStoppedState(self):
        self.playAction.setEnabled(True)
        self.pauseAction.setEnabled(False)
        self.stopAction.setEnabled(False)

    def setPausedState(self):
        self.playAction.setEnabled(True)
        self.pauseAction.setEnabled(False)
        self.stopAction.setEnabled(True)

    def setPlayingState(self):
        self.playAction.setEnabled(False)
        self.pauseAction.setEnabled(True)
        self.stopAction.setEnabled(True)

    def setRecordingControlsState(self):
        # Recording needs an output file; processing also needs an input file.
        self.recordAction.setEnabled(bool(self.outputFilename))
        self.recordStopAction.setEnabled(bool(self.outputFilename))
        self.processBtn.setEnabled(
            bool(self.outputFilename) and bool(self.inputFilename))
class VideoWidget(QtGui.QWidget):
    """Composite widget: a video screen plus an input (play/pause/stop) toolbar
    and an output (record/process) toolbar, all backed by one VideoStream.

    NOTE(review): this class appears to be a token-for-token duplicate of a
    VideoWidget defined earlier in this file (only formatting differs); the
    later definition shadows the earlier one. Consider removing one copy.
    """

    # Signals forwarded from the underlying VideoStream.
    newFrame = QtCore.pyqtSignal(np.ndarray)
    sourceChanged = QtCore.pyqtSignal()
    stateChanged = QtCore.pyqtSignal(int)

    def __init__(self, mainWindow, parent=None):
        # Wire stream signals to forwarding slots, build the UI, then default
        # to the webcam source and start playback immediately.
        super(VideoWidget, self).__init__(parent)
        self.mainWindow = mainWindow
        self.videoStream = VideoStream(0)  # 0 = webcam source
        self.videoStream.newFrame.connect(self.onNewFrame)
        self.videoStream.sourceChanged.connect(self.onSourceChanged)
        self.videoStream.stateChanged.connect(self.onStateChanged)
        self.inputFilename = None   # set via onInputChanged
        self.outputFilename = None  # set via onOutputChanged
        self.setupUi()
        self.setRecordingControlsState()
        self.switchToWebCamBtn.setChecked(True)
        self.play()

    def setupUi(self):
        """Create all child widgets, actions, toolbars, and the layout."""
        self.videoScreen = VideoScreen(self.videoStream, self, self)
        self.fileWidget = FileWidget(self.mainWindow, self, 'Set input')
        self.fileWidget.setObjectName(_fromUtf8("fileWidget"))
        self.fileWidget.fileChanged.connect(self.onInputChanged)
        self.switchToWebCamBtn = QtGui.QPushButton(QtGui.QIcon("webcam.png"),
                                                   "", self)
        self.switchToWebCamBtn.setStyleSheet("margin-right: 7px; padding: 6px")
        self.switchToWebCamBtn.clicked.connect(self.onSwitchToWebCamBtnClicked)
        self.switchToWebCamBtn.setFlat(True)
        self.switchToWebCamBtn.setCheckable(True)
        # Expanding spacer pushes the transport actions to the right edge.
        self.spacer1 = QtGui.QWidget()
        self.spacer1.setSizePolicy(QtGui.QSizePolicy.Expanding,
                                   QtGui.QSizePolicy.Expanding)
        self.playAction = QtGui.QAction(self.style().standardIcon(
            QtGui.QStyle.SP_MediaPlay), "Play", self, shortcut="Ctrl+P",
            enabled=True)
        self.playAction.triggered.connect(self.play)
        self.pauseAction = QtGui.QAction(self.style().standardIcon(
            QtGui.QStyle.SP_MediaPause), "Pause", self, shortcut="Ctrl+A",
            enabled=True)
        self.pauseAction.triggered.connect(self.pause)
        self.stopAction = QtGui.QAction(self.style().standardIcon(
            QtGui.QStyle.SP_MediaStop), "Stop", self, shortcut="Ctrl+S",
            enabled=True)
        self.stopAction.triggered.connect(self.stop)
        self.inputBar = QtGui.QToolBar(self)
        self.inputBar.addWidget(self.fileWidget)
        self.inputBar.addWidget(self.spacer1)
        self.inputBar.addAction(self.playAction)
        self.inputBar.addAction(self.pauseAction)
        self.inputBar.addAction(self.stopAction)
        self.inputBar.addWidget(self.switchToWebCamBtn)
        self.saveAsWidget = FileWidget(self.mainWindow, self, 'Set output',
                                       True)
        self.saveAsWidget.fileChanged.connect(self.onOutputChanged)
        # Record / stop-recording share the Ctrl+R shortcut; only one of them
        # is visible at a time (toggled in record()/recordStop()).
        self.recordAction = QtGui.QAction(QtGui.QIcon("record.png"), "Record",
                                          self, shortcut="Ctrl+R",
                                          enabled=True)
        self.recordAction.triggered.connect(self.record)
        self.recordAction.setVisible(True)
        self.recordStopAction = QtGui.QAction(QtGui.QIcon("recording.png"),
                                              "Stop recording", self,
                                              shortcut="Ctrl+R", enabled=True)
        self.recordStopAction.triggered.connect(self.recordStop)
        self.recordStopAction.setVisible(False)
        self.spacer2 = QtGui.QWidget()
        self.spacer2.setSizePolicy(QtGui.QSizePolicy.Expanding,
                                   QtGui.QSizePolicy.Expanding)
        self.processBtn = QtGui.QPushButton("Process", self)
        self.processBtn.setStyleSheet(
            "margin-right: 7px; padding: 5px; width: 140px")
        self.processBtn.clicked.connect(self.onProcessBtnClicked)
        self.outputBar = QtGui.QToolBar(self)
        self.outputBar.addWidget(self.saveAsWidget)
        self.outputBar.addWidget(self.spacer2)
        self.outputBar.addAction(self.recordAction)
        self.outputBar.addAction(self.recordStopAction)
        self.outputBar.addWidget(self.processBtn)
        # Vertical layout: screen on top, then the two toolbars.
        hbox = QtGui.QVBoxLayout(self)
        hbox.addWidget(self.videoScreen)
        hbox.addStretch()
        hbox.addWidget(self.inputBar)
        hbox.addWidget(self.outputBar)
        hbox.setContentsMargins(-1, -1, -1, 2)

    def onProcessBtnClicked(self):
        # "Process" = play the current input while recording it to the output.
        self.play()
        self.videoStream.record()

    def onSwitchToWebCamBtnClicked(self):
        # Checked -> switch back to webcam (source 0); unchecked -> reset
        # the source (no argument).
        if self.switchToWebCamBtn.isChecked():
            self.videoStream.resetSource(0)
            self.play()
        else:
            self.videoStream.resetSource()

    def onInputChanged(self, filename):
        # A file input replaces the webcam as the active source.
        self.inputFilename = filename
        self.switchToWebCamBtn.setChecked(False)
        self.setRecordingControlsState()
        self.videoStream.resetSource(str(filename))

    def onOutputChanged(self, filename):
        self.outputFilename = filename
        self.setRecordingControlsState()
        self.videoStream.resetOutput(str(filename))

    def pause(self):
        self.videoStream.pause()

    def play(self):
        self.videoStream.play()

    def stop(self):
        self.videoStream.stop()

    def record(self):
        # Swap the visible action to "stop recording" and start recording.
        self.recordAction.setVisible(False)
        self.recordStopAction.setVisible(True)
        self.videoStream.record()

    def recordStop(self):
        self.recordAction.setVisible(True)
        self.recordStopAction.setVisible(False)
        self.videoStream.recordStop()

    @QtCore.pyqtSlot(np.ndarray)
    def onNewFrame(self, frame):
        # Re-emit so listeners can connect to this widget, not the stream.
        self.newFrame.emit(frame)

    def onSourceChanged(self):
        self.sourceChanged.emit()

    @QtCore.pyqtSlot(int)
    def onStateChanged(self, state):
        # Map each stream state to the matching enable/disable preset,
        # then forward the state to listeners.
        if state == VideoStream.State.CLOSED:
            self.setClosedState()
        elif state == VideoStream.State.STOPPED:
            self.setStoppedState()
        elif state == VideoStream.State.PAUSED:
            self.setPausedState()
        elif state == VideoStream.State.PLAYING:
            self.setPlayingState()
        self.stateChanged.emit(state)

    def setClosedState(self):
        # No source: nothing can be played.
        self.playAction.setEnabled(False)
        self.pauseAction.setEnabled(False)
        self.stopAction.setEnabled(False)

    def setStoppedState(self):
        self.playAction.setEnabled(True)
        self.pauseAction.setEnabled(False)
        self.stopAction.setEnabled(False)

    def setPausedState(self):
        self.playAction.setEnabled(True)
        self.pauseAction.setEnabled(False)
        self.stopAction.setEnabled(True)

    def setPlayingState(self):
        self.playAction.setEnabled(False)
        self.pauseAction.setEnabled(True)
        self.stopAction.setEnabled(True)

    def setRecordingControlsState(self):
        # Recording needs an output file; processing also needs an input file.
        self.recordAction.setEnabled(bool(self.outputFilename))
        self.recordStopAction.setEnabled(bool(self.outputFilename))
        self.processBtn.setEnabled(
            bool(self.outputFilename) and bool(self.inputFilename))