def from_stream():
    fps = FPS().start()
    cam = WebcamVideoStream().start()
    max_frames = 50
    i = 0
    while True:
        frame = cam.read()
        if i > max_frames:
            fps.stop()
            print(fps.elapsed())
            print(fps.fps())
            break
        i += 1
        testcone(frame, stream=True)
        fps.update()
        cv2.imshow('', frame)
        cv2.waitKey(1)
    cam.stop()  # release the threaded stream on exit
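# The snippets in this collection lean on two helpers in the style of the
# imutils.video package: WebcamVideoStream (a threaded capture wrapper) and
# FPS (a frame counter). For readers without the library, here is a minimal
# sketch of a threaded stream with the same start()/read()/stop() surface;
# it mirrors the convention these snippets assume, not the library's source.
from threading import Thread

import cv2


class WebcamVideoStream:
    def __init__(self, src=0):
        # grab an initial frame so read() never returns None after start()
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        self.stopped = False

    def start(self):
        # poll the camera on a daemon thread so read() is non-blocking
        Thread(target=self._update, daemon=True).start()
        return self

    def _update(self):
        while not self.stopped:
            (self.grabbed, self.frame) = self.stream.read()
        self.stream.release()

    def read(self):
        return self.frame  # most recently captured frame

    def stop(self):
        self.stopped = True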
def threaded_video_test():
    # create a *threaded* video stream, allow the camera sensor to warm up,
    # and start the FPS counter
    print("[INFO] sampling THREADED frames from webcam...")
    vs = WebcamVideoStream(src=SRC).start()
    fps = FPS().start()

    # loop over some frames...this time using the threaded stream
    while fps._numFrames < NUM_FRAMES:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of VID_WIDTH pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=VID_WIDTH)

        # check to see if the frame should be displayed to our screen
        if DISPLAY:
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
def blocking_video_test():
    # grab a pointer to the video stream and initialize the FPS counter
    print("[INFO] sampling frames from webcam...")
    stream = cv2.VideoCapture(SRC)
    fps = FPS().start()

    # loop over some frames (same frame budget as the threaded test)
    while fps._numFrames < NUM_FRAMES:
        # grab the frame from the stream and resize it to have a
        # maximum width of VID_WIDTH pixels
        (grabbed, frame) = stream.read()
        frame = imutils.resize(frame, width=VID_WIDTH)

        # check to see if the frame should be displayed to our screen
        if DISPLAY:
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    stream.release()
    cv2.destroyAllWindows()
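# Both benchmark functions above poll the private counter FPS._numFrames, which
# pins them to the imutils.video.FPS interface. As a reference, this is a
# minimal stand-in with the same start()/stop()/update()/elapsed()/fps()
# methods; a sketch of the conventional implementation, not the library's code.
import datetime


class FPS:
    def __init__(self):
        self._start = None   # time when start() was called
        self._end = None     # time when stop() was called
        self._numFrames = 0  # frames counted between start and stop

    def start(self):
        self._start = datetime.datetime.now()
        return self  # returning self allows the FPS().start() idiom

    def stop(self):
        self._end = datetime.datetime.now()

    def update(self):
        self._numFrames += 1

    def elapsed(self):
        return (self._end - self._start).total_seconds()

    def fps(self):
        return self._numFrames / self.elapsed()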
class ALPRCamera:
    def __init__(self, camera, db_service, alpr_config, alpr_run_time, gui):
        self.stopped = False
        self.camera_name = camera.name
        self.cam = videoStream(src=camera.url)
        self.guiFPS = FPS()
        self.gui = gui
        self.detection_boxes = []
        for search_box in camera.aoi_list:
            for search_box_name in search_box:
                new_box = detectionBox(camera.name, search_box_name,
                                       search_box[search_box_name], self.cam,
                                       alpr_config, alpr_run_time, db_service)
                self.detection_boxes.append(new_box)

    def get_frame(self):
        frame = self.cam.read()
        self.guiFPS.update()
        for box in self.detection_boxes:
            frame = box.draw(frame)
        return frame

    def is_alive(self):
        return not self.stopped

    def start(self):
        Thread(target=self.run, args=()).start()
        return self

    def stop(self):
        self.stopped = True

    def run(self):
        self.cam.start()
        self.guiFPS.start_timer()
        for box in self.detection_boxes:
            box.start()

        # main loop: spin until stop() flips the flag
        while not self.stopped:
            continue

        for box in self.detection_boxes:
            box.stop()
        self.guiFPS.stop()
        if self.gui:
            print(self.camera_name + " gui", self.guiFPS.fps())
        self.cam.stop()
        cv2.destroyAllWindows()
        return
class VideoPlayer:
    def __init__(self, source=0, dest=None):
        self._source = source
        self._dest = dest
        self._frame = None
        self._playing = False
        self._fps = FPS()

    def start(self):
        self._cap = cv2.VideoCapture(self._source)
        self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 640)
        if self._dest is not None:
            width = int(self._cap.get(cv2.CAP_PROP_FRAME_WIDTH) + 0.5)
            height = int(self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT) + 0.5)
            fps = int(self._cap.get(cv2.CAP_PROP_FPS))
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            self._out = cv2.VideoWriter(self._dest, fourcc, fps,
                                        (width, height))
        self._playing = True
        self._fps.start()
        while self._playing:
            self.read_frame()
            self.process_frame()
            self.write_frame()
        self._fps.stop()
        print(self._fps.fps())

    def stop(self):
        self._playing = False
        self._cap.release()
        if self._dest is not None:  # the writer only exists when recording
            self._out.release()
        cv2.destroyAllWindows()

    def read_frame(self):
        ret, frame = self._cap.read()
        self._frame = frame
        self._fps.update()

    def process_frame(self):
        pass

    def write_frame(self):
        self.show_frame()
        if self._dest is not None:
            self.save_frame()

    def show_frame(self):
        cv2.imshow('Video', self._frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            self.stop()

    def save_frame(self):
        self._out.write(self._frame)
class VideoStreamer(object):
    def __init__(self):
        self.fps = FPS().start()

    def get_fps(self):
        self.fps.stop()
        fps = self.fps.fps()
        self.fps.start()
        return fps

    def get_frame(self):
        raise NotImplementedError("Choose a video streamer from the available ones "
                                  "e.g., CV2VideoStreamer or ROSVideoStreamer")
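# VideoStreamer above is an abstract base: subclasses provide get_frame() while
# the base keeps a rolling FPS estimate. The error message names CV2VideoStreamer
# and ROSVideoStreamer; the subclass below is only an illustrative guess at what
# an OpenCV-backed implementation could look like, not the project's actual code.
class CV2VideoStreamer(VideoStreamer):
    def __init__(self, source=0):
        super().__init__()
        self.cap = cv2.VideoCapture(source)

    def get_frame(self):
        ok, frame = self.cap.read()
        if not ok:
            raise RuntimeError("failed to read frame from source")
        self.fps.update()  # count the frame so get_fps() stays meaningful
        return frame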
def test01():
    """can we instantiate? """
    fps = FPS().start()
    pacer = Pacer(DESIRED_FPS).start()
    while fps.n_frames < N_TEST_FRAMES:
        print(datetime.datetime.now())
        fps.update()
        pacer.update()
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    print("[INFO] n_frames: %i" % fps.n_frames)
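# test01() pairs the FPS counter with a Pacer that throttles the loop to
# DESIRED_FPS. The Pacer class is not shown in the source, so the following is
# a hypothetical rate limiter with the same start()/update() surface; every
# detail here is an assumption.
import time


class Pacer:
    def __init__(self, desired_fps):
        self.period = 1.0 / desired_fps  # target seconds per frame
        self.next_tick = None

    def start(self):
        self.next_tick = time.monotonic() + self.period
        return self

    def update(self):
        # sleep off whatever remains of this frame's time slot
        remaining = self.next_tick - time.monotonic()
        if remaining > 0:
            time.sleep(remaining)
        self.next_tick += self.period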
        color = (0, 0, 255)
        if judge_side_face(landm):
            color = (0, 255, 0)
        cv2.rectangle(frame, (b[0], b[1]), (b[2], b[3]), color, 2)
        cx = b[0]
        cy = b[1] + 12
        cv2.putText(frame, text, (cx, cy), cv2.FONT_HERSHEY_DUPLEX, 0.5,
                    (255, 255, 255))

        # landms
        landm = landm.astype(np.int32)
        cv2.circle(frame, tuple(landm[0]), 1, (0, 0, 255), 4)
        cv2.circle(frame, tuple(landm[1]), 1, (0, 255, 255), 4)
        cv2.circle(frame, tuple(landm[2]), 1, (255, 0, 255), 4)
        cv2.circle(frame, tuple(landm[3]), 1, (0, 255, 0), 4)
        cv2.circle(frame, tuple(landm[4]), 1, (255, 0, 0), 4)

    fps.update()
    text_fps = "FPS: {:.3f}".format(fps.get_fps_n())
    cv2.putText(frame, text_fps, (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                (0, 255, 0), 2)

    cv2.imshow("frame", imutils.resize(frame, height=1000))
    key = cv2.waitKey(1) & 0xff
    if key == ord("q"):
        break
    elif key == ord("c"):
        cv2.imwrite("cap.jpg", frame)

cv2.destroyAllWindows()
fps.stop()
print("Total FPS: {}".format(fps.fps()))
vs.stop()
    def run(self):
        # Activate Detector module
        self.detector = VideoInferencePage()

        # Create a VideoCapture object and read from input file
        # If the input is the camera, pass 0 instead of the video file name
        cap = cv2.VideoCapture(self.video_address)
        fps = None
        new_detected = []

        # Check if camera opened successfully
        if not cap.isOpened():
            print("Error opening video stream or file")

        # Read until video is completed
        while cap.isOpened() or shouldrun:
            # Capture frame-by-frame
            ret, frame = cap.read()
            global baseimageupd
            if ret == True:
                if not self.detector.isready():
                    continue
                if not fps:
                    fps = FPS().start()
                elif fps.elapsed() > 60:
                    fps = FPS().start()
                if state == "take_off" and float(var_altitude) >= 10 and baseimageupd == False:
                    object_image = frame
                    baseimageupd = True
                    # cv2.imwrite("/media/ibrahim/Data/faster-rcnn/tools/img/baseimage.jpg", object_image)
                    image = self.image_resize(object_image, height=300)
                    retval, buffer = cv2.imencode('.png', image)
                    image_base64 = base64.b64encode(buffer)
                    self.newdetected.emit(image_base64)

                # feed the detector and wait for true result
                self.detector.send_frame(frame)
                result = self.detector.get_result()
                # Uncomment this if want to bypass the detector
                # result = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                if not isinstance(result, np.ndarray):
                    continue

                # Display the resulting frame
                convertToQtFormat = QtGui.QImage(result.data, result.shape[1],
                                                 result.shape[0],
                                                 QtGui.QImage.Format_RGB888)
                p = convertToQtFormat.scaled(1260, 720, QtCore.Qt.KeepAspectRatio)
                self.newimage.emit(p)
                # self.emit(QtCore.SIGNAL('newFPS(int)'), int(fps.fps()))
                passobject = self.detector.get_passingobject()
                # passobject = []
                if len(new_detected) < len(passobject):
                    for objectID in passobject.keys():
                        if not objectID in new_detected:
                            new_detected.append(objectID)
                            # image parsing to base64
                            # print(passobject[objectID]['image'])
                            try:
                                image = self.image_resize(
                                    passobject[objectID]['image'], height=300)
                                label = (passobject[objectID]['label'])
                                retval, buffer = cv2.imencode('.png', image)
                                image_base64 = base64.b64encode(buffer)
                                self.newinv.emit(image_base64, label)
                            except Exception as e:
                                print("\n*************\nMissing Image\n***************\n")
                                continue
                '''
                if passobject[objectID]['image'] != []:
                    image = self.image_resize(passobject[objectID]['image'], height=300)
                    label = (passobject[objectID]['label'])
                    retval, buffer = cv2.imencode('.png', image)
                    image_base64 = base64.b64encode(buffer)
                    self.newinv.emit(image_base64, label)
                else:
                    print("\n*************\nMissing Image\n***************\n")
                    continue
                '''
                fps.update()
                self.new_fps.emit(int(fps.fps()))
                if self.detector.isobjectsupdated:
                    objects = self.detector.get_objects()

                # Press Q on keyboard to exit
                if not shouldrun:
                    fps.stop()
                    self.detector.exit_detection()
                    break

            # restart stream
            else:
                print("ret is false")
                if fps:
                    fps.stop()
                time.sleep(3)
                cap.release()
                cap = cv2.VideoCapture(self.video_address)
                if cap.isOpened() and fps:
                    fps.start()

        # When everything done, release the video capture object
        cap.release()
        # Closes all the frames
        cv2.destroyAllWindows()
fps.start()
timeOut = 0
while frames < 300:
    # print("Main Thread Queue -> ", frame_queue.qsize())
    with queue_lock:
        if not frame_queue.empty():
            timeOut = 0
            frame = frame_queue.get()
            sal = MPSaliency(frame)
            sal_map = sal.get_saliency_map()
            # sal_frame = (sal_map * 255).round().astype(np.uint8)
            # frame = cv2.cvtColor(sal_frame, cv2.COLOR_GRAY2BGR)
            # out.write(frame)
            frames = frames + 1
            fps.update()
        else:
            pass

fps.stop()
stream.terminate()
cv2.destroyAllWindows()
# out.release()
print("FPS ::", fps.fps())
print("DUR ::", fps.elapsed())
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=400)

    print(vs.readxOffset())
    if vs.readxOffset() > 30:
        ser.write(" rt > ".encode("utf-8"))
    elif -30 < vs.readxOffset() < 30:
        ser.write(" st > ".encode("utf-8"))
    else:
        ser.write(" lt > ".encode("utf-8"))

    # check to see if the frame should be displayed to our screen
    if args["display"] > 0:
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

    # update the FPS counter
    fps.update()

# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
    pred_coords, confidence = heatmap_to_coord(predicted_heatmap, upscale_bbox)
    img = cv_plot_keypoints(img, pred_coords, confidence, class_IDs,
                            bounding_boxs, scores)

    # Compare the pose angles against the reference action
    angles = AngeleCal.cal(pred_coords, confidence)
    results = angeleCal.compare(angles).astype('U5')

    # Scale the example video and stack it below the live frame
    width = int(img.shape[1])
    height = int(width * frame2.shape[0] / frame2.shape[1])
    frame2 = cv.resize(frame2, (width, height), interpolation=cv.INTER_AREA)
    img = np.vstack((img, frame2))

    cv_plot_image(img,
                  upperleft_txt=FPS.fps(),
                  upperleft_txt_corner=(10, 25),
                  left_txt_list=results)

    # Exit on the ESC key
    if cv.waitKey(1) == 27:
        break

    ret1, frame1 = cap1.read()
    ret2, frame2 = cap2.read()

cv.destroyAllWindows()
cap1.release()
cap2.release()
from fps import FPS

faceCascade = cv2.CascadeClassifier('faceclassifier.xml')
bodyCascade = cv2.CascadeClassifier('bodyclassifier.xml')

cam = WebcamVideoStream()
cam.startThread()
fps = FPS()

while True:
    img = cam.read()
    fps.start()  # time just the detection stage for this frame
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    fps.stop()
    cv2.putText(img, 'FPS:' + str(int(fps.fps())), (450, 60),
                cv2.FONT_HERSHEY_COMPLEX, 1.5, (255, 255, 255), 2,
                cv2.LINE_AA)
    fps.updateFrames()
    cv2.imshow("frame", img)
    if chr(cv2.waitKey(1) & 255) == 'q':
        break

cam.stop()
cv2.destroyAllWindows()
    def run(self):
        # Activate Detector module
        self.detector = VideoInferencePage()

        # Create a VideoCapture object and read from input file
        # If the input is the camera, pass 0 instead of the video file name
        cap = cv2.VideoCapture(self.video_address)
        fps = None
        new_detected = []

        # Check if camera opened successfully
        if not cap.isOpened():
            print("Error opening video stream or file")

        # Read until video is completed
        while cap.isOpened() or shouldrun:
            # Capture frame-by-frame
            ret, frame = cap.read()
            if ret == True:
                if not self.detector.isready():
                    continue
                if not fps:
                    fps = FPS().start()
                elif fps.elapsed() > 60:
                    fps = FPS().start()

                # feed the detector and wait for true result
                self.detector.send_frame(frame)
                result = self.detector.get_result()
                # Uncomment this if want to bypass the detector
                # result = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                if not isinstance(result, np.ndarray):
                    continue

                # Display the resulting frame
                convertToQtFormat = QtGui.QImage(result.data, result.shape[1],
                                                 result.shape[0],
                                                 QtGui.QImage.Format_RGB888)
                p = convertToQtFormat.scaled(1260, 720, QtCore.Qt.KeepAspectRatio)
                self.emit(QtCore.SIGNAL('newImage(QImage)'), p)
                fps.update()
                self.emit(QtCore.SIGNAL('newFPS(int)'), int(fps.fps()))

                passobject = self.detector.get_passingobject()
                passobject = []  # NOTE: overwrites the detector result, so the block below never runs
                if len(new_detected) < len(passobject):
                    for objectID in passobject.keys():
                        if not objectID in new_detected:
                            new_detected.append(objectID)
                            # image parsing to base64
                            image = self.image_resize(
                                passobject[objectID]['image'], height=300)
                            retval, buffer = cv2.imencode('.png', image)
                            image_base64 = base64.b64encode(buffer)
                            self.newdetected.emit(image_base64)

                # Press Q on keyboard to exit
                if not shouldrun:
                    fps.stop()
                    self.detector.exit_detection()
                    break

            # restart stream
            else:
                print("ret is false")
                if fps:
                    fps.stop()
                time.sleep(3)
                cap.release()
                cap = cv2.VideoCapture(self.video_address)
                if cap.isOpened() and fps:
                    fps.start()

        # When everything done, release the video capture object
        cap.release()
        # Closes all the frames
        cv2.destroyAllWindows()
class PiVideoStream(QThread):
    signals = ObjectSignals()
    snapshotTaken = pyqtSignal()
    clipRecorded = pyqtSignal()
    camera = PiCamera()

    ## @param ins is the number of instances created. This may not exceed 1.
    ins = 0

    def __init__(self):
        super().__init__()
        ## Instance limiter. Checks if an instance exists already. If so, it deletes the current instance.
        if PiVideoStream.ins >= 1:
            self.msg(
                "error; multiple instances of {:s} created, while only 1 instance is allowed"
                .format(__class__.__name__))
            del self
            return
        try:
            PiVideoStream.ins += 1
        except Exception as err:
            traceback.print_exc()
            self.signals.error.emit(
                (type(err), err.args, traceback.format_exc()))
        warnings.filterwarnings('default', category=DeprecationWarning)
        self.settings = QSettings("settings.ini", QSettings.IniFormat)
        self.loadSettings()
        self.initStream()

    def loadSettings(self):
        self.msg("info; loading camera settings from " +
                 self.settings.fileName())
        frame_size_str = self.settings.value('camera/frame_size')
        (width, height) = frame_size_str.split('x')
        self.camera.resolution = raw_frame_size((int(width), int(height)))
        self.camera.framerate = int(self.settings.value('camera/frame_rate'))
        self.camera.image_effect = self.settings.value('camera/effect')
        self.camera.shutter_speed = int(
            self.settings.value('camera/shutter_speed'))
        self.camera.iso = int(self.settings.value(
            'camera/iso'))  # should force unity analog gain

        # set parameters for speed
        frame_size_str = self.settings.value('image_frame_size')
        (width, height) = frame_size_str.split('x')
        self.image_size = (int(width), int(height))
        self.camera.video_denoise = False
        self.monochrome = True
        self.use_video_port = True

        # dunno if setting awb mode manually is really useful
        ## self.camera.awb_mode = 'off'
        ## self.camera.awb_gains = 5.0
        ## self.camera.meter_mode = 'average'
        ## self.camera.exposure_mode = 'auto'  # 'sports' to reduce motion blur, 'off' after init to freeze settings

    @pyqtSlot()
    def initStream(self):
        # Initialize the camera stream
        if self.isRunning():
            # in case init gets called, while thread is running
            self.msg("info; video stream is running already")
        else:
            # init camera and open stream
            if self.monochrome:
                ## self.camera.color_effects = (128,128)  # return monochrome image, not required if we take Y frame only.
                self.rawCapture = PiYArray(self.camera,
                                           size=self.camera.resolution)
                self.stream = self.camera.capture_continuous(
                    self.rawCapture, 'yuv', self.use_video_port)
            else:
                self.rawCapture = PiRGBArray(self.camera,
                                             size=self.camera.resolution)
                self.stream = self.camera.capture_continuous(
                    self.rawCapture, 'bgr', self.use_video_port)
            # allocate memory
            self.frame = np.empty(self.camera.resolution +
                                  (1 if self.monochrome else 3, ),
                                  dtype=np.uint8)
            # restart thread
            self.start()
            self.wait_ms(1000)
            self.msg("info; video stream initialized with frame size = " +
                     str(self.camera.resolution))

    @pyqtSlot()
    def run(self):
        try:
            self.fps = FPS().start()
            for f in self.stream:
                if self.isInterruptionRequested():
                    self.signals.finished.emit()
                    return
                ## self.rawCapture.truncate(0)  # Deprecated: clear the stream in preparation for the next frame
                self.rawCapture.seek(0)
                self.frame = f.array  # grab the frame from the stream
                self.signals.ready.emit()
                self.signals.result.emit(
                    cv2.resize(self.frame,
                               self.image_size))  # resize to speed up processing
                self.fps.update()
        except Exception as err:
            traceback.print_exc()
            self.signals.error.emit(
                (type(err), err.args, traceback.format_exc()))

    @pyqtSlot()
    def stop(self):
        self.msg("info; stopping")
        if self.isRunning():
            self.requestInterruption()
            self.wait_signal(self.signals.finished, 2000)
        self.fps.stop()
        self.quit()
        ## self.frame.fill(0)  # clear frame, not allowed since frame is read-only?
        ## self.signals.ready.emit()
        ## self.signals.result.emit(np.zeros(self.image_size))  # could produce an information image here
        self.msg(
            "info; video stream stopped, approx. acquisition speed: {:.2f} fps"
            .format(self.fps.fps()))

    def msg(self, text):
        if text:
            text = self.__class__.__name__ + ";" + str(text)
            print(text)
            self.signals.message.emit(text)

    @pyqtSlot()
    def changeCameraSettings(self,
                             frame_size=(640, 480),
                             frame_rate=24,
                             format='bgr',
                             effect='none',
                             use_video_port=False,
                             monochrome=True):
        '''
        The use_video_port parameter controls whether the camera's image or
        video port is used to capture images. It defaults to False, which means
        the camera's image port is used. This port is slow but produces better
        quality pictures.
        '''
        self.stop()
        self.camera.resolution = raw_frame_size(frame_size)
        self.camera.framerate = frame_rate
        self.camera.image_effect = effect
        self.use_video_port = use_video_port
        self.monochrome = monochrome
        self.initStream()

    def wait_ms(self, timeout):
        ''' Block loop until timeout (ms) elapses. '''
        loop = QEventLoop()
        QTimer.singleShot(timeout, loop.exit)
        loop.exec_()

    def wait_signal(self, signal, timeout=1000):
        ''' Block loop until signal emitted, or timeout (ms) elapses. '''
        loop = QEventLoop()
        signal.connect(loop.quit)  # only quit is a slot of QEventLoop
        QTimer.singleShot(timeout, loop.exit)
        loop.exec_()

    @pyqtSlot(str)
    def snapshot(self, filename_prefix=None):
        # open path
        (head, tail) = os.path.split(filename_prefix)
        if not os.path.exists(head):
            os.makedirs(head)
        filename = os.path.sep.join([
            head,
            '{:016d}_'.format(round(time.time() * 1000)) + tail + '.png'
        ])
        # write image
        self.wait_signal(self.signals.ready, 5000)  # wait for first frame to be shot
        cv2.imwrite(filename, self.frame)
        self.msg("info; image written to " + filename)

    @pyqtSlot(str, int)
    def recordClip(self, filename_prefix=None, duration=10):
        # open path
        (head, tail) = os.path.split(filename_prefix)
        if not os.path.exists(head):
            os.makedirs(head)
        filename = os.path.sep.join([
            head,
            '{:016d}_'.format(round(time.time() * 1000)) + tail + '.avi'
        ])
        self.msg(
            "TODO; changing camera settings may get the process killed after several hours, "
            "probably better to open the stream in video resolution from the start "
            "if video recording is required!")

        # set video clip parameters
        frame_size_str = self.settings.value('camera/clip_frame_size')
        frame_size = raw_frame_size((int(frame_size_str.split('x')[0]),
                                     int(frame_size_str.split('x')[1])))
        frame_rate = int(self.settings.value('camera/clip_frame_rate'))
        self.changeCameraSettings(frame_size=frame_size,
                                  frame_rate=frame_rate,
                                  use_video_port=True,
                                  monochrome=False)

        # define the codec and create VideoWriter object
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(filename, fourcc, frame_rate, frame_size)
        self.msg("info; start recording video to " + filename)

        # write file
        for i in range(0, duration * frame_rate):
            self.signals.progress.emit(
                int(100 * i / (duration * frame_rate - 1)))
            self.wait_signal(self.signals.ready, 1000)
            if self.frame is not None:
                out.write(self.frame)

        # close
        out.release()
        self.msg("info; recording done")
        ## self.camera.start_recording(filename)
        ## self.camera.wait_recording(duration)
        ## self.camera.stop_recording()

        # revert to original parameters
        self.loadSettings()
        self.initStream()
        self.clipRecorded.emit()
out = cv2.VideoWriter(
    os.path.join(dst_dir, filename), fourcc, 30.0,
    (int(config['stream']['width']), int(config['stream']['height'])))

# Start streaming
vs.start()
fps = FPS()
fps.start()

# TODO: Apply object detection and face recognition here
while vs.size() or not vs.stopped:
    frame = vs.read()
    out.write(frame)
    cv2.imshow("Streaming", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        vs.stop()
    fps.update()

# Stop streaming
fps.stop()
vs.stop()

# Release resources
out.release()
cv2.destroyAllWindows()

print("Elapsed time: %s" % str(fps.elapsed()))
print("Approximate FPS: %s" % str(fps.fps()))
class PiVideoStream(QThread):
    image = None
    finished = pyqtSignal()
    postMessage = pyqtSignal(str)
    frame = pyqtSignal(np.ndarray)
    progress = pyqtSignal(int)
    captured = pyqtSignal()
    camera = PiCamera()
    storagePath = None
    cropRect = [0] * 4

    ## @param ins is the number of instances created. This may not exceed 1.
    ins = 0

    def __init__(self):
        super().__init__()
        ## Instance limiter. Checks if an instance exists already. If so, it deletes the current instance.
        if PiVideoStream.ins >= 1:
            self.postMessage.emit(
                "{}: error; multiple instances created, while only 1 instance is allowed"
                .format(__class__.__name__))
            del self
            return
        try:
            PiVideoStream.ins += 1
        except Exception as err:
            self.postMessage.emit("{}: error; type: {}, args: {}".format(
                self.__class__.__name__, type(err), err.args))
        else:
            warnings.filterwarnings('default', category=DeprecationWarning)
            self.settings = QSettings("settings.ini", QSettings.IniFormat)
            self.loadSettings()
            ## self.initStream()

    def loadSettings(self):
        self.postMessage.emit(
            "{}: info; loading camera settings from {}".format(
                self.__class__.__name__, self.settings.fileName()))

        # load
        self.monochrome = self.settings.value('camera/monochrome',
                                              False,
                                              type=bool)
        self.use_video_port = self.settings.value('camera/use_video_port',
                                                  False,
                                                  type=bool)
        self.sensor_mode = int(self.settings.value('camera/sensor_mode'))

        # set frame sizes
        if self.sensor_mode == 0:
            self.frame_size = (4056, 3040)
        elif self.sensor_mode == 1:
            self.frame_size = (1920, 1080)
        elif self.sensor_mode == 2 or self.sensor_mode == 3:
            self.frame_size = (3280, 2464)
        elif self.sensor_mode == 4:
            self.frame_size = (1640, 1232)
        elif self.sensor_mode == 5:
            self.frame_size = (1640, 922)
        elif self.sensor_mode == 6:
            self.frame_size = (1280, 720)
        elif self.sensor_mode == 7:
            self.frame_size = (640, 480)
        else:
            raise ValueError

        frame_size_str = self.settings.value('display_frame_size')
        (width, height) = frame_size_str.split('x')
        self.display_frame_size = (int(width), int(height))
        if not self.monochrome:
            self.display_frame_size = self.display_frame_size + (3, )

        # set more camera parameters
        self.camera.resolution = self.frame_size
        self.camera.sensor_mode = self.sensor_mode
        self.camera.framerate = int(self.settings.value('camera/frame_rate'))
        self.camera.image_effect = self.settings.value('camera/effect')
        self.camera.shutter_speed = int(
            self.settings.value('camera/shutter_speed'))
        self.camera.iso = int(self.settings.value(
            'camera/iso'))  # should force unity analog gain
        self.camera.video_denoise = self.settings.value('camera/video_denoise',
                                                        False,
                                                        type=bool)

        # dunno if setting awb mode manually is really useful
        ## self.camera.awb_mode = 'off'
        ## self.camera.awb_gains = 5.0
        ## self.camera.meter_mode = 'average'
        ## self.camera.exposure_mode = 'auto'  # 'sports' to reduce motion blur, 'off' after init to freeze settings

    @pyqtSlot()
    def initStream(self):
        # Initialize the camera stream
        if self.isRunning():
            # in case init gets called, while thread is running
            self.postMessage.emit(
                "{}: error; video stream is already running".format(
                    __class__.__name__))
        else:
            # init camera and open stream
            if self.monochrome:
                ## self.camera.color_effects = (128,128)  # return monochrome image, not required if we take Y frame only.
                self.rawCapture = PiYArray(self.camera,
                                           size=self.camera.resolution)
                self.stream = self.camera.capture_continuous(
                    self.rawCapture, 'yuv', self.use_video_port)
            else:
                self.rawCapture = PiRGBArray(self.camera,
                                             size=self.camera.resolution)
                self.stream = self.camera.capture_continuous(
                    self.rawCapture, 'bgr', self.use_video_port)
            # allocate memory
            self.image = np.empty(self.camera.resolution +
                                  (1 if self.monochrome else 3, ),
                                  dtype=np.uint8)
            # init crop rectangle
            if self.cropRect[2] == 0:
                self.cropRect[2] = self.image.shape[1]
            if self.cropRect[3] == 0:
                self.cropRect[3] = self.image.shape[0]
            # restart thread
            self.start()
            wait_ms(1000)
            msg = "{}: info; video stream initialized with frame size = {} and {:d} channels".format(
                __class__.__name__, str(self.camera.resolution),
                1 if self.monochrome else 3)
            self.postMessage.emit(msg)

    @pyqtSlot()
    def run(self):
        try:
            self.fps = FPS().start()
            for f in self.stream:
                if self.isInterruptionRequested():
                    self.finished.emit()
                    return
                self.rawCapture.seek(0)
                self.image = f.array  # grab the frame from the stream
                ## # Crop
                ## if (self.cropRect[2] > self.cropRect[0]) and (self.cropRect[3] > self.cropRect[1]):
                ##     self.frame.emit(self.image[self.cropRect[0]:self.cropRect[2], self.cropRect[1]:self.cropRect[3]])
                # Emit resized frame for speed
                self.frame.emit(
                    cv2.resize(self.image, self.display_frame_size[:2]))
                self.fps.update()
        except Exception as err:
            self.postMessage.emit("{}: error; type: {}, args: {}".format(
                self.__class__.__name__, type(err), err.args))

    @pyqtSlot()
    def stop(self):
        self.postMessage.emit("{}: info; stopping".format(__class__.__name__))
        if self.isRunning():
            self.requestInterruption()
            wait_signal(self.finished, 2000)
        self.fps.stop()
        msg = "{}: info; approx. processing speed: {:.2f} fps".format(
            self.__class__.__name__, self.fps.fps())
        self.postMessage.emit(msg)
        print(msg)
        self.quit()

    @pyqtSlot(str)
    def takeImage(self, filename_prefix=None):
        if filename_prefix is not None:
            (head, tail) = os.path.split(filename_prefix)
            if not os.path.exists(head):
                os.makedirs(head)
            filename = os.path.sep.join([
                head,
                '{:016d}_'.format(round(time.time() * 1000)) + tail + '.png'
            ])
        else:
            filename = '{:016d}'.format(round(time.time() * 1000)) + '.png'
            # open path
            if self.storagePath is not None:
                filename = os.path.sep.join([self.storagePath, filename])

        # write image
        wait_signal(self.frame, 5000)  # wait for first frame to be shot
        cv2.imwrite(filename, self.image)
        self.captured.emit()
        self.postMessage.emit("{}: info; image written to {}".format(
            __class__.__name__, filename))

    @pyqtSlot(str, int)
    def recordClip(self, filename_prefix=None, duration=10):
        # open path
        (head, tail) = os.path.split(filename_prefix)
        if not os.path.exists(head):
            os.makedirs(head)
        filename = os.path.sep.join([
            head,
            '{:016d}_'.format(round(time.time() * 1000)) + tail + '.avi'
        ])
        ## TODO; changing camera settings may get the process killed after several
        ## hours, probably better to open the stream in video resolution from the
        ## start if video recording is required!

        # set video clip parameters
        self.stop()
        frame_size_str = self.settings.value('camera/clip_frame_size')
        (width, height) = frame_size_str.split('x')
        frame_size = (int(width), int(height))
        frame_rate = int(self.settings.value('camera/clip_frame_rate'))
        self.camera.resolution = frame_size
        self.camera.sensor_mode = int(
            self.settings.value('camera/clip_sensor_mode'))
        self.camera.framerate = frame_rate
        self.camera.image_effect = self.settings.value('camera/effect')
        self.use_video_port = True
        self.monochrome = True
        self.initStream()

        # define the codec and create VideoWriter object
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(filename, fourcc, frame_rate, frame_size)
        self.postMessage.emit("{}: info; start recording video to {}".format(
            __class__.__name__, filename))

        # write file
        for i in range(0, duration * frame_rate):
            self.progress.emit(int(100 * i / (duration * frame_rate - 1)))
            wait_signal(self.frame, 1000)
            if self.image is not None:
                out.write(self.image)

        # close
        out.release()
        self.postMessage.emit("{}: info; recording done".format(
            __class__.__name__))
        ## self.camera.start_recording(filename)
        ## self.camera.wait_recording(duration)
        ## self.camera.stop_recording()

        # revert to original parameters
        self.loadSettings()
        self.initStream()
        self.captured.emit()

    @pyqtSlot(str)
    def setStoragePath(self, path):
        self.storagePath = path

    @pyqtSlot(int)
    def setCropXp1(self, val):
        if 0 <= val <= self.cropRect[3]:
            self.cropRect[1] = val
        else:
            raise ValueError('crop x1')

    @pyqtSlot(int)
    def setCropXp2(self, val):
        if self.cropRect[1] < val < self.camera.resolution[1]:
            self.cropRect[3] = val
        else:
            raise ValueError('crop x2')

    @pyqtSlot(int)
    def setCropYp1(self, val):
        if 0 <= val <= self.cropRect[2]:
            self.cropRect[0] = val
        else:
            raise ValueError('crop y1')

    @pyqtSlot(int)
    def setCropYp2(self, val):
        if self.cropRect[0] < val < self.camera.resolution[0]:
            self.cropRect[2] = val
        else:
            raise ValueError('crop y2')
                                          crop=False)
        embedder.setInput(faceBlob)
        vec = embedder.forward()
        preds = recognizer.predict_proba(vec)[0]
        j = np.argmax(preds)
        proba = preds[j]
        name = le.classes_[j]

        text = "{}: {:.2f}%".format(name, proba * 100)
        y = startY - 10 if startY - 10 > 10 else startY + 10
        # cv2.rectangle(frame, (startX, startY), (endX, endY), (255, 0, 0), 2)
        # cv2.putText(frame, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255, 0, 0), 2)

    fps.update()
    for (frameW, nameW) in zip(frames, ("Left Eye", "Right Eye")):
        cv2.imshow(nameW, frameW)
    key = cv2.waitKey(1) & 0xFF
    if key == 27:
        break
    if name == 'vladimir' and proba > 0.8 and block:
        Thread(target=voice, name='voice').start()
        block = False

fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

cv2.destroyAllWindows()
left_eye.stop()
right_eye.stop()
class VideoCapture(object):
    def __init__(self,
                 videoPath="",
                 verbose=True,
                 displayW=1920,
                 displayH=1080,
                 fontScale=1.0,
                 inference=True,
                 confidenceLevel=0.5):

        self.verbose = verbose
        self._debug = False

        self.videoPath = videoPath
        self._videoSourceType = CaptureDevice.Unknown
        self._videoSourceState = CaptureDeviceState.Unknown
        self.videoStream = None

        self._videoReadyEvent = Event()
        self._capture_in_progress = False

        # Display Resolution
        # Will try to set camera's resolution to the specified resolution
        self._displayW = displayW
        self._displayH = displayH
        self._cameraW = 0
        self._cameraH = 0

        # Camera's FPS
        self._cameraFPS = 30

        # Font Scale for putText
        self._fontScale = float(fontScale)

        # turn inference on/off
        self.runInference = inference

        # confidence level threshold
        self.confidenceLevel = confidenceLevel

        # various frame data
        # frame data for UI
        self._displayFrame = None

        # wallpapers for UI
        self._frame_wp_init_system = cv2.imread(
            "./www/WP-InitializingSystem.png")
        self._frame_wp_no_video = cv2.imread("./www/WP-NoVideoData.png")
        self._frame_wp_init_iothub = cv2.imread(
            "./www/WP-InitializeIotHub.png")

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')
            logging.info(
                '===============================================================')
            logging.info(
                'Initializing Video Capture with the following parameters:')
            logging.info('   - OpenCV Version     : {}'.format(cv2.__version__))
            logging.info('   - Video path         : {}'.format(self.videoPath))
            logging.info('   - Display Resolution : {} x {}'.format(
                self._displayW, self._displayH))
            logging.info('   - Font Scale         : {}'.format(self._fontScale))
            logging.info('   - Inference?         : {}'.format(self.runInference))
            logging.info('   - ConfidenceLevel    : {}'.format(
                self.confidenceLevel))
            logging.info(
                '===============================================================')

        # set wallpaper
        self.set_Wallpaper(self._frame_wp_init_system)

        # set FPS
        self.fps = FPS()

        self.imageStreamHandler = None

        # Start Web Server for View
        self.imageServer = ImageServer(80, self)
        self.imageServer.start()

        # Set Video Source
        self.set_Video_Source(self.videoPath)

        self.set_Wallpaper(cv2.imread("./www/WP-InitializeAIEngine.png"))
        # logging.info('Yolo Inference Initializing\r\n')
        self.yoloInference = YoloInference(self._fontScale, sendMessage=False)
        # logging.info('Yolo Inference Initialized\r\n')

    def __enter__(self):
        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')
        # self.set_Video_Source(self.videoPath)
        return self

    def videoStreamReadTimeoutHandler(self, signum, frame):
        raise Exception("VideoStream Read Timeout")

    #
    # Video Source Management
    #
    def _set_Video_Source_Type(self, videoPath):
        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name +
                         '() : {}'.format(videoPath))

        self._reset_Video_Source()

        if '/dev/video' in videoPath.lower():
            self._videoSourceType = CaptureDevice.Webcam
        elif 'rtsp:' in videoPath.lower():
            self._videoSourceType = CaptureDevice.Rtsp
        elif '/api/holographic/stream' in videoPath.lower():
            self._videoSourceType = CaptureDevice.Hololens

        if self.verbose:
            logging.info('<< ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name +
                         '() : {}'.format(self._videoSourceType))

    def _get_Video_Source_Type(self, videoPath):
        videoType = CaptureDevice.Unknown
        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name +
                         '() : {}'.format(videoPath))

        if '/dev/video' in videoPath.lower():
            videoType = CaptureDevice.Webcam
        elif 'rtsp:' in videoPath.lower():
            videoType = CaptureDevice.Rtsp
        elif '/api/holographic/stream' in videoPath.lower():
            videoType = CaptureDevice.Hololens

        return videoType

    #
    # Resets video capture/stream settings
    #
    def _reset_Video_Source(self):
        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        if self.videoStream:
            self.videoStream.stop()
            # self.videoStream.close()
            # self.videoStream = None

        self._videoSourceType = CaptureDevice.Unknown
        self._videoSourceState = CaptureDeviceState.Unknown

    def set_Video_Source(self, newVideoPath):
        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        retVal = False
        realVideoPath = newVideoPath

        if self.videoPath == newVideoPath and self._videoSourceState == CaptureDeviceState.Running:
            return True

        if self.imageStreamHandler != None:
            statusMsg = '{{\"DeviceStatus\":\"Connecting to {}\",\"isSuccess\":{}}}'.format(
                self._remove_credential(newVideoPath), 1)
            self.imageStreamHandler.submit_write(statusMsg)

        self._videoSourceState = CaptureDeviceState.Stop

        if self._capture_in_progress:
            # wait for queue to drain and loop to exit
            time.sleep(1.0)

        self._capture_in_progress = False

        self._set_Video_Source_Type(realVideoPath)

        if self._videoSourceType == CaptureDevice.Unknown:
            self._videoSourceState = CaptureDeviceState.ErrorNotSupported
            logging.error('>> ' + self.__class__.__name__ + "." +
                          sys._getframe().f_code.co_name +
                          '() : Unsupported Video Source {}'.format(
                              self._videoSourceType))
        else:
            self._videoSourceState = CaptureDeviceState.Init

            if self._videoSourceType == CaptureDevice.Hololens:
                strHololens = realVideoPath.split('?')
                # disable audio
                realVideoPath = '{}?holo=true&pv=true&mic=false&loopback=false'.format(
                    strHololens[0])

            self.videoStream = VideoStream(videoCapture=self,
                                           path=realVideoPath)

            fps_override = 30

            if not self.videoStream.videoCapture == None:

                # get resolution
                cameraH1 = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_HEIGHT))
                cameraW1 = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_WIDTH))
                cameraFPS1 = int(
                    self.videoStream.videoCapture.get(cv2.CAP_PROP_FPS))

                if self._videoSourceType == CaptureDevice.Webcam:
                    if not cameraH1 == self._displayH:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_HEIGHT, self._displayH)
                    if not cameraW1 == self._displayW:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_WIDTH, self._displayW)

                elif self._videoSourceType == CaptureDevice.Rtsp:
                    if not cameraH1 == self._displayH:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_HEIGHT, self._displayH)
                    if not cameraW1 == self._displayW:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_WIDTH, self._displayW)

                elif self._videoSourceType == CaptureDevice.Hololens:
                    holo_w = 1280
                    holo_h = 720

                    if 'live_med.mp4' in realVideoPath:
                        holo_w = 854
                        holo_h = 480
                    elif 'live_low.mp4' in realVideoPath:
                        holo_w = 428
                        holo_h = 240
                        fps_override = 15

                    self.videoStream.videoCapture.set(
                        cv2.CAP_PROP_FRAME_HEIGHT, holo_h)
                    self.videoStream.videoCapture.set(cv2.CAP_PROP_FRAME_WIDTH,
                                                      holo_w)

                self.videoStream.videoCapture.set(cv2.CAP_PROP_FPS,
                                                  fps_override)

                self._cameraH = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_HEIGHT))
                self._cameraW = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_WIDTH))
                self._cameraFPS = int(
                    self.videoStream.videoCapture.get(cv2.CAP_PROP_FPS))

                logging.info(
                    '===============================================================')
                logging.info(
                    'Setting Video Capture with the following parameters:')
                logging.info('   - Video Source Type  : {}'.format(
                    self._videoSourceType))
                logging.info('   - Display Resolution : {} x {}'.format(
                    self._displayW, self._displayH))
                logging.info('     Original           : {} x {} @ {}'.format(
                    cameraW1, cameraH1, cameraFPS1))
                logging.info('     New                : {} x {} @ {}'.format(
                    self._cameraW, self._cameraH, self._cameraFPS))
                logging.info(
                    '===============================================================')

                if self.videoStream.start():
                    self._videoSourceState = CaptureDeviceState.Running
                    retVal = True
                else:
                    self._videoSourceState = CaptureDeviceState.ErrorRead

            else:
                if self._videoSourceType == CaptureDevice.Hololens or self._videoSourceType == CaptureDevice.Rtsp:
                    url_parsed = urlparse(realVideoPath)
                    if url_parsed.password != None or url_parsed.username != None:
                        url_parsed = url_parsed._replace(
                            netloc="{}".format(url_parsed.hostname))

                    ipAddress = url_parsed.netloc
                    ping_ret = subprocess.call(
                        ['ping', '-c', '5', '-W', '3', ipAddress],
                        stdout=open(os.devnull, 'w'),
                        stderr=open(os.devnull, 'w'))

                    if ping_ret == 0:
                        self._videoSourceState = CaptureDeviceState.ErrorOpen
                    else:
                        self._videoSourceState = CaptureDeviceState.ErrorNoNetwork

                logging.error('>> ' + self.__class__.__name__ + "." +
                              sys._getframe().f_code.co_name +
                              '() : Failed to open Video Capture')

        self.videoPath = realVideoPath

        if retVal == False:
            self.set_Wallpaper(self._frame_wp_no_video)
        else:
            self._videoReadyEvent.set()

        self.sendCurrentVideoPath(realVideoPath)

        return retVal

    def get_display_frame(self):
        return self.displayFrame

    def set_status(self, device_status):
        self._videoSourceState = device_status
        if self._videoSourceState != CaptureDeviceState.Running:
            self.sendCurrentVideoPath("")

    def sendCurrentVideoPath(self, videoPath):
        if videoPath == "":
            video_path = self._remove_credential(self.videoPath)
        else:
            video_path = self._remove_credential(videoPath)

        logging.info('>> Current Video Status {}'.format(
            self._videoSourceState))

        if self.imageStreamHandler != None:
            if self._videoSourceState == CaptureDeviceState.Running:
                strUserName = ""
                strPassword = ""
                videoType = self._get_Video_Source_Type(videoPath)
                if videoType == CaptureDevice.Rtsp or videoType == CaptureDevice.Hololens:
                    url_parsed = urlparse(videoPath)
                    if url_parsed.password != None:
                        strPassword = url_parsed.password
                    if url_parsed.username != None:
                        strUserName = url_parsed.username
                statusMsg = '{{\"DevicePath\":\"{}\",\"isSuccess\":{},\"UserName\":\"{}\",\"Password\":\"{}\"}}'.format(
                    video_path, 1, strUserName, strPassword)
            else:
                statusMsg = '{{\"DeviceStatus\":\"Error ({}): {}\",\"isSuccess\":{},\"UserName\":\"\",\"Password\":\"\"}}'.format(
                    self._videoSourceState, video_path, 0)
            self.imageStreamHandler.submit_write(statusMsg)

    def setVideoPathFromUI(self, json_Data):
        videoPath = ""
        json_Data = json.loads(json_Data)

        logging.info('>> ' + self.__class__.__name__ + "." +
                     sys._getframe().f_code.co_name +
                     '() : {}'.format(json_Data["VideoPath"]))
        logging.info('>> {}'.format(json_Data["VideoPath"]))
        logging.info('>> {}'.format(json_Data["UserName"]))
        logging.info('>> {}'.format(json_Data["Password"]))

        videoType = self._get_Video_Source_Type(json_Data["VideoPath"])

        if videoType == CaptureDevice.Webcam:
            videoPath = json_Data["VideoPath"].strip()
        elif videoType == CaptureDevice.Rtsp or videoType == CaptureDevice.Hololens:
            url_parsed = urlparse(json_Data["VideoPath"].strip())
            if '@' in url_parsed.netloc or len(json_Data["UserName"]) == 0:
                # already contains password or user name not specified
                videoPath = json_Data["VideoPath"]
            else:
                url_parsed = url_parsed._replace(netloc='{}:{}@{}'.format(
                    json_Data["UserName"], json_Data["Password"],
                    url_parsed.netloc))
                videoPath = url_parsed.geturl()

        self.set_Video_Source(videoPath)

    def _remove_credential(self, videoPath):
        logging.info('>> ' + self.__class__.__name__ + "." +
                     sys._getframe().f_code.co_name + '()')

        ret_Path = ""
        videoType = self._get_Video_Source_Type(videoPath)

        if videoType == CaptureDevice.Webcam:
            ret_Path = videoPath
        elif videoType == CaptureDevice.Rtsp or videoType == CaptureDevice.Hololens:
            url_parsed = urlparse(videoPath)
            if url_parsed.password != None or url_parsed.username != None:
                url_parsed = url_parsed._replace(
                    netloc="{}".format(url_parsed.hostname))
            ret_Path = url_parsed.geturl()

        return ret_Path

    def set_Wallpaper(self, image):
        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')
        self.displayFrame = cv2.imencode('.jpg', image)[1].tobytes()

    def start(self):
        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        while True:
            if self._videoSourceState == CaptureDeviceState.Running:
                self._capture_in_progress = True
                self.__Run__()
                self._capture_in_progress = False
            else:
                if self._videoSourceState == CaptureDeviceState.ErrorOpen or self._videoSourceState == CaptureDeviceState.ErrorRead:
                    self.set_Wallpaper(self._frame_wp_no_video)

                if self._videoSourceType == CaptureDevice.Unknown:
                    if self._debug:
                        logging.info('>> ' + self.__class__.__name__ + "." +
                                     sys._getframe().f_code.co_name +
                                     '() : Unknown Device')
                    time.sleep(1.0)
                else:
                    if self._debug:
                        logging.info('>> ' + self.__class__.__name__ + "." +
                                     sys._getframe().f_code.co_name +
                                     '() : Device Not Running')
                    # time.sleep(1.0)
                    logging.info('>> Video Ready Event Enter ---------------')
                    self._videoReadyEvent.wait()
                    logging.info('<< Video Ready Event Exit ---------------')
                    self._videoReadyEvent.clear()

    def __Run__(self):
        if self.verbose:
            logging.info(
                '===============================================================')
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        # Check camera's FPS
        if self._cameraFPS == 0:
            logging.error('Error : Could not read FPS')
            # raise Exception("Unable to acquire FPS for Video Source")
            return

        logging.info('>> Frame rate (FPS) : {}'.format(self._cameraFPS))
        logging.info('>> Run Inference {}'.format(self.runInference))

        perFrameTimeInMs = 1000 / self._cameraFPS

        self.fps.start()
        self.fps.reset()

        while True:
            # Get current time before we capture a frame
            tFrameStart = time.time()
            frame = np.array([])
            captureRet = False

            if not self._videoSourceState == CaptureDeviceState.Running:
                break

            captureRet, frame = self.videoStream.read()

            if captureRet == False:
                self._videoSourceState = CaptureDeviceState.ErrorRead
                logging.error("ERROR : Failed to read from video source")
                break

            if frame.size > 0:
                # Run Object Detection
                if self.runInference:
                    self.yoloInference.runInference(frame, self._cameraW,
                                                    self._cameraH,
                                                    self.confidenceLevel)

                # Calculate FPS
                currentFPS = self.fps.fps()
                if currentFPS > self._cameraFPS:
                    # Cannot go faster than Camera's FPS
                    currentFPS = self._cameraFPS

                # Add FPS Text to the frame
                cv2.putText(frame, "FPS " + str(currentFPS),
                            (10, int(30 * self._fontScale)),
                            cv2.FONT_HERSHEY_SIMPLEX, self._fontScale,
                            (0, 0, 255), 2)

                self.displayFrame = cv2.imencode('.jpg', frame)[1].tobytes()

            timeElapsedInMs = (time.time() - tFrameStart) * 1000

            if perFrameTimeInMs > timeElapsedInMs:
                # This is faster than image source (e.g. camera) can feed.
                waitTimeBetweenFrames = perFrameTimeInMs - timeElapsedInMs
                time.sleep(waitTimeBetweenFrames / 1000.0)

    def __exit__(self, exception_type, exception_value, traceback):
        self.imageServer.close()
        cv2.destroyAllWindows()
    if not depth_frame or not color_frame:
        # try until both images are ready
        continue

    depth_image = np.asanyarray(depth_frame.get_data())
    color_image = np.asanyarray(color_frame.get_data())

    # perform inference
    skeletons = api.estimate_keypoints(color_image, 192)
    render_result(skeletons, color_image, CONFIDENCE_THRESHOLD, depth_frame,
                  depth_scale)

    # compute fps
    if frame_count == fps_update_rate:
        frame_count = 0
        fps.update()
        curr_fps = fps.fps()

    # draw fps on image
    cv2.putText(color_image, str(int(curr_fps)), (0, 15),
                cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7, color=(0, 255, 0),
                thickness=2)
    frame_count += 1

    ###### Display frame, exit if key press
    cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('RealSense', color_image)
    k = cv2.waitKey(1)
    if k != -1:
        # exit if key pressed ESC (k=27)
    # Receive frame data
    frame_data = sock.recv(msg_size)
    frame = pickle.loads(frame_data)
    frame = cv.imdecode(frame, cv.IMREAD_COLOR)

    # Time in ms
    time_elapsed = time.time() - msg_time
    print("TX Time: {}".format(time_elapsed))
    fps.update()
    fps.stop()

    # Display in window FPS and TX Time
    info = [("TX Time", "{:.2f}".format(time_elapsed)),
            ("FPS: ", "{:.2f}".format(fps.fps()))]
    for (i, (k, v)) in enumerate(info):
        text = "{} : {}".format(k, v)
        cv.putText(frame, text, (10, 480 - ((i * 20) + 20)),
                   cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

    cv.imshow("frame", frame)
    key = cv.waitKey(1) & 0xFF
    if key == ord("q"):
        sock.close()
        break

cv.destroyAllWindows()
fps = FPS()
fps.start()
while True:
    img = cap.read()
    cv2.imshow('Feed', img)
    fps.update()
    key = cv2.waitKey(1) & 0xFF
    if key == 27 or key == ord('q'):
        break
fps.stop()
cap.stop()
print('With Threading: ', fps.fps())

cap = cv2.VideoCapture(0)
fps = FPS()
fps.start()
while True:
    _, img = cap.read()
    cv2.imshow('Feed', img)
    fps.update()
    key = cv2.waitKey(1) & 0xFF
    if key == 27 or key == ord('q'):
        break
fps.stop()
cap.release()
print('Without Threading: ', fps.fps())
        if options["face_drag"]["angle"] == "v":
            options["face_drag"]["angle"] = "h"
        else:
            options["face_drag"]["angle"] = "v"

    draw_frame = gaussian_noise(draw_frame, 8)

    # Optionally show debug info on top of the frame
    if options["show_debug"]["state"]:
        debug_frame_mask = debug_frame.max(axis=2, keepdims=True) > 0  # pylint: disable=unexpected-keyword-arg
        np.copyto(draw_frame, debug_frame, where=debug_frame_mask)

    # Calculate and show UI elements
    if not record is None:
        # Save current frame, accounting for slow FPS
        if fps.fps() >= RECORD_FPS or fps.fps() == 0:
            record.write(draw_frame)
        else:
            # Write extra frames to make it appear the same in the video as in the app
            for i in range(round(RECORD_FPS / fps.fps())):
                record.write(draw_frame)
        # Display red recording dot
        cv2.circle(ui_frame, (cam_w - 10, 10), 5, (0, 0, 255), -1)

    draw_frame = imutils.resize(draw_frame, width=MAX_CAM_W)
    ui_frame = imutils.resize(ui_frame, width=MAX_CAM_W)
    ui_frame_mask = ui_frame.max(axis=2, keepdims=True) > 0  # pylint: disable=unexpected-keyword-arg
    np.copyto(draw_frame, ui_frame, where=ui_frame_mask)
    cv2.imshow("Output", draw_frame)
    # Convert to grayscale, apply a Gaussian blur, and threshold to binary
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (args['gaussian'], args['gaussian']), 0)
    T, bin = cv2.threshold(blur, args['threshold'], 255, cv2.THRESH_BINARY)

    # Sum the white pixels in the image
    soma = bin.sum()

    # Compute the percentage of white pixels in the image
    area = bin.shape[0] * bin.shape[1] * 255
    percent = soma * 100 / area

    # Check whether the white percentage exceeds the limit
    if percent > args['limite']:
        write(bin, 'Plataforma Localizada', pos=(130, 40))

    # Write the white percentage on the image and show both frames
    write(bin, str(round(percent)))
    cv2.imshow('Gray', bin)
    cv2.imshow('Original', frame)
    fps.update()

    if cv2.waitKey(1) == ord('q'):
        fps.stop()
        camera.stop()
        print(fps.elapsed())
        print(fps.fps())
        cv2.destroyAllWindows()
        break
class PiVideoStream(QThread):
    """
    Thread that produces frames for further processing as a PyQtSignal.
    Picamera is set up according to sensormode, and splitter port 0 is used
    for capturing image data. A video stream is set up using picamera
    splitter port 1 and resized to frameSize. Splitter port 2 is used for
    capturing video at videoFrameSize.
    """
    image = None
    finished = pyqtSignal()
    postMessage = pyqtSignal(str)
    frame = pyqtSignal(np.ndarray)
    progress = pyqtSignal(int)
    captured = pyqtSignal()

    camera = PiCamera()
    videoStream = BytesIO()
    storagePath = None
    cropRect = [0] * 4

    ## @param ins is the number of instances created. This may not exceed 1.
    ins = 0

    def __init__(self):
        super().__init__()
        ## Instance limiter. Checks if an instance exists already. If so, it deletes the current instance.
        if PiVideoStream.ins >= 1:
            self.postMessage.emit(
                "{}: error; multiple instances created, while only 1 instance is allowed"
                .format(__class__.__name__))
            del self
            return
        try:
            PiVideoStream.ins += 1
        except Exception as err:
            self.postMessage.emit("{}: error; type: {}, args: {}".format(
                self.__class__.__name__, type(err), err.args))
        else:
            warnings.filterwarnings('default', category=DeprecationWarning)
            self.settings = QSettings("settings.ini", QSettings.IniFormat)
            self.loadSettings()
            ## self.initStream()

    def loadSettings(self):
        self.postMessage.emit(
            "{}: info; loading camera settings from {}".format(
                self.__class__.__name__, self.settings.fileName()))

        # load
        self.monochrome = self.settings.value('camera/monochrome',
                                              False,
                                              type=bool)
        self.sensorMode = int(self.settings.value('camera/sensor_mode'))
        self.frameRate = int(self.settings.value('camera/frame_rate'))

        # set frame sizes
        self.frameSize = frame_size_from_string(
            self.settings.value('frame_size'))
        self.captureFrameSize = frame_size_from_sensor_mode(self.sensorMode)
        self.videoFrameSize = frame_size_from_string(
            self.settings.value('camera/video_frame_size'))
        if not self.monochrome:
            self.frameSize = self.frameSize + (3, )

    @pyqtSlot()
    def initStream(self):
        if self.isRunning():
            self.requestInterruption()
            wait_signal(self.finished, 10000)

        # Set camera parameters
        self.camera.exposure_mode = 'backlight'  # 'auto'
        self.camera.awb_mode = 'flash'  # 'auto'
        self.camera.meter_mode = 'backlit'  # 'average'
        self.camera.sensor_mode = self.sensorMode
        self.camera.resolution = self.captureFrameSize
        self.camera.framerate = self.frameRate
        self.camera.image_effect = self.settings.value('camera/effect')
        self.camera.iso = int(self.settings.value(
            'camera/iso'))  # should force unity analog gain
        self.camera.video_denoise = self.settings.value('camera/video_denoise',
                                                        False,
                                                        type=bool)

        # Wait for the automatic gain control to settle
        wait_ms(3000)

        # # Now fix the values
        # self.camera.shutter_speed = self.camera.exposure_speed
        # self.camera.exposure_mode = 'off'
        # g = self.camera.awb_gains
        # self.camera.awb_mode = 'off'
        # self.camera.awb_gains = g

        ## # Setup video port, GPU resizes frames, and compresses to mjpeg stream
        ## self.camera.start_recording(self.videoStream, format='mjpeg', splitter_port=1, resize=self.frameSize)

        # Setup capture from video port 1
        if self.monochrome:
            self.rawCapture = PiYArray(self.camera, size=self.frameSize)
            self.captureStream = self.camera.capture_continuous(
                self.rawCapture,
                'yuv',
                use_video_port=True,
                splitter_port=1,
                resize=self.frameSize)
        else:
            self.rawCapture = PiRGBArray(self.camera, size=self.frameSize)
            self.captureStream = self.camera.capture_continuous(
                self.rawCapture,
                'bgr',
                use_video_port=True,
                splitter_port=1,
                resize=self.frameSize)

        # init crop rectangle
        if self.cropRect[2] == 0:
            self.cropRect[2] = self.camera.resolution[1]
        if self.cropRect[3] == 0:
            self.cropRect[3] = self.camera.resolution[0]

        # start the thread
        self.start(QThread.HighPriority)
        msg = "{}: info; video stream initialized with frame size = {} and {:d} channels".format(
            __class__.__name__, str(self.camera.resolution),
            1 if self.monochrome else 3)
        self.postMessage.emit(msg)

    @pyqtSlot()
    def run(self):
        try:
            self.fps = FPS().start()
            for f in self.captureStream:
                if self.isInterruptionRequested():
                    break
                self.rawCapture.seek(0)
                img = f.array  # grab the frame from the stream
                self.frame.emit(img)  # cv2.resize(img, self.frameSize[:2]))
                self.fps.update()
                ## # Grab jpeg from an mpeg video stream
                ## self.videoStream.seek(0)
                ## buf = self.videoStream.read()
                ## if buf.startswith(b'\xff\xd8'):
                ##     # jpeg magic number is detected
                ##     flag = cv2.IMREAD_GRAYSCALE if self.monochrome else cv2.IMREAD_COLOR
                ##     img = cv2.imdecode(np.frombuffer(buf, dtype=np.uint8), flag)
                ##     self.frame.emit(img)
                ##     self.fps.update()
                ##     self.videoStream.truncate(0)
        except Exception as err:
            self.postMessage.emit("{}: error; type: {}, args: {}".format(
                self.__class__.__name__, type(err), err.args))
        finally:
            self.fps.stop()
            img = np.zeros(shape=(self.frameSize[1], self.frameSize[0]),
                           dtype=np.uint8)
            cv2.putText(img, 'Camera suspended',
                        (int(self.frameSize[0] / 2) - 150,
                         int(self.frameSize[1] / 2)),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255), 1)
            for i in range(5):
                wait_ms(100)
                self.frame.emit(img)
            msg = "{}: info; finished, approx. processing speed: {:.2f} fps".format(
                self.__class__.__name__, self.fps.fps())
            self.postMessage.emit(msg)
            self.finished.emit()

    @pyqtSlot()
    def stop(self):
        self.postMessage.emit("{}: info; stopping".format(__class__.__name__))
        try:
            if self.isRunning():
                self.requestInterruption()
                wait_signal(self.finished, 10000)
        except Exception as err:
            msg = "{}: error; stopping method".format(self.__class__.__name__)
            print(msg)
        finally:
            self.quit()  # Note that thread quit is required, otherwise strange things happen.

    @pyqtSlot(str)
    def takeImage(self, filename_prefix=None):
        if filename_prefix is not None:
            (head, tail) = os.path.split(filename_prefix)
            if not os.path.exists(head):
                os.makedirs(head)
            filename = os.path.sep.join([
                head,
                '{:016d}_'.format(round(time.time() * 1000)) + tail + '.png'
            ])
        else:
            filename = '{:016d}'.format(round(time.time() * 1000)) + '.png'
            # open path
            if self.storagePath is not None:
                filename = os.path.sep.join([self.storagePath, filename])

        try:
            self.camera.capture(filename,
                                use_video_port=False,
                                splitter_port=0,
                                format='png')
        except Exception as err:
            self.postMessage.emit("{}: error; type: {}, args: {}".format(
                self.__class__.__name__, type(err), err.args))

        self.captured.emit()
        self.postMessage.emit("{}: info; image written to {}".format(
            __class__.__name__, filename))

    @pyqtSlot(str, int)
    def recordClip(self, filename_prefix=None, duration=10):
        """
        Captures a video clip of `duration` seconds at resolution
        videoFrameSize. The GPU resizes the captured video to the intended
        resolution. Note that while it seems possible to change the sensor
        mode, reverting to the original mode fails when capturing an image.
        In many cases the intended framerate is not achieved, so ffprobe
        counts the total number of frames that were actually taken. Next, the
        h264 video file is boxed using MP4Box. For some reason, changing the
        framerate with MP4Box did not work out.
        """
        if filename_prefix is not None:
            (head, tail) = os.path.split(filename_prefix)
            if not os.path.exists(head):
                os.makedirs(head)
            filename = os.path.sep.join([
                head,
                '{:016d}_{}s'.format(round(time.time() * 1000),
                                     round(duration)) + tail
            ])
        else:
            filename = '{:016d}_{}s'.format(round(time.time() * 1000),
                                            round(duration))
            # open path
            if self.storagePath is not None:
                filename = os.path.sep.join([self.storagePath, filename])

        # stop current video stream, maybe mpeg and h264 compression cannot run simultaneously?
        self.postMessage.emit("{}: info; starting recording for {} s".format(
            __class__.__name__, duration))
        try:
            # GPU resizes frames, and compresses to h264 stream
            self.camera.start_recording(filename + '.h264',
                                        format='h264',
                                        splitter_port=2,
                                        resize=self.videoFrameSize,
                                        sps_timing=True)
            wait_ms(duration * 1000)
            self.camera.stop_recording(splitter_port=2)

            # Wrap an MP4 box around the video
            nr_of_frames = check_output([
                "ffprobe", "-v", "error", "-count_frames", "-select_streams",
                "v:0", "-show_entries", "stream=nb_read_frames", "-of",
                "default=nokey=1:noprint_wrappers=1", filename + '.h264'
            ])
            real_fps = float(nr_of_frames) / duration
            self.postMessage.emit(
                "{}: info; video clip captured with real framerate: {} fps".format(
                    __class__.__name__, real_fps))
            ## run(["MP4Box", "-fps", str(self.frameRate), "-add", filename + '.h264:fps=' + str(real_fps), "-new", filename + '.mp4'])
            run([
                "MP4Box", "-fps", str(self.frameRate), "-add",
                filename + ".h264", "-new",
                filename + "_{}fr.mp4".format(int(nr_of_frames))
            ])
            run(["rm", filename + '.h264'])
        except Exception as err:
            self.postMessage.emit("{}: error; type: {}, args: {}".format(
                self.__class__.__name__, type(err), err.args))

        self.captured.emit()
        self.postMessage.emit("{}: info; video written to {}".format(
            __class__.__name__, filename))

    @pyqtSlot(str)
    def setStoragePath(self, path):
        self.storagePath = path

    @pyqtSlot(int)
    def setCropXp1(self, val):
        if 0 <= val <= self.cropRect[3]:
            self.cropRect[1] = val
        else:
            raise ValueError('crop x1')

    @pyqtSlot(int)
    def setCropXp2(self, val):
        if self.cropRect[1] < val < self.camera.resolution[1]:
            self.cropRect[3] = val
        else:
            raise ValueError('crop x2')

    @pyqtSlot(int)
    def setCropYp1(self, val):
        if 0 <= val <= self.cropRect[2]:
            self.cropRect[0] = val
        else:
            raise ValueError('crop y1')

    @pyqtSlot(int)
    def setCropYp2(self, val):
        if self.cropRect[0] < val < self.camera.resolution[0]:
            self.cropRect[2] = val
        else:
            raise ValueError('crop y2')
class Interface(QWidget):
    def __init__(self, path, config):
        QWidget.__init__(self)
        self.path = path
        self.config = config
        self.setWindowTitle('AR4maps')
        self.move(0, 0)
        self.video_size = QSize(VIDEO.WIDTH, VIDEO.HEIGHT)
        self.setup_ui()
        self.markerImg = cv.imread(self.path + self.config['target'])
        # cv.imshow("target", self.markerImg)
        self._cam = Camera().start()
        self._track = Tracking(self.markerImg)
        self._rendering = Rendering(self.markerImg, self.config['coords'])
        self._fps = FPS()
        self.setup_render()

    def setup_ui(self):
        self.main_layout = QHBoxLayout()
        self.main_layout.setContentsMargins(0, 0, 0, 0)
        ### CENTER LAYOUT
        self.center_layout = QVBoxLayout()
        self.main_layout.addLayout(self.center_layout)
        # AR view
        self.pixmap = QLabel()
        self.pixmap.setFixedSize(self.video_size)
        self.pixmap.mousePressEvent = self.click_pixmap
        self.center_layout.addWidget(self.pixmap)
        ## SOUTH LAYOUT
        self.south_layout = QVBoxLayout()
        self.south_layout.setContentsMargins(20, 10, 20, 20)
        self.center_layout.addLayout(self.south_layout)
        # Feature Description
        # Title
        self.feature_title = QLabel('<br/>')
        self.feature_title.setFont(QFont('Helvetica', 18))
        self.south_layout.addWidget(self.feature_title)
        # Description
        self.feature_description = QLabel('<br/><br/><br/><br/><br/>')
        self.feature_description.setWordWrap(True)
        self.south_layout.addWidget(self.feature_description)
        self.south_layout.addStretch()
        # Buttons
        self.south_btns_layout = QHBoxLayout()
        self.south_layout.addLayout(self.south_btns_layout)
        self.feature_website_btn = QPushButton('Website')
        self.feature_website_btn.hide()
        self.south_btns_layout.addWidget(self.feature_website_btn)
        self.feature_photos_btn = QPushButton('Photos')
        self.feature_photos_btn.hide()
        self.south_btns_layout.addWidget(self.feature_photos_btn)
        self.feature_video_btn = QPushButton('Video')
        self.feature_video_btn.hide()
        self.south_btns_layout.addWidget(self.feature_video_btn)
        self.south_btns_layout.addStretch()
        ### EAST LAYOUT
        self.east_layout = QVBoxLayout()
        self.east_layout.setContentsMargins(0, 10, 20, 20)
        self.main_layout.addLayout(self.east_layout)
        # Logo
        self.logo = QSvgWidget(self.path + self.config['logo'])
        self.logo.setMinimumSize(252, 129)
        self.logo.setMaximumSize(252, 129)
        self.east_layout.addWidget(self.logo)
        # Layer buttons
        for layer in self.config['layers']:
            btn = QPushButton(layer['name'])
            btn.clicked.connect(lambda state, x=layer: self.load_layer(x))
            self.east_layout.addWidget(btn)
        # Layer Description
        sep = QFrame()
        sep.setFrameShape(QFrame.HLine)
        self.east_layout.addWidget(sep)
        self.layer_title = QLabel('Select a layer...')
        self.layer_title.setFont(QFont('Helvetica', 18))
        self.east_layout.addWidget(self.layer_title)
        self.layer_description = QLabel('')
        self.layer_description.setWordWrap(True)
        self.east_layout.addWidget(self.layer_description)
        # FPS readout
        self.east_layout.addStretch()
        self.fps_label = QLabel()
        self.fps_label.setAlignment(Qt.AlignRight)
        self.east_layout.addWidget(self.fps_label)
        self.setLayout(self.main_layout)
        self.web = QWebEngineView()
        self.web.resize(VIDEO.WIDTH, VIDEO.HEIGHT)
        self.web.move(0, 0)
        self.web.hide()

    def load_layer(self, layer):
        self.layer_title.setText(layer['name'])
        self.layer_description.setText(layer['description'])
        self.feature_title.setText('Select an item on the screen...')
        self.feature_description.setText('')
        self._rendering.setHighlighted(None)
        self.feature_website_btn.hide()
        self.feature_photos_btn.hide()
        self.feature_video_btn.hide()
        with open(self.path + layer['file']) as json_file:
            data = json.load(json_file)
            self._rendering.setGeoJSON(data['features'])
    def click_pixmap(self, event):
        pos = (event.x(), event.y())
        feature = self._rendering.getClickedFeature(pos)
        self.feature_website_btn.hide()
        self.feature_photos_btn.hide()
        self.feature_video_btn.hide()
        if feature is not None:
            props = feature['properties']
            self.feature_title.setText(props['title'] if 'title' in props else 'NO TITLE')
            self.feature_description.setText(
                props['description'] if 'description' in props else '')
            self._rendering.setHighlighted(feature['uuid'])
            if 'website' in props:
                self.feature_website_btn.show()
                try:
                    self.feature_website_btn.clicked.disconnect()
                except Exception:
                    pass
                self.feature_website_btn.clicked.connect(
                    lambda state, x=props['website']: webbrowser.open(x))
            if 'photos' in props:
                self.feature_photos_btn.show()
                try:
                    self.feature_photos_btn.clicked.disconnect()
                except Exception:
                    pass
                self.feature_photos_btn.clicked.connect(
                    lambda state, x=props['photos']: self.display_photos(x))
            if 'video' in props:
                self.feature_video_btn.show()
                try:
                    self.feature_video_btn.clicked.disconnect()
                except Exception:
                    pass
                self.feature_video_btn.clicked.connect(
                    lambda state, x=props['video']: self.display_video(x))
        else:
            self.feature_title.setText('')
            self.feature_description.setText('')
            self._rendering.setHighlighted(None)

    def display_photos(self, photos):
        photos = list(map(lambda x: self.path + x, photos))
        self.slideshow = SlideShow(photos)
        self.slideshow.show()

    def display_video(self, url):
        self.web.load(QUrl(url))
        self.web.show()

    def setup_render(self):
        self._fps.start()
        self.timer = QTimer()
        self.timer.timeout.connect(self.render)
        # QTimer.start() takes an integer number of milliseconds
        self.timer.start(int(1000 / VIDEO.FPS))

    def render(self):
        _, frameImg = self._cam.read()
        frameImg = cv.cvtColor(frameImg, cv.COLOR_BGR2RGB)
        H = self._track.update(frameImg)
        self._rendering.update(H, frameImg)
        if H is not None:
            # self._rendering.drawBorder()
            self._rendering.renderGeoJSON()
            # self._rendering.renderObj()
        image = QImage(frameImg, frameImg.shape[1], frameImg.shape[0],
                       frameImg.strides[0], QImage.Format_RGB888)
        self.pixmap.setPixmap(QPixmap.fromImage(image))
        self.fps_label.setText("{:.2f} FPS".format(self._fps.update()))

    def closeEvent(self, event):
        self._cam.stop()
        self._fps.stop()
        print("\033[0;30;102m[INFO]\033[0m {:.2f} seconds".format(self._fps.elapsed()))
        print("\033[0;30;102m[INFO]\033[0m {:.2f} FPS".format(self._fps.fps()))
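# Sketch of the layer-file structure load_layer() consumes, inferred from the
# 'features' and 'properties' accesses above; all values are placeholders.
# Every 'properties' key is optional: missing ones fall back to defaults or
# leave their buttons hidden. click_pixmap() also reads feature['uuid'], which
# is presumably assigned by Rendering.setGeoJSON() rather than stored on disk.
example_layer = {
    "features": [{
        "geometry": {"type": "Point", "coordinates": [2.17, 41.38]},
        "properties": {
            "title": "Example feature",            # feature title label
            "description": "Shown under the title",
            "website": "https://example.org",      # enables the Website button
            "photos": ["img/photo1.jpg"],          # enables the Photos button
            "video": "https://example.org/v.mp4",  # enables the Video button
        },
    }],
}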
# create a *threaded* video stream, allow the camera sensor to warmup,
# and start the FPS counter
print("[INFO] sampling THREADED frames from `picamera` module...")
vs = ColorSepPVStream().start()
time.sleep(2.0)
fps = FPS().start()

# loop over some frames...this time using the threaded stream
while fps._numFrames < args["num_frames"]:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=400)

    # check to see if the frame should be displayed to our screen
    if args["display"] > 0:
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

    # update the FPS counter
    fps.update()

# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
def recognize_video(detector, embedder: Embedder, recognizer: Recognizer,
                    detector_params='default', source=0):
    # initialize the video stream
    print('Starting video stream...')
    vs = VideoStream(src=source).start()
    if not is_detector(detector):
        raise TypeError('Incorrect type of detector')
    # let the camera warm up
    time.sleep(0.5)
    # start the FPS throughput estimator
    fps = FPS().start()
    # loop over the frames from the video stream
    while True:
        frame = vs.read()
        if detector_params == 'default':
            faces_roi, boxes = detector.calc_image(frame, return_mode='both')
        elif isinstance(detector, DetectorSSD):
            confidence = detector_params[0]
            faces_roi, boxes = detector.calc_image(frame, confidence=confidence,
                                                   return_mode='both')
        elif isinstance(detector, (DetectorVJ, DetectorLBP)):
            [scale_factor, min_neighbors] = detector_params
            faces_roi, boxes = detector.calc_image(frame, scale_factor=scale_factor,
                                                   min_neighbors=min_neighbors,
                                                   return_mode='both')
        elif isinstance(detector, (DetectorHOG, DetectorMMOD)):
            upsampling_times = detector_params[0]
            faces_roi, boxes = detector.calc_image(frame,
                                                   upsampling_times=upsampling_times,
                                                   return_mode='both')
        else:
            raise ValueError('Unsupported detector/parameter combination')
        for i in range(len(faces_roi)):
            embeddings = embedder.calc_face(faces_roi[i])
            name = recognizer.recognize(embeddings)
            start_x, start_y, end_x, end_y = boxes[i]
            # draw the bounding box and the recognized name above it
            text = '{}'.format(name)
            y = start_y - 10 if start_y - 10 > 10 else start_y + 10
            cv2.rectangle(frame, (start_x, start_y), (end_x, end_y), (0, 0, 255), 2)
            cv2.putText(frame, text, (start_x, y), cv2.FONT_HERSHEY_SIMPLEX,
                        0.45, (0, 0, 255), 2)
        # update the FPS counter
        fps.update()
        # show the output frame
        cv2.imshow('Frame', frame)
        key = cv2.waitKey(1) & 0xFF
        # quit when 'q' is pressed
        if key == ord("q"):
            break
    fps.stop()
    print('Elapsed time: {:.2f}'.format(fps.elapsed()))
    print('Approx. FPS: {:.2f}'.format(fps.fps()))
    cv2.destroyAllWindows()
    vs.stop()
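# Hypothetical call sites for recognize_video(), derived from the parameter
# handling above. The constructor calls are placeholders, since only the
# detector class names appear in this file; the parameter-list shapes match
# what each branch of the function unpacks.
detector = DetectorSSD()
embedder = Embedder()
recognizer = Recognizer()

# SSD expects a one-element list holding the minimum detection confidence
recognize_video(detector, embedder, recognizer, detector_params=[0.5])

# Viola-Jones / LBP expect [scale_factor, min_neighbors]
recognize_video(DetectorVJ(), embedder, recognizer, detector_params=[1.1, 5])

# HOG / MMOD expect a one-element list with the number of upsampling passes
recognize_video(DetectorHOG(), embedder, recognizer, detector_params=[1])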
if old_img is not None and img is not None:
    diff = cv2.absdiff(old_img, img) > tolerance

# Handle ball: bounce off the screen edges or off any detected motion.
x += dx
y += dy
if not (r < x < 640 - r) or (diff is not None and diff[y][x].any()):
    dx *= -1
    ball_color = random_color()
if not (r < y < 480 - r) or (diff is not None and diff[y][x].any()):
    dy *= -1
    ball_color = random_color()
cv2.circle(img, (x, y), r, ball_color, 2)
write(img, "{:.2f} FPS".format(fps.fps()), 20, 30)

# Handle capture.
if countdown > 0:
    countdown -= 1
    if countdown == 0:
        path = 'cam ' + time.strftime("%Y-%m-%d %H.%M.%S") + '.jpg'
        if not cv2.imwrite(path, img):
            msg = "Could not save photo to %r" % path
        else:
            msg = "Saved image to %s" % path
        caption([msg, 20, 80, 0.6])
    else:
        # countdown is measured in frames; divide by the measured FPS to
        # display the whole seconds remaining
        write(img, str(int(countdown // fps.fps()) + 1), 250, 250, 7, 3)

# Handle user input for 1 ms.
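# Standalone sketch of the motion trigger used above: threshold the absolute
# per-pixel difference between consecutive frames. The tolerance value and the
# webcam source are assumptions; any pixel change above tolerance counts as motion.
import cv2

cap = cv2.VideoCapture(0)
tolerance = 25
old_img = None
for _ in range(300):  # sample a few hundred frames, then stop
    ok, img = cap.read()
    if not ok:
        break
    if old_img is not None:
        diff = cv2.absdiff(old_img, img) > tolerance  # boolean motion mask
        if diff.any():
            print("motion detected")
    old_img = img
cap.release()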
class detectionBox:
    def __init__(self, camera_name, name, area, video_stream_reference, config,
                 runtime, db_reference):
        self.camera_name = camera_name
        self.name = name
        self.area = area  # bounding box for the search
        self.stream = video_stream_reference  # reference to the video feed
        self.old_detected_rect = []
        # threads cannot share an alpr object, so each box needs its own
        self.alpr = Alpr("us", config, runtime)
        self.alpr.set_top_n(1)
        self.fps = FPS().start_timer()
        self.stopped = False
        # stores license plate objects
        self.license_plate_list = []
        self.licensePlateService = licensePlateService(self, db_reference).start()

    def start(self):
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        # main method, do processing here
        while True:
            if self.stopped:
                return
            self.fps.update()
            # grab the most recent frame from the video thread
            frame = self.stream.read()
            frame_copy = copy.deepcopy(frame)
            cropped_frame = frame[int(self.area[1]):int(self.area[1] + self.area[3]),
                                  int(self.area[0]):int(self.area[0] + self.area[2])]
            # run the detector
            results = self.alpr.recognize_ndarray(cropped_frame)
            detected_rect = []
            # find all plate rects in the frame and store them in detected_rect
            if results['results']:
                for plate in results['results']:
                    # offset the corner points so they are relative to the full
                    # frame rather than the cropped frame, then take the bounding rect
                    left_bottom = (plate['coordinates'][3]['x'] + self.area[0],
                                   plate['coordinates'][3]['y'] + self.area[1])
                    right_bottom = (plate['coordinates'][2]['x'] + self.area[0],
                                    plate['coordinates'][2]['y'] + self.area[1])
                    right_top = (plate['coordinates'][1]['x'] + self.area[0],
                                 plate['coordinates'][1]['y'] + self.area[1])
                    left_top = (plate['coordinates'][0]['x'] + self.area[0],
                                plate['coordinates'][0]['y'] + self.area[1])
                    all_points = np.array([left_bottom, right_bottom, left_top, right_top])
                    bounding_rect = cv2.boundingRect(all_points)  # X, Y, W, H
                    detected_rect.append(bounding_rect)
                    # convert the alpr result into a licensePlate object and
                    # store it in license_plate_list
                    plate_number = plate['plate']
                    # plate_image = frame[int(bounding_rect[1]):int(bounding_rect[1] + bounding_rect[3]), int(bounding_rect[0]):int(bounding_rect[0] + bounding_rect[2])]
                    plate_image = cv2.resize(frame_copy, (720, 480),
                                             interpolation=cv2.INTER_AREA)
                    plate_datetime = datetime.datetime.now()
                    plate_time = get_system_uptime()
                    plate_confidence = plate['confidence']
                    new_plate = licensePlate(plate_number, plate_image, plate_time,
                                             self.camera_name, self.name,
                                             plate_datetime, plate_confidence)
                    self.license_plate_list.append(new_plate)
                    self.licensePlateService.notify()  # wake the service thread
                # detected_rect is rebuilt every frame, while old_detected_rect
                # always holds the last detection set for drawing
                self.old_detected_rect = detected_rect
            else:  # no results
                self.old_detected_rect = []

    def draw(self, frame):
        # return the frame with detections and the search area drawn on it;
        # the search areas should not overlap, which could be verified in the constructor
        # print(len(self.old_detected_rect))
        for rect in self.old_detected_rect:
            cv2.rectangle(frame, rect, (0, 255, 0), 2)
        # draw the search box and its name
        cv2.putText(frame, self.name, (self.area[0], self.area[1] - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (15, 15, 255), 2)
        cv2.rectangle(frame, self.area, (0, 0, 255), 2)
        return frame

    def stop(self):
        self.fps.stop()
        print(self.name, "FPS: ", self.fps.fps())
        self.licensePlateService.stop()
        self.stopped = True
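# Minimal construction sketch for detectionBox, using only the constructor
# signature above. All argument values are hypothetical: `stream` stands in for
# any started video-stream object exposing read(), the OpenALPR config/runtime
# paths are typical install locations, and `db_service` is whatever database
# handle the surrounding application provides.
box = detectionBox(camera_name='front-gate',
                   name='driveway',
                   area=(100, 200, 640, 360),  # X, Y, W, H search region
                   video_stream_reference=stream,
                   config='/etc/openalpr/openalpr.conf',
                   runtime='/usr/share/openalpr/runtime_data',
                   db_reference=db_service)
box.start()  # spawns the update() worker thread
# ... frames can now be annotated via box.draw(frame) ...
box.stop()   # prints this box's FPS and stops its licensePlateService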