def main(args):
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    # profile.run('pool = Pool(args.num_workers, worker, (input_q, output_q))')
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    # Inside this function there is a thread providing frames
    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    # PATH_TO_FILE = os.path.join(CWD_PATH, 'rtsp://192.168.0.109:554/user=admin&password=admin&channel=1&stream=0.sdp?')
    # video_capture = WebcamVideoStream(src=PATH_TO_FILE,
    #                                   width=args.width,
    #                                   height=args.height).start()
    fps = FPS().start()

    while True:  # fps._numFrames < 120
        # Here the frames are read and placed into a Queue, which feeds a Pool
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()

        # print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    # print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    # print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    pool.terminate()
    video_capture.stop()
    cv2.destroyAllWindows()
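# Hedged sketch of the `worker` target that each Pool process runs in the
# snippet above. Only the signature worker(input_q, output_q) and the
# queue usage are taken from the code; `detect_objects` is a hypothetical
# stand-in for the project's actual detection call, and the pass-through
# line keeps the sketch runnable without a model.
def worker(input_q, output_q):
    while True:
        frame = input_q.get()                   # blocks until main() supplies a frame
        # output_q.put(detect_objects(frame))   # hypothetical detection step
        output_q.put(frame)                     # pass-through placeholder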
# logger = multiprocessing.log_to_stderr()
# logger.setLevel(multiprocessing.SUBDEBUG)
input_q = Queue(maxsize=args.queue_size)
output_q = Queue(maxsize=args.queue_size)
pool = Pool(args.num_workers, worker, (input_q, output_q))

video_capture = WebcamVideoStream(src=args.video_source,
                                  width=args.width,
                                  height=args.height).start()
fps = FPS().start()

while True:  # fps._numFrames < 120
    print('4', sys.path)
    frame = video_capture.read()
    input_q.put(frame)

    t = time.time()

    output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
    cv2.imshow('Video', output_rgb)
    fps.update()

    print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

fps.stop()
print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
class Realtime:
    """ Read and apply object detection to the input video stream. """

    def __init__(self, args):
        self.display = args["display"] == 1
        self.queue_input = None
        self.queue_output = None
        self.pool = None
        self.vs = None
        self.fps = None
        self.start_queue(args["logger_debug"], args["queue_size"],
                         args["num_workers"])
        self.start_stream(args["input_device"])

    def start_queue(self, debugger, size, workers):
        """ Start the processing queues and the worker pool. """
        if debugger:
            logger = multiprocessing.log_to_stderr()
            logger.setLevel(multiprocessing.SUBDEBUG)
        self.queue_input = Queue(maxsize=size)
        self.queue_output = Queue(maxsize=size)
        self.pool = Pool(workers, worker, (self.queue_input, self.queue_output))

    def start_stream(self, device):
        """ Create a threaded video stream and start the FPS counter. """
        self.vs = WebcamVideoStream(src=device).start()
        self.fps = FPS().start()

    def start(self):
        """ Start processing the video feed. """
        if self.display:
            print()
            print("=====================================================================")
            print("Starting video acquisition. Press 'q' (on the video window) to stop.")
            print("=====================================================================")
            print()

        # Start reading and treating the video stream
        running = True
        while running:
            running = self.capture()
        self.destroy()

    def capture(self):
        """ Capture and process a single video frame. """
        if cv2.waitKey(1) & 0xFF == ord('q'):
            return False

        # Capture frame-by-frame
        ret, frame = self.vs.read()

        # No new frame, try again
        if not ret:
            return True

        # Place frame in queue
        self.queue_input.put(frame)

        # Display the resulting frame
        if self.display:
            cv2.imshow('frame',
                       cv2.cvtColor(self.queue_output.get(), cv2.COLOR_RGB2BGR))
            self.fps.update()
        return True

    def destroy(self):
        """ Stop threads and hide the OpenCV frame. """
        # When everything is done, release the capture
        self.fps.stop()
        self.pool.terminate()
        self.vs.stop()
        cv2.destroyAllWindows()
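# Hedged usage sketch for the Realtime class above. The dictionary keys
# ("display", "logger_debug", "queue_size", "num_workers", "input_device")
# are the ones the class actually reads; the concrete values shown here are
# illustrative only, not defaults from the original project.
if __name__ == '__main__':
    rt = Realtime({
        "display": 1,          # 1 shows the OpenCV window
        "logger_debug": 0,     # 1 enables SUBDEBUG multiprocessing logging
        "queue_size": 5,
        "num_workers": 2,
        "input_device": 0,     # webcam index passed to WebcamVideoStream
    })
    rt.start()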
args = parser.parse_args()

input_q = Queue(5)  # FPS improves with a larger queue, but latency increases
output_q = Queue()
for i in range(1):
    t = Thread(target=worker, args=(input_q, output_q))
    t.daemon = True
    t.start()

video_capture = WebcamVideoStream(src=args.video_source,
                                  width=args.width,
                                  height=args.height).start()
fps = FPS().start()

while True:
    frame = video_capture.read()
    input_q.put(frame)

    t = time.time()

    if output_q.empty():
        pass  # fill up queue
    else:
        font = cv2.FONT_HERSHEY_SIMPLEX
        data = output_q.get()
        rec_points = data['rect_points']
        class_names = data['class_names']
        class_colors = data['class_colors']
        for point, name, color in zip(rec_points, class_names, class_colors):
            cv2.rectangle(frame,
                          (int(point['xmin'] * args.width),
                           int(point['ymin'] * args.height)),
                          (int(point['xmax'] * args.width),
                           int(point['ymax'] * args.height)),
                          color, 3)
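# Hedged sketch of the dictionary the worker thread above is expected to put
# on output_q: the key names and the normalised [0, 1] box coordinates follow
# from how the drawing loop reads them; the values are made up for illustration.
example_detection = {
    'rect_points': [{'xmin': 0.12, 'ymin': 0.20, 'xmax': 0.45, 'ymax': 0.80}],
    'class_names': ['person: 97%'],
    'class_colors': [(0, 255, 0)],   # one BGR colour per detection
}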
def main(argv):
    print("\n---------- Starting object detection ----------\n")

    # Instantiate an ObjectDetector class object
    # Takes the name of the model graph as an argument
    ObjectFinder = ObjectDetector('frozen_inference_graph.pb')

    # Initialize a parser object
    parser = argparse.ArgumentParser()
    parser.add_argument('-src', '--source', dest='video_source', type=int,
                        default=0, help='Device index of the camera.')
    parser.add_argument('-wd', '--width', dest='width', type=int,
                        default=1080, help='Width of the frames in the video stream.')
    parser.add_argument('-ht', '--height', dest='height', type=int,
                        default=720, help='Height of the frames in the video stream.')
    parser.add_argument('-num-w', '--num-workers', dest='num_workers', type=int,
                        default=4, help='Number of workers.')
    parser.add_argument('-q-size', '--queue-size', dest='queue_size', type=int,
                        default=25, help='Size of the queue.')
    args = parser.parse_args()

    # Initialize a logger object
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, ObjectFinder.worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()

    # ------------------------------ Control loop ------------------------------
    fps = FPS().start()  # fps._numFrames < 120
    frame_number = 0
    while True:
        frame_number += 1
        # Frame is a numpy ndarray
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()

        print("[INFO] elapsed time: {0:.3f}\nFrame number: {1}"
              "-------------------------------".format(time.time() - t, frame_number))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    pool.terminate()
    video_capture.stop()
    cv2.destroyAllWindows()
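# Hedged sketch of what the ObjectDetector class used above might look like.
# Only the constructor argument (a frozen graph file) and the
# worker(input_q, output_q) signature are taken from the snippet; the
# TF1-style graph loading and the pass-through loop are assumptions, and the
# real project would run its detection tensors inside the loop.
import tensorflow as tf

class ObjectDetector:
    def __init__(self, graph_path):
        # Store only the path so the bound worker method stays picklable
        self.graph_path = graph_path

    def worker(self, input_q, output_q):
        # Each pool process loads its own copy of the frozen graph
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.graph_path, 'rb') as fid:
                graph_def.ParseFromString(fid.read())
            tf.import_graph_def(graph_def, name='')
        sess = tf.Session(graph=detection_graph)

        while True:
            frame = input_q.get()
            # Real code would call sess.run(...) on the detection tensors here;
            # the frame is passed through unchanged to keep the sketch simple.
            output_q.put(frame)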
nFrames = 0
tLastRefresh = time.time()
tLastDetection = time.time()
tStartEvent = time.time()
text = "Motionless"
record = False

# Loop over the frames of the video
print("Starting detection")
while True:
    # Update timer
    t = time.time()

    # Grab the current frame from the detector input if defined
    if detector_vs is None:
        ret, frame_dry = record_vs.read()
    else:
        ret, frame_dry = detector_vs.read()

    # If the frame could not be grabbed, there is an error: stop the program
    if frame_dry is None:
        break

    # Resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame_dry, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # Refresh reference (and release video if detection occurs)
    if (refFrame is None) or (t - tLastRefresh > args['refresh_delay']):
        tLastRefresh = time.time()
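# Hedged sketch of the frame-differencing step that a loop like the one above
# typically performs once refFrame holds a blurred grayscale reference. This
# is the standard OpenCV recipe (absdiff -> threshold -> dilate -> contours),
# not code from the original file; the helper name and the 25/500 thresholds
# are illustrative.
import cv2
import imutils

def detect_motion(refFrame, gray, min_area=500):
    """Return True if the blurred grayscale frame differs enough from the reference."""
    delta = cv2.absdiff(refFrame, gray)
    thresh = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = imutils.grab_contours(
        cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))
    return any(cv2.contourArea(c) > min_area for c in cnts)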
                                  width=args.width,
                                  height=args.height).start()
# video_capture = WebcamVideoStream(src='rtsp://*****:*****@10.20.1.80:554/PSIA/streaming/channels/101',
#                                   width=args.width,
#                                   height=args.height).start()
#
# video_capture2 = WebcamVideoStream(src='rtsp://*****:*****@10.20.1.70:554/PSIA/streaming/channels/101',
#                                    width=args.width,
#                                    height=args.height).start()

fps = FPS().start()

while True:
    # frame = video_capture.read()
    frame = video_capture.read()
    frame1 = video_capture2.read()

    if frame.any():
        input_q.put(frame)

    t = time.time()

    if output_q.empty():
        pass  # fill up queue
    else:
        font = cv2.FONT_HERSHEY_SIMPLEX
        datakeluar = output_q.get()
        try:
            rects = datakeluar['rects']
            recog_data = datakeluar['recog_data']
def web():
    run = True

    parser = argparse.ArgumentParser()
    parser.add_argument('-src', '--source', dest='video_source', type=int,
                        default=0, help='Device index of the camera.')
    parser.add_argument('-wd', '--width', dest='width', type=int,
                        default=480, help='Width of the frames in the video stream.')
    parser.add_argument('-ht', '--height', dest='height', type=int,
                        default=360, help='Height of the frames in the video stream.')
    parser.add_argument('-num-w', '--num-workers', dest='num_workers', type=int,
                        default=2, help='Number of workers.')
    parser.add_argument('-q-size', '--queue-size', dest='queue_size', type=int,
                        default=5, help='Size of the queue.')
    args = parser.parse_args()

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while run:  # fps._numFrames < 120
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()

        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    pool.terminate()
    video_capture.stop()
    cv2.destroyAllWindows()
    src='rtsp://*****:*****@192.168.0.159:554/PSIA/streaming/channels/101',
    width=args.width,
    height=args.height).start()
video_capture3 = WebcamVideoStream(
    src='rtsp://*****:*****@192.168.0.155:554/PSIA/streaming/channels/101',
    width=args.width,
    height=args.height).start()

fps = FPS().start()

while True:
    # frame = video_capture.read()
    frame = video_capture.read()
    frame2 = video_capture2.read()
    frame3 = video_capture3.read()

    if frame.any():
        input_q.put(frame)

    t = time.time()

    if output_q.empty():
        pass  # fill up queue
    else:
        font = cv2.FONT_HERSHEY_SIMPLEX
        datakeluar = output_q.get()
        try:
            rects = datakeluar['rects']
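# Hedged sketch of the dictionary the recognition worker in the two multi-camera
# snippets above appears to put on output_q: only the keys 'rects' and
# 'recog_data' are taken from the code; the value layout is an assumption shown
# for illustration only.
example_output = {
    'rects': [(120, 80, 60, 60)],        # e.g. (x, y, w, h) boxes, assumed format
    'recog_data': [('person_1', 0.93)],  # e.g. (label, confidence), assumed format
}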