def high_speed(args):
    """Capture/predict/display loop for the pose proposal network.

    Loads the config and model, starts the ``Capture`` and ``Predictor``
    worker threads, then shows annotated frames in an OpenCV window until
    the user presses Esc (or an error ends the loop).  The worker threads
    are always stopped and joined on the way out.

    Args:
        args: command-line namespace understood by ``load_config`` and
            ``create_model``.
    """
    config = load_config(args)
    detection_thresh = config.getfloat('predict', 'detection_thresh')
    min_num_keypoints = config.getint('predict', 'min_num_keypoints')
    # Loop-invariant settings: read once instead of once per frame.
    visbbox = config.getboolean('predict', 'visbbox')
    model_name = config.get('model_param', 'model_name')
    model = create_model(args, config)

    # Optional decorative overlay rotated on top of the drawn output.
    if os.path.exists('mask.png'):
        mask = Image.open('mask.png').resize((200, 200))
    else:
        mask = None

    capture = Capture(model.insize)
    predictor = Predictor(model=model, cap=capture)
    capture.start()
    predictor.start()

    fps_time = 0
    degree = 0  # current rotation of the mask overlay (degrees)
    main_event = threading.Event()

    try:
        while not main_event.is_set():
            degree = (degree + 5) % 360
            try:
                image, feature_map = predictor.get()
                humans = get_humans_by_feature(
                    model, feature_map, detection_thresh, min_num_keypoints)
            except queue.Empty:
                continue
            except Exception as e:
                # Report the failure instead of silently ending the loop.
                print('predictor error: {}'.format(e))
                break

            pil_img = Image.fromarray(image)
            pil_img = draw_humans(
                model.keypoint_names,
                model.edges,
                pil_img,
                humans,
                mask=mask.rotate(degree) if mask else None,
                visbbox=visbbox,
            )

            img_with_humans = cv2.cvtColor(np.asarray(pil_img), cv2.COLOR_RGB2BGR)
            msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
            msg += ' ' + model_name
            cv2.putText(img_with_humans,
                        'FPS: %f' % (1.0 / (time.time() - fps_time)),
                        (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            img_with_humans = cv2.resize(
                img_with_humans, (3 * model.insize[0], 3 * model.insize[1]))
            cv2.imshow('Pose Proposal Network' + msg, img_with_humans)
            fps_time = time.time()
            # Press Esc to exit.
            if cv2.waitKey(1) == 27:
                main_event.set()
    except KeyboardInterrupt:
        main_event.set()
    except Exception as e:
        print(e)
    finally:
        # Always shut the worker threads down cleanly, even on errors.
        capture.stop()
        predictor.stop()
        capture.join()
        predictor.join()
def run(self):
    """Worker entry point: run the 1920x1080 model in its own process.

    Loads the model (moved to GPU when CUDA is available), signals
    readiness over ``self.pipe_end``, then loops until
    ``self.stop_event`` is set: pull frames from ``self.queue_in``,
    run the forward pass, publish feature maps on ``self.queue`` and
    detected humans on ``self.queue_comm``.  A ``'stop'`` message over
    the pipe (in either direction) tears the worker down.
    """
    model = create_model(self.modelargs, self.config)
    logger.info('{} started at PID {} - 1920x1080 model loaded'.format(self.name, self.pid))
    if chainer.backends.cuda.available:
        # Move weights to GPU 0 when CUDA is present.
        model = model.to_gpu(0)
    self.model = model
    self.insize = (1920, 1080)
    self.pipe_end.send(True)  # model loaded sign
    count = 0
    run = False
    # Handshake: wait until the peer confirms before processing frames.
    if self.pipe_end.recv():
        logger.info("start running 1920x1080")
        run = True
    while not self.stop_event.is_set():
        try:
            # Only process frames while running and no control message
            # is pending on the pipe; otherwise fall through to the
            # control-message branch below.
            if run and not self.pipe_end.poll():
                t_start = time.time()
                # image, count = self.cap.get(1)
                image, count = self.queue_in.get(timeout=1)
                self.queue_get_time += time.time() - t_start
                # logger.info('get img from queue took {} sec'.format(time.time()-t_start))
                # print('pred1 getting from cap:'+str(count)+'\n')
                image = cv2.resize(image, self.insize)
                t_start = time.time()
                # HWC -> CHW float32, as the chainer model expects.
                with chainer.using_config('autotune', True), \
                        chainer.using_config('use_ideep', 'auto'):
                    feature_map = get_feature(self.model, image.transpose(2, 0, 1).astype(np.float32))
                # NOTE(review): guarding a put with `empty()` looks
                # inverted (`full()` would be the usual back-pressure
                # check) — confirm the intended queue behavior.
                if not self.queue.empty():
                    self.inf_time += time.time() - t_start
                    # self.queue.put((image, feature_map), timeout=1)
                    self.queue.put((feature_map), timeout=1)
                    humans = get_humans_by_feature(model, feature_map, self.detection_threshold, self.min_num_keypoints)
                    if len(humans) > 0:
                        self.queue_comm.put(humans, timeout=1)
                    # cropped_image_set = self.cut_human(image, humans)
                    # logger.debug("pred1 queue {}: ".format(self.queue.qsize()))
                if self.queue.qsize() == 1:
                    self.pipe_end.send(2)  # sign that big model passed first forward path
                ## BLOCK ON PURPOSE ##
                # Debug stall, disabled: flip `block` to True to freeze
                # this worker and observe the rest of the pipeline.
                block = False
                while block:
                    time.sleep(2)
                    pass
                ######
            else:
                # logger.info("waiting for other model to load....")
                # A control message is pending (or we are not running):
                # block on the pipe and honor a 'stop' request.
                if self.pipe_end.recv() == 'stop':
                    logger.info("STOP received via pipe")
                    self.stop()
            # Input drained but output still pending: tell the peer to
            # stop and shut ourselves down.
            if self.queue_in.qsize()==0 and self.queue.qsize()>0:
                self.pipe_end.send('stop')
                self.stop()
        except queue.Full:
            # Downstream consumer is behind; drop this cycle.
            logger.info("queue full")
            pass
        except queue.Empty:
            logger.info("queue empty")
            # No more input but results were produced: we are done.
            if self.queue.qsize() > 0:
                self.stop()
            else:
                pass
        except cv2.error:
            logger.info("CV2 error")
            self.pipe_end.send('stop')
            time.sleep(1)
            self.stop()
        except KeyboardInterrupt:
            self.pipe_end.send('stop')
            self.stop()
        except:
            # Last-resort handler: notify the peer, then re-raise so the
            # failure is not silently swallowed.
            print("Unexpected error:", sys.exc_info()[0])
            self.pipe_end.send('stop')
            time.sleep(1)
            self.stop()
            raise
def high_speed(args):
    """Capture/predict/display loop reading from a USB camera.

    Opens camera 0 at 1280x720 MJPG / 60 FPS, feeds frames through the
    ``Capture`` and ``Predictor`` worker threads, and shows annotated
    frames in an OpenCV window until the user presses Esc.  The camera
    and worker threads are always released/joined on the way out.

    Args:
        args: command-line namespace understood by ``load_config`` and
            ``create_model``.
    """
    config = load_config(args)
    detection_thresh = config.getfloat('predict', 'detection_thresh')
    min_num_keypoints = config.getint('predict', 'min_num_keypoints')
    # Loop-invariant settings: read once instead of once per frame.
    visbbox = config.getboolean('predict', 'visbbox')
    model_name = config.get('model_param', 'model_name')
    model = create_model(args, config)

    # Optional decorative overlay rotated on top of the drawn output.
    if os.path.exists('mask.png'):
        mask = Image.open('mask.png').resize((200, 200))
    else:
        mask = None

    cap = cv2.VideoCapture(0)  # get input from usb camera
    # cap = cv2.VideoCapture("/home/fabian/Documents/dataset/videos/test4.mp4")
    if cap.isOpened() is False:
        print('Error opening video stream or file')
        exit(1)
    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
    cap.set(cv2.CAP_PROP_FPS, 60)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    logger.info('camera will capture {} FPS'.format(cap.get(cv2.CAP_PROP_FPS)))

    capture = Capture(cap, model.insize)
    predictor = Predictor(model=model, cap=capture)
    capture.start()
    predictor.start()

    fps_time = 0
    degree = 0  # current rotation of the mask overlay (degrees)
    main_event = threading.Event()

    try:
        while not main_event.is_set() and cap.isOpened():
            degree = (degree + 5) % 360
            try:
                image, feature_map = predictor.get()
                humans = get_humans_by_feature(
                    model, feature_map, detection_thresh, min_num_keypoints)
            except queue.Empty:
                continue
            except Exception as e:
                # Report the failure instead of silently ending the loop.
                print('predictor error: {}'.format(e))
                break

            pil_img = Image.fromarray(image)
            pil_img = draw_humans(
                model.keypoint_names,
                model.edges,
                pil_img,
                humans,
                mask=mask.rotate(degree) if mask else None,
                visbbox=visbbox,
            )

            img_with_humans = cv2.cvtColor(np.asarray(pil_img), cv2.COLOR_RGB2BGR)
            msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
            msg += ' ' + model_name
            cv2.putText(img_with_humans,
                        'FPS: %f' % (1.0 / (time.time() - fps_time)),
                        (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            img_with_humans = cv2.resize(
                img_with_humans, (3 * model.insize[0], 3 * model.insize[1]))
            cv2.imshow('Pose Proposal Network' + msg, img_with_humans)
            fps_time = time.time()
            # Press Esc to exit.
            if cv2.waitKey(1) == 27:
                main_event.set()
    except KeyboardInterrupt:
        main_event.set()
    except Exception as e:
        print(e)
    finally:
        # Always shut the worker threads down and release the camera,
        # even on errors (the original leaked the VideoCapture handle).
        capture.stop()
        predictor.stop()
        capture.join()
        predictor.join()
        cap.release()
def high_speed(args, viewer):
    """Capture/predict/display loop reading from a ZED stereo camera.

    Opens the ZED at HD720 with depth + positional tracking, feeds
    frames through the ``Capture`` and ``Predictor`` worker threads,
    lifts detections to 3D with the depth map, and shows annotated
    frames both in an OpenCV window and in *viewer*.  Press Esc to
    exit.  The camera and worker threads are always closed/joined on
    the way out.

    Args:
        args: command-line namespace understood by ``load_config`` and
            ``create_model``.
        viewer: 3D viewer object; its ``edges``, ``update_text`` and
            ``update_humans`` members are used here.
    """
    config = load_config(args)
    detection_thresh = config.getfloat('predict', 'detection_thresh')
    min_num_keypoints = config.getint('predict', 'min_num_keypoints')
    # Loop-invariant settings: read once instead of once per frame.
    visbbox = config.getboolean('predict', 'visbbox')
    model = create_model(args, config)

    svo_file_path = None  # "/home/adujardin/Downloads/5m.svo"  # config.get('zed', 'svo_file_path')
    init_cap_params = sl.InitParameters()
    if svo_file_path:
        print("Loading SVO file " + svo_file_path)
        init_cap_params.svo_input_filename = svo_file_path
        init_cap_params.svo_real_time_mode = True
    init_cap_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    init_cap_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_ULTRA
    init_cap_params.coordinate_units = sl.UNIT.UNIT_METER
    init_cap_params.depth_stabilization = True
    init_cap_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP

    cap = sl.Camera()
    if not cap.is_opened():
        print("Opening ZED Camera...")
    status = cap.open(init_cap_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()
    py_transform = sl.Transform()
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    cap.enable_tracking(tracking_parameters)

    capture = Capture(cap, model.insize)
    predictor = Predictor(model=model, cap=capture)
    capture.start()
    predictor.start()

    fps_time = 0
    main_event = threading.Event()
    viewer.edges = model.edges

    # Window caption is loop-invariant: build it once.
    msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
    msg += ' ' + config.get('model_param', 'model_name')

    try:
        while not main_event.is_set() and cap.is_opened():
            try:
                image, feature_map, depth = predictor.get()
                humans = get_humans_by_feature(
                    model, feature_map, detection_thresh, min_num_keypoints)
                humans_3d = get_humans3d(humans, depth, model)
            except queue.Empty:
                continue
            except Exception as e:
                print(e)
                break

            pil_img = Image.fromarray(image)
            pil_img = draw_humans(
                model.keypoint_names,
                model.edges,
                pil_img,
                humans,
                None,
                visbbox=visbbox,
            )

            img_with_humans = cv2.cvtColor(np.asarray(pil_img), cv2.COLOR_RGB2BGR)
            img_with_humans = cv2.resize(
                img_with_humans, (700, 400))  # (3 * model.insize[0], 3 * model.insize[1])
            fps_display = 'FPS: %f' % (1.0 / (time.time() - fps_time))
            str_to_dsplay = msg + " " + fps_display
            cv2.putText(img_with_humans, fps_display, (10, 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.imshow('Pose Proposal Network' + msg, img_with_humans)
            viewer.update_text(str_to_dsplay)
            viewer.update_humans(humans_3d)
            fps_time = time.time()
            # Press Esc to exit.  (A stray bare `exit` expression — a
            # no-op — was removed here.)
            if cv2.waitKey(1) == 27:
                main_event.set()
    except KeyboardInterrupt:
        main_event.set()
    except Exception as e:
        print(e)
    finally:
        # Always shut the worker threads down and close the ZED camera,
        # even on errors.
        capture.stop()
        predictor.stop()
        capture.join()
        predictor.join()
        cap.close()