def video(args):
    """Run pose estimation on a live camera stream and display the results.

    Opens the default camera, runs the pose model on each frame, draws the
    detected humans (optionally overlaid with a rotating mask image), and
    shows the annotated frames in an OpenCV window until Esc is pressed or
    the stream ends.

    Args:
        args: parsed command-line arguments understood by ``load_config``
            and ``create_model``.
    """
    config = load_config(args)
    model = create_model(args, config)
    cap = cv2.VideoCapture(0)  # default USB camera
    if not cap.isOpened():
        print('Error opening video stream or file')
        exit(1)
    logger.info('camera will capture {} FPS'.format(cap.get(cv2.CAP_PROP_FPS)))

    # Optional overlay mask, rotated a little further on every frame.
    if os.path.exists('mask.png'):
        mask = Image.open('mask.png')
        mask = mask.resize((200, 200))
    else:
        mask = None

    fps_time = time.time()
    degree = 0
    detection_thresh = config.getfloat('predict', 'detection_thresh')
    min_num_keypoints = config.getint('predict', 'min_num_keypoints')
    while cap.isOpened():
        degree = (degree + 5) % 360
        ret_val, image = cap.read()
        if not ret_val:
            # Camera unplugged or stream ended: `image` is None and
            # cv2.cvtColor would otherwise raise on it.
            logger.info('failed to read frame from capture device')
            break
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = cv2.resize(image, model.insize)
        with chainer.using_config('autotune', True):
            humans = estimate(model,
                              image.transpose(2, 0, 1).astype(np.float32),
                              detection_thresh,
                              min_num_keypoints)
        pilImg = Image.fromarray(image)
        pilImg = draw_humans(
            model.keypoint_names,
            model.edges,
            pilImg,
            humans,
            mask=mask.rotate(degree) if mask else None,
            visbbox=config.getboolean('predict', 'visbbox'),
        )
        img_with_humans = cv2.cvtColor(np.asarray(pilImg), cv2.COLOR_RGB2BGR)
        msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
        msg += ' ' + config.get('model_param', 'model_name')
        # Guard against a zero time delta between very fast frames.
        fps = 1.0 / max(time.time() - fps_time, 1e-6)
        cv2.putText(img_with_humans, 'FPS: %f' % fps,
                    (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        img_with_humans = cv2.resize(
            img_with_humans, (1 * model.insize[0], 1 * model.insize[1]))
        cv2.imshow('Pose Proposal Network' + msg, img_with_humans)
        fps_time = time.time()
        # press Esc to exit
        if cv2.waitKey(1) == 27:
            break

    # Release the camera and close windows even on normal exit paths.
    cap.release()
    cv2.destroyAllWindows()
def main():
    """Command-line entry point.

    Parses arguments, loads the model and config onto the requested GPU,
    runs prediction over the input path, and writes the results to the
    output file given on the command line.
    """
    print("Reading in command-line args...")
    args = parse_cmd_args()

    # Single device string reused for model loading and prediction.
    device = "cuda:{}".format(args.gpu)

    config = load_config(args.config)
    model = load_model(args.model, args.type, device=device)

    print("Evaluate on test set...")
    results = predict_on_input(
        model, args.type, args.path_in, config, args.charchecker, device)

    print("Writing to file {}.".format(args.outfile))
    write_to_file(results, args.outfile)
    print("Done.")
def export_onnx(args):
    """Export the trained model to ONNX and validate the exported graph.

    Loads ``bestmodel.npz`` from ``args.model``, traces the network with a
    zero dummy input of the configured size, writes ``bestmodel.onnx`` next
    to the weights, and runs the ONNX checker on the result.

    Args:
        args: parsed command-line arguments understood by ``load_config``;
            ``args.model`` is the directory holding the trained weights.
    """
    config = load_config(args)
    model = MyModel(config)
    weights_path = os.path.join(args.model, 'bestmodel.npz')
    chainer.serializers.load_npz(weights_path, model)

    # Dummy NCHW input matching the configured inference size.
    w, h = parse_size(config.get('model_param', 'insize'))
    dummy_input = np.zeros((1, 3, h, w), dtype=np.float32)

    output_path = os.path.join(args.model, 'bestmodel.onnx')
    logger.info('begin export')
    with chainer.using_config('train', False):
        onnx_chainer.export(model, dummy_input, filename=output_path)
    logger.info('end export')

    logger.info('run onnx.check')
    exported = onnx.load(output_path)
    onnx.checker.check_model(exported)
    logger.info('done')
def high_speed(args):
    """Threaded pose-estimation demo.

    Capture and inference run in dedicated ``Capture``/``Predictor``
    threads while this (main) thread draws detected humans and displays
    the annotated frames. Press Esc in the window (or Ctrl-C) to stop;
    both worker threads are stopped and joined before returning.

    Args:
        args: parsed command-line arguments understood by ``load_config``
            and ``create_model``.
    """
    config = load_config(args)
    detection_thresh = config.getfloat('predict', 'detection_thresh')
    min_num_keypoints = config.getint('predict', 'min_num_keypoints')
    model = create_model(args, config)

    # Optional overlay mask, rotated a little further on every frame.
    if os.path.exists('mask.png'):
        mask = Image.open('mask.png')
        mask = mask.resize((200, 200))
    else:
        mask = None

    capture = Capture(model.insize)
    predictor = Predictor(model=model, cap=capture)

    capture.start()
    predictor.start()

    fps_time = time.time()
    degree = 0

    main_event = threading.Event()

    try:
        while not main_event.is_set():
            degree = (degree + 5) % 360
            try:
                image, feature_map = predictor.get()
                humans = get_humans_by_feature(
                    model,
                    feature_map,
                    detection_thresh,
                    min_num_keypoints
                )
            except queue.Empty:
                continue
            except Exception as e:
                # Was a bare `break` that silently swallowed predictor
                # errors; log before bailing out of the display loop.
                logger.error('prediction failed: {}'.format(e))
                break
            pilImg = Image.fromarray(image)
            pilImg = draw_humans(
                model.keypoint_names,
                model.edges,
                pilImg,
                humans,
                mask=mask.rotate(degree) if mask else None,
                visbbox=config.getboolean('predict', 'visbbox'),
            )
            img_with_humans = cv2.cvtColor(np.asarray(pilImg),
                                           cv2.COLOR_RGB2BGR)
            msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
            msg += ' ' + config.get('model_param', 'model_name')
            # Guard against a zero time delta between very fast frames.
            fps = 1.0 / max(time.time() - fps_time, 1e-6)
            cv2.putText(img_with_humans, 'FPS: %f' % fps,
                        (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (0, 255, 0), 2)
            img_with_humans = cv2.resize(
                img_with_humans, (3 * model.insize[0], 3 * model.insize[1]))
            cv2.imshow('Pose Proposal Network' + msg, img_with_humans)
            fps_time = time.time()
            # press Esc to exit
            if cv2.waitKey(1) == 27:
                main_event.set()
    except Exception as e:
        print(e)
    except KeyboardInterrupt:
        main_event.set()

    # Always shut the worker threads down cleanly.
    capture.stop()
    predictor.stop()
    capture.join()
    predictor.join()
def high_speed(args):
    """Threaded pose-estimation demo with explicit USB-camera setup.

    Configures the camera for MJPG 1280x720 @ 60 FPS, then runs capture
    and inference in dedicated ``Capture``/``Predictor`` threads while this
    (main) thread draws detected humans and displays the annotated frames.
    Press Esc in the window (or Ctrl-C) to stop; both worker threads are
    stopped and joined before returning.

    Args:
        args: parsed command-line arguments understood by ``load_config``
            and ``create_model``.
    """
    config = load_config(args)
    detection_thresh = config.getfloat('predict', 'detection_thresh')
    min_num_keypoints = config.getint('predict', 'min_num_keypoints')
    model = create_model(args, config)

    # Optional overlay mask, rotated a little further on every frame.
    if os.path.exists('mask.png'):
        mask = Image.open('mask.png')
        mask = mask.resize((200, 200))
    else:
        mask = None

    cap = cv2.VideoCapture(0)  # get input from usb camera
    if not cap.isOpened():
        print('Error opening video stream or file')
        exit(1)

    # Request MJPG so the camera can sustain high frame rates at 720p.
    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
    cap.set(cv2.CAP_PROP_FPS, 60)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    logger.info('camera will capture {} FPS'.format(cap.get(cv2.CAP_PROP_FPS)))

    capture = Capture(cap, model.insize)
    predictor = Predictor(model=model, cap=capture)

    capture.start()
    predictor.start()

    fps_time = time.time()
    degree = 0

    main_event = threading.Event()

    try:
        while not main_event.is_set() and cap.isOpened():
            degree = (degree + 5) % 360
            try:
                image, feature_map = predictor.get()
                humans = get_humans_by_feature(model,
                                               feature_map,
                                               detection_thresh,
                                               min_num_keypoints)
            except queue.Empty:
                continue
            except Exception as e:
                # Was a bare `break` that silently swallowed predictor
                # errors; log before bailing out of the display loop.
                logger.error('prediction failed: {}'.format(e))
                break
            pilImg = Image.fromarray(image)
            pilImg = draw_humans(
                model.keypoint_names,
                model.edges,
                pilImg,
                humans,
                mask=mask.rotate(degree) if mask else None,
                visbbox=config.getboolean('predict', 'visbbox'),
            )
            img_with_humans = cv2.cvtColor(np.asarray(pilImg),
                                           cv2.COLOR_RGB2BGR)
            msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
            msg += ' ' + config.get('model_param', 'model_name')
            # Guard against a zero time delta between very fast frames.
            fps = 1.0 / max(time.time() - fps_time, 1e-6)
            cv2.putText(img_with_humans, 'FPS: %f' % fps,
                        (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (0, 255, 0), 2)
            img_with_humans = cv2.resize(
                img_with_humans, (3 * model.insize[0], 3 * model.insize[1]))
            cv2.imshow('Pose Proposal Network' + msg, img_with_humans)
            fps_time = time.time()
            # press Esc to exit
            if cv2.waitKey(1) == 27:
                main_event.set()
    except Exception as e:
        print(e)
    except KeyboardInterrupt:
        main_event.set()

    # Always shut the worker threads down cleanly.
    capture.stop()
    predictor.stop()
    capture.join()
    predictor.join()
def high_speed(args, viewer):
    """Threaded 3D pose-estimation demo using a ZED stereo camera.

    Capture and inference run in dedicated ``Capture``/``Predictor``
    threads; this (main) thread draws 2D detections in an OpenCV window
    and pushes the corresponding 3D skeletons to ``viewer``. Press Esc in
    the window (or Ctrl-C) to stop; the worker threads are joined and the
    camera is closed before returning.

    Args:
        args: parsed command-line arguments understood by ``load_config``
            and ``create_model``.
        viewer: 3D viewer exposing ``edges``, ``update_text`` and
            ``update_humans``.
    """
    config = load_config(args)
    detection_thresh = config.getfloat('predict', 'detection_thresh')
    min_num_keypoints = config.getint('predict', 'min_num_keypoints')
    model = create_model(args, config)

    # Set to an .svo recording path to replay a file instead of opening
    # the live camera.
    svo_file_path = None
    init_cap_params = sl.InitParameters()
    if svo_file_path:
        print("Loading SVO file " + svo_file_path)
        init_cap_params.svo_input_filename = svo_file_path
        init_cap_params.svo_real_time_mode = True
    init_cap_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    init_cap_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_ULTRA
    init_cap_params.coordinate_units = sl.UNIT.UNIT_METER
    init_cap_params.depth_stabilization = True
    init_cap_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP

    cap = sl.Camera()
    if not cap.is_opened():
        print("Opening ZED Camera...")
    status = cap.open(init_cap_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    # Enable positional tracking so depth/3D output is pose-stabilized.
    py_transform = sl.Transform()
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    cap.enable_tracking(tracking_parameters)

    capture = Capture(cap, model.insize)
    predictor = Predictor(model=model, cap=capture)

    capture.start()
    predictor.start()

    fps_time = time.time()
    main_event = threading.Event()
    viewer.edges = model.edges

    try:
        while not main_event.is_set() and cap.is_opened():
            try:
                image, feature_map, depth = predictor.get()
                humans = get_humans_by_feature(model,
                                               feature_map,
                                               detection_thresh,
                                               min_num_keypoints)
                humans_3d = get_humans3d(humans, depth, model)
            except queue.Empty:
                continue
            except Exception as e:
                print(e)
                break
            pilImg = Image.fromarray(image)
            pilImg = draw_humans(
                model.keypoint_names,
                model.edges,
                pilImg,
                humans,
                None,
                visbbox=config.getboolean('predict', 'visbbox'),
            )
            img_with_humans = cv2.cvtColor(np.asarray(pilImg),
                                           cv2.COLOR_RGB2BGR)
            img_with_humans = cv2.resize(img_with_humans, (700, 400))
            msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
            msg += ' ' + config.get('model_param', 'model_name')
            # Guard against a zero time delta between very fast frames.
            fps_display = 'FPS: %f' % (1.0 / max(time.time() - fps_time, 1e-6))
            str_to_display = msg + " " + fps_display
            cv2.putText(img_with_humans, fps_display, (10, 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.imshow('Pose Proposal Network' + msg, img_with_humans)
            viewer.update_text(str_to_display)
            viewer.update_humans(humans_3d)
            fps_time = time.time()
            key = cv2.waitKey(1)
            # press Esc to exit
            if key == 27:
                # NOTE: the original had a bare `exit` here — a no-op
                # reference to the builtin that was never called; removed.
                main_event.set()
    except Exception as e:
        print(e)
    except KeyboardInterrupt:
        main_event.set()

    # Always shut the worker threads down and release the camera.
    capture.stop()
    predictor.stop()
    capture.join()
    predictor.join()
    cap.close()