def inference(base_model_name, path_to_npz, data_format, input_files, plot):
    model_func = get_base_model(base_model_name)
    height, width = (368, 432)
    e = measure(
        lambda: TfPoseEstimator(path_to_npz, model_func, target_size=(width, height), data_format=data_format),
        'create TfPoseEstimator')
    t0 = time.time()
    for idx, img_name in enumerate(input_files):
        image = measure(
            lambda: read_imgfile(img_name, width, height, data_format=data_format),
            'read_imgfile')
        humans, heatMap, pafMap = measure(lambda: e.inference(image), 'e.inference')
        tl.logging.info('got %d humans from %s' % (len(humans), img_name))
        if humans:
            for h in humans:
                tl.logging.debug(h)
        if plot:
            if data_format == 'channels_first':
                image = image.transpose([1, 2, 0])
            plot_humans(image, heatMap, pafMap, humans, '%02d' % (idx + 1))
    tot = time.time() - t0
    mean = tot / len(input_files)
    tl.logging.info('inference all took: %f, mean: %f, FPS: %f' % (tot, mean, 1.0 / mean))
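# `measure` is used throughout these entry points but is not defined in this
# section. A minimal sketch, assuming it simply times a zero-argument callable,
# logs the elapsed time under the given label, and returns the result (the
# repository's own helper may differ in its logging details):
import time

import tensorlayer as tl


def measure(thunk, name=None):
    t0 = time.time()
    result = thunk()
    if name:
        tl.logging.info('%s took %f s' % (name, time.time() - t0))
    return result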
def inference_2d(base_model_name, base_npz_path, rgb_files, dep_files):
    height, width, channel = (368, 432, 4)
    base_model_func = get_base_model(base_model_name)
    e_2d = measure(
        lambda: TfPoseEstimator(base_npz_path, base_model_func, (height, width, channel)),
        'create TfPoseEstimator')
    time0 = time.time()
    for idx, (rgb_name, dep_name) in enumerate(zip(rgb_files, dep_files)):
        input_2d, init_h, init_w = measure(
            lambda: read_2dfiles(rgb_name, dep_name, height, width),
            'read_2dfiles')
        humans, heatMap, pafMap = measure(lambda: e_2d.inference(input_2d), 'e_2d.inference')
        plot_humans(input_2d, heatMap, pafMap, humans, '%02d' % (idx + 1))
        for h, pred_2d in enumerate(humans):
            coords2d, _, coords2d_vis = tranform_keypoints2d(pred_2d.body_parts, init_w, init_h, 0.1)
            plot_human2d(rgb_name, dep_name, coords2d, h, coords2d_vis)
        do_plot()
    tot = time.time() - time0
    mean = tot / len(rgb_files)
    print('inference all took: %f, mean: %f, FPS: %f' % (tot, mean, 1.0 / mean))
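# A usage sketch for inference_2d, assuming RGB and depth frames live in two
# parallel, identically ordered directories; the model name, weight path, and
# glob patterns below are placeholders, not part of the original script:
import glob

rgb_files = sorted(glob.glob('data/rgb/*.png'))
dep_files = sorted(glob.glob('data/depth/*.png'))
assert len(rgb_files) == len(dep_files), 'each RGB frame needs a matching depth frame'

inference_2d('vgg', 'models/pose.npz', rgb_files, dep_files)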
def main():
    args = parse_args()
    height, width, channel = 368, 432, 3
    images = []
    for name in args.images.split(','):
        x = read_imgfile(name, width, height, 'channels_first')  # channels_first is required for TensorRT
        images.append(x)

    model_func = _get_model_func(args.base_model)
    model_inputs, model_outputs = model_func()
    input_names = [p.name[:-2] for p in model_inputs]
    output_names = [p.name[:-2] for p in model_outputs]
    print('input names: %s' % ','.join(input_names))
    print('output names: %s' % ','.join(output_names))  # outputs/conf,outputs/paf

    # with tf.Session() as sess:
    sess = tf.InteractiveSession()
    measure(lambda: tl.files.load_and_assign_npz_dict(args.path_to_npz, sess), 'load npz')
    frozen_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, output_names)
    tf_model = tf.graph_util.remove_training_nodes(frozen_graph)
    uff_model = measure(lambda: uff.from_tensorflow(tf_model, output_names), 'uff.from_tensorflow')
    print('uff model created')

    parser = uffparser.create_uff_parser()
    inputOrder = 0  # NCHW, see https://docs.nvidia.com/deeplearning/sdk/tensorrt-api/c_api/_nv_uff_parser_8h_source.html
    parser.register_input(input_names[0], (channel, height, width), inputOrder)
    for name in output_names:
        parser.register_output(name)

    G_LOGGER = trt.infer.ConsoleLogger(trt.infer.LogSeverity.INFO)
    max_batch_size = 1
    max_workspace_size = 1 << 30
    engine = measure(
        lambda: trt.utils.uff_to_trt_engine(G_LOGGER, uff_model, parser, max_batch_size, max_workspace_size),
        'trt.utils.uff_to_trt_engine')
    print('engine created')

    f_height, f_width = (height // 8, width // 8)  # TODO: derive from model_outputs
    post_process = PostProcessor((height, width), (f_height, f_width), 'channels_first')

    for idx, x in enumerate(images):
        conf, paf = measure(lambda: infer(engine, x, 1), 'infer')
        humans, heat_up, paf_up = measure(lambda: post_process(conf, paf), 'post_process')
        print('got %d humans' % (len(humans)))
        plot_humans(x.transpose([1, 2, 0]), heat_up, paf_up, humans, '%02d' % (idx + 1))
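# main() relies on parse_args() providing `images`, `base_model`, and
# `path_to_npz`. A minimal sketch of such a parser (the flag names are an
# assumption; the real script may define them differently):
import argparse


def parse_args():
    parser = argparse.ArgumentParser(
        description='Convert a pose model to a TensorRT engine and run inference.')
    parser.add_argument('--images', type=str, required=True,
                        help='comma-separated list of image paths')
    parser.add_argument('--base-model', dest='base_model', type=str, default='vgg',
                        help='name of the base model, passed to _get_model_func')
    parser.add_argument('--path-to-npz', dest='path_to_npz', type=str, required=True,
                        help='path to the trained .npz checkpoint')
    return parser.parse_args()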
def inference(path_to_freezed_model, input_files):
    h, w = 368, 432
    e = measure(
        lambda: TfPoseestimatorLoader(path_to_freezed_model, target_size=(w, h)),
        'create TfPoseestimatorLoader')
    for idx, img_name in enumerate(input_files):
        image = read_imgfile(img_name, w, h)
        humans, heatMap, pafMap = measure(lambda: e.inference(image), 'inference')
        print('got %d humans from %s' % (len(humans), img_name))
        if humans:
            for human in humans:
                print(human)
        plot_humans(image, heatMap, pafMap, humans, '%02d' % (idx + 1))
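# A hypothetical invocation of the frozen-graph inference entry point; the
# .pb path and image names below are placeholders:
if __name__ == '__main__':
    inference('models/pose_frozen.pb', ['data/example1.jpg', 'data/example2.jpg'])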
                'score': human.score
            }
            result.append(item)
            scores += item['score']
        avg_score = scores / len(humans) if len(humans) > 0 else 0
        logger.info('image: %s humans: %d anns: %d score: %f' % (img_name, len(humans), len(anns), avg_score))

        if config.EVAL.data_idx >= 0:
            if humans:
                for h in humans:
                    logger.info(h)
            if config.EVAL.plot:
                plot_humans(
                    image, heatMap, pafMap, humans, '%06d' % (img_idx + 1),
                    os.path.join(config.EVAL.eval_path, config.LOG.vis_path))

    write_json = os.path.join(config.EVAL.eval_path, 'eval.json')
    with open(write_json, 'w') as fp:
        json.dump(result, fp)

    cocoDt = cocoGt.loadRes(write_json)
    cocoEval = COCOeval(cocoGt, cocoDt, 'keypoints')
    cocoEval.params.imgIds = keys
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()

    write_txt = os.path.join(config.EVAL.eval_path, 'eval.txt')
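    # A sketch (an assumption, not part of the original script) of how the
    # summarized COCO keypoint metrics could be written to eval.txt;
    # cocoEval.stats holds the ten values printed by summarize():
    stat_names = [
        'AP', 'AP(0.50)', 'AP(0.75)', 'AP(M)', 'AP(L)',
        'AR', 'AR(0.50)', 'AR(0.75)', 'AR(M)', 'AR(L)',
    ]
    with open(write_txt, 'w') as f:
        for stat_name, value in zip(stat_names, cocoEval.stats):
            f.write('%s: %f\n' % (stat_name, value))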