Example #1
0
def main():
    """Run gaze inference on a single grayscale image with an IR model.

    Parses CLI arguments, loads the IR network, displays the input image,
    waits for a keypress, runs one inference, and prints the predicted
    gaze X/Y outputs. Cleans up the network and plugin before returning.
    """
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    cfg = load_module(args.config)  # NOTE(review): value unused here; kept for any import side effects — confirm
    exec_net, plugin, input_blob, out_blob, shape = load_ir_model(
        args.model, args.device, args.plugin_dir, args.cpu_extension)
    n_batch, channels, height, width = shape

    image = cv2.imread(args.input_image, cv2.IMREAD_GRAYSCALE)
    cv2.imshow('sample image', image)
    print("press key to run inference engine")
    cv2.waitKey(0)  # waits until a key is pressed

    # Grayscale input: no HWC->CHW transpose is needed; resize and reshape
    # directly into the network's NCHW input layout.
    in_frame = cv2.resize(image, (width, height))
    print("in_frame shape", in_frame.shape)
    in_frame = in_frame.reshape((n_batch, channels, height, width))

    result = exec_net.infer(inputs={input_blob: in_frame})
    gazeX_out = result[out_blob][0]
    gazeY_out = result[out_blob][1]
    print("gazeX, gazeY out: ")
    print(gazeX_out, gazeY_out)

    # Release the network and plugin explicitly.
    del exec_net
    del plugin
def main():
    """Recognize a license plate in one image with an OpenVINO IR model.

    Loads the network, runs a single inference on the input image, decodes
    the plate number, prints it, and shows it overlaid on the image.
    """
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    cfg = load_module(args.config)
    exec_net, plugin, input_blob, out_blob, shape = load_ir_model(
        args.model, args.device, args.plugin_dir, args.cpu_extension)
    n_batch, channels, height, width = shape

    image = cv2.imread(args.input_image)
    img_to_display = image.copy()

    # Resize, switch HWC -> CHW, then add the batch dimension (NCHW).
    resized = cv2.resize(image, (width, height))
    chw = resized.transpose((2, 0, 1))
    in_frame = chw.reshape((n_batch, channels, height, width))

    result = exec_net.infer(inputs={input_blob: in_frame})
    lp_code = result[out_blob][0]
    lp_number = decode_ie_output(lp_code, cfg.r_vocab)
    print('Output: {}'.format(lp_number))

    img_to_display = display_license_plate(lp_number, img_to_display)
    cv2.imshow('License Plate', img_to_display)
    cv2.waitKey(0)

    # Release the network and plugin explicitly.
    del exec_net
    del plugin
def main():
    """Recognize a license plate in one image with a frozen TensorFlow graph.

    Preprocesses the image to the network's fixed input size, runs the
    graph in a session, decodes the beam-search output, and displays the
    resulting plate number on the original image.
    """
    args = build_argparser().parse_args()

    graph = load_graph(args.model)
    config = load_module(args.config)

    image = cv2.imread(args.input_image)
    img = cv2.resize(image, (94, 24))  # network's fixed input size (W=94, H=24)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = np.float32(img)
    img = np.multiply(img, 1.0 / 255.0)  # scale pixels to [0, 1]

    # Renamed from `input` to avoid shadowing the builtin.
    input_tensor = graph.get_tensor_by_name("import/input:0")
    output_tensor = graph.get_tensor_by_name("import/d_predictions:0")

    with tf.Session(graph=graph) as sess:
        results = sess.run(output_tensor, feed_dict={input_tensor: [img]})
        print(results)

        decoded_lp = decode_beams(results, config.r_vocab)[0]
        print(decoded_lp)

        img_to_display = display_license_plate(decoded_lp, image)
        cv2.imshow('License Plate', img_to_display)
        cv2.waitKey(0)
def main(_):
  """Freeze an LPR checkpoint and convert the frozen graph to OpenVINO IR."""
  args = parse_args()
  config = load_module(args.path_to_config)

  # Use the explicit checkpoint when given, otherwise the newest one.
  checkpoint = args.checkpoint or tf.train.latest_checkpoint(config.model_dir)
  print(checkpoint)
  if not checkpoint or not os.path.isfile(checkpoint + '.index'):
    raise FileNotFoundError(str(checkpoint))

  # Global step is encoded in the checkpoint name: <prefix>-<step>.<suffix>
  step = checkpoint.split('.')[-2].split('-')[-1]
  if args.output_dir:
    output_dir = args.output_dir
  else:
    output_dir = os.path.join(config.model_dir, 'export_{}'.format(step))

  # Freeze the graph first, then hand it to the Model Optimizer.
  frozen_dir = os.path.join(output_dir, 'frozen_graph')
  frozen_graph = freezing_graph(config, checkpoint, frozen_dir)

  export_dir = os.path.join(output_dir, 'IR', args.data_type)

  mo_params = {
    'framework': 'tf',
    'model_name': 'lpr',
    'input': 'input',
    'output': 'd_predictions',
    'reverse_input_channels': True,
    'scale': 255,
    'input_shape': [1] + list(config.input_shape),  # prepend batch size 1
    'data_type': args.data_type,
  }

  execute_mo(mo_params, frozen_graph, export_dir)
def main(_):
    """Freeze an SSD checkpoint and export the frozen graph to OpenVINO IR."""
    args = parse_args()
    config = load_module(args.path_to_config)

    # Use the explicit checkpoint when given, otherwise the newest one.
    checkpoint = args.checkpoint or tf.train.latest_checkpoint(config.MODEL_DIR)
    print(checkpoint)
    if not checkpoint or not os.path.isfile(checkpoint + '.index'):
        raise FileNotFoundError(str(checkpoint))

    # Checkpoint names end in "-<global step>".
    step = checkpoint.split('-')[-1]
    if args.output_dir:
        output_dir = args.output_dir
    else:
        output_dir = os.path.join(config.MODEL_DIR, 'export_{}'.format(step))

    # Freeze the graph before conversion.
    frozen_dir = os.path.join(output_dir, 'frozen_graph')
    frozen_graph, ssd_config_path, train_param, ssd_config = freezing_graph(
        config, checkpoint, frozen_dir)

    export_dir = os.path.join(output_dir, 'IR', args.data_type)

    # Model Optimizer parameters: batch size 1, with the training-time
    # scale/mean preprocessing folded into the converted model.
    mo_params = {
        'model_name': args.model_name,
        'output': ','.join(ssd_config['cut_points']),
        'input_shape': [1] + list(config.input_shape),
        'scale': 1. / train_param.scale,
        'mean_value': [train_param.mean_value] * 3,
        'tensorflow_use_custom_operations_config': ssd_config_path,
        'data_type': args.data_type,
    }
    execute_mo(mo_params, frozen_graph, export_dir)
def main(_):
    """CLI entry point: load the config module and run inference."""
    parsed = parse_args()
    cfg = load_module(parsed.path_to_config)
    infer(cfg,
          parsed.input_type,
          parsed.input,
          parsed.conf_threshold,
          parsed.dump_to_json,
          parsed.show,
          parsed.dump_output_video,
          parsed.path_to_output_video)
def main(_):
    """Resolve a checkpoint and freeze its graph under the export directory."""
    args = parse_args()
    config = load_module(args.path_to_config)

    # Use the explicit checkpoint when given, otherwise the newest one.
    checkpoint = args.checkpoint or tf.train.latest_checkpoint(config.model_dir)
    print("PATH OF CHECKPOINT: {}:".format(checkpoint))

    if not checkpoint or not os.path.isfile(checkpoint + '.index'):
        raise FileNotFoundError(str(checkpoint))

    # Global step is encoded in the checkpoint name: <prefix>-<step>.<suffix>
    step = checkpoint.split('.')[-2].split('-')[-1]
    if args.output_dir:
        output_dir = args.output_dir
    else:
        output_dir = os.path.join(config.model_dir, 'export_{}'.format(step))

    # Freeze the graph into <output_dir>/frozen_graph.
    frozen_dir = os.path.join(output_dir, 'frozen_graph')
    frozen_graph = freezing_graph(config, checkpoint, frozen_dir)
def main(_):
  """Build an Estimator from the config and run the evaluation loop."""
  args = parse_args()
  cfg = load_module(args.path_to_config)

  # Session options come from the config's 'eval' profile.
  sess_cfg = create_session(cfg, 'eval')

  va_estimator = tf.estimator.Estimator(
    model_fn=resnet_v1_10_1,
    params=cfg.resnet_params,
    model_dir=cfg.model_dir,
    config=tf.estimator.RunConfig(session_config=sess_cfg))

  eval_data = InputEvalData(batch_size=cfg.eval.batch_size,
                            input_shape=cfg.input_shape,
                            json_path=cfg.eval.annotation_path)

  eval_loop(va_estimator, eval_data, cfg)
Example #9
0
def main(_):
    """Load the training config and launch training from the given checkpoint."""
    cli_args = parse_args()
    train(load_module(cli_args.path_to_config), cli_args.init_checkpoint)
Example #10
0
def main(_):
    """Load the config module and run the evaluation loop."""
    eval_loop(load_module(parse_args().path_to_config))
Example #11
0
def main(_):
    """Load the config module and run validation."""
    validate(load_module(parse_args().path_to_config))
Example #12
0
def main(_):
    """Load the config module and run inference."""
    infer(load_module(parse_args().path_to_config))