Example #1
def main(_):
    args = parse_args()
    cfg = load_module(args.path_to_config)

    infer(cfg, args.input_type, args.input, args.conf_threshold,
          args.dump_to_json, args.show, args.dump_output_video,
          args.path_to_output_video)
Example #2
def main():
  log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
  args = build_argparser().parse_args()
  cfg = load_module(args.path_to_config)
  exec_net, plugin, input_blob, out_blob, shape = load_ir_model(args.model, args.device,
                                                                args.plugin_dir, args.cpu_extension)
  n_batch, channels, height, width = shape


  cur_request_id = 0
  while True:
    frame = cv2.imread(args.input_image)
    img_to_display = frame.copy()
    in_frame = cv2.resize(frame, (width, height))
    in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
    in_frame = in_frame.reshape((n_batch, channels, height, width))

    # Submit the frame to the asynchronous inference request
    exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})
    if exec_net.requests[cur_request_id].wait(-1) == 0:

      # Parse detection results of the current request
      lp_code = exec_net.requests[cur_request_id].outputs[out_blob]
      lp_number = decode_ie_output(lp_code, cfg.r_vocab)
      img_to_display = display_license_plate(lp_number, img_to_display)
      cv2.imshow('License Plate', img_to_display)
      key = cv2.waitKey(0)
      if key == 27:
        break

  del exec_net
  del plugin
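
The resize, HWC-to-CHW transpose, and reshape inside the loop above are the standard preprocessing for an NCHW IR model. Below is a minimal sketch of that step factored into a helper, using only OpenCV and NumPy; prepare_input is an illustrative name, not part of the toolkit.

import cv2
import numpy as np

def prepare_input(frame, width, height):
    """Resize a BGR frame to the network input size and convert HWC layout to NCHW."""
    resized = cv2.resize(frame, (width, height))   # dsize is (width, height)
    chw = resized.transpose((2, 0, 1))             # HWC -> CHW
    return np.expand_dims(chw, axis=0)             # add the batch dimension -> NCHW

# Equivalent to the three in_frame lines above: in_frame = prepare_input(frame, width, height)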
Example #3
def main(_):
  args = parse_args()
  cfg = load_module(args.path_to_config)
  validate(cfg)
Example #4
def main(_):
  args = parse_args()
  cfg = load_module(args.path_to_config)
  eval_loop(cfg)
Example #5
def main(_):
  args = parse_args()
  cfg = load_module(args.path_to_config)

  infer(cfg, args.input_type, args.input, args.conf_threshold, args.dump_to_json, args.show,
        args.dump_output_video, args.path_to_output_video)
Example #6
def main(_):
    args = parse_args()
    cfg = load_module(args.path_to_config)
    export(cfg, args.mo, 1, 'FP32')  # set batch size and precision
Example #7
import cv2
import numpy as np
import tensorflow as tf
# load_module (used below to read the config file) is provided by the surrounding project


def load_graph(path):
    # Read the serialized frozen GraphDef from disk and import it into a fresh graph
    with open(path, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def)
    return graph


if __name__ == "__main__":

    path = "weight/graph.pb.frozen"
    image_path = "data/000005.png"
    image_path = "E:/deepLearning/Synthetic_Chinese_License_Plates/crops/000191.png"
    config_path = "chinese_lp/config.py"

    graph = load_graph(path)
    config = load_module(config_path)

    image = cv2.imread(image_path)
    img = cv2.resize(image, (94, 24))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = np.float32(img)
    img = np.multiply(img, 1.0 / 255.0)

    input = graph.get_tensor_by_name("import/input:0")
    output = graph.get_tensor_by_name("import/d_predictions:0")

    with tf.Session(graph=graph) as sess:
        results = sess.run(output, feed_dict={input: [img]})
    print(results)
    print(config.r_vocab)
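
Example #7 prints the raw d_predictions output together with config.r_vocab, and Example #2 converts the equivalent IR output to text via decode_ie_output(lp_code, cfg.r_vocab). As an illustration of that post-processing step, here is a minimal greedy CTC-style decode; it assumes the prediction is a per-timestep sequence of class indices, that vocab maps indices to characters, and that blank_id is the CTC blank. The toolkit's actual decode_ie_output and r_vocab layout may differ.

import numpy as np

def greedy_ctc_decode(pred_indices, vocab, blank_id):
    """Collapse repeated indices, drop blanks, and map the rest through the vocabulary."""
    chars = []
    prev = blank_id
    for idx in pred_indices:
        if idx != blank_id and idx != prev:
            chars.append(vocab[idx])
        prev = idx
    return "".join(chars)

# Toy demonstration with a made-up vocabulary; index 10 acts as the CTC blank.
toy_vocab = {i: str(i) for i in range(10)}
print(greedy_ctc_decode(np.array([1, 1, 10, 2, 2, 10, 10, 3]), toy_vocab, blank_id=10))  # prints "123"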
Example #8
def main(_):
    args = parse_args()
    cfg = load_module(args.path_to_config)
    export(cfg, args.mo)
Example #9
def main(_):
  args = parse_args()
  cfg = load_module(args.path_to_config)
  train(cfg)
Example #10
def main(_):
  args = parse_args()
  cfg = load_module(args.path_to_config)
  export(cfg, args.mo)
Example #11
def main(_):
  args = parse_args()
  cfg = load_module(args.path_to_config)
  infer(cfg)
Example #12
def main(_):
    args = parse_args()
    cfg = load_module(args.path_to_config)
    eval_loop(cfg)
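
Every example above resolves its configuration through load_module(args.path_to_config); the helper itself comes from the surrounding project and is not shown in any snippet. Below is a minimal sketch of such a helper, assuming the config (e.g. chinese_lp/config.py) is an ordinary Python source file loaded by filesystem path; the project's real implementation may differ.

import importlib.util

def load_module(path):
    """Load a Python source file by path and return it as a module object."""
    spec = importlib.util.spec_from_file_location("config", path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# Usage: cfg = load_module("chinese_lp/config.py"); print(cfg.r_vocab)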