def main():
    # Parse arguments
    args = parse_commandline_arguments()

    # Loading FlattenConcat plugin library using CDLL has a side
    # effect of loading FlattenConcat plugin into internal TensorRT
    # PluginRegistry data structure. This will be needed when parsing
    # the network into UFF, since some operations will need to use this plugin.
    try:
        ctypes.CDLL(PATHS.get_flatten_concat_plugin_path())
    except OSError:
        print("Error: {}\n{}\n{}".format(
            "Could not find {}".format(PATHS.get_flatten_concat_plugin_path()),
            "Make sure you have compiled the FlattenConcat custom plugin layer",
            "For more details, check README.md"))
        sys.exit(1)

    # Build the TensorRT engine wrapper (FP16 precision, batch size 1)
    trt_inference_wrapper = inference_utils.TRTInference(
        args.engine_path, args.model_file,
        trt_engine_datatype=trt.DataType.HALF, batch_size=1)

    inference_time = []
    cnt = 0
    cap = cv2.VideoCapture(args.video_file)

    start = datetime.now()
    success, frame = cap.read()

    while success:
        cnt += 1
        frame = preprocess_frame(frame)

        start_infer = datetime.now()
        trt_inference_wrapper.infer_numpy(frame)
        end_infer = datetime.now()

        inference_time.append(
            (end_infer - start_infer).total_seconds() * 1000.0)
        success, frame = cap.read()
    end = datetime.now()

    time_delta = (end - start).total_seconds()
    fps = cnt / time_delta

    print(
        "==============================================================================="
    )
    print("Process time (exclude load model time): {time:.2f}s".format(
        time=time_delta))
    print("Total #frames: {cnt}".format(cnt=cnt))
    print("Process FPS (exclude load model time): {time:.2f}".format(time=fps))
    print("Average inference time is: {time:.2f}ms".format(
        time=np.mean(inference_time)))
    print(
        "==============================================================================="
    )
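The snippet above is excerpted from a larger module, so its imports are not shown. A minimal sketch of what it relies on, assuming a utils-package layout like the TensorRT uff_ssd sample (the utils.* module paths are assumptions, not confirmed by the snippet):

# Sketch of the imports the example assumes; the utils.* paths are
# assumptions based on a typical uff_ssd sample layout.
import ctypes
import sys
from datetime import datetime

import cv2
import numpy as np
import tensorrt as trt

import utils.inference as inference_utils  # assumed: provides TRTInference
from utils.paths import PATHS              # assumed: path management helper

# parse_commandline_arguments() and preprocess_frame() are local helpers
# defined elsewhere in the same module.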
Example #2
def main():
    # Parse command line arguments
    args = parse_commandline_arguments()

    # Loading FlattenConcat plugin library using CDLL has a side
    # effect of loading FlattenConcat plugin into internal TensorRT
    # PluginRegistry data structure. This will be needed when parsing
    # the network into UFF, since some operations will need to use this plugin.
    try:
        ctypes.CDLL(PATHS.get_flatten_concat_plugin_path())
    except OSError:
        print(
            "Error: {}\n{}\n{}".format(
                "Could not find {}".format(PATHS.get_flatten_concat_plugin_path()),
                "Make sure you have compiled the FlattenConcat custom plugin layer",
                "For more details, check README.md"
            )
        )
        sys.exit(1)

    # Fetch .uff model path, convert from .pb
    # if needed, using prepare_ssd_model
    ssd_model_uff_path = PATHS.get_model_uff_path(MODEL_NAME)
    if not os.path.exists(ssd_model_uff_path):
        model_utils.prepare_ssd_model(MODEL_NAME)

    # Set up all TensorRT data structures needed for inference
    trt_inference_wrapper = inference_utils.TRTInference(
        args.trt_engine_path, ssd_model_uff_path,
        trt_engine_datatype=args.trt_engine_datatype,
        batch_size=args.max_batch_size)

    # Start measuring time
    inference_start_time = time.time()

    # Get TensorRT SSD model output
    detection_out, keep_count_out = trt_inference_wrapper.infer(
        args.input_img_path)

    # Make PIL.Image for drawing bounding boxes and
    # let analyze_prediction() draw them based on model output
    img_pil = Image.open(args.input_img_path)
    prediction_fields = len(TRT_PREDICTION_LAYOUT)
    for det in range(int(keep_count_out[0])):
        analyze_prediction(detection_out, det * prediction_fields, img_pil)

    # Output total [img load + inference + drawing bboxes] time
    print("Total time taken for one image: {} ms\n".format(
        int(round((time.time() - inference_start_time) * 1000))))

    # Save the output image and print its path
    img_pil.save(args.output)
    print("Saved output image to: {}".format(args.output))
Example #3
def main():
    # Parse command line arguments
    args = parse_commandline_arguments()

    # Loading FlattenConcat plugin library using CDLL has a side
    # effect of loading FlattenConcat plugin into internal TensorRT
    # PluginRegistry data structure. This will be needed when parsing
    # the network into UFF, since some operations will need to use this plugin.
    try:
        ctypes.CDLL(PATHS.get_flatten_concat_plugin_path())
    except OSError:
        print(
            "Error: {}\n{}\n{}".format(
                "Could not find {}".format(PATHS.get_flatten_concat_plugin_path()),
                "Make sure you have compiled the FlattenConcat custom plugin layer",
                "For more details, check README.md"
            )
        )
        sys.exit(1)

    input_shape = args.input_shape or (3, 300, 300)
    print("[SSD Mobilenet Converter] Converting {input_file} to UFF format...".format(
        input_file=args.input_file))

    if args.model_name == "ssd_mobilenet_v2_coco":
        model_utils.model_to_uff(args.input_file, args.output_path,
                                 preprocess_func=model_utils.ssd_mobilenet_v2_unsupported_nodes_to_plugin_nodes,
                                 input_shape=input_shape)
    elif args.model_name == "ssd_mobilenet_v1_coco":
        model_utils.model_to_uff(args.input_file, args.output_path,
                                 preprocess_func=model_utils.ssd_mobilenet_v1_unsupported_nodes_to_plugin_nodes,
                                 input_shape=input_shape)
    else:
        raise ValueError("Got unsupported model: {model_name}".format(model_name=args.model_name))

    print("[SSD Mobilenet V2 Converter] Convert succeed, output is saved to {output_path}"
          .format(output_path=args.output_path))
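For context, a preprocess_func such as ssd_mobilenet_v2_unsupported_nodes_to_plugin_nodes typically uses graphsurgeon to collapse TensorFlow subgraphs that the UFF parser cannot handle into TensorRT plugin nodes; this is where the FlattenConcat plugin loaded above comes in. A rough sketch under that assumption (the namespace and node names are illustrative, not taken from the sample):

import graphsurgeon as gs

def ssd_unsupported_nodes_to_plugin_nodes(ssd_graph, input_shape):
    # Sketch only: map unsupported TF namespaces onto plugin nodes.
    channels, height, width = input_shape

    Input = gs.create_plugin_node(name="Input", op="Placeholder",
                                  shape=(1, channels, height, width))
    # "FlattenConcat_TRT" is assumed to be the op name the custom
    # FlattenConcat plugin registers under.
    concat_box_loc = gs.create_plugin_node(
        "concat_box_loc", op="FlattenConcat_TRT")
    concat_box_conf = gs.create_plugin_node(
        "concat_box_conf", op="FlattenConcat_TRT")

    namespace_plugin_map = {
        "image_tensor": Input,
        "concat": concat_box_loc,    # illustrative namespace names
        "concat_1": concat_box_conf,
    }
    ssd_graph.collapse_namespaces(namespace_plugin_map)
    return ssd_graph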
Example #4

    skip_inference = should_skip_inference(parsed)
    # If inference is not skipped, create files
    # to store its results in
    detection_files = {}
    if not skip_inference:
        for voc_class in VOC_CLASSES:
            detection_files[voc_class] = open(
                os.path.join(parsed['results_dir'],
                             'det_test_{}.txt'.format(voc_class)), 'w')

    # Loading FlattenConcat plugin library using CDLL has a side
    # effect of loading FlattenConcat plugin into internal TensorRT
    # PluginRegistry data structure. This will be needed when parsing
    # the network into UFF, since some operations will need to use this plugin.
    try:
        ctypes.CDLL(PATHS.get_flatten_concat_plugin_path())
    except OSError:
        print("Error: {}\n{}\n{}".format(
            "Could not find {}".format(PATHS.get_flatten_concat_plugin_path()),
            "Make sure you have compiled the FlattenConcat custom plugin layer",
            "For more details, check README.md"))
        sys.exit(1)

    # Fetch frozen model .pb path...
    ssd_model_pb_path = PATHS.get_model_pb_path(MODEL_NAME)
    # ...and .uff path, if needed (converting .pb to .uff if not already done)
    if parsed['inference_backend'] == 'tensorrt':
        ssd_model_uff_path = PATHS.get_model_uff_path(MODEL_NAME)
        if not os.path.exists(ssd_model_uff_path):
            model_utils.prepare_ssd_model(MODEL_NAME)
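The det_test_{}.txt files opened earlier collect per-class detections in the standard Pascal VOC results format: one line per detection, carrying the image id, the confidence score, and the box corners. A minimal sketch of writing one entry, assuming that format:

def write_voc_detection(detection_file, image_id, confidence, bbox):
    # Pascal VOC results format: "image_id score xmin ymin xmax ymax"
    xmin, ymin, xmax, ymax = bbox
    detection_file.write(
        "{} {:.6f} {:.1f} {:.1f} {:.1f} {:.1f}\n".format(
            image_id, confidence, xmin, ymin, xmax, ymax))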