Example #1
def main():
    # Parse command line arguments
    args = parse_commandline_arguments()

    # Fetch .uff model path
    ssd_model_uff_path = PATHS.get_model_uff_path(MODEL_NAME)
    # convert from .pb if needed, using prepare_ssd_model
    if not os.path.exists(ssd_model_uff_path):
        model_utils.prepare_ssd_model(MODEL_NAME)
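parse_commandline_arguments() is referenced by every example on this page but never shown. Below is a minimal sketch of what it might look like, assuming argparse; every flag name is a guess inferred from how args is used in the examples, not taken from the source:

import argparse

def parse_commandline_arguments():
    # Hypothetical reconstruction; all flag names are assumptions.
    parser = argparse.ArgumentParser(
        description='Run SSD object detection with TensorRT')
    parser.add_argument('input_img_path',
                        help='image to run inference on')
    parser.add_argument('-o', '--output', default='output.ppm',
                        help='path where the annotated image is saved')
    parser.add_argument('--trt_engine_path', default='engine.buf',
                        help='path of the serialized TensorRT engine')
    parser.add_argument('--max_batch_size', type=int, default=1,
                        help='maximum TensorRT engine batch size')
    return parser.parse_args()

The real helper also supplies args.trt_engine_datatype (a trt.DataType value) and, in some of the variants below, args.calib_dataset and args.camera; those are omitted here.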
Example #2
def main():
    # Parse command line arguments
    args = parse_commandline_arguments()

    # Loading the FlattenConcat plugin library using CDLL has the side
    # effect of registering the FlattenConcat plugin in TensorRT's
    # internal PluginRegistry data structure. This is needed when parsing
    # the UFF network, since some operations are implemented by this plugin
    try:
        ctypes.CDLL(PATHS.get_flatten_concat_plugin_path())
    except OSError:
        print(
            "Error: {}\n{}\n{}".format(
                "Could not find {}".format(PATHS.get_flatten_concat_plugin_path()),
                "Make sure you have compiled FlattenConcat custom plugin layer",
                "For more details, check README.md"
            )
        )
        sys.exit(1)

    # Fetch .uff model path, convert from .pb
    # if needed, using prepare_ssd_model
    ssd_model_uff_path = PATHS.get_model_uff_path(MODEL_NAME)
    if not os.path.exists(ssd_model_uff_path):
        model_utils.prepare_ssd_model(MODEL_NAME)

    # Set up all TensorRT data structures needed for inference
    trt_inference_wrapper = inference_utils.TRTInference(
        args.trt_engine_path, ssd_model_uff_path,
        trt_engine_datatype=args.trt_engine_datatype,
        batch_size=args.max_batch_size)

    # Start measuring time
    inference_start_time = time.time()

    # Get TensorRT SSD model output
    detection_out, keep_count_out = \
        trt_inference_wrapper.infer(args.input_img_path)

    # Make PIL.Image for drawing bounding boxes and
    # let analyze_prediction() draw them based on model output
    img_pil = Image.open(args.input_img_path)
    prediction_fields = len(TRT_PREDICTION_LAYOUT)
    for det in range(int(keep_count_out[0])):
        analyze_prediction(detection_out, det * prediction_fields, img_pil)

    # Output total [img load + inference + drawing bboxes] time
    print("Total time taken for one image: {} ms\n".format(
        int(round((time.time() - inference_start_time) * 1000))))

    # Save the output image and print its path
    img_pil.save(args.output)
    print("Saved output image to: {}".format(args.output))
Example #3
def main():

    # Parse command line arguments
    args = parse_commandline_arguments()

    # Fetch .uff model path, convert from .pb
    # if needed, using prepare_ssd_model
    ssd_model_uff_path = PATHS.get_model_uff_path(MODEL_NAME)
    if not os.path.exists(ssd_model_uff_path):
        model_utils.prepare_ssd_model(MODEL_NAME)

    # Set up all TensorRT data structures needed for inference
    trt_inference_wrapper = inference_utils.TRTInference(
        args.trt_engine_path,
        ssd_model_uff_path,
        trt_engine_datatype=args.trt_engine_datatype,
        calib_dataset=args.calib_dataset,
        batch_size=args.max_batch_size)

    print("TRT ENGINE PATH", args.trt_engine_path)

    if args.camera:
        print('Running webcam:', args.camera)
        # Define the video stream
        # Change the index only if you have more than one webcam
        cap = cv2.VideoCapture(0)

        # Loop for running inference on frames from the webcam
        while True:
            # Read a frame from the webcam
            ret, image_np = cap.read()

            # Actually run inference
            detection_out, keep_count_out = trt_inference_wrapper.infer_webcam(
                image_np)

            # Overlay the bounding boxes on the image
            # let analyze_prediction() draw them based on model output
            img_pil = Image.fromarray(image_np)
            prediction_fields = len(TRT_PREDICTION_LAYOUT)
            for det in range(int(keep_count_out[0])):
                analyze_prediction(detection_out, det * prediction_fields,
                                   img_pil)
            final_img = np.asarray(img_pil)

            # Display output
            cv2.imshow('object detection', final_img)

            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
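Note that the loop above destroys the display window on 'q' but never releases the capture device. A minimal skeleton of the same loop with cleanup in a finally block; run_capture_loop and infer_fn are illustrative names, not from the example:

import cv2

def run_capture_loop(infer_fn, device=0):
    cap = cv2.VideoCapture(device)
    try:
        while True:
            ret, frame = cap.read()
            if not ret:  # stream ended or camera unplugged
                break
            infer_fn(frame)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
    finally:
        cap.release()  # always free the device
        cv2.destroyAllWindows()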
Example #4
def main():
    # Parse command line arguments
    # (parse_commandline_arguments is implemented in this file)
    args = parse_commandline_arguments()

    # Fetch .uff model path, convert from .pb
    # if needed, using prepare_ssd_model
    # (get_model_uff_path is implemented in utils/paths.py)
    ssd_model_uff_path = PATHS.get_model_uff_path(MODEL_NAME)
    # If the .uff model does not exist yet,
    if not os.path.exists(ssd_model_uff_path):
        # convert the .pb model to .uff
        # (prepare_ssd_model is implemented in utils/model.py)
        model_utils.prepare_ssd_model(MODEL_NAME)

    # Set up all TensorRT data structures needed for inference
    # (see utils/inference.py for the implementation)
    trt_inference_wrapper = inference_utils.TRTInference(
        args.trt_engine_path, ssd_model_uff_path,
        trt_engine_datatype=args.trt_engine_datatype,
        batch_size=args.max_batch_size)

    # Start measuring time
    inference_start_time = time.time()

    # Get TensorRT SSD model output
    # (see utils/inference.py)
    detection_out, keep_count_out = \
        trt_inference_wrapper.infer(args.input_img_path)

    # Make PIL.Image for drawing bounding boxes and
    # let analyze_prediction() draw them based on model output
    img_pil = Image.open(args.input_img_path)
    prediction_fields = len(TRT_PREDICTION_LAYOUT)
    for det in range(int(keep_count_out[0])):
        # analyze_prediction is implemented in this file
        analyze_prediction(detection_out, det * prediction_fields, img_pil)

    # Output total [img load + inference + drawing bboxes] time
    print("Total time taken for one image: {} ms\n".format(
        int(round((time.time() - inference_start_time) * 1000))))

    # Save the output image and print its path
    img_pil.save(args.output)
    print("Saved output image to: {}".format(args.output))
Example #5
        print(
            "Error: {}\n{}\n{}".format(
                "Could not find {}".format(PATHS.get_flatten_concat_plugin_path()),
                "Make sure you have compiled FlattenConcat custom plugin layer",
                "For more details, check README.md"
            )
        )
        sys.exit(1)

    # Fetch frozen model .pb path...
    ssd_model_pb_path = PATHS.get_model_pb_path(MODEL_NAME)
    # ...and .uff path, if needed (converting .pb to .uff if not already done)
    if parsed['inference_backend'] == 'tensorrt':
        ssd_model_uff_path = PATHS.get_model_uff_path(MODEL_NAME)
        if not os.path.exists(ssd_model_uff_path):
            model_utils.prepare_ssd_model(MODEL_NAME)

    # This block of code sets up and performs inference, if needed
    if not skip_inference:
        # Preprocess VOC dataset if necessary by resizing images
        preprocess_voc()

        # Fetch image list and input .ppm files path
        with open(PATHS.get_voc_image_set_path(), 'r') as f:
            voc_image_numbers = f.readlines()
            voc_image_numbers = [line.strip() for line in voc_image_numbers]
        voc_image_path = PATHS.get_voc_ppm_img_path()

        # The TensorFlow and TensorRT paths differ slightly,
        # so we must treat each one individually
        if parsed['inference_backend'] == 'tensorrt':
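The snippet is truncated here. preprocess_voc() is not shown either; below is a minimal sketch of what it plausibly does, resizing VOC JPEGs to the SSD input size and saving them as .ppm, the format the sample reads. The directory handling and the 300x300 size are assumptions:

import os
from PIL import Image

def preprocess_voc_sketch(jpeg_dir, ppm_dir, size=(300, 300)):
    os.makedirs(ppm_dir, exist_ok=True)
    for name in os.listdir(jpeg_dir):
        if not name.endswith('.jpg'):
            continue
        img = Image.open(os.path.join(jpeg_dir, name)).resize(size)
        img.save(os.path.join(ppm_dir, name.replace('.jpg', '.ppm')))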
Example #6
def main(max_time, min_time, sum_time, num):

    # Parse command line arguments
    args = parse_commandline_arguments()
    outputdir_path = args.outpath
    inputdir_path = args.inpath
    # Fetch .uff model path, convert from .pb
    # if needed, using prepare_ssd_model
    ssd_model_uff_path = PATHS.get_model_uff_path(MODEL_NAME)
    if not os.path.exists(ssd_model_uff_path):
        model_utils.prepare_ssd_model(MODEL_NAME)

    # Set up all TensorRT data structures needed for inference
    trt_inference_wrapper = inference_utils.TRTInference(
        args.trt_engine_path,
        ssd_model_uff_path,
        trt_engine_datatype=args.trt_engine_datatype,
        calib_dataset=args.calib_dataset,
        batch_size=args.max_batch_size)
    ARGS = 3
    print("TRT ENGINE PATH", args.trt_engine_path)

    if args.camera:
        # Define the video stream from the input path
        # (use cv2.VideoCapture(0) to read from a webcam instead)
        cap = cv2.VideoCapture(inputdir_path)
        if not cap.isOpened():
            print("Error opening video stream or file")
            exit()
        else:
            print("Successfully opened video stream.")
        # The default frame resolution is obtained from the capture device
        # and is system dependent; convert it from float to integer.
        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS)

        # Define the codec and create a VideoWriter object;
        # the output is stored in 'output.avi'.
        out = cv2.VideoWriter(outputdir_path + "/output.avi",
                              cv2.VideoWriter_fourcc(*'XVID'), fps,
                              (frame_width, frame_height))

        # Loop for running inference on frames from the webcam
        while True:
            # Read a frame from the video stream
            ret, image_np = cap.read()
            if not ret:
                print("End of video stream.")
                break
            # Actually run inference
            cur, detection_out, keep_count_out = trt_inference_wrapper.infer_webcam(
                image_np)
            max_time = max(max_time, cur)
            min_time = min(min_time, cur)
            sum_time += cur
            num += 1

            # Overlay the bounding boxes on the image
            # let analyze_prediction() draw them based on model output
            img_pil = Image.fromarray(image_np)
            prediction_fields = len(TRT_PREDICTION_LAYOUT)
            for det in range(int(keep_count_out[0])):
                analyze_prediction(detection_out, det * prediction_fields,
                                   img_pil)
            final_img = np.asarray(img_pil)

            out.write(final_img)
        return outputdir_path, max_time, min_time, sum_time, num
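This variant threads timing accumulators through main() and returns them. A hypothetical driver that seeds the accumulators and reports min/max/average latency afterwards, assuming infer_webcam reports per-frame time in seconds:

if __name__ == "__main__":
    out_dir, max_time, min_time, sum_time, num = main(
        max_time=0.0, min_time=float("inf"), sum_time=0.0, num=0)
    if num > 0:
        print("frames: {}  min: {:.1f} ms  max: {:.1f} ms  avg: {:.1f} ms".format(
            num, min_time * 1000, max_time * 1000, sum_time / num * 1000))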
Example #7
def main():

    args = parse_arguments()

    if args.remote_debug:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=('0.0.0.0', int(args.remote_debug_port)),
                            redirect_output=True)
        ptvsd.wait_for_attach()

    # Fetch .uff model path, convert from .pb
    # if needed, using prepare_ssd_model
    if not os.path.exists(args.model_uff_path):
        model.prepare_ssd_model(args.model_name)

    ## import frozen model then convert to uff
    if not os.path.exists(args.model_uff_path):
        model.model_to_uff(args.model_base_path, args.model_uff_path)

    ## build tensorrt engine and save as file
    runtime = TRTInference(args.trt_engine_path,
                           args.model_uff_path,
                           trt_engine_datatype=args.trt_engine_datatype,
                           calib_dataset=args.calib_dataset,
                           batch_size=args.max_batch_size)

    ## initialize test dataset loader
    input_shape = (DetectionModel.get_input_height(),
                   DetectionModel.get_input_width(),
                   DetectionModel.get_input_channels())
    test_loader = coco(directory=args.input_image_dir,
                       batch_size=args.max_batch_size,
                       target_shape=input_shape,
                       shuffle=False)

    if not args.camera and args.input_image_path != '':
        print("Running input image:", args.input_image_path)
        if not os.path.exists(args.input_image_path):
            raise Exception("Input file does not exist!")

        inference_start_time = time.time()

        for i in range(args.num_iter):
            image_np = test_loader.load_image(args.input_image_path)
            runtime.infer(image_np)

        # output inference time
        print("TensorRT inference time: {} ms".format(
            int(
                round((time.time() - inference_start_time) * 1000 /
                      args.num_iter))))

    elif not args.camera and args.input_image_path == '':
        print("Running inference in", args.input_image_dir)
        if not os.path.exists(args.input_image_dir):
            raise Exception("Input directory does not exist!")

        inference_start_time = time.time()

        for i in range(args.num_iter):
            ls_image_np = test_loader.next_batch()
            runtime.infer_batch(ls_image_np, len(ls_image_np))

        print("TensorRT inference time: {} ms".format(
            int(
                round((time.time() - inference_start_time) * 1000 /
                      args.num_iter))))

    else:
        cam = cv2.VideoCapture(0)

        # loop for running inference on frames from the webcam
        while True:
            ret, image_np = cam.read()

            detection_out, keep_count_out = runtime.infer_webcam(image_np)

            image_pil = Image.fromarray(image_np)
            prediction_fields = len(DetectionModel.output_layout)
            for det in range(int(keep_count_out[0])):
                DetectionModel.analyze_prediction(detection_out,
                                                  det * prediction_fields,
                                                  image_pil)
            final_image = np.asarray(image_pil)

            # display output
            cv2.imshow('object detection', final_image)

            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
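To see throughput while the webcam loop runs, a small helper can stamp the measured FPS onto each frame before cv2.imshow; this is an optional addition, not part of the original example:

import time
import cv2

def draw_fps(frame, last_time):
    # Overlay instantaneous FPS in the top-left corner of the frame
    now = time.time()
    fps = 1.0 / max(now - last_time, 1e-6)
    cv2.putText(frame, "{:.1f} FPS".format(fps), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
    return now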