Example no. 1
def perform_inference_traffic_signs(img, inference_network, h, w):
    """
    Performs inference on an input image, given a model.
    """

    # The input frame is passed in directly (no file read needed)
    image = img
    # Preprocess the input image
    preprocessed_image = preprocessing(image, h, w)

    start_time = time.time()
    # Perform synchronous inference on the image
    inference_network.sync_inference(preprocessed_image)
    # Obtain the output of the inference request
    output = inference_network.extract_output()

    # Handle the output of the network for the traffic-sign detector ("TDET"):
    # `handle_output` returns the post-processing function for this model type,
    # which is then applied to the raw network output.
    output_func = handle_output("TDET")
    processed_output = output_func(output, image.shape)

    end_time = time.time()
    print(f"inference time for one frame : {end_time - start_time} secs")
    # Create an output image based on network
    output_image = create_output_image("TDET", image, processed_output)

    # Optional debug display:
    # cv2.imshow("tf_image", cv2.resize(output_image, (700, 350)))
    return processed_output
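This example (and most of the ones below) relies on a preprocessing(image, h, w) helper that is not shown in these snippets. The sketch below is a guess at what such a helper typically does for OpenVINO-style image models (resize to the network input size, reorder HWC -> CHW, add a batch dimension); the project's actual implementation may add model-specific normalization.

import cv2
import numpy as np


def preprocessing(image, h, w):
    """
    Minimal sketch of the assumed helper, not the project's actual code.
    """
    resized = cv2.resize(image, (w, h))     # cv2.resize expects (width, height)
    chw = np.transpose(resized, (2, 0, 1))  # HWC -> CHW
    return chw.reshape(1, *chw.shape)       # add batch dimension: (1, C, H, W)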
Example no. 2
def perform_inference(args,
                      inference_network,
                      h,
                      w,
                      request_id,
                      request_id_list=None,
                      async_flag=False):
    """
    Performs inference on an input image, given a model.
    """

    # Read the input image
    image = cv2.imread(args.i)

    # Preprocess the input image
    preprocessed_image = preprocessing(image, h, w)

    start_time = time.time()

    if async_flag:
        ### Perform inference on the frame
        inference_network.async_inference(preprocessed_image, request_id)

        ### Wait (poll) until the asynchronous request completes,
        ### then get the output of inference
        while True:
            status = inference_network.wait(request_id)
            if status == 0:
                break

        output = inference_network.extract_output_async(request_id)
        # request_id_list defaults to None, so guard before removing
        if request_id_list and request_id in request_id_list:
            request_id_list.remove(request_id)

    else:
        # Perform synchronous inference on the image
        inference_network.sync_inference(preprocessed_image)
        # Obtain the output of the inference request
        output = inference_network.extract_output()

    # Handle the output of the network, based on args.t
    # This will require using `handle_output` to get the correct
    # function, and then feeding the output to that function.
    output_func = handle_output(args.t)
    processed_output = output_func(output, image.shape)

    end_time = time.time()
    print(f"inference time for one frame : {end_time - start_time} secs")
    # Create an output image based on network
    output_image = create_output_image(args.t, image, processed_output)
    # output_image = cross_walk_detection(output_image)
    # Save down the resulting image
    # outfile_path = "outputs/{}-output_INT8.png".format(args.i.split("/")[-1].split(".")[0])
    # print('outfile_path = ', outfile_path)
    # cv2.imwrite(outfile_path, output_image)

    if args.v:
        cv2.imshow("image", output_image)
        cv2.waitKey()
        cv2.destroyAllWindows()
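Both examples above dispatch post-processing through handle_output, which maps a model-type string to the matching post-processing function; the strings used across these snippets are "TDET", "TSIGN", "ROAD_SEG", and "ELE_DEPTH". The sketch below is hypothetical: the passthrough handler only stands in for the project's real post-processing functions.

def handle_output(model_type):
    """
    Sketch of the assumed dispatcher; returns the post-processing function
    for the given model-type string.
    """
    def passthrough(output, input_shape):
        # Placeholder: real handlers would rescale boxes, build masks, etc.
        return output

    handlers = {
        "TDET": passthrough,       # traffic-sign detection
        "TSIGN": passthrough,      # traffic-sign classification
        "ROAD_SEG": passthrough,   # road segmentation
        "ELE_DEPTH": passthrough,  # elevation / depth estimation
    }
    return handlers[model_type]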
Example no. 3
def perform_inference_elev_depth(depth_image, inference_network, h, w):
    """
    Runs synchronous inference on a depth image with the elevation/depth
    model ("ELE_DEPTH") and returns the post-processed output.
    """
    preprocessed_image = preprocess_depth_image(depth_image)
    inference_network.sync_inference(preprocessed_image)
    output = inference_network.extract_output()
    output_func = handle_output("ELE_DEPTH")
    processed_output = output_func(output, depth_image.shape)
    # elev_img_class = np.argmax(processed_output)

    return processed_output
Example no. 4
def perform_inference_signs_img(image, inference_network, h, w):
    """
    Runs synchronous inference with the traffic-sign classifier ("TSIGN")
    and returns the index of the most likely class.
    """
    preprocessed_image = preprocess_sign_classifier(image)
    inference_network.sync_inference(preprocessed_image)
    output = inference_network.extract_output()
    output_func = handle_output("TSIGN")
    processed_output = output_func(output, image.shape)
    traff_sign_class = np.argmax(processed_output)

    return traff_sign_class
Example no. 5
def perform_inference_signs(args, inference_network, h, w):
    """
    Runs the traffic-sign classifier ("TSIGN") on the image given by args.i
    and saves an annotated copy to the outputs/ directory.
    """
    image = io.imread(args.i)
    preprocessed_image = preprocess_sign_classifier(image)
    inference_network.sync_inference(preprocessed_image)
    output = inference_network.extract_output()
    output_func = handle_output("TSIGN")
    processed_output = output_func(output, image.shape)
    output_image = create_output_image("TSIGN", image, processed_output)
    # Save down the resulting image
    outfile_path = "outputs/{}.png".format(args.i.split("/")[-1].split(".")[0])
    print('outfile_path = ', outfile_path)
    cv2.imwrite(outfile_path, output_image)
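Note that cv2.imwrite returns False rather than raising when the target directory does not exist, so a caller of perform_inference_signs may want to create outputs/ first. A minimal example:

import os

os.makedirs("outputs", exist_ok=True)  # make sure the output directory exists
# perform_inference_signs(args, inference_network, h, w)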
Example no. 6
def async_main():
    args = get_args()
    # Create a Network for using the Inference Engine
    inference_network = Network()
    # Load the model in the network, and obtain its input shape
    n, c, h, w = inference_network.load_model(args.m, args.d, args.c)
    request_id = 0
    request_id_free = []
    request_ids_occupied = []
    threads = []
    if args.f:
        for i in range(inference_network.num_requests):
            request_id_free.append(i)

        start_time = time.time()
        for fname in glob.glob(args.f + "/*.jpg"):
            args.i = fname

            # Read the input image
            image = cv2.imread(args.i)

            # Preprocess the input image
            preprocessed_image = preprocessing(image, h, w)

            ### Perform inference on the frame
            print(time.time())
            # Dispatch the frame only if a request slot is free; frames that
            # arrive while all slots are busy are skipped, not queued
            if len(request_id_free) > 0:
                start_time_frame = time.time()
                request_id = request_id_free.pop(0)
                print(f"request id {request_id} started at {start_time_frame}")
                inference_network.async_inference(preprocessed_image,
                                                  request_id)
                request_ids_occupied.append(request_id)

            # Iterate over a copy, since finished request ids are removed
            # from request_ids_occupied inside the loop
            for req_id in list(request_ids_occupied):
                status = inference_network.wait(req_id)
                if status == 0:
                    request_id_free.append(req_id)
                    request_ids_occupied.remove(req_id)
                    output = inference_network.extract_output_async(req_id)
                    output_func = handle_output(args.t)
                    processed_output = output_func(output, image.shape)
                    end_time_frame = time.time()
                    print(
                        f"inference time for one frame, req id {req_id}: {end_time_frame - start_time_frame} secs, ended at {end_time_frame}"
                    )
                else:
                    print("not processed, will revisit")

        end_time = time.time()
        print("time taken : ", (end_time - start_time))
Example no. 7
def perform_inference_road_seg(img, inference_network, h, w, crosswalk=False):
    """
    Performs inference on an input image, given a model.
    """

    # The input frame is passed in directly (no file read needed)
    image = img

    # Preprocess the input image
    preprocessed_image = preprocessing(image, h, w)

    start_time = time.time()
    # Perform synchronous inference on the image
    inference_network.sync_inference(preprocessed_image)
    # Obtain the output of the inference request
    output = inference_network.extract_output()

    # Handle the output of the network for the road-segmentation model
    # ("ROAD_SEG"): `handle_output` returns the post-processing function for
    # this model type, which is then applied to the raw network output.
    output_func = handle_output("ROAD_SEG")
    processed_output = output_func(output, image.shape)

    end_time = time.time()
    print(
        f"road seg inference time for one frame : {end_time - start_time} secs"
    )
    # Create an output image based on network
    output_image = create_output_image("ROAD_SEG", image, processed_output)
    if crosswalk:
        output_image = cross_walk_detection(output_image)
    orig_image = image.copy()
    orig_image = cv2.resize(orig_image,
                            (output_image.shape[1], output_image.shape[0]))
    # print("--------------------------------------------------------- seg image shape ", output_image.shape)
    # Blend the segmentation with the original frame once, then show it and
    # append it to the module-level `seg_video` writer
    blended = cv2.addWeighted(orig_image, 0.5, output_image, 0.5, 0)
    cv2.imshow("road_segmentation", blended)
    seg_video.write(blended)
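perform_inference_road_seg appends each blended frame to a module-level seg_video object that is not defined in these snippets; it is used like a cv2.VideoWriter. A hypothetical setup is sketched below, with the output path, codec, frame rate, and frame size as placeholder values that would need to match the frames actually written.

import cv2

fourcc = cv2.VideoWriter_fourcc(*"mp4v")
seg_video = cv2.VideoWriter("outputs/road_segmentation.mp4",  # placeholder path
                            fourcc, 15.0, (700, 350))         # placeholder fps / frame size

# ... run perform_inference_road_seg(frame, inference_network, h, w) per frame ...

seg_video.release()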