def perform_inference(args):
    """Run synchronous inference on a single input image and save the result.

    args carries the CLI options: m (model), d (device), c (CPU extension),
    i (input image path), t (model type used to pick the output handler).
    """
    # Wrapper around the Inference Engine.
    net = Network()
    # Load the model; load_model returns the NCHW input shape.
    _, _, input_h, input_w = net.load_model(args.m, args.d, args.c)
    # Read the input image from disk.
    image = cv2.imread(args.i)
    # Fit the image to the network's expected input size/layout.
    net_input = preprocessing(image, input_h, input_w)
    # Run a synchronous inference request and pull out its raw result.
    net.sync_inference(net_input)
    raw_output = net.extract_output()
    # Select the post-processing routine for this model type and apply it.
    handler = handle_output(args.t)
    result = handler(raw_output, image.shape)
    # Render the processed result onto the image and persist it.
    annotated = create_output_image(args.t, image, result)
    cv2.imwrite("outputs/{}-output.png".format(args.t), annotated)
def perform_inference(args):
    """Perform inference on an input image, given a model."""
    # Create a Network for using the Inference Engine.
    plugin = Network()
    # Load the network model into the IE.
    plugin.load_model(args.m, args.d, args.c)
    net_input_shape = plugin.get_input_shape()
    # Read the input image.
    image = cv2.imread(args.i)
    # Preprocess the input image.
    # BUG FIX: the input shape is NCHW (length 4), so height is index 2 and
    # width is index 3.  The original passed indices 3 and 4 -- index 4 is
    # out of range (IndexError) and index 3 handed the width in as height.
    preprocessed_image = preprocessing(image,
                                       net_input_shape[2],
                                       net_input_shape[3])
    # Perform synchronous inference on the image.
    plugin.sync_inference(preprocessed_image)
    # Obtain the output of the inference request.
    output = plugin.extract_output()
    # Model-specific handler for this (ASL) network's output.
    processed_output = handle_asl(output)
    # Create an output image based on the network output.
    output_image = create_output_image(image, processed_output)
    # Save down the resulting image.
    cv2.imwrite("outputs/output.png", output_image)
def perform_inference(args):
    """Run a model over one image and write the visualized output to disk."""
    # Inference Engine wrapper; load_model reports the NCHW input shape.
    net = Network()
    _, _, in_h, in_w = net.load_model(args.m, args.d, args.c)
    # Read the input image.
    frame = cv2.imread(args.i)
    # Resize/transpose the frame into the layout the network expects.
    blob = preprocessing(frame, in_h, in_w)
    # Synchronous inference, then fetch the raw request output.
    net.sync_inference(blob)
    raw = net.extract_output()
    # Select the model-specific post-processing routine (keyed on args.t)
    # and feed it the raw output plus the original image shape.
    handler = handle_output(args.t)
    processed = handler(raw, frame.shape)
    # Draw the processed result onto the original frame and save it.
    rendered = create_output_image(args.t, frame, processed)
    cv2.imwrite("outputs/{}-output.png".format(args.t), rendered)
def perform_inference(args):
    """Perform inference on an input image, given a model.

    Applies a model-specific image transform (selected by args.t) before
    inference, then the matching output handler afterwards.
    """
    # Create a Network for using the Inference Engine.
    inference_network = Network()
    # Load the model in the network, and obtain its input shape (NCHW).
    n, c, h, w = inference_network.load_model(args.m, args.d, args.c)
    # Read the input image.
    image = cv2.imread(args.i)
    # Generic preprocessing to the network's expected size/layout.
    preprocessed_image = preprocessing(image, h, w)
    # Model-specific processing, dispatched by type.
    # BUG FIX: the original left `processed_image` undefined (NameError at
    # sync_inference) whenever args.t matched none of the known models, and
    # its bare `except:` hid that failure.  Dispatch through a mapping and
    # fall back to the generic preprocessing instead.
    print("Processing...")
    print("Selected", args.t, "Model")
    model_transforms = {
        "CAR_META": car_meta,
        "POSE": pose_estimation,
        "EMO": emotion,
    }
    transform = model_transforms.get(args.t)
    if transform is not None:
        processed_image = transform(image)
    else:
        print("Error processing model")
        processed_image = preprocessed_image
    # Perform synchronous inference on the image.
    inference_network.sync_inference(processed_image)
    # Obtain the output of the inference request.
    output = inference_network.extract_output()
    # Handle the output of the network, based on args.t.
    process_func = handle_output(args.t)
    processed_output = process_func(output, image.shape)
    # Create an output image based on network output.
    # BUG FIX: narrowed from a bare `except:`; on failure the original left
    # `output_image` undefined and then crashed in cv2.imwrite -- fall back
    # to the unannotated image instead.
    try:
        output_image = create_output_image(args.t, image, processed_output)
        print("Success writing output")
    except Exception as err:
        print("Error writing output")
        print(err)
        output_image = image
    # Save down the resulting image.
    print("Saving output...")
    cv2.imwrite("outputs/{}-output-{}.png".format(args.t, args.n), output_image)
def perform_inference(args):
    """Run inference on an input image and print the recognized class names.

    Unlike the image-writing variants of this exercise, this one reports its
    results on stdout: each entry of the processed output is looked up in the
    CLASSES_REC table and printed.
    """
    # Inference Engine wrapper; load_model returns the NCHW input shape.
    inference_network = Network()
    n, c, h, w = inference_network.load_model(args.m, args.d, args.c)
    # Read and preprocess the input image.
    image = cv2.imread(args.i)
    preprocessed_image = preprocessing(image, h, w)
    # Synchronous inference, then extract the raw result.
    inference_network.sync_inference(preprocessed_image)
    output = inference_network.extract_output()
    # Post-process according to the requested model type.
    process_func = handle_output(args.t)
    processed_output = process_func(output, image.shape)
    # IDIOM FIX: iterate the sequence directly instead of range(len(...)).
    for class_id in processed_output:
        print(CLASSES_REC[class_id])
def perform_inference(args):
    """Perform inference on an input image, given a model."""
    # Create a Network for using the Inference Engine.
    inference_network = Network()
    # Load the model in the network, and obtain its NCHW input shape.
    n, c, h, w = inference_network.load_model(args.m, args.d, args.c)
    # Read the input image.
    image = cv2.imread(args.i)
    # Preprocess the input image to the network's expected size/layout.
    preprocessed_image = preprocessing(image, h, w)
    # Perform synchronous inference on the image.
    inference_network.sync_inference(preprocessed_image)
    # Obtain the output of the inference request.
    output = inference_network.extract_output()
    # Handle the output of the network, based on args.t.
    process_func = handle_output(args.t)
    processed_output = process_func(output, image.shape)
    # Create an output image based on network output; fall back to the raw
    # input image if rendering fails.
    # BUG FIX: narrowed from a bare `except:` -- that form also swallowed
    # KeyboardInterrupt/SystemExit and hid the actual failure reason.
    try:
        output_image = create_output_image(args.t, image, processed_output)
        print("Processed output:", processed_output)
        print("Success")
    except Exception as err:
        output_image = image
        print("Error")
        print(err)
    # Save down the resulting image.
    cv2.imwrite("outputs/{}-output.png".format(args.t), output_image)
def perform_inference(args):
    """Perform inference on an input image, given a model.

    Raises:
        OSError: if the output image cannot be written to disk.
    """
    # Create a Network for using the Inference Engine.
    inference_network = Network()
    # Load the model in the network, and obtain its NCHW input shape.
    n, c, h, w = inference_network.load_model(args.m, args.d, args.c)
    # Read the input image.
    image = cv2.imread(args.i)
    # Preprocess the input image to the network's expected size/layout.
    preprocessed_image = preprocessing(image, h, w)
    # Perform synchronous inference on the image.
    inference_network.sync_inference(preprocessed_image)
    # Obtain the output of the inference request.
    output = inference_network.extract_output()
    # Handle the output of the network, based on args.t.
    output_func = handle_output(args.t)
    processed_output = output_func(output, image.shape)
    # Create an output image based on network output.
    output_image = create_output_image(args.t, image, processed_output)
    # Ensure the output directory exists.  exist_ok=True replaces the
    # original exists()-then-makedirs() pair, which was racy.
    os.makedirs("outputs", exist_ok=True)
    # Save down the resulting image.  cv2.imwrite signals failure via its
    # return value rather than raising, so surface it explicitly; OSError is
    # more specific than the original generic Exception (and still caught by
    # any caller handling Exception).
    path = f"outputs/{args.t}-output.png"
    if not cv2.imwrite(path, output_image):
        raise OSError(f"cv2.imwrite(\"{path}\") failed")
def perform_inference(args):
    """Perform inference on an input image, given a model, and save a JPEG."""
    # Create a network for using the Inference Engine.
    inference_network = Network()
    # Load the model in the network, and obtain its NCHW input shape.
    n, c, h, w = inference_network.load_model(args.m, args.d, args.c)
    # Read the input image.
    image = cv2.imread(args.i)
    # Preprocess the input image to the network's expected size/layout.
    preprocessed_image = preprocessing(image, h, w)
    # Perform synchronous inference on the image.
    inference_network.sync_inference(preprocessed_image)
    # Obtain the output of the inference request.
    output = inference_network.extract_output()
    # Handle the output of the network, based on args.t.
    process_func = handle_output(args.t)
    processed_output = process_func(output, image.shape)
    # Render and save the annotated image.
    # BUG FIX: the original used nested bare `except:` blocks that swallowed
    # every error (including SystemExit), printed 'Success' even when the
    # write had just failed, and assigned a fallback `output_image` that was
    # never used.  Handle each step narrowly and report what went wrong.
    try:
        output_image = create_output_image(args.t, image, processed_output)
    except Exception as err:
        print('Error')
        print(err)
        return
    try:
        cv2.imwrite("outputs/{}-output.jpg".format(args.t), output_image)
    except Exception as err:
        print('Cannot write image')
        print(err)
    else:
        print('Success')
def perform_inference(args):
    """Run one image through the loaded network and save the rendered output."""
    # Build the Inference Engine wrapper; load_model hands back the model's
    # NCHW input shape.
    engine = Network()
    _, _, model_h, model_w = engine.load_model(args.m, args.d, args.c)
    # Load the image from disk and fit it to the model's input layout.
    source_image = cv2.imread(args.i)
    model_input = preprocessing(source_image, model_h, model_w)
    # Synchronous inference, then fetch the raw request output.
    engine.sync_inference(model_input)
    raw_output = engine.extract_output()
    # Post-process according to the requested model type.
    postprocess = handle_output(args.t)
    result = postprocess(raw_output, source_image.shape)
    # Draw the result onto the original image and persist it.
    final_image = create_output_image(args.t, source_image, result)
    cv2.imwrite("outputs/{}-output.png".format(args.t), final_image)
def perform_inference(args):
    ''' Performs inference on an input image/input stream, given a model. '''
    # Publish per-frame results over MQTT (host/port constants defined
    # elsewhere in the file).
    mqtt_client = mqtt.Client()
    mqtt_client.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)
    # Hard-coded to video mode; the image branch below (out = None) is
    # currently unreachable.
    image_flag = False
    # NOTE(review): CODEC and counter_img are assigned but never used here.
    CODEC = 0x00000021
    inference_network = Network()
    inference_network.load_model(args.model, args.device, CPU_EXTENSION)
    # Input shape is NCHW: index 2 = height, index 3 = width.
    net_input_shape = inference_network.get_input_shape()
    net_input_height = net_input_shape[2]
    net_input_width = net_input_shape[3]
    cap = cv2.VideoCapture(args.input)
    cap.open(args.input)
    if cap.isOpened():
        image_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        image_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Writer for the annotated stream; with image_flag False this branch is
    # always taken.  NOTE(review): if the capture failed to open,
    # image_width/height are unbound here -- confirm inputs are always valid.
    if not image_flag:
        out = cv2.VideoWriter('out.avi',
                              cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                              30, (image_width, image_height))
    else:
        out = None
    counter_img = 0
    # Main frame loop: read, preprocess, infer asynchronously, publish.
    while cap.isOpened():
        flag, frame = cap.read()
        if not flag:
            break
        # key_pressed is captured but never acted on.
        key_pressed = cv2.waitKey(1000)
        preprocessed_frame = preprocessing(frame, net_input_height,
                                           net_input_width)
        # Async path is hard-enabled; a sync fallback was presumably removed.
        do_async = True
        if do_async:
            inference_network.async_inference(preprocessed_frame)
            # wait() == 0 means the request completed successfully.
            if inference_network.wait() == 0:
                result = inference_network.extract_output()
                # Overlay segmentation masks and collect the detected classes.
                output_frame, classes = draw_masks(result, image_width,
                                                   image_height)
                class_names = get_class_names(classes)
                # Speed is simulated, not derived from the model output.
                speed = randint(50, 70)
                mqtt_client.publish("class",
                                    json.dumps({"class_names": class_names}))
                mqtt_client.publish("speedometer",
                                    json.dumps({"speed": speed}))
                #print(output_frame.shape)
                #show_frame(output_frame)
                # Stream raw frame bytes to stdout (e.g. piped into ffmpeg).
                sys.stdout.buffer.write(output_frame)
                sys.stdout.flush()
    # Release resources and disconnect cleanly.
    out.release()
    cap.release()
    cv2.destroyAllWindows()
    mqtt_client.disconnect()
if __name__ == "__main__":
    args = get_args()
    # Create a Network for using the Inference Engine.
    inference_network = Network()
    # Load the model in the network, and obtain its NCHW input shape.
    n, c, h, w = inference_network.load_model(args.m, args.d, args.c)
    bounding_boxes = []
    # Read the input image.
    image = cv2.imread(args.i)
    # Preprocess the input image.
    preprocessed_image = preprocessing(image, h, w)
    # Perform synchronous inference on the image.
    inference_network.sync_inference(preprocessed_image)
    # Obtain the output of the inference request.
    output = inference_network.extract_output()
    # Each detection row is (image_id, label, conf, x_min, y_min, x_max,
    # y_max); coordinates appear to be normalized to [0, 1] (standard SSD
    # DetectionOutput) -- confirm against the model.
    # BUG FIX: the original nested a second identical loop inside
    # `for box in output[0][0]`, appending every detection once per
    # detection (O(n^2) duplicates), and then sliced the image with the raw
    # normalized floats in (x, y) order -- numpy indexes rows (y) first and
    # requires integer indices.
    img_h, img_w = image.shape[:2]
    for detection in output[0][0]:
        image_id, label, conf, x_min, y_min, x_max, y_max = tuple(detection)
        if conf > 0.5:
            bounding_boxes.append((x_min, y_min, x_max, y_max))
            bounding_box = (x_min, y_min, x_max, y_max)
            # Scale normalized coordinates to pixels; rows first, then cols.
            x1, y1 = int(x_min * img_w), int(y_min * img_h)
            x2, y2 = int(x_max * img_w), int(y_max * img_h)
            frame = image[y1:y2, x1:x2]