Example #1
def infer_on_video(args):
    plugin = Network()
    plugin.load_model(args.m,args.d,CPU_EXTENSION)
    b,c,h,w = plugin.get_input_shape()
    # print("Required shape:",b,c,h,w)

    if args.i == 'CAM':
        args.i = 0
    cap = cv2.VideoCapture(args.i)
    cap.open(args.i)

    while cap.isOpened():
        flag,frame = cap.read()
        if not flag:
            break
        frame = cv2.resize(frame,(w,h))
        r_frame = preprocessing(frame,c,h,w)

        result = get_result(r_frame,plugin)
        output = bounding_boxes(frame,result)

        cv2.imshow("input", output)
        if cv2.waitKey(1000 // int(cap.get(5))) == 27:
            break

    cap.release()
    cv2.destroyAllWindows()
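The `preprocessing` helper called in Example #1 is not shown there. A minimal sketch of what such a helper typically does for an NCHW network input (this is an assumption, not the original implementation):

import cv2


def preprocessing(frame, c, h, w):
    # Hypothetical helper: resize to the network's HxW, move channels first,
    # and add a batch dimension so the result has shape (1, c, h, w).
    image = cv2.resize(frame, (w, h))
    image = image.transpose((2, 0, 1))
    image = image.reshape(1, c, h, w)
    return image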
Example #2
    def __init__(self, model, device, cpu_ext, mqtt_server):
        #required arguments
        self.model = model
        self.device = device
        self.cpu_ext = cpu_ext
        self.mqtt_client = mqtt_server

        #argument for other functions
        self.prob_threshold = None
        self.prob_iou = None
        self.input_ = None
        self._out_name = None
        self.out_serveronly = False
        self.classes = None
        self.frames = {}
        self.prev_boxes = {}
        self.curr_boxes = {}
        #         self.cuur_out = []
        self.tolerate = {}
        self.total_detect = 0
        self.avg_time = 0
        self.async_ = None

        # Initialise the class
        self.infer_network = Network()

        ### TODO: Load the model through `infer_network` ###
        self.infer_network.load_model(self.model, self.device, self.cpu_ext)

        #check if server is available
        self.check_server()
 def __init__(self,
              model_name,
              device='CPU',
              extensions=None,
              threshold=0.60):
     self.threshold = threshold
     self.network = Network(model_name, device, extensions)
def infer_on_stream(args, client):
    """
    Initialize the inference network, stream video to network,
    and output stats and video.

    :param args: Command line arguments parsed by `build_argparser()`
    :param client: MQTT client
    :return: None
    """
    # Initialise the class
    infer_network = Network()
    # Set Probability threshold for detections
    ## prob_threshold = args.prob_threshold

    single_image_mode = False

    cur_request_id = 0
    last_count = 0
    total_count = 0
    start_time = 0
    ### TODO: Load the model through `infer_network` ###
    n, c, h, w = infer_network.load_model(args.model, args.device, 1, 1,
                                          cur_request_id, args.cpu_extension)[1]
def infer_on_stream(args, client):
    """
    Initialize the inference network, stream video to network,
    and output stats and video.

    :param args: Command line arguments parsed by `build_argparser()`
    :param client: MQTT client
    :return: None
    """
    # Initialise the class
    infer_network = Network()
    # Set Probability threshold for detections
    prob_threshold = args.prob_threshold

    ### TODO: Load the model through `infer_network` ###
    infer_network.load_model(args.model, args.device, CPU_EXTENSION, num_requests=0)
    _, _, inh, inw = infer_network.get_input_shape()

    ### TODO: Handle the input stream ###
    try:
        cap = cv2.VideoCapture(args.input)
    except FileNotFoundError:
        print("Cannot locate video file: "+ args.input)
    except Exception as e:
        print("Something else went wrong with the video file: ", e)
Example #6
 def __init__(self, model, device="CPU", extensions=None):
     """
         set instance variables
     """
     self.model_xml = model
     self.device = device
     self.extensions = extensions
     self.infer_network = Network()
def infer_on_stream(args, client):
    """
    Initialize the inference network, stream video to network,
    and output stats and video.

    :param args: Command line arguments parsed by `build_argparser()`
    :param client: MQTT client
    :return: None
    """
    # Initialise the class
    infer_network = Network()
    # Set Probability threshold for detections
    prob_threshold = args.prob_threshold

    ### TODO: Load the model through `infer_network` ###
    plugin = Network()
    plugin.load_model(args.m, args.d, CPU_EXTENSION)
    net_input_shape = plugin.get_input_shape()
    ### TODO: Handle the input stream ###
    cap = cv2.VideoCapture(args.i)
    cap.open(args.i)

    width = int(cap.get(3))
    height = int(cap.get(4))


    ### TODO: Loop until stream is over ###
    while cap.isOpened():
        ### TODO: Read from the video capture ###
        flag, frame = cap.read()
        if not flag:
            break
        key_pressed = cv2.waitKey(60)
        ### TODO: Pre-process the image as needed ###
        p_frame = cv2.resize(frame, (net_input_shape[3], net_input_shape[2]))
        p_frame = p_frame.transpose((2,0,1))
        p_frame = p_frame.reshape(1, *p_frame.shape)

        ### TODO: Start asynchronous inference for specified request ###
        plugin.async_inference(p_frame)

        ### TODO: Wait for the result ###
        if plugin.wait() == 0:
            ### TODO: Get the results of the inference request ###
            result = plugin.extract_output()
            ### TODO: Extract any desired stats from the results ###
            print("result :",result)

            ### TODO: Calculate and send relevant information on ###
            ### current_count, total_count and duration to the MQTT server ###
            ### Topic "person": keys of "count" and "total" ###
            client.publish("person", json.dumps({"count": 0,"total":0}))
            ### Topic "person/duration": key of "duration" ###
            client.publish("person/duration", json.dumps({"duration": 0}))

        ### TODO: Send the frame to the FFMPEG server ###
        sys.stdout.buffer.write(frame)
        sys.stdout.flush()
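The publishes above send placeholder zeros for "count", "total" and "duration". A sketch of how those statistics are usually derived from the SSD detections before publishing (the helper names and threshold are assumptions, not the author's final code):

import json
import time


def count_people(result, prob_threshold=0.5):
    # Count detections above the confidence threshold in an SSD-style output
    # of shape (1, 1, N, 7), where column 2 is the confidence.
    return sum(1 for box in result[0][0] if box[2] >= prob_threshold)


def publish_person_stats(client, current_count, last_count, total_count,
                         start_time):
    # Hypothetical bookkeeping for the "person" and "person/duration" topics.
    if current_count > last_count:
        start_time = time.time()
        total_count += current_count - last_count
    if current_count < last_count:
        duration = int(time.time() - start_time)
        client.publish("person/duration", json.dumps({"duration": duration}))
    client.publish("person", json.dumps({"count": current_count,
                                         "total": total_count}))
    return total_count, start_time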
def load_models(video_caps, args):
    """
    Load the required models

    :param video_caps: List of VideoCap objects
    :param args: Command line arguments
    :return: None
    """
    global check_feed_type

    # Check if one of the feed types is "shopper". If yes, load the face, head pose and mood detection models
    if check_feed_type[0]:
        infer_network_face = Network()
        infer_network_pose = Network()
        infer_network_mood = Network()
        tag_face = {"VPU_HDDL_GRAPH_TAG": "tagFace"}
        tag_pose = {"VPU_HDDL_GRAPH_TAG": "tagPose"}
        tag_mood = {"VPU_HDDL_GRAPH_TAG": "tagMood"}
        nchw_fd = load_model_device(infer_network_face, args.facemodel,
                                    args.facedevice, 1, 1, 2,
                                    args.cpu_extension, tag_face)
        nchw_hp = load_model_device(infer_network_pose, args.posemodel,
                                    args.posedevice, 1, 3, 2,
                                    args.cpu_extension, tag_pose)
        nchw_md = load_model_device(infer_network_mood, args.moodmodel,
                                    args.mooddevice, 1, 1, 2,
                                    args.cpu_extension, tag_mood)

    if check_feed_type[2]:
        infer_network = Network()
        tag_obj = {"VPU_HDDL_GRAPH_TAG": "tagMobile"}
        nchw = load_model_device(infer_network, args.objmodel,
                                 args.objectdevice, 1, 1, 2,
                                 args.cpu_extension, tag_obj)

    if check_feed_type[1]:
        infer_network_person = Network()
        tag_person = {"VPU_HDDL_GRAPH_TAG": "tagPerson"}
        nchw_pr = load_model_device(infer_network_person, args.personmodel,
                                    args.persondevice, 2, 1, 2,
                                    args.cpu_extension, tag_person)

    for video_cap in video_caps:
        if video_cap.type == 'shopper':
            video_cap.infer_network = infer_network_face
            video_cap.infer_network_hp = infer_network_pose
            video_cap.infer_network_md = infer_network_mood
            video_cap.nchw.extend(nchw_fd)
            video_cap.nchw_hp.extend(nchw_hp)
            video_cap.nchw_md.extend(nchw_md)

        if video_cap.type == 'shelf':
            video_cap.infer_network = infer_network
            video_cap.nchw.extend(nchw)

        if video_cap.type == 'traffic':
            video_cap.infer_network = infer_network_person
            video_cap.nchw.extend(nchw_pr)
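`load_model_device` is used here but not defined. A minimal sketch of what it plausibly does, assuming the multi-argument `Network.load_model` signature seen in the earlier example (the parameter names are guesses):

def load_model_device(infer_network, model, device, num_inputs, num_outputs,
                      num_requests, cpu_extension, plugin_config=None):
    # Hypothetical helper: load `model` on `device` through the given Network
    # instance and return its input shape as a list [n, c, h, w].
    infer_network.load_model(model, device, num_inputs, num_outputs,
                             num_requests, cpu_extension, plugin_config)
    return list(infer_network.get_input_shape())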
Example #9
 def load_model(self):
     '''
     TODO: You will need to complete this method.
     This method is for loading the model to the device specified by the user.
     If your model requires any Plugins, this is where you can load them.
     '''
     self.infer_network = Network()
     self.infer_network.load_model(self.model_xml, self.device,
                                   self.extensions)
 def __init__(self, model_name, device='CPU', extensions=None):
     '''
     TODO: Use this to set your instance variables.
     '''
     self.model_xml = model_name
     self.device = device
     self.extensions = extensions
     # Initialise the class
     self.infer_network = Network()
Example #11
def infer_on_video(args):
    plugin = Network()
    plugin.load_model(args.m,args.d,None)
    net_input_shape = plugin.get_input_shape()

    cap = cv2.VideoCapture(args.i)
    cap.open(args.i)

    counter = 0
    incidence_flag = False
    while cap.isOpened():
        flag,frame = cap.read()
        if not flag:
            break
        counter += 1
        r_frame = cv2.resize(frame, (net_input_shape[3],net_input_shape[2]))
        r_frame = r_frame.transpose((2,0,1))
        r_frame = r_frame.reshape(1,*r_frame.shape)

        plugin.async_inference(r_frame)
        if plugin.wait() == 0:
            result = plugin.extract_output()
            incidence_flag = incidence(result,counter,incidence_flag)        
    cap.release()
    cv2.destroyAllWindows()
Example #12
def infer_on_stream(cap, out, width, height, args):
    infer_network = Network()
    infer_network.load_model(args.model, args.device, args.cpu_extension)
    net_input_shape = infer_network.get_input_shape()

    while cap.isOpened():

        flag, frame = cap.read()
        if not flag:
            break

        p_frame = cv2.resize(frame, (net_input_shape[3], net_input_shape[2]))
        p_frame = p_frame.transpose((2, 0, 1))
        p_frame = p_frame.reshape(1, *p_frame.shape)

        infer_network.exec_net(p_frame)

        if infer_network.wait() == 0:

            result = infer_network.get_output()

            frame, current_count = draw_boxes(frame, result, width, height,
                                              float(args.prob_threshold))
            send_update = update_count(current_count,
                                       cap.get(cv2.CAP_PROP_POS_MSEC) / 1000)
            if send_update:
                print(current_count, total_count, duration)

        out.write(frame)
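`update_count` maintains the `last_count`, `total_count` and `duration` values printed above. A hedged sketch of such a stateful helper (module-level globals mirror how those names are referenced; this is not the original code):

last_count = 0
total_count = 0
duration = 0.0
_enter_time = 0.0


def update_count(current_count, timestamp):
    # Hypothetical counter: track when people enter and leave the frame and
    # tell the caller whether an update should be printed or published.
    global last_count, total_count, duration, _enter_time
    send_update = False
    if current_count > last_count:
        _enter_time = timestamp
        total_count += current_count - last_count
        send_update = True
    elif current_count < last_count:
        duration = timestamp - _enter_time
        send_update = True
    last_count = current_count
    return send_update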
Example #13
def infer_on_video(args):
    ### Initialize the Inference Engine
    plugin = Network()

    ### Load the network model into the IE
    plugin.load_model(args.m, args.d)
    input_shape = plugin.get_input_shape()

    # Get and open video capture
    cap = cv2.VideoCapture(args.i)
    cap.open(args.i)

    # Grab the shape of the input
    width = int(cap.get(3))
    height = int(cap.get(4))

    # Create a video writer for the output video
    # The second argument should be `cv2.VideoWriter_fourcc('M','J','P','G')`
    # on Mac, `0x00000021` on Udacity IDE, and `0x00000021` on Linux
    out = cv2.VideoWriter('out.mp4', cv2.VideoWriter_fourcc('M','J','P','G'),
                          30, (width, height))

    # Process frames until the video ends, or process is exited
    while cap.isOpened():
        # Read the next frame
        flag, frame = cap.read()
        if not flag:
            break
        key_pressed = cv2.waitKey(60)

        ### Pre-process the frame
        image = cv2.resize(frame, (input_shape[3], input_shape[2]))
        image = image.transpose((2, 0, 1))
        image = image.reshape(1, 3, input_shape[2], input_shape[3])

        ### Perform inference on the frame
        plugin.async_inference(image)

        ### Get the output of inference
        if plugin.wait() == 0:
            result = plugin.extract_output()
            ### Update the frame to include detected bounding boxes
            for box in result[0][0]:
                if box[2] >= float(args.conf):
                    x_min = int(box[3] * width)
                    y_min = int(box[4] * height)
                    x_max = int(box[5] * width)
                    y_max = int(box[6] * height)
                    cv2.rectangle(frame, (x_min, y_min), (x_max, y_max),
                                  get_color(args.color), 1)
            # Write out the frame once all boxes have been drawn
            out.write(frame)
        # Break if escape key pressed
        if key_pressed == 27:
            break

    # Release the out writer, capture, and destroy any OpenCV windows
    out.release()
    cap.release()
    cv2.destroyAllWindows()
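The codec comment in this example (MJPG on Mac, 0x00000021 on the Udacity IDE and Linux) can be turned into a runtime check; a small sketch of that pattern (not part of the original):

import platform

import cv2


def get_fourcc():
    # Pick the VideoWriter codec value suggested in the comments above.
    if platform.system() == 'Darwin':
        return cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    return 0x00000021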
Example #14
def infer_on_stream():
    # Initialise the class
    infer_network = Network()
    infer_network.load_model(
        "./models/mobilenet_ssd_pedestrian_detection/MobileNetSSD_deploy10695.xml",
        "CPU", CPU_EXTENSION)
    net_input_shape = infer_network.get_input_shape()
    ### TODO: Handle the input stream ###
    img = cv2.imread('./frame1.jpg')

    img = cv2.resize(img, (net_input_shape[3], net_input_shape[2]))
    imgProcessed = img - 127.5
    imgProcessed = imgProcessed * 0.007843
    imgProcessed = imgProcessed.astype(np.float32)
    # Reorder to NCHW and add the batch dimension expected by the network
    imgProcessed = imgProcessed.transpose((2, 0, 1))
    imgProcessed = imgProcessed.reshape(1, *imgProcessed.shape)

    infer_network.exec_net(imgProcessed)
    if infer_network.wait() == 0:
        ### TODO: Get the results of the inference request ###
        height, width = img.shape[:2]
        result = infer_network.get_output()
        h = img.shape[0]
        w = img.shape[1]
        box = result[0, 0, :, 3:7] * np.array([w, h, w, h])
        cls = result[0, 0, :, 1]
        conf = result[0, 0, :, 2]
        for i in range(len(box)):
            aR = abs(box[i][2] - box[i][0]) * (box[i][3] - box[i][1])
            if conf[i] > 0.25 and aR < 30000:
                cv2.rectangle(img, (int(box[i][0]), int(box[i][1])),
                              (int(box[i][2]), int(box[i][3])), (0, 255, 0))
        cv2.imwrite("frameProcessed.jpg", img)
Example #15
def infer_on_camera(args):
    # Convert the args for color and confidence
    args.c = convert_color(args.c)
    args.ct = float(args.ct)
    # Initialize the Inference Engine
    plugin = Network()
    # Load the network model into the IE
    plugin.load_model(args.m, args.d, CPU_EXTENSION)
    # Get input shape
    net_input_shape = plugin.get_input_shape()
    # Get and open video capture
    cap = cv2.VideoCapture(0)
    cap.open(0)  # 0 for default camera
    # Grab the shape of the input
    width = int(cap.get(3))
    height = int(cap.get(4))
    # Process frames until video end or process is exited
    while cap.isOpened():
        flag, frame = cap.read()
        if not flag:
            break
        key_pressed = cv2.waitKey(60)
        # Preprocess the frame
        p_frame = preprocessing(frame, 384, 672)
        # Perform inference on the frame
        plugin.async_inference(p_frame)
        # Get the output of the inference
        if plugin.wait() == 0:
            result = plugin.extract_output()
            # Update the frame to include detected bounding boxes
            frame = draw_boxes(frame, result, args, width, height)
        cv2.imshow("frame", frame)

        if key_pressed == 27:
            break
def infer_on_image(args):
    print('INFER ON IMAGE')
    # Convert the args for confidence
    args.ct = float(args.ct)

    ### Initialize the Inference Engine
    plugin = Network()
    ### Load the network model into the IE
    plugin.load_model(args.m, args.d)
    net_input_shape = plugin.get_input_shape()
    # Read the input image
    image = cv2.imread(args.i)
    h, w = net_input_shape[2], net_input_shape[3]

    ### Preprocess the input image
    preprocessed_image = preprocessing(image, h, w)

    ###  Perform inference on the frame
    plugin.async_inference(preprocessed_image)
    ###  Get the output of inference
    if plugin.wait() == 0:
        output = plugin.extract_output()

    image = draw_boxes(image, output, args, w, h)
    cv2.imwrite(args.o, image)
Example #17
def pred_at_edge(input_img):

    # Initialize the Inference Engine
    plugin = Network()

    # Load the network model into the IE
    plugin.load_model(MODEL, "CPU", CPU_EXTENSION)
    net_input_shape = plugin.get_input_shape()
    # Reading input image
    img = cv2.imread(input_img, cv2.IMREAD_COLOR)

    # Pre-process the image
    expand_img = preprocessing(img, net_input_shape[2], net_input_shape[3])
    final_img = np.expand_dims(expand_img, axis=0)
    # Perform inference on the image
    plugin.async_inference(final_img)

    # Get the output of inference
    if plugin.wait() == 0:
        results = plugin.extract_output()
        pred = np.argmax(results)
        disease = SKIN_CLASSES[pred]
        accuracy = results[0][pred]
        print(disease, accuracy)
        return disease, accuracy
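A brief usage sketch for `pred_at_edge` (the file name is a placeholder):

if __name__ == '__main__':
    # Hypothetical invocation: classify one image and print the top prediction.
    disease, confidence = pred_at_edge('sample_skin_image.jpg')
    print(disease, confidence)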
Example #18
def infer_on_video(args):
    # Convert the args for color and confidence
    args.c = convert_color(args.c)
    args.ct = float(args.ct)

    ### TODO: Initialize the Inference Engine
    plugin = Network()

    ### TODO: Load the network model into the IE
    plugin.load_model(args.m, args.d, CPU_EXTENSION)
    net_input_shape = plugin.get_input_shape()

    # Get and open video capture
    cap = cv2.VideoCapture(args.i)
    cap.open(args.i)

    # Grab the shape of the input
    width = int(cap.get(3))
    height = int(cap.get(4))

    # Create a video writer for the output video
    # The second argument should be `cv2.VideoWriter_fourcc('M','J','P','G')`
    # on Mac, and `0x00000021` on Linux
    out = cv2.VideoWriter(FILE_OUTPUT,
                          cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), 30,
                          (width, height))

    # Process frames until the video ends, or process is exited
    while cap.isOpened():
        # Read the next frame
        flag, frame = cap.read()
        if not flag:
            break
        key_pressed = cv2.waitKey(60)

        ### TODO: Pre-process the frame
        p_frame = cv2.resize(frame, (net_input_shape[3], net_input_shape[2]))
        p_frame = p_frame.transpose((2, 0, 1))
        p_frame = p_frame.reshape(1, *p_frame.shape)

        ### TODO: Perform inference on the frame
        plugin.async_inference(p_frame)

        ### TODO: Get the output of inference
        if plugin.wait() == 0:
            result = plugin.extract_output()
            ### TODO: Update the frame to include detected bounding boxes
            frame = draw_boxes(frame, result, args, width, height)
            # Write out the frame
            out.write(frame)

        # Break if escape key pressed
        if key_pressed == 27:
            break

    # Release the out writer, capture, and destroy any OpenCV windows
    out.release()
    cap.release()
    cv2.destroyAllWindows()
Example #19
def infer_on_video(args):
    ### TODO: Initialize the Inference Engine
    net = Network()

    ### TODO: Load the network model into the IE
    net.load_model(model=args.m, device=args.d, cpu_extension=CPU_EXTENSION)
    net_input_shape = net.get_input_shape()

    # Get and open video capture
    cap = cv2.VideoCapture(args.i)
    cap.open(args.i)

    # Grab the shape of the input
    width = int(cap.get(3))
    height = int(cap.get(4))

    # Create a video writer for the output video
    # The second argument should be `cv2.VideoWriter_fourcc('M','J','P','G')`
    # on Mac, and `0x00000021` on Linux
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out = cv2.VideoWriter("out.mp4", fourcc, 30, (width, height))

    # Process frames until the video ends, or process is exited
    while cap.isOpened():
        # Read the next frame
        flag, frame = cap.read()
        if not flag:
            break
        key_pressed = cv2.waitKey(60)

        ### TODO: Pre-process the frame

        # Taken from lesson 2: preprocess_input.py
        p_frame = cv2.resize(frame, (net_input_shape[3], net_input_shape[2]))
        p_frame = p_frame.transpose((2, 0, 1))
        p_frame = p_frame.reshape(1, *p_frame.shape)

        ### TODO: Perform inference on the frame
        net.async_inference(image=p_frame)
        ### TODO: Get the output of inference
        if net.wait() == 0:
            result = net.extract_output()
            ### TODO: Update the frame to include detected bounding boxes
            try:
                frame = draw_boxes(frame, result, args, width, height)
            except Exception as e:
                print(str(e))
            # Write out the frame
            else:
                out.write(frame)

        # Break if escape key pressed
        if key_pressed == 27:
            break

    # Release the out writer, capture, and destroy any OpenCV windows
    out.release()
    cap.release()
    cv2.destroyAllWindows()
Example #20
def main():
    args = build_argparser().parse_args()

    scores = list()
    scores_wout_mispredictions = list()
    inference_time = list()

    network = Network()
    network.load_model(args.model,
                       cpu_extension=args.cpu_extension,
                       device=args.device)
    net_input_shape = network.get_input_shape()

    if args.image_directory is not None:
        if os.path.exists(args.image_directory):
            for root, dirs, files in os.walk(args.image_directory,
                                             topdown=False):
                counter = 0
                for name in files:
                    if counter >= args.maximum_images:
                        break

                    frame = cv2.imread(os.path.join(root, name))
                    res, inf_time = post_conversion_benchmark(
                        frame, args.model, args.cpu_extension, args.device,
                        args.prob_threshold, args.iou_threshold, network,
                        net_input_shape)

                    if res > 0:
                        scores_wout_mispredictions.append(res)

                    scores.append(res)
                    inference_time.append(inf_time)

                    counter += 1
    else:
        res, inf_time = post_conversion_benchmark(
            args.input_img, args.model, args.cpu_extension, args.device,
            args.prob_threshold, args.iou_threshold, network, net_input_shape)

        if res > 0:
            scores_wout_mispredictions.append(res)

        scores.append(res)
        inference_time.append(inf_time)

    print("Average score across all images: " + str(np.mean(scores)))
    print("Max score across all images: " + str(np.max(scores)))
    print("Average inference time: " + str(np.mean(inference_time)) + "ms")
    print("Average score disregarding mis-predictions: " +
          str(np.mean(scores_wout_mispredictions)))
    print("Minimum score disregarding mis-predictions: " +
          str(np.min(scores_wout_mispredictions)))
def load_models(video_caps):
    """
    Load the required models

    :param video_caps: List of VideoCap objects
    :return: None
    """
    global check_feed_type
    plugin = None

    face_device = os.environ.get('FACE_DEVICE', 'CPU')
    mood_device = os.environ.get('MOOD_DEVICE', 'CPU')
    pose_device = os.environ.get('POSE_DEVICE', 'CPU')
    obj_device = os.environ.get('OBJ_DEVICE', 'CPU')

    cpu_extension = os.environ.get('CPU_EXTENSION')
    face_model = os.environ.get('FACE_MODEL')
    pose_model = os.environ.get('POSE_MODEL')
    mood_model = os.environ.get('MOOD_MODEL')
    obj_model = os.environ.get('OBJ_MODEL')

    # Check if one of the feed types is "shopper". If yes, load the face, head pose and mood detection models
    if check_feed_type[0]:
        assert face_model, 'Please specify the path to face detection model using the environment variable FACE_MODEL'
        assert pose_model, 'Please specify the path to head pose model using the environment variable POSE_MODEL'
        assert mood_model, 'Please specify the path to mood detection model using the environment variable MOOD_MODEL'
        infer_network_face = Network()
        infer_network_pose = Network()
        infer_network_mood = Network()

        nchw_fd = load_model_device(infer_network_face, face_model, face_device, 1, 1, 0, cpu_extension)
        nchw_hp = load_model_device(infer_network_pose, pose_model, pose_device, 1, 3, 0, cpu_extension)
        nchw_md = load_model_device(infer_network_mood, mood_model, mood_device, 1, 1, 0, cpu_extension)

    # Check if one of the feed types is "traffic" or "shelf". If yes, load the mobilenet-ssd model
    if check_feed_type[1] or check_feed_type[2]:
        assert obj_model, 'Please specify the path to object detection model using the environment variable OBJ_MODEL'
        infer_network = Network()
        nchw = load_model_device(infer_network, obj_model, obj_device, 1, 1, 2, cpu_extension)

    for video_cap in video_caps:
        if video_cap.type == 'shopper':
            video_cap.infer_network = infer_network_face
            video_cap.infer_network_hp = infer_network_pose
            video_cap.infer_network_md = infer_network_mood
            video_cap.nchw.extend(nchw_fd)
            video_cap.nchw_hp.extend(nchw_hp)
            video_cap.nchw_md.extend(nchw_md)

        if video_cap.type == 'traffic' or video_cap.type == 'shelf':
            video_cap.infer_network = infer_network
            video_cap.nchw.extend(nchw)
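Since this variant of `load_models` is configured entirely through environment variables, a short sketch of setting them from Python before it runs (the paths are placeholders, not values from the original):

import os

# Hypothetical configuration for load_models(video_caps); adjust paths to the IR files.
os.environ.setdefault('FACE_MODEL', 'models/face-detection.xml')
os.environ.setdefault('POSE_MODEL', 'models/head-pose-estimation.xml')
os.environ.setdefault('MOOD_MODEL', 'models/emotions-recognition.xml')
os.environ.setdefault('OBJ_MODEL', 'models/mobilenet-ssd.xml')
os.environ.setdefault('FACE_DEVICE', 'CPU')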
Example #22
def infer_on_video(args):
    ### TODO: Initialize the Inference Engine
    plugin = Network()
    ### TODO: Load the network model into the IE
    plugin.load_model(args.m, args.d, CPU_EXTENSION)
    net_input_shape = plugin.get_input_shape()
    print('Network input shape (depends on the model used):\n', net_input_shape)
    # [B, C, H, W]

    # Get and open video capture
    cap = cv2.VideoCapture(args.i)
    cap.open(args.i)

    print("capture 3: \n ", cap.get(3))  # video width
    print("capture 4: \n ", cap.get(4))  # video heigth
    # Grab the shape of the input
    width = int(cap.get(3))
    height = int(cap.get(4))

    # Create a video writer for the output video
    # The second argument should be `cv2.VideoWriter_fourcc('M','J','P','G')`
    # on Mac, and `0x00000021` on Linux
    out = cv2.VideoWriter('out.mp4', 0x00000021, 30, (width, height))

    # Process frames until the video ends, or process is exited
    while cap.isOpened():
        # Read the next frame
        flag, frame = cap.read()
        if not flag:
            break
        key_pressed = cv2.waitKey(60)

        ### TODO: Pre-process the frame(each frame is an image)
        preproced_frame = cv2.resize(frame,
                                     (net_input_shape[3], net_input_shape[2]))
        preproced_frame = preproced_frame.transpose((2, 0, 1))
        preproced_frame = preproced_frame.reshape(1, *preproced_frame.shape)

        ### TODO: Perform inference on the frame
        plugin.async_inference(preproced_frame)
        ### TODO: Get the output of inference
        if plugin.wait() == 0:
            result = plugin.extract_output()
            #print('results: \n',result)
        ### TODO: Update the frame to include detected bounding boxes
        frame = draw_boxes(frame, result, args, width, height)
        # Write out the frame
        out.write(frame)
        # Break if escape key pressed
        if key_pressed == 27:
            break

    # Release the out writer, capture, and destroy any OpenCV windows
    out.release()
    cap.release()
    cv2.destroyAllWindows()
def emotion_detection(emotions_model, frame, result, args, width, height):
    """
    Detect the emotion of the faces of a frame.
    """
    # Initialize the Inference Engine
    plugin_emotions_detection = Network()

    # Load the network models into the IE
    plugin_emotions_detection.load_model(emotions_model, args.d, CPU_EXTENSION)
    net_input_shape_ed = plugin_emotions_detection.get_input_shape()

    for box in result[0][0]:
        conf = box[2]
        if conf >= args.ct:
            # calculate the rectangle box margins
            x_min = max(int(box[3] * width), 0)
            y_min = max(int(box[4] * height), 0)
            x_max = min(int(box[5] * width), width)
            y_max = min(int(box[6] * height), height)

            # crop the image for emotion detection
            cropped_frame = frame[y_min:y_max, x_min:x_max]
            if cropped_frame.shape[0] and cropped_frame.shape[1]:
                # Draw rectangle box on the input
                cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), (0, 0, 255), 1)

                print('cropped frame: ', cropped_frame.shape)
                # Preprocess the cropped image
                p_frame_ed = preprocessing(cropped_frame, net_input_shape_ed[2], net_input_shape_ed[3])

                # Perform inference on the frame to detect emotion
                plugin_emotions_detection.async_inference(p_frame_ed)

                if plugin_emotions_detection.wait() == 0:
                    result_ed = plugin_emotions_detection.extract_output()

                    # Get the emotions class
                    emotion_class_id = np.argmax(result_ed)
                    emotion_class = EMOTIONS[emotion_class_id]
                    print('emotion detected:', emotion_class)

                    # # Create a rectangle box to display emotion text
                    # sub_img = frame[y_min:y_min+20, x_min:x_max]
                    # white_rect = np.ones(sub_img.shape, dtype=np.uint8) * 255

                    # res = cv2.addWeighted(sub_img, 0.5, white_rect, 0.5, 1.0)

                    # # Putting the image back to its position
                    # frame[y_min:y_min+20, x_min:x_max] = res

                    # Create a rectangle to display the predicted emotion
                    cv2.rectangle(frame, (x_min, y_min), (x_max, y_min + 20), (51, 255, 196), cv2.FILLED)
                    cv2.putText(frame, emotion_class, (x_min + 5, y_min + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                (255, 0, 0), 2)

    return frame
Example #24
def infer_on_video(args, model):
    ### TODO: Connect to the MQTT server

    # Initialize the Inference Engine
    plugin = Network()

    # Load the network model into the IE
    plugin.load_model(model, args.d, CPU_EXTENSION)
    net_input_shape = plugin.get_input_shape()

    # Get and open video capture
    cap = cv2.VideoCapture(args.i)
    cap.open(args.i)

    # Grab the shape of the input
    width = int(cap.get(3))
    height = int(cap.get(4))

    # Process frames until the video ends, or process is exited
    while cap.isOpened():
        # Read the next frame
        flag, frame = cap.read()
        if not flag:
            break
        key_pressed = cv2.waitKey(60)

        # Pre-process the frame
        p_frame = cv2.resize(frame, (net_input_shape[3], net_input_shape[2]))
        p_frame = p_frame.transpose((2, 0, 1))
        p_frame = p_frame.reshape(1, *p_frame.shape)

        # Perform inference on the frame
        plugin.async_inference(p_frame)

        # Get the output of inference
        if plugin.wait() == 0:
            result = plugin.extract_output()
            # Draw the output mask onto the input
            out_frame, classes = draw_masks(result, width, height)
            class_names = get_class_names(classes)
            speed = randint(50, 70)

            ### TODO: Send the class names and speed to the MQTT server
            ### Hint: The UI web server will check for a "class" and
            ### "speedometer" topic. Additionally, it expects "class_names"
            ### and "speed" as the json keys of the data, respectively.

        ### TODO: Send frame to the ffmpeg server

        # Break if escape key pressed
        if key_pressed == 27:
            break

    # Release the capture and destroy any OpenCV windows
    cap.release()
    cv2.destroyAllWindows()
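The TODOs in this example spell out the expected MQTT topics and keys. A hedged sketch of a helper that would satisfy them (not the author's final code):

import json
import sys


def publish_and_stream(client, class_names, speed, out_frame):
    # Per the hints above: topic "class" expects a "class_names" key and
    # topic "speedometer" expects a "speed" key.
    client.publish("class", json.dumps({"class_names": class_names}))
    client.publish("speedometer", json.dumps({"speed": speed}))
    # The ffmpeg server reads raw frames from this process's stdout.
    sys.stdout.buffer.write(out_frame)
    sys.stdout.flush()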
Example #25
def infer_on_video():
    speak('Hello! I am Alicia. Please wait while I boot up the system.')
    # Initialize the Inference Engine
    plugin = Network()
    class_names = []

    # Load the network model into the IE
    plugin.load_model(MODEL, DEVICE)
    net_input_shape = plugin.get_input_shape()

    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size=(640, 480))
    speak('System booted.')
    # allow the camera to warmup
    time.sleep(0.1)
    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):
        # grab the raw image
        image = frame.array

        key_pressed = cv2.waitKey(60)

        # Pre-process the frame
        p_frame = cv2.resize(image, (net_input_shape[3], net_input_shape[2]))
        p_frame = p_frame.transpose((2, 0, 1))
        p_frame = p_frame.reshape(1, *p_frame.shape)

        # Perform inference on the frame
        plugin.async_inference(p_frame)

        # Get the output of inference
        if plugin.wait() == 0:
            result = plugin.extract_output()

            classes = np.transpose(result[0])[1]
            # classes = np.unique(np.transpose(result[0])[1])
            old_class_names = class_names
            class_names = get_class_names(classes)

            # publish the new environment only if something changes
            if class_names and class_names != old_class_names:
                speak_string = ''.join(class_names)
                speak(speak_string)
                print(class_names)
                print("----------------------")

        # clear the stream in preparation for the next frame
        rawCapture.truncate(0)

        # Break if escape key pressed
        if key_pressed == 27:
            break
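`get_class_names` is used in the last two examples but never shown. A minimal sketch, assuming a VOC-style class list and SSD output where the ids index into it (both assumptions):

CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
           'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
           'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
           'train', 'tvmonitor']


def get_class_names(class_ids):
    # Hypothetical helper: map numeric class ids to labels, skipping ids
    # outside the known range.
    return [CLASSES[int(i)] for i in class_ids if 0 <= int(i) < len(CLASSES)]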
Example #26
def perform_inference(args):
    '''
    Performs inference on an input image, given a model.
    '''
    # Create a Network for using the Inference Engine
    plugin = Network()

    # Load the network model into the IE
    plugin.load_model(args.m, args.d, args.c)
    net_input_shape = plugin.get_input_shape()

    # Read the input image
    image = cv2.imread(args.i)

    # Preprocess the input image
    preprocessed_image = preprocessing(image, net_input_shape[3],
                                       net_input_shape[4])

    # Perform synchronous inference on the image
    plugin.sync_inference(preprocessed_image)

    # Obtain the output of the inference request
    output = plugin.extract_output()
    processed_output = handle_asl(output)

    # Create an output image based on network
    output_image = create_output_image(image, processed_output)

    # Save down the resulting image
    cv2.imwrite("outputs/output.png", output_image)
Example #27
def perform_inference(args):
    '''
    Performs inference on an input image, given a model.
    '''
    # Create a Network for using the Inference Engine
    plugin = Network()

    # Load the network model into the IE
    plugin.load_model(args.m, args.d, args.c)
    net_input_shape = plugin.get_input_shape()

    # Read the input image
    image = Image.open(args.i)

    # Preprocess the input image
    preprocessed_image = preprocessing(image)

    # Perform synchronous inference on the image
    plugin.sync_inference(preprocessed_image)

    # Obtain the output of the inference request
    output = plugin.extract_output()

    processed_output = handle_output(output)
    processed_output = MODEL_OUTPUT[processed_output].split()[0]
    ret_string = get_nutritional_data(processed_output, args.id, args.k)

    # Create an output image based on network
    output_image = create_output_image(ret_string)

    # Save down the resulting image
    cv2.imwrite("../outputs/output.png", output_image)
def perform_facerecognition(face,model):
	plugin=Network()
	plugin.load_model(model=model)
	b,c,h,w=plugin.get_input_shape()
	p_image=preprocessing(face,h,w)
	plugin.async_inference(p_image)
	status=plugin.wait()
	if status==0:
		result=plugin.extract_output()
		candidate_embedding=result[0]
		return candidate_embedding
def main():

    args = build_argparser().parse_args()
    # Connect to the MQTT server
    client = connect_mqtt()
    # Perform inference on the input stream
    model = args.model
    Device = args.Device
    CPU_extension = args.cpu_extension
    net = Network()
    net.load_model(model, Device, CPU_extension)

    net_input_shape = net.get_input_shape()['image_tensor']
    net_shape = (net_input_shape[3], net_input_shape[2])
    infer_on_stream(net, args, client, net_shape)
Example #30
def infer(imarray):
    model = "asl-recognition-0003.xml"
    inet = Network.net(model)
    exec_net = Network.load_model(inet, imarray, 'CPU')
    input_blob = next(iter(exec_net.inputs))
    input_layer = inet.inputs[input_blob].shape
    ##### asynch
    asy_net = Network.async_inference(exec_net, imarray, input_blob)
    output_blob = next(iter(exec_net.outputs))
    enc_net = exec_net.requests[0].outputs[output_blob]
    ###### synch
    #syn_net = Network.inf_(exec_net, imarray, input_blob)
    #enc_net = Network.extract_output(syn_net, exec_net)
    code = (np.argmax(enc_net))
    return code
Example #31
    if algo == 'orig':
        edges = GPT_learn_original (tree, data, variables)        
    elif algo == 'mod':
        edges = GPT_learn_modified (tree, data, variables)
    else:
        print('wrong algo')
        sys.exit (-1)
    
    if arg == "predict":
        import networkx as nx
        g = nx.DiGraph ()
        g.add_edges_from (edges)
    
        from inference import Network, get_factors_BN
        n = Network (get_factors_BN (g, variables, data))
        
        with open ('test_data.txt') as f:
            f.readline () #exclude the first row
            for l in f.readlines ():
                values = l.strip ().split ()
                assignment = dict(zip(variables, values))
                print(n.total_joint_prob (assignment))

    elif arg == "graphviz":
        viz_str = viz_tree_str (edges, variables, directed = True)
        print(viz_str)
    elif arg == "sorted_edges":
        for v1, v2 in sort_edges (edges, list (permutations (variables, 2)), edge_weights):
            print "%s %s" %(v1, v2)
    else: