def inferOnPicture(args):
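    """
    Run inference on a single image and write the annotated result
    to 'images/new.jpg'.

    :param args: Command line arguments parsed by `build_argparser()`
    :return: None
    """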
    network = Network()
    network.load_model(args.model, args.device, CPU_EXTENSION)

    shape = network.get_input_shape()
    original = cv2.imread(args.input)
    image1 = np.copy(original)

    image = preprocess(image1, shape)

    network.exec_rcnn(image)

    if network.wait() == 0:
        output = network.get_output()
        print(output.shape)

        # draw_boxes takes the detection threshold, as in infer_on_stream()
        new_image, _ = draw_boxes(image1, output, args.threshold)
        print(new_image.shape)

        cv2.imwrite('images/new.jpg', new_image)


def infer_on_stream(args, client):
    """
    Initialize the inference network, stream video to network,
    and output stats and video.

    :param args: Command line arguments parsed by `build_argparser()`
    :param client: MQTT client
    :return: None
    """
    # Initialise the class
    network = Network()

    # Set Probability threshold for detections
    threshold = args.threshold

    ### Load the model through `network` ###
    network.load_model(args.model, args.device, CPU_EXTENSION)
    shape = network.get_rcnn_shape()

    ### Handle the input stream ###
    single_image_mode = False

    # Checks for webcam input
    if args.input == 'CAM':
        input_im = 0

    # Checks for input image
    elif args.input.endswith('.jpg') or args.input.endswith('.bmp'):
        single_image_mode = True
        input_im = args.input

    # Checks for video file
    else:
        input_im = args.input
        assert os.path.isfile(args.input), "file doesn't exist"

    cap = cv2.VideoCapture(input_im)
    cap.open(input_im)

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # fourcc 0x00000021 is commonly used for MP4 output on Linux
    out = cv2.VideoWriter('out.mp4', 0x00000021, 30, (width, height))

    # Counting state
    last_count = 0       # people count from the last accepted (stable) frame
    total = 0            # total number of people counted so far

    detect_range = 0     # consecutive frames where the count differs from last_count
    average = 0          # running average time a person spends in the frame
    duration_count = 0   # frames elapsed since the last person entered
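    # The per-frame detection count can flicker, so a change is only accepted
    # after it persists for several consecutive frames (see the debounce check
    # inside the loop below).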

    ### Loop until the stream is over ###
    while cap.isOpened():
        ### Read the next frame from the video capture ###
        flag, frame = cap.read()

        # Stop when no more frames are available
        if not flag:
            break

        ### Pre-process the frame to the model's input shape ###
        image = preprocess(frame, shape)

        # Time the asynchronous inference request
        start = time.time()
        ### Start asynchronous inference for the current frame ###
        network.exec_rcnn(image)
        duration = None  # person-stay duration to publish this frame, if any

        ### Wait for the asynchronous request to complete ###
        if network.wait() == 0:

            end = time.time()  # end - start gives the inference latency
            ### Get the results of the inference request ###
            output = network.get_output()

            ### Draw bounding boxes and count the people detected in this frame ###
            frame, persons = draw_boxes(frame, output, threshold)

            ### Calculate the counting statistics ###
            diff = persons - last_count
            duration_count += 1

            # Debounce: only accept a change in the count once it has
            # persisted for more than 4 consecutive frames
            if diff == 0:
                detect_range = 0
            else:
                detect_range += 1

            if detect_range > 4:
                last_count = persons

                if diff > 0:
                    # Person entered: update the total and start timing
                    total += diff
                    duration_count = 0
                else:
                    # Person left: convert frames to seconds (the division
                    # by 10 assumes roughly 10 processed frames per second)
                    duration = duration_count / 10
                    all_dur = average * (total - 1)
                    average = (all_dur + duration) / total

            ### Publish the current count and running total to the MQTT server ###
            ### Topic "person": keys of "count" and "total" ###
            client.publish("person",
                           json.dumps({
                               "count": persons,
                               "total": total
                           }))

            ### Topic "person/duration": key of "duration" ###
            if duration is not None:
                client.publish("person/duration",
                               json.dumps({"duration": int(average)}),
                               qos=0,
                               retain=False)

        ### Send the frame to the FFMPEG server via stdout ###
        sys.stdout.buffer.write(frame)
        sys.stdout.flush()

        # Break if escape key pressed
        key_pressed = cv2.waitKey(1)
        if key_pressed == 27:
            break

    # Release resources and close any open windows
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    ### Disconnect from the MQTT server ###
    client.disconnect()
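

# Example usage (a minimal sketch, assuming the project's `build_argparser()`
# helper and a paho-mqtt client; the broker host and port are placeholders):
#
#     import paho.mqtt.client as mqtt
#     args = build_argparser().parse_args()
#     client = mqtt.Client()
#     client.connect("localhost", 1883, 60)
#     infer_on_stream(args, client)
#     client.disconnect()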