Code Example #1
def postProcess(result_list, width, height, labels_map, out_path, is_async_mode, ren_progress_file_path=None):
    post_process_t = time.time()
    # 0x00000021 is a raw FOURCC tag these samples use for H.264 .mp4 output
    vw = cv2.VideoWriter(out_path, 0x00000021, 30.0, (width, height), True)
    for i, (frame, res) in enumerate(result_list):
        if len(res) > 0:
            placeBoxes(res, labels_map, frame, is_async_mode)
        vw.write(frame)
        if ren_progress_file_path is not None:
            progressUpdate(ren_progress_file_path, time.time() - post_process_t, i + 1, len(result_list))
    vw.release()
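The progressUpdate helper called throughout these examples is not shown in any of the listings. A minimal sketch of what it plausibly does, assuming the convention of writing percent complete, estimated time remaining, and estimated total time to a small text file for a progress dashboard; the exact fields and format are assumptions:

def progressUpdate(file_path, time_diff, frame_count, frame_length):
    # Hypothetical reconstruction, not the verified original helper
    progress = round(100 * (frame_count / frame_length), 1)  # percent complete
    remaining_time = round((time_diff / frame_count) * (frame_length - frame_count), 1)
    estimated_total = round(time_diff + remaining_time, 1)
    with open(file_path, "w") as f:
        f.write(str(progress) + '\n')
        f.write(str(remaining_time) + '\n')
        f.write(str(estimated_total) + '\n')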
Code Example #2
indices_validation = [40, 63, 43, 55, 99, 101, 19, 46]  # [40]
val_id = 1
infer_time = 0
process_time_start = time.time()
progress_file_path = os.path.join(png_directory, "i_progress.txt")
for idx in indices_validation:
    # NHWC -> NCHW: the IR expects channels-first input
    input_data_transposed = input_data[idx:(idx + batch_size)].transpose(
        0, 3, 1, 2)
    start_time = time.time()
    res = exec_net.infer(
        inputs={input_blob: input_data_transposed[:, :n_channels]})
    # Save the predictions to array
    predictions = res[out_blob]
    time_elapsed = time.time() - start_time
    infer_time += time_elapsed
    plotDiceScore(idx, input_data_transposed,
                  label_data[[idx]].transpose(0, 3, 1, 2), predictions, True,
                  round(time_elapsed * 1000))
    progressUpdate(progress_file_path,
                   time.time() - process_time_start, val_id,
                   len(indices_validation))
    val_id += 1

total_time = time.time() - process_time_start
with open(os.path.join(png_directory, 'stats.txt'), 'w') as f:
    f.write(str(round(infer_time, 4)) + '\n')
    f.write(str(val_id - 1) + '\n')  # val_id ends one past the number of processed samples
    f.write("Frames processed per second = {}".format(
        round((val_id - 1) / infer_time)))
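plotDiceScore is defined elsewhere; the Dice coefficient it reports measures overlap between a predicted mask and the ground truth. A standalone sketch of the metric itself (the helper's plotting side is not reproduced here):

import numpy as np

def dice_coefficient(pred, truth, threshold=0.5, eps=1e-7):
    # Dice = 2*|A intersect B| / (|A| + |B|) on binarized masks
    pred_bin = (pred > threshold).astype(np.float32)
    truth_bin = (truth > threshold).astype(np.float32)
    intersection = np.sum(pred_bin * truth_bin)
    return (2.0 * intersection + eps) / (np.sum(pred_bin) + np.sum(truth_bin) + eps)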
Code Example #3
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        log.info("Loading plugins for {} device...".format(args.device))
        plugin.add_cpu_extension(args.cpu_extension)

    # Read IR
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"

    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    if args.input == 'cam':
        input_stream = 0
        out_file_name = 'cam'
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"

    log.info("Loading IR to the plugin...")
    exec_net = plugin.load(network=net,
                           num_requests=args.number_infer_requests)

    log.info(
        "Starting inference in async mode, {} requests in parallel...".format(
            args.number_infer_requests))
    job_id = str(os.environ['PBS_JOBID'])
    result_file = open(
        os.path.join(args.output_dir, 'output_' + job_id + '.txt'), "w")
    pre_infer_file = os.path.join(args.output_dir,
                                  'pre_progress_' + job_id + '.txt')
    infer_file = os.path.join(args.output_dir, 'i_progress_' + job_id + '.txt')
    processed_vid = '/tmp/processed_vid.bin'

    # Read and pre-process input image
    if isinstance(net.inputs[input_blob], list):
        n, c, h, w = net.inputs[input_blob]
    else:
        n, c, h, w = net.inputs[input_blob].shape
    del net

    cap = cv2.VideoCapture(input_stream)
    video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    if video_len < args.number_infer_requests:
        args.number_infer_requests = video_len
    # Pre-inference processing: read the mp4 frame by frame, preprocess with OpenCV, and write to a binary file
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    CHUNKSIZE = n * c * w * h
    id_ = 0
    with open(processed_vid, 'w+b') as f:
        time_start = time.time()
        while cap.isOpened():
            ret, next_frame = cap.read()
            if not ret:
                break
            in_frame = cv2.resize(next_frame, (w, h))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            bin_frame = bytearray(in_frame)
            f.write(bin_frame)
            id_ += 1
            if id_ % 10 == 0:
                progressUpdate(pre_infer_file,
                               time.time() - time_start, id_, video_len)
    cap.release()

    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    log.info("Starting inference in async mode...")
    log.info("To switch between sync and async modes press Tab button")
    log.info("To stop the sample execution press Esc button")

    current_inference = 0
    previous_inference = 1 - args.number_infer_requests
    infer_requests = exec_net.requests
    frame_count = 0

    try:
        infer_time_start = time.time()
        with open(processed_vid, "rb") as data:
            while frame_count < video_len:
                # Read next frame from input stream if available and submit it for inference
                byte = data.read(CHUNKSIZE)
                if byte != b"":
                    deserialized_bytes = np.frombuffer(byte, dtype=np.uint8)
                    in_frame = np.reshape(deserialized_bytes,
                                          newshape=(n, c, h, w))
                    exec_net.start_async(request_id=current_inference,
                                         inputs={input_blob: in_frame})

                # Retrieve the output of an earlier inference request
                if previous_inference >= 0:
                    status = infer_requests[previous_inference].wait()
                    if status != 0:
                        raise Exception(
                            "Infer request not completed successfully")
                    # Parse inference results
                    res = infer_requests[previous_inference].outputs[out_blob]
                    processBoxes(frame_count, res, labels_map,
                                 args.prob_threshold, width, height,
                                 result_file)
                    frame_count += 1

                # Write data to progress tracker
                if frame_count % 10 == 0:
                    progressUpdate(infer_file,
                                   time.time() - infer_time_start,
                                   frame_count + 1, video_len + 1)

                # Increment counter for the inference queue and roll them over if necessary
                current_inference += 1
                if current_inference >= args.number_infer_requests:
                    current_inference = 0

                previous_inference += 1
                if previous_inference >= args.number_infer_requests:
                    previous_inference = 0

        # End while loop
        total_time = time.time() - infer_time_start
        with open(os.path.join(args.output_dir, 'stats_{}.txt'.format(job_id)),
                  'w') as f:
            f.write('{:.3g} \n'.format(total_time))
            f.write('{} \n'.format(frame_count))

        result_file.close()

    finally:
        log.info("Processing done...")
        del exec_net
        del plugin
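The loop above keeps number_infer_requests requests in flight: each frame is submitted to current_inference while results are collected from previous_inference, which starts negative so collection only begins once the pipeline is full. The index arithmetic in isolation, as plain Python with no inference:

# Distilled round-robin indexing from the loop above: submit to `current`,
# collect from `previous`, which lags the submissions by the queue depth.
num_requests = 4
current = 0
previous = 1 - num_requests  # stays negative until the pipeline is full
for frame_id in range(8):
    print("submit frame {} to request {}".format(frame_id, current))
    if previous >= 0:
        print("  collect result from request {}".format(previous))
    current += 1
    if current >= num_requests:
        current = 0
    previous += 1
    if previous >= num_requests:
        previous = 0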
Code Example #4
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    ie = IECore()
    if args.cpu_extension and 'CPU' in args.device:
        log.info("Loading plugins for {} device...".format(args.device))
        ie.add_extension(args.cpu_extension, "CPU")

    # Read IR
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)

    if args.device == "CPU":
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(args.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_net = ie.load_network(network=net,
                               num_requests=2,
                               device_name=args.device)
    # Read and pre-process input image
    if isinstance(net.inputs[input_blob], list):
        n, c, h, w = net.inputs[input_blob]
    else:
        n, c, h, w = net.inputs[input_blob].shape
    del net

    out_file_name = os.path.splitext(os.path.basename(args.input))[0]

    if args.output_dir:
        out_path = os.path.join(args.output_dir, out_file_name + '.mp4')

    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    cur_request_id = 0
    next_request_id = 1

    job_id = os.environ['PBS_JOBID']
    inf_progress_file_path = os.path.join(args.output_dir,
                                          'i_progress_' + str(job_id) + '.txt')
    ren_progress_file_path = os.path.join(args.output_dir,
                                          'v_progress_' + str(job_id) + '.txt')

    is_async_mode = True
    fps_sum = 0
    frame_count = 0
    read_time = 0
    result_list = []
    try:
        # Set up the capture stream
        input_stream = args.input
        cap = cv2.VideoCapture(input_stream)
        if cap.isOpened():
            video_len = args.count
            log.info("Live stream running at {} fps".format(cv2.CAP_PROP_FPS))
            initial_w = cap.get(3)
            initial_h = cap.get(4)
            cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)

            log.info("Starting inference in async mode...")
            infer_time_start = time.time()
            while frame_count < video_len:
                ret, frame = cap.read()
                if not ret:
                    break

                in_frame = cv2.resize(frame, (w, h))
                in_frame = in_frame.transpose(
                    (2, 0, 1))  # Change data layout from HWC to CHW
                in_frame = in_frame.reshape((n, c, h, w))
                # Main sync point:
                # in the truly Async mode we start the NEXT infer request, while waiting for the CURRENT to complete
                # in the regular mode we start the CURRENT request and immediately wait for its completion
                inf_start = time.time()
                if is_async_mode:
                    exec_net.start_async(request_id=next_request_id,
                                         inputs={input_blob: in_frame})
                else:
                    exec_net.start_async(request_id=cur_request_id,
                                         inputs={input_blob: in_frame})

                read_time_0 = time.time()
                if exec_net.requests[cur_request_id].wait(-1) == 0:
                    inf_end = time.time()
                    det_time = inf_end - inf_start
                    # Parse detection results of the current request
                    res = exec_net.requests[cur_request_id].outputs[out_blob]
                    processBoxes(frame_count, res, labels_map,
                                 args.prob_threshold, frame, result_list,
                                 det_time)
                read_time += time.time() - read_time_0

                frame_count += 1
                # Write data to progress tracker
                if frame_count % 10 == 0:
                    progressUpdate(inf_progress_file_path,
                                   time.time() - infer_time_start, frame_count,
                                   video_len)

                key = cv2.waitKey(1)
                if key == 27:
                    break
                if key == 9:  # Tab
                    is_async_mode = not is_async_mode
                    log.info("Switched to {} mode".format(
                        "async" if is_async_mode else "sync"))
                if is_async_mode:
                    cur_request_id, next_request_id = next_request_id, cur_request_id

            # End while loop
            cap.release()
            log.info("{} seconds were spent reading".format(read_time))

        if args.output_dir is None:
            cv2.destroyAllWindows()
        else:
            total_time = time.time() - infer_time_start
            with open(
                    os.path.join(args.output_dir,
                                 'stats_' + str(job_id) + '.txt'), 'w') as f:
                f.write(str(round(total_time, 1)) + '\n')
                f.write(str(frame_count) + '\n')

        o_video = os.path.join(args.output_dir,
                               'output_' + str(job_id) + '.mp4')
        post_process_t = time.time()
        postProcess(result_list, int(initial_w), int(initial_h), labels_map,
                    o_video, is_async_mode, ren_progress_file_path)
        log.info("Post processing time: {0} sec".format(time.time() -
                                                        post_process_t))

    finally:
        del exec_net
        del ie
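This example (and Example #5) uses the two-request ping-pong variant of the async pattern: each frame is submitted to next_request_id while the code waits on cur_request_id, and the two IDs swap every iteration. The swap logic in isolation:

# Two-request ping-pong from the loop above: in async mode the IDs swap each
# frame, so every request is collected one frame after it was submitted.
cur_request_id, next_request_id = 0, 1
is_async_mode = True
for frame_id in range(6):
    submit_to = next_request_id if is_async_mode else cur_request_id
    print("frame {}: submit to request {}, wait on request {}".format(
        frame_id, submit_to, cur_request_id))
    if is_async_mode:
        cur_request_id, next_request_id = next_request_id, cur_request_id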
Code Example #5
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        log.info("Loading plugins for {} device...".format(args.device))
        plugin.add_cpu_extension(args.cpu_extension)

    # Read IR
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_net = plugin.load(network=net, num_requests=2)
    # Read and pre-process input image
    if isinstance(net.inputs[input_blob], list):
        n, c, h, w = net.inputs[input_blob]
    else:
        n, c, h, w = net.inputs[input_blob].shape
    del net
    if args.input == 'cam':
        input_stream = 0
        out_file_name = 'cam'
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"
        out_file_name = os.path.splitext(os.path.basename(args.input))[0]

    if args.output_dir:
        out_path = os.path.join(args.output_dir, out_file_name + '.mp4')

    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    cap = cv2.VideoCapture(input_stream)
    video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    cur_request_id = 0
    next_request_id = 1

    log.info("Starting inference in async mode...")
    log.info("To switch between sync and async modes press Tab button")
    log.info("To stop the sample execution press Esc button")
    job_id = os.environ['PBS_JOBID']
    result_file = open(
        os.path.join(args.output_dir, 'output_' + str(job_id) + '.txt'), "w")
    progress_file_path = os.path.join(args.output_dir,
                                      'i_progress_' + str(job_id) + '.txt')

    is_async_mode = True
    render_time = 0
    fps_sum = 0
    frame_count = 0
    inf_list = []
    res_list = []
    try:
        infer_time_start = time.time()
        while cap.isOpened():
            read_time = time.time()
            ret, frame = cap.read()
            if not ret:
                break
            initial_w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
            initial_h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

            in_frame = cv2.resize(frame, (w, h))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            # Main sync point:
            # in the truly Async mode we start the NEXT infer request, while waiting for the CURRENT to complete
            # in the regular mode we start the CURRENT request and immediately wait for its completion
            inf_start = time.time()
            if is_async_mode:
                exec_net.start_async(request_id=next_request_id,
                                     inputs={input_blob: in_frame})
            else:
                exec_net.start_async(request_id=cur_request_id,
                                     inputs={input_blob: in_frame})

            if exec_net.requests[cur_request_id].wait(-1) == 0:
                inf_end = time.time()
                det_time = inf_end - inf_start
                # Parse detection results of the current request
                res = exec_net.requests[cur_request_id].outputs[out_blob]
                processBoxes(frame_count, res, labels_map, args.prob_threshold,
                             frame, initial_w, initial_h, result_file,
                             det_time)

            frame_count += 1
            # Write data to progress tracker
            if frame_count % 10 == 0:
                progressUpdate(progress_file_path,
                               time.time() - infer_time_start, frame_count,
                               video_len)

            key = cv2.waitKey(1)
            if key == 27:
                break
            if key == 9:  # Tab
                is_async_mode = not is_async_mode
                log.info("Switched to {} mode".format(
                    "async" if is_async_mode else "sync"))
            if is_async_mode:
                cur_request_id, next_request_id = next_request_id, cur_request_id

        # End while loop
        cap.release()
        result_file.close()

        if args.output_dir is None:
            cv2.destroyAllWindows()
        else:
            total_time = time.time() - infer_time_start
            with open(
                    os.path.join(args.output_dir,
                                 'stats_' + str(job_id) + '.txt'), 'w') as f:
                f.write(str(round(total_time, 1)) + '\n')
                f.write(str(frame_count) + '\n')

    finally:
        del exec_net
        del plugin
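Every example preprocesses frames the same way before inference: resize to the network's input size, transpose the layout from HWC to CHW, and add a batch dimension. The layout change in isolation, using a synthetic frame:

import numpy as np
import cv2

n, c, h, w = 1, 3, 416, 416                      # a typical NCHW input shape
frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a decoded BGR frame
in_frame = cv2.resize(frame, (w, h))             # (h, w, 3) after resize
in_frame = in_frame.transpose((2, 0, 1))         # HWC -> CHW
in_frame = in_frame.reshape((n, c, h, w))        # add the batch dimension
print(in_frame.shape)                            # (1, 3, 416, 416)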
Code Example #6
def main():
    """
    Load the network and parse the output.
    :return: None
    """
    global INFO
    global DELAY
    global POSE_CHECKED
    controller = MouseController()

    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = args_parser().parse_args()
    logger = log.getLogger()

    if args.input == 'cam':
        input_stream = 0
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"

    cap = cv2.VideoCapture(input_stream)
    initial_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    initial_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    out = cv2.VideoWriter(os.path.join(args.output_dir, "shopper.mp4"),
                          cv2.VideoWriter_fourcc(*"MP4V"), fps,
                          (initial_width, initial_height), True)
    frame_count = 0

    job_id = 1
    progress_file_path = os.path.join(args.output_dir,
                                      'i_progress_' + str(job_id) + '.txt')

    infer_time_start = time.time()

    if input_stream:
        cap.open(args.input)
        # Adjust DELAY to match the number of FPS of the video file
        DELAY = 1000 / cap.get(cv2.CAP_PROP_FPS)

    if not cap.isOpened():
        logger.error("ERROR! Unable to open video source")
        return

    # Initialise the class
    if args.cpu_extension:
        facedet = FaceDetection(args.facemodel,
                                args.confidence,
                                extensions=args.cpu_extension)
        posest = HeadPoseEstimation(args.posemodel,
                                    args.confidence,
                                    extensions=args.cpu_extension)
        landest = FaceLandmarksDetection(args.landmarksmodel,
                                         args.confidence,
                                         extensions=args.cpu_extension)
        gazeest = GazeEstimation(args.gazemodel,
                                 args.confidence,
                                 extensions=args.cpu_extension)
    else:
        facedet = FaceDetection(args.facemodel, args.confidence)
        posest = HeadPoseEstimation(args.posemodel, args.confidence)
        landest = FaceLandmarksDetection(args.landmarksmodel, args.confidence)
        gazeest = GazeEstimation(args.gazemodel, args.confidence)

    # Load the network to IE plugin to get shape of input layer

    facedet.load_model()
    posest.load_model()
    landest.load_model()
    gazeest.load_model()
    print("loaded models")
    ret, frame = cap.read()
    while ret:
        looking = 0
        POSE_CHECKED = False
        ret, frame = cap.read()
        frame_count += 1
        if not ret:
            log.info("No more frames to read")
            break

        if frame is None:
            log.error("Blank frame grabbed")
            break

        initial_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        initial_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # Start asynchronous inference for specified request
        inf_start_fd = time.time()
        # Results of the output layer of the network
        coords, frame = facedet.predict(frame)
        if args.visualization == "fm":
            cv2.startWindowThread()
            cv2.namedWindow("preview")
            cv2.imshow("preview", frame)
        det_time_fd = time.time() - inf_start_fd
        if len(coords) > 0:
            [xmin, ymin, xmax,
             ymax] = coords[0]  # use only the first detected face
            head_pose = frame[ymin:ymax, xmin:xmax]
            inf_start_hp = time.time()
            is_looking, pose_angles = posest.predict(head_pose)
            if args.visualization == "pm":
                cv2.startWindowThread()
                cv2.namedWindow("preview")
                p = "Pose Angles {}, is Looking? {}".format(
                    pose_angles, is_looking)
                cv2.putText(frame, p, (50, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5,
                            (255, 255, 255), 1)
                cv2.imshow("preview", frame)

            if is_looking:
                det_time_hp = time.time() - inf_start_hp
                POSE_CHECKED = True
                inf_start_lm = time.time()
                coords, f = landest.predict(head_pose)
                if args.visualization == "lm":
                    cv2.startWindowThread()
                    cv2.namedWindow("preview")
                    cv2.imshow("preview", f)

                frame[ymin:ymax, xmin:xmax] = f
                det_time_lm = time.time() - inf_start_lm
                [[xlmin, ylmin, xlmax, ylmax], [xrmin, yrmin, xrmax,
                                                yrmax]] = coords
                left_eye_image = f[ylmin:ylmax, xlmin:xlmax]
                right_eye_image = f[yrmin:yrmax, xrmin:xrmax]
                output, gaze_vector = gazeest.predict(left_eye_image,
                                                      right_eye_image,
                                                      pose_angles)
                if args.visualization == "gm":
                    cv2.startWindowThread()
                    cv2.namedWindow("preview")
                    p = "Gaze Vector {}".format(gaze_vector)
                    cv2.putText(frame, p, (50, 15), cv2.FONT_HERSHEY_COMPLEX,
                                0.5, (255, 255, 255), 1)
                    fl = draw_gaze(left_eye_image, gaze_vector)
                    fr = draw_gaze(right_eye_image, gaze_vector)
                    f[ylmin:ylmax, xlmin:xlmax] = fl
                    f[yrmin:yrmax, xrmin:xrmax] = fr
                    cv2.imshow("preview", f)

                if frame_count % 10 == 0:
                    controller.move(output[0], output[1])
        # Draw performance stats
        inf_time_message = "Face Inference time: {:.3f} ms.".format(
            det_time_fd * 1000)
        if POSE_CHECKED:
            cv2.putText(
                frame, "Head pose Inference time: {:.3f} ms.".format(
                    det_time_hp * 1000), (0, 35), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (255, 255, 255), 1)
            cv2.putText(frame, inf_time_message, (0, 15),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)
        out.write(frame)
        if frame_count % 10 == 0:
            demoutils.progressUpdate(progress_file_path,
                                     int(time.time() - infer_time_start),
                                     frame_count, video_len)
        if args.output_dir:
            total_time = time.time() - infer_time_start
            with open(os.path.join(args.output_dir, 'stats.txt'), 'w') as f:
                f.write(str(round(total_time, 1)) + '\n')
                f.write(str(frame_count) + '\n')
    facedet.clean()
    posest.clean()
    landest.clean()
    gazeest.clean()
    cap.release()
    cv2.destroyAllWindows()
    out.release()
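These listings pick the output codec for cv2.VideoWriter three different ways: a raw integer tag 0x00000021 (Example #1), "MP4V" (here), and "AVC1" (Example #7). A short sketch showing that the named forms are just four ASCII characters packed into a 32-bit tag; the note on 0x00000021 reflects how these samples use it rather than an official constant:

import cv2

# VideoWriter_fourcc packs four ASCII characters into a 32-bit integer tag
print(cv2.VideoWriter_fourcc(*"MP4V"))   # MPEG-4 Part 2
print(cv2.VideoWriter_fourcc(*"AVC1"))   # H.264/AVC
# Example #1 passes the raw tag 0x00000021 instead, which these samples use to
# request H.264 output from the FFmpeg backend.
writer = cv2.VideoWriter("out.mp4", cv2.VideoWriter_fourcc(*"MP4V"),
                         30.0, (640, 480), True)
writer.release()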
Code Example #7
def main():
    # Plugin initialization for specified device and load extensions library
    global rolling_log

    env_parser()
    args_parser()
    check_args()
    parse_conf_file()

    print("Initializing plugin for {} device...".format(TARGET_DEVICE))
    ie = IECore()
    if CPU_EXTENSION and TARGET_DEVICE == 'CPU':
        ie.add_extension(CPU_EXTENSION, "CPU")

    # Read IR
    print("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    # Load the IR
    print("Loading IR to the plugin...")
    exec_net = ie.load_network(network=net,
                               num_requests=2,
                               device_name=TARGET_DEVICE)
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    del net

    minFPS = min([i.cap.get(cv2.CAP_PROP_FPS) for i in videoCaps])
    minlength = min([i.cap.get(cv2.CAP_PROP_FRAME_COUNT) for i in videoCaps])
    for vc in videoCaps:
        vc.rate = int(math.ceil(vc.length / minlength))
    print("Minimum FPS across inputs: {}".format(minFPS))
    waitTime = int(
        round(1000 / minFPS /
              len(videoCaps)))  # wait time in ms between showing frames
    frames_sum = 0
    for vc in videoCaps:
        vc.init_vw(h, w, minFPS)
        frames_sum += vc.length
    statsWidth = w if w > 345 else 345
    statsHeight = h if h > (len(videoCaps) * 20 + 15) else (
        len(videoCaps) * 20 + 15)
    statsVideo = cv2.VideoWriter(os.path.join(output_dir, 'Statistics.mp4'),
                                 cv2.VideoWriter_fourcc(*"AVC1"), minFPS,
                                 (statsWidth, statsHeight), True)
    if not statsVideo.isOpened():
        print("Couldn't open stats video for writing")
        sys.exit(4)

    # Read the labels file
    if labels_file:
        with open(labels_file, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    # Init a rolling log to store events
    rolling_log_size = int((h - 15) / 20)
    rolling_log = collections.deque(maxlen=rolling_log_size)

    # Init inference request IDs
    cur_request_id = 0
    next_request_id = 1
    # Start with async mode enabled
    is_async_mode = True

    if not UI_OUTPUT:
        # Arrange windows so they are not overlapping
        #arrange_windows(w, h)
        print("To stop the execution press Esc button")

    no_more_data = False

    for vc in videoCaps:
        vc.start_time = datetime.datetime.now()
    frame_count = 0
    job_id = os.environ['PBS_JOBID']
    progress_file_path = os.path.join(output_dir,
                                      'i_progress_' + job_id + '.txt')
    infer_start_time = time.time()

    #Start while loop
    while True:
        # If all video captures are closed stop the loop
        if False not in [videoCap.closed for videoCap in videoCaps]:
            print("All video captures are closed, stopping")
            break

        no_more_data = False

        # loop over all video captures
        for idx, videoCapInfer in enumerate(videoCaps):
            # read the next frame
            if not videoCapInfer.closed:
                vfps = int(round(videoCapInfer.cap.get(cv2.CAP_PROP_FPS)))
                for i in range(videoCapInfer.rate):
                    frame_count += 1
                    ret, frame = videoCapInfer.cap.read()
                    videoCapInfer.cur_frame_count += 1
                    # If the read failed close the program
                    if not ret:
                        videoCapInfer.closed = True
                        no_more_data = True
                        break

                if videoCapInfer.closed:
                    print("Video {0} is done".format(idx))
                    print("Video has  {0} frames ".format(
                        videoCapInfer.length))
                    break

                # Copy the current frame for later use
                videoCapInfer.cur_frame = frame.copy()
                videoCapInfer.initial_w = videoCapInfer.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
                videoCapInfer.initial_h = videoCapInfer.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
                # Resize and change the data layout so it is compatible
                in_frame = cv2.resize(videoCapInfer.cur_frame, (w, h))
                in_frame = in_frame.transpose(
                    (2, 0, 1))  # Change data layout from HWC to CHW
                in_frame = in_frame.reshape((n, c, h, w))

                infer_start = datetime.datetime.now()
                if is_async_mode:
                    exec_net.start_async(request_id=next_request_id,
                                         inputs={input_blob: in_frame})
                    # Async enabled and only one video capture
                    if (len(videoCaps) == 1):
                        videoCapResult = videoCapInfer
                    # Async enabled and more than one video capture
                    else:
                        # Get previous index
                        videoCapResult = videoCaps[idx - 1 if idx -
                                                   1 >= 0 else len(videoCaps) -
                                                   1]
                else:
                    # Async disabled
                    exec_net.start_async(request_id=cur_request_id,
                                         inputs={input_blob: in_frame})
                    videoCapResult = videoCapInfer

                if exec_net.requests[cur_request_id].wait(-1) == 0:
                    infer_end = datetime.datetime.now()
                    infer_duration = infer_end - infer_start
                    current_count = 0
                    # Parse detection results of the current request
                    res = exec_net.requests[cur_request_id].outputs[out_blob]
                    for obj in res[0][0]:
                        class_id = int(obj[1])
                        # Draw only objects when probability more than specified threshold
                        if (obj[2] > PROB_THRESHOLD
                                and videoCapResult.req_label in labels_map
                                and labels_map.index(
                                    videoCapResult.req_label) == class_id - 1):
                            current_count += 1
                            xmin = int(obj[3] * videoCapResult.initial_w)
                            ymin = int(obj[4] * videoCapResult.initial_h)
                            xmax = int(obj[5] * videoCapResult.initial_w)
                            ymax = int(obj[6] * videoCapResult.initial_h)
                            # Draw box
                            cv2.rectangle(videoCapResult.cur_frame,
                                          (xmin, ymin), (xmax, ymax),
                                          (0, 255, 0), 4, 16)

                    if videoCapResult.candidate_count == current_count:
                        videoCapResult.candidate_confidence += 1
                    else:
                        videoCapResult.candidate_confidence = 0
                        videoCapResult.candidate_count = current_count

                    if videoCapResult.candidate_confidence == FRAME_THRESHOLD:
                        videoCapResult.candidate_confidence = 0
                        if current_count > videoCapResult.last_correct_count:
                            videoCapResult.total_count += current_count - videoCapResult.last_correct_count

                        if current_count != videoCapResult.last_correct_count:
                            if UI_OUTPUT:
                                currtime = datetime.datetime.now().strftime(
                                    "%H:%M:%S")
                                fr = FrameInfo(videoCapResult.frames,
                                               current_count, currtime)
                                videoCapResult.countAtFrame.append(fr)
                            new_objects = current_count - videoCapResult.last_correct_count
                            for _ in range(new_objects):
                                # Avoid shadowing the built-in str
                                log_entry = "{} - {} detected on {}".format(
                                    time.strftime("%H:%M:%S"),
                                    videoCapResult.req_label,
                                    videoCapResult.cap_name)
                                rolling_log.append(log_entry)

                        videoCapResult.frames += 1
                        videoCapResult.last_correct_count = current_count
                    else:
                        videoCapResult.frames += 1

                    videoCapResult.cur_frame = cv2.resize(
                        videoCapResult.cur_frame, (w, h))

                    if not UI_OUTPUT:
                        # Add log text to each frame
                        log_message = "Async mode is on." if is_async_mode else \
                                      "Async mode is off."
                        cv2.putText(videoCapResult.cur_frame, log_message,
                                    (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                    (255, 255, 255), 1)
                        log_message = "Total {} count: {}".format(
                            videoCapResult.req_label,
                            videoCapResult.total_count)
                        cv2.putText(videoCapResult.cur_frame, log_message,
                                    (10, h - 10), cv2.FONT_HERSHEY_SIMPLEX,
                                    0.5, (255, 255, 255), 1)
                        log_message = "Current {} count: {}".format(
                            videoCapResult.req_label,
                            videoCapResult.last_correct_count)
                        cv2.putText(videoCapResult.cur_frame, log_message,
                                    (10, h - 30), cv2.FONT_HERSHEY_SIMPLEX,
                                    0.5, (255, 255, 255), 1)
                        cv2.putText(
                            videoCapResult.cur_frame, 'Infer wait: %0.3fs' %
                            (infer_duration.total_seconds()), (10, h - 70),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

                        # Display inferred frame and stats
                        stats = numpy.zeros((statsHeight, statsWidth, 1),
                                            dtype='uint8')
                        for i, log in enumerate(rolling_log):
                            cv2.putText(stats, log, (10, i * 20 + 15),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                        (255, 255, 255), 1)
                        #cv2.imshow(STATS_WINDOW_NAME, stats)
                        if idx == 0:
                            stats = cv2.cvtColor(stats, cv2.COLOR_GRAY2BGR)
                            # Write the stats frame
                            statsVideo.write(stats)
                        end_time = datetime.datetime.now()
                        cv2.putText(
                            videoCapResult.cur_frame, 'FPS: %0.2fs' %
                            (1 / (end_time -
                                  videoCapResult.start_time).total_seconds()),
                            (10, h - 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (255, 255, 255), 1)
                        #cv2.imshow(videoCapResult.cap_name, videoCapResult.cur_frame)
                        videoCapResult.start_time = datetime.datetime.now()
                        # Write the annotated frame
                        videoCapResult.video.write(videoCapResult.cur_frame)

            if frame_count % 10 == 0:
                progressUpdate(progress_file_path,
                               time.time() - infer_start_time, frame_count,
                               frames_sum)

            # Wait if necessary for the required time
            #key = cv2.waitKey(waitTime)
            key = cv2.waitKey(1)

            # Esc key pressed
            if key == 27:
                cv2.destroyAllWindows()
                del exec_net
                del ie
                print("Finished")
                return
            # Tab key pressed
            if key == 9:
                is_async_mode = not is_async_mode
                print("Switched to {} mode".format(
                    "async" if is_async_mode else "sync"))

            if is_async_mode:
                # Swap infer request IDs
                cur_request_id, next_request_id = next_request_id, cur_request_id

            # Loop video if LOOP_VIDEO = True and input isn't live from USB camera
            if LOOP_VIDEO and not videoCapInfer.is_cam:
                vfps = int(round(videoCapInfer.cap.get(cv2.CAP_PROP_FPS)))
                # If a video capture has ended restart it
                if (videoCapInfer.cur_frame_count >
                        videoCapInfer.cap.get(cv2.CAP_PROP_FRAME_COUNT) -
                        int(round(vfps / minFPS))):
                    videoCapInfer.cur_frame_count = 0
                    videoCapInfer.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)

        if no_more_data:
            progressUpdate(progress_file_path,
                           time.time() - infer_start_time, frames_sum,
                           frames_sum)
            break


    # End of while loop
    t2 = time.time() - infer_start_time
    for videos in videoCaps:
        print(videos.length)
        print(videos.closed)
    print("End loop")
    print("Total time {0}".format(t2))
    print("Total frame count {0}".format(frame_count))
    print("fps {0}".format(frame_count / t2))
    with open(os.path.join(output_dir, 'stats.txt'), 'w') as f:
        f.write('{} \n'.format(round(t2)))
        f.write('{} \n'.format(frame_count))

    for vc in videoCaps:
        print("Frames processed {}".format(vc.cur_frame_count))
        print("Frames count {}".format(vc.length))

    for vc in videoCaps:
        vc.video.release()
        vc.cap.release()
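Example #7 keeps several input videos roughly in step by reading vc.rate frames from each capture per round, where the rate is the ratio of a stream's frame count to the shortest stream's. The arithmetic in isolation, with hypothetical frame counts:

import math

lengths = [900, 450, 1800]  # hypothetical frame counts for three inputs
minlength = min(lengths)
rates = [int(math.ceil(length / minlength)) for length in lengths]
print(rates)  # [2, 1, 4]: longer videos are read faster so all finish together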
Code Example #8
def predict(img_no, plot_result=False):
    # The original listing is truncated above this point; the selection of img
    # and msk for img_no is an assumed reconstruction, not the verified source.
    img = input_data[[img_no]]
    msk = label_data[[img_no]]
    start_time = time.time()
    pred_mask = model.predict(img, verbose=0, steps=None)
    end_time = (time.time() - start_time) * 1000  # inference time in ms
    print(end_time)
    plotDiceScore(img_no, img, msk, pred_mask, plot_result, round(end_time))
    return end_time

indices_validation = [40, 63, 43, 55, 99, 101, 19, 46]  # [40]
val_id = 1
infer_time_start = time.time()
progress_file_path = os.path.join(png_directory, "i_progress.txt")
infer_time = 0
for idx in indices_validation:
    infer_time_idx = predict(idx, plot_result=True)
    # Skip the first two runs so warm-up overhead is excluded from the timing
    if val_id > 2:
        infer_time += infer_time_idx
    progressUpdate(progress_file_path, time.time() - infer_time_start, val_id,
                   len(indices_validation))
    val_id += 1


total_time = time.time() - infer_time_start
with open(os.path.join(png_directory, 'stats.txt'), 'w') as f:
    f.write(str(round(infer_time / 1000, 4)) + '\n')
    f.write(str(val_id - 2) + '\n')  # number of timed runs (warm-up excluded)
    f.write("Frames processed per second = {} ".format(
        round((val_id - 2) / (infer_time / 1000))))