Code Example #1
def postProcess(result_list,
                width,
                height,
                labels_map,
                out_path,
                is_async_mode,
                ren_progress_file_path=None):
    post_process_t = time.time()
    # FourCC "AVC1" selects H.264/AVC output; the numeric FourCC 0x00000021 is sometimes used as an alternative
    vw = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"AVC1"), 30.0,
                         (width, height), True)
    for i, (frame, res) in enumerate(result_list):
        if len(res) > 0:
            placeBoxes(res, labels_map, frame, is_async_mode)
        vw.write(frame)
        if ren_progress_file_path is not None:
            progressUpdate(ren_progress_file_path,
                           time.time() - post_process_t, i + 1,
                           len(result_list))
    vw.release()
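
The progressUpdate helper that every example on this page calls is not shown here. Judging from the call sites, its signature is progressUpdate(file_path, elapsed_seconds, current_item, total_items). The following is only a hypothetical sketch of what such a helper might do; the file format and the atomic-rename detail are assumptions, not the actual qarpo implementation:

import os

def progressUpdate(file_path, elapsed_time, current, total):
    # Hypothetical sketch: write percent complete, elapsed time, and an ETA
    # to a small text file that a dashboard process can poll.
    remaining = elapsed_time / current * (total - current) if current else 0.0
    progress = round(100.0 * current / total, 1)
    tmp_path = file_path + '.tmp'
    with open(tmp_path, 'w') as f:
        f.write('{:.1f}\n{:.1f}\n{:.1f}\n'.format(progress, elapsed_time, remaining))
    os.rename(tmp_path, file_path)  # rename so readers never see a half-written file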
Code Example #2
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    job_id = str(os.environ['PBS_JOBID'])
    job_id = job_id.rstrip().split('.')[0]

    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        log.info("Loading plugins for {} device...".format(args.device))
        plugin.add_cpu_extension(args.cpu_extension)

    # Read IR
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            layer for layer in net.layers.keys() if layer not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"

    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    if args.input == 'cam':
        input_stream = 0
        out_file_name = 'cam'
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"

    log.info("Loading IR to the plugin...")
    exec_net = plugin.load(network=net,
                           num_requests=args.number_infer_requests)

    log.info(
        "Starting inference in async mode, {} requests in parallel...".format(
            args.number_infer_requests))
    result_file = open(os.path.join(args.output_dir, job_id, 'output.txt'),
                       "w")
    pre_infer_file = os.path.join(args.output_dir, job_id, 'pre_progress.txt')
    infer_file = os.path.join(args.output_dir, job_id, 'i_progress.txt')
    processed_vid = '/tmp/processed_vid.bin'

    # Read and pre-process input image
    if isinstance(net.inputs[input_blob], list):
        n, c, h, w = net.inputs[input_blob]
    else:
        n, c, h, w = net.inputs[input_blob].shape
    del net

    cap = cv2.VideoCapture(input_stream)
    video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    if video_len < args.number_infer_requests:
        args.number_infer_requests = video_len
    # Pre-inference processing: read the mp4 frame by frame, process with OpenCV, and write to a binary file
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    CHUNKSIZE = n * c * w * h
    id_ = 0
    with open(processed_vid, 'w+b') as f:
        time_start = time.time()
        while cap.isOpened():
            ret, next_frame = cap.read()
            if not ret:
                break
            in_frame = cv2.resize(next_frame, (w, h))
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            bin_frame = bytearray(in_frame)
            f.write(bin_frame)
            id_ += 1
            if id_ % 10 == 0:
                progressUpdate(pre_infer_file,
                               time.time() - time_start, id_, video_len)
    cap.release()

    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    log.info("Starting inference in async mode...")
    log.info("To switch between sync and async modes press Tab button")
    log.info("To stop the sample execution press Esc button")

    current_inference = 0
    previous_inference = 1 - args.number_infer_requests
    infer_requests = exec_net.requests
    frame_count = 0

    try:
        infer_time_start = time.time()
        with open(processed_vid, "rb") as data:
            while frame_count < video_len:
                # Read next frame from input stream if available and submit it for inference
                byte = data.read(CHUNKSIZE)
                if byte != b"":
                    deserialized_bytes = np.frombuffer(byte, dtype=np.uint8)
                    in_frame = np.reshape(deserialized_bytes,
                                          newshape=(n, c, h, w))
                    exec_net.start_async(request_id=current_inference,
                                         inputs={input_blob: in_frame})

                # Retrieve the output of an earlier inference request
                if previous_inference >= 0:
                    status = infer_requests[previous_inference].wait()
                    if status != 0:
                        raise Exception(
                            "Infer request not completed successfully")
                    # Parse inference results
                    res = infer_requests[previous_inference].outputs[out_blob]
                    processBoxes(frame_count, res, labels_map,
                                 args.prob_threshold, width, height,
                                 result_file)
                    frame_count += 1

                # Write data to progress tracker
                if frame_count % 10 == 0:
                    progressUpdate(infer_file,
                                   time.time() - infer_time_start,
                                   frame_count + 1, video_len + 1)

                # Increment counter for the inference queue and roll them over if necessary
                current_inference += 1
                if current_inference >= args.number_infer_requests:
                    current_inference = 0

                previous_inference += 1
                if previous_inference >= args.number_infer_requests:
                    previous_inference = 0
            progressUpdate(infer_file,
                           time.time() - infer_time_start, frame_count + 1,
                           video_len + 1)

        # End while loop
        total_time = round(time.time() - infer_time_start, 2)
        stats = {}
        stats['time'] = str(total_time)
        stats['frames'] = str(frame_count)
        stats['fps'] = str(round(frame_count / total_time, 2))
        with open(os.path.join(args.output_dir, job_id, 'stats.json'),
                  'w') as json_file:
            json.dump(stats, json_file)

        result_file.close()

    finally:
        log.info("Processing done...")
        del exec_net
        del plugin
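
Example #2 overlaps frame reading and inference with a fixed-size ring of inference requests: current_inference is the slot being submitted, while previous_inference trails it by number_infer_requests slots and stays negative until the first results are ready. A stripped-down sketch of just that rotation, with illustrative values and the start_async/wait calls elided:

num_requests = 4  # illustrative stand-in for args.number_infer_requests
current_inference = 0
previous_inference = 1 - num_requests  # negative until the pipeline fills up

for frame_id in range(12):
    # submit frame_id on slot current_inference here (exec_net.start_async)
    if previous_inference >= 0:
        pass  # slot previous_inference now holds the oldest in-flight result
    current_inference = (current_inference + 1) % num_requests
    previous_inference += 1
    if previous_inference >= num_requests:
        previous_inference = 0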
Code Example #3
def main():
    """
    Load the network and parse the SSD output.

    :return: None
    """

    args = build_argparser().parse_args()

    # Flag for the input image
    single_image_mode = False
    cur_request_id = 0
    last_count = 0
    total_count = 0
    start_time = 0

    # Initialise the class
    infer_network = Network()
    # Load the network to IE plugin to get shape of input layer
    n, c, h, w = infer_network.load_model(args.model, args.device, 1, 1,
                                          cur_request_id, args.cpu_extension)
    # Checks for live feed
    #if args.input == 'CAM':
    #input_stream = 0

    # Checks for input image
    if args.input.endswith('.jpg') or args.input.endswith('.bmp'):
        single_image_mode = True
        input_stream = args.input

    # Checks for video file
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"

    cap = cv2.VideoCapture(input_stream)
    video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    frame_count = 0
    job_id = os.environ['PBS_JOBID']
    job_id = job_id.rstrip().split('.')[0]
    progress_file_path = os.path.join(args.output_dir, str(job_id),
                                      'i_progress.txt')
    infer_time_start = time.time()
    if input_stream:
        cap.open(args.input)

    if not cap.isOpened():
        log.error("ERROR! Unable to open video source")
    global initial_w, initial_h, prob_threshold
    prob_threshold = args.prob_threshold
    initial_w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    initial_h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    people_counter = cv2.VideoWriter(
        os.path.join(args.output_dir, str(job_id), "people_counter.mp4"),
        cv2.VideoWriter_fourcc(*"AVC1"), fps, (int(initial_w), int(initial_h)),
        True)
    while cap.isOpened():
        flag, frame = cap.read()
        frame_count += 1
        if not flag:
            break
        # Start async inference
        image = cv2.resize(frame, (w, h))
        # Change data layout from HWC to CHW
        image = image.transpose((2, 0, 1))
        image = image.reshape((n, c, h, w))
        # Start asynchronous inference for specified request.
        inf_start = time.time()
        infer_network.exec_net(cur_request_id, image)
        # Wait for the result
        if infer_network.wait(cur_request_id) == 0:
            det_time = time.time() - inf_start
            # Results of the output layer of the network
            result = infer_network.get_output(cur_request_id)
            if args.perf_counts:
                perf_count = infer_network.performance_counter(cur_request_id)
                performance_counts(perf_count)

            frame, current_count = ssd_out(frame, result)
            inf_time_message = "Inference time: {:.3f}ms"\
                               .format(det_time * 1000)
            cv2.putText(frame, inf_time_message, (15, 15),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            current_count_message = "Current count: {}"\
                                     .format(current_count)
            cv2.putText(frame, current_count_message, (15, 30),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)

            last_count = current_count

            people_counter.write(frame)
        if frame_count % 10 == 0 or frame_count % video_len == 0:
            print("frame_count: {}, video_len: {}".format(
                frame_count, video_len))
            progressUpdate(progress_file_path,
                           int(time.time() - infer_time_start), frame_count,
                           video_len)

        if single_image_mode:
            cv2.imwrite('output_image.jpg', frame)
    if args.output_dir:
        total_time = round(time.time() - infer_time_start, 2)
        stats = {}
        stats['time'] = str(total_time)
        stats['frames'] = str(frame_count)
        stats['fps'] = str(round(frame_count / total_time, 2))
        with open(os.path.join(args.output_dir, str(job_id), 'stats.json'),
                  'w') as f:
            json.dump(stats, f)
    cap.release()
    infer_network.clean()
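
Examples #2, #3, and most of the ones below repeat the same three-step preprocessing before inference: resize to the network's spatial size, reorder HWC to CHW, and prepend the batch axis. Factored out as a standalone helper (the function name is illustrative, not from these samples):

import cv2

def preprocess(frame, n, c, h, w):
    # Resize a BGR frame to the network input size and convert HWC -> NCHW.
    image = cv2.resize(frame, (w, h))   # (h, w, c)
    image = image.transpose((2, 0, 1))  # (c, h, w): channels first
    return image.reshape((n, c, h, w))  # prepend the batch dimension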
Code Example #4
    else:
        start_time = time.time()
        pred_mask = model.predict(img, verbose=0, steps=None)
        #print ("Time for prediction TF: ", '\033[1m %.0f \033[0m'%((time.time()-start_time)*1000),"ms")
        end_time = (time.time() - start_time) * 1000
        print(end_time)
    plotDiceScore(img_no, img, msk, pred_mask, plot_result, round(end_time))
    return end_time

indicies_validation = [40, 63, 43, 55, 99, 101, 19, 46] #[40]
val_id = 1
infer_time_start = time.time()
progress_file_path = os.path.join(png_directory, "i_progress.txt")
infer_time = 0
for idx in indicies_validation:
    infer_time_idx = predict(idx, plot_result=True)
    if val_id > 2:
        # accumulate timing only after the first two predictions (likely to exclude warm-up)
        infer_time += infer_time_idx
    #print((time.time()-infer_time_start)*1000/val_id)
    progressUpdate(progress_file_path, time.time() - infer_time_start, val_id, len(indicies_validation) - 1)
    val_id += 1


total_time = round(time.time() - infer_time_start, 2)
stats = {}
stats['time'] = str(total_time)
stats['frames'] = str(val_id - 2)
stats['fps'] = str(round((val_id - 2) / total_time, 2))
with open(os.path.join(png_directory, 'stats.json'), 'w') as json_file:
    json.dump(stats, json_file)
Code Example #5
def main():
    colormap = 'viridis'
    job_id = os.environ['PBS_JOBID']
    job_id = job_id.rstrip().split('.')[0]
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    device = args.device

    fp16 = True
    if device == "CPU":
        fp16 = False

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=device)

    # Read IR
    net = IENetwork(model=model_xml, weights=model_bin)

    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"

    bn = "relu_1/Relu"
    print(bn)
    # add the last convolutional layer as output
    net.add_outputs(bn)
    fc = "predictions_1/MatMul"

    # name of the inputs and outputs
    input_blob = next(iter(net.inputs))
    out_blob = "predictions_1/Sigmoid"

    net.batch_size = 1

    exec_net = plugin.load(network=net)

    n, c, h, w = net.inputs[input_blob].shape
    print("Current Directory:", os.getcwd())
    files = glob.glob(os.getcwd() + args.input[0])

    if not os.path.isdir(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)
    f = open(os.path.join(args.output_dir, job_id, 'result.txt'), 'w')
    progress_file_path = os.path.join(args.output_dir, job_id,
                                      "i_progress.txt")
    print(progress_file_path)
    time_images = []
    tstart = time.time()
    for index_f, file in enumerate(files):
        [image1, image] = read_image(file)
        t0 = time.time()
        for i in range(args.number_iter):
            res = exec_net.infer(inputs={input_blob: image1})
            #infer_time.append((time()-t0)*1000)
        infer_time = (time.time() - t0) * 1000
        log.info("Average running time of one iteration: {} ms".format(
            round(infer_time / args.number_iter, 3)))
        if args.perf_counts:
            perf_counts = exec_net.requests[0].get_perf_counts()
            log.info("Performance counters:")
            print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(
                'name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
            for layer, stats in perf_counts.items():
                print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(
                    layer, stats['layer_type'], stats['exec_type'],
                    stats['status'], stats['real_time']))
        res_pb = res[out_blob]
        probs = res_pb[0][0]
        print("Probability of having disease= " + str(probs) +
              ", performed in " + str(np.average(np.asarray(infer_time))) +
              " ms")

        # Class Activation Map
        t0 = time.time()
        cam = class_activation_map_openvino(res, bn, fc, net, fp16)
        cam_time = (time.time() - t0) * 1000
        print("Time for CAM: {} ms".format(cam_time))

        fig, ax = plt.subplots(1, 2)
        # Visualize the CAM heatmap
        cam = (cam - np.min(cam)) / (np.max(cam) - np.min(cam))
        im = ax[0].imshow(cam, cmap=colormap)
        ax[0].axis('off')
        plt.colorbar(im, ax=ax[0], fraction=0.046, pad=0.04)

        # Visualize the CAM overlaid over the X-ray image
        colormap_val = cm.get_cmap(colormap)
        imss = np.uint8(colormap_val(cam) * 255)
        im = Image.fromarray(imss)
        width, height = image.size
        cam1 = resize_image(im, (height, width))
        heatmap = np.asarray(cam1)
        img1 = heatmap[:, :, :3] * 0.3 + image
        ax[1].imshow(np.uint16(img1))
        plt.xticks([]), plt.yticks([])  # to hide tick values on X and Y axis
        plt.savefig(os.path.join(args.output_dir, job_id,
                                 'result' + str(index_f) + '.png'),
                    bbox_inches='tight',
                    pad_inches=0,
                    dpi=300)

        avg_time = round((infer_time / args.number_iter), 1)
        #f.write(res + "\n Inference performed in " + str(np.average(np.asarray(infer_time))) + "ms")
        f.write("Pneumonia probability: " + str(probs) +
                ", Inference performed in " + str(avg_time) + "ms \n")
        time_images.append(avg_time)
        progressUpdate(progress_file_path, index_f * avg_time, index_f + 1,
                       len(files))

    f.close()
    total_time = round(time.time() - tstart, 2)
    stats = {}
    stats['time'] = str(total_time)
    stats['frames'] = str(len(files))
    stats['fps'] = str(round(len(files) / total_time, 2))
    with open(os.path.join(args.output_dir, job_id, 'stats.json'),
              'w') as json_file:
        json.dump(stats, json_file)
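
The class_activation_map_openvino helper used above is not shown on this page. The classic class-activation-map computation it presumably performs (Zhou et al., 2016) weights the feature maps of the last convolutional layer ("relu_1/Relu" above) by the final fully-connected weights ("predictions_1/MatMul" above) for the target class. A generic NumPy sketch, purely illustrative:

import numpy as np

def class_activation_map(conv_maps, fc_weights):
    # conv_maps: (C, H, W) activations of the last conv/ReLU layer
    # fc_weights: (C,) final-layer weights for the target class
    # A weighted sum over the channel axis yields an (H, W) heatmap.
    return np.tensordot(fc_weights, conv_maps, axes=([0], [0]))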
Code Example #6
File: crop_img.py  Project: rajpatel9498/qarpo
if not os.path.isdir(result_dir):
    print(result_dir)
    os.makedirs(result_dir, exist_ok=True)
print(args.input)
input_img = Image.open(args.input[0])
width, height = input_img.size
op_height, op_width = height // 2, width // 2
print(width, height, op_width, op_height)
total_op_imgs = 4
img_num = 0
t0 = time()
for i in range(0, height, op_height):
    for j in range(0, width, op_width):
        print(i, j)
        img_num += 1
        qarpo.progressUpdate(progress_file_path,
                             time() - t0, img_num, total_op_imgs)
        box = (j, i, j + op_width, i + op_height)
        a = input_img.crop(box)
        print("image cropped")
        a.save(os.path.join(result_dir, f"IMG_{img_num}.png"))

t1 = time() - t0
stats = {}
stats['time'] = str(round(t1, 1))
stats['frames'] = str(img_num)
stats['fps'] = str(round(img_num / t1, 2))
stats_file = os.path.join(result_dir, "stats.json")
with open(stats_file, 'w') as f:
    json.dump(stats, f)
Code Example #7
def main():

    codec = data_utils.TextFeatureIO(
        char_dict_path='app/Config/char_dict.json',
        ord_map_dict_path=r'app/Config/ord_map.json')

    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    # if plugin.device == "CPU":
    #     supported_layers = plugin.get_supported_layers(net)
    #     not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
    #     if len(not_supported_layers) != 0:
    #         log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
    #                   format(plugin.device, ', '.join(not_supported_layers)))
    #         log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
    #                   "or --cpu_extension command line argument")
    #         sys.exit(1)

    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    net.batch_size = len(args.input)
    job_id = os.environ['PBS_JOBID'].split('.')[0]

    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape
    images = np.ndarray(shape=(n, c, h, w))
    for i in range(n):
        image = cv2.imread(args.input[i])
        if image.shape[:-1] != (h, w):
            log.warning("Image {} is resized from {} to {}".format(
                args.input[i], image.shape[:-1], (h, w)))
            image = cv2.resize(image, (w, h))
        image = image.transpose(
            (2, 0, 1))  # Change data layout from HWC to CHW
        images[i] = image
    log.info("Batch size is {}".format(n))

    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net)
    del net

    # Start sync inference
    log.info("Starting inference ({} iterations)".format(args.number_iter))

    result_dir = os.path.join(args.output_dir, job_id)

    if not os.path.isdir(result_dir):
        print(result_dir)
        os.makedirs(result_dir, exist_ok=True)
    progress_file_path = os.path.join(result_dir, 'i_progress.txt')
    t0 = time()
    for i in range(args.number_iter):
        #t0 = time()
        res = exec_net.infer(inputs={input_blob: images})
        #infer_time.append((time()-t0)*1000)
        print(i, args.number_iter)
        progressUpdate(progress_file_path,
                       time() - t0, i + 1, args.number_iter)
    t1 = time() - t0
    log.info("Average running time of one iteration: {} ms".format(
        round(t1 * 1000 / args.number_iter, 3)))
    if args.perf_counts:
        perf_counts = exec_net.requests[0].get_perf_counts()
        log.info("Performance counters:")
        print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(
            'name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
        for layer, stats in perf_counts.items():
            print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(
                layer, stats['layer_type'], stats['exec_type'],
                stats['status'], stats['real_time']))

    # Processing output blob
    log.info("Processing output blob")
    res = res[out_blob]

    preds = res.argmax(2)
    preds = preds.transpose(1, 0)
    preds = np.ascontiguousarray(preds, dtype=np.int8).view(dtype=np.int8)
    values = codec.writer.ordtochar(preds[0].tolist())
    # CTC-style greedy decode: collapse consecutive duplicates, then drop blanks
    values = [v for i, v in enumerate(values) if i == 0 or v != values[i - 1]]
    values = [x for x in values if x != ' ']
    res = ''.join(values)
    print("The result is: " + res)

    avg_time = round((t1 * 1000 / args.number_iter), 3)
    with open(os.path.join(args.output_dir, job_id, 'result.txt'), 'w') as f:
        f.write(res + "\n Inference performed in " + str(avg_time) + "ms")

    stats = {}
    stats['time'] = str(round(t1, 1))
    stats['frames'] = str(args.number_iter * n)
    stats['fps'] = str(args.number_iter * n / t1)
    stats_file = os.path.join(result_dir, "stats.json")
    with open(stats_file, 'w') as f:
        json.dump(stats, f)

    del exec_net
    del plugin
Code Example #8
indicies_validation = [40, 63, 43, 55, 99, 101, 19, 46] #[40]
val_id = 1
infer_time = 0
process_time_start = time.time()
progress_file_path = os.path.join(png_directory, "i_progress.txt")
for idx in indicies_validation:

    input_data_transposed = input_data[idx:(idx + batch_size)].transpose(0, 3, 1, 2)
    start_time = time.time()
    res = exec_net.infer(inputs={input_blob: input_data_transposed[:, :n_channels]})
    # Save the predictions to array
    predictions = res[out_blob]
    time_elapsed = time.time() - start_time
    infer_time += time_elapsed
    plotDiceScore(idx, input_data_transposed, label_data[[idx]].transpose(0, 3, 1, 2), predictions, True, round(time_elapsed * 1000))
    progressUpdate(progress_file_path, time.time() - process_time_start, val_id, len(indicies_validation))
    val_id += 1


total_time = round(time.time() - process_time_start, 2)

# Stats Json file for output mp4 file
stats = {}
stats['time'] = str(total_time)
stats['frames'] = str(val_id - 1)  # val_id ends one past the number of processed frames
stats['fps'] = str(round((val_id - 1) / total_time, 2))

with open(os.path.join(png_directory, 'stats.json'), 'w') as json_file:
    json.dump(stats, json_file)

Code Example #9
def main():
    args = build_argparser().parse_args()

    account_name = args.account_name
    account_key = args.account_key
    job_id = os.environ['PBS_JOBID']
    job_id = job_id.rstrip().split('.')[0]

    if account_name == "" or account_key == "":
        print("Invalid account name or account key!")
        sys.exit(1)
    elif account_name is not None and account_key is None:
        print("Please provide account key using -ak option!")
        sys.exit(1)
    elif account_name is None and account_key is not None:
        print("Please provide account name using -an option!")
        sys.exit(1)
    elif account_name is None and account_key is None:
        upload_azure = 0
    else:
        print("Uploading the results to Azure storage \"" + account_name +
              "\"")
        upload_azure = 1
        create_cloud_container(account_name, account_key)

    #if args.input == 'cam':
    #input_stream = 0
    #else:
    input_stream = args.input
    assert os.path.isfile(args.input), "Specified input file doesn't exist"

    cap = cv2.VideoCapture(input_stream)
    if cap is None or not cap.isOpened():
        print('Error: unable to open video source: ', args.input)
        sys.exit(1)

    # Initialise the class
    infer_network = Network()
    # Load the network to IE plugin to get shape of input layer
    n, c, h, w = infer_network.load_model(args.model, args.device, 1, 1, 0,
                                          args.cpu_extension)

    print("To stop the execution press Esc button")
    initial_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    initial_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    store_aisle = cv2.VideoWriter(
        os.path.join(args.output_dir, job_id, "store_aisle.mp4"),
        cv2.VideoWriter_fourcc(*'avc1'), fps, (initial_w, initial_h), True)
    progress_file_path = os.path.join(args.output_dir, job_id,
                                      'i_progress.txt')
    infer_time_start = time.time()
    frame_count = 1
    ret, frame = cap.read()
    while cap.isOpened():
        ret, next_frame = cap.read()
        if not ret:
            break
        frame_count = frame_count + 1
        in_frame = cv2.resize(next_frame, (w, h))
        # Change data layout from HWC to CHW
        in_frame = in_frame.transpose((2, 0, 1))
        in_frame = in_frame.reshape((n, c, h, w))

        # Start asynchronous inference for specified request.
        inf_start = time.time()
        infer_network.exec_net(0, in_frame)
        # Wait for the result
        infer_network.wait(0)
        det_time = time.time() - inf_start

        people_count = 0

        # Results of the output layer of the network
        res = infer_network.get_output(0)
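        # Each row of res[0][0] follows the SSD detection layout:
        # [image_id, class_id, confidence, xmin, ymin, xmax, ymax],
        # with the box coordinates normalized to [0, 1].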
        for obj in res[0][0]:
            # Draw only objects when probability more than specified threshold
            if obj[2] > args.prob_threshold:
                xmin = int(obj[3] * initial_w)
                ymin = int(obj[4] * initial_h)
                xmax = int(obj[5] * initial_w)
                ymax = int(obj[6] * initial_h)
                class_id = int(obj[1])
                # Draw bounding box
                color = (min(class_id * 12.5, 255),
                         min(class_id * 7, 255),
                         min(class_id * 5, 255))
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
                people_count = people_count + 1

        people_count_message = "People Count : " + str(people_count)
        inf_time_message = "Inference time: {:.3f} ms".format(det_time * 1000)
        cv2.putText(frame, inf_time_message, (15, 25),
                    cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
        cv2.putText(frame, people_count_message, (15, 65),
                    cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
        store_aisle.write(frame)
        time_interval = MULTIPLICATION_FACTOR * fps
        if frame_count % time_interval == 0:
            apply_time_stamp_and_save(frame, people_count, upload_azure)
        if frame_count % 10 == 0:
            progressUpdate(progress_file_path,
                           int(time.time() - infer_time_start), frame_count,
                           video_len)
        frame = next_frame

    if args.output_dir:
        total_time = round(time.time() - infer_time_start, 2)
        stats = {}
        stats['time'] = str(total_time)
        stats['frames'] = str(frame_count)
        stats['fps'] = str(round(frame_count / total_time, 2))
        with open(os.path.join(args.output_dir, job_id, 'stats.json'),
                  'w') as json_file:
            json.dump(stats, json_file)
    cap.release()
    infer_network.clean()
Code Example #10
def main():
    """
    Load the network and parse the output.

    :return: None
    """
    global DELAY
    global CLIENT
    global SIG_CAUGHT
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    logger = log.getLogger()
    render_time = 0
    roi_x = args.pointx
    roi_y = args.pointy
    roi_w = args.width
    roi_h = args.height

    if args.input == 'cam':
        input_stream = 0
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"

    cap = cv2.VideoCapture(input_stream)

    if not cap.isOpened():
        logger.error("ERROR! Unable to open video source")
        sys.exit(1)

    if input_stream:
        # Adjust DELAY to match the number of FPS of the video file
        DELAY = 1000 / cap.get(cv2.CAP_PROP_FPS)
    # Initialise the class
    infer_network = Network()
    # Load the network to IE plugin to get shape of input layer
    n, c, h, w = infer_network.load_model(args.model, args.device, 1, 1, 0,
                                          args.cpu_extension)

    ret, frame = cap.read()
    video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_count = 0
    job_id = os.environ['PBS_JOBID']
    job_id = job_id.rstrip().split('.')[0]
    result_file = open(
        os.path.join(args.output_dir, str(job_id), 'output.txt'), "w")
    progress_file_path = os.path.join(args.output_dir, str(job_id),
                                      'i_progress.txt')
    infer_time_start = time.time()
    while ret:
        dims = ""
        ret, next_frame = cap.read()
        if not ret:
            break

        initial_wh = [cap.get(3), cap.get(4)]

        if next_frame is None:
            log.error("ERROR! blank FRAME grabbed")
            break

        # If either default values or negative numbers are given,
        # then we will default to start of the FRAME
        if roi_x <= 0 or roi_y <= 0:
            roi_x = 0
            roi_y = 0
        if roi_w <= 0:
            roi_w = next_frame.shape[1]
        if roi_h <= 0:
            roi_h = next_frame.shape[0]
        key_pressed = cv2.waitKey(int(DELAY))

        selected_region = [roi_x, roi_y, roi_w, roi_h]
        x_min1 = str(selected_region[0])
        x_max1 = str(selected_region[0] + selected_region[2])
        y_min1 = str(selected_region[1])
        y_max1 = str(selected_region[1] + selected_region[3])

        in_frame_fd = cv2.resize(next_frame, (w, h))
        # Change data layout from HWC to CHW
        in_frame_fd = in_frame_fd.transpose((2, 0, 1))
        in_frame_fd = in_frame_fd.reshape((n, c, h, w))

        # Start asynchronous inference for specified request.
        inf_start = time.time()
        infer_network.exec_net(0, in_frame_fd)
        # Wait for the result
        infer_network.wait(0)
        det_time = time.time() - inf_start
        # Results of the output layer of the network
        res = infer_network.get_output(0)
        # Parse SSD output
        ssd_out(res, args, initial_wh, selected_region)

        est = str(render_time * 1000)
        time1 = round(det_time * 1000)
        Worker = INFO.safe
        out_list = [
            str(frame_count), x_min1, y_min1, x_max1, y_max1,
            str(Worker), est,
            str(time1)
        ]
        dims = ' '.join(out_list) + '\n'
        result_file.write(dims)

        render_start = time.time()
        render_end = time.time()
        render_time = render_end - render_start

        frame_count += 1
        if frame_count % 10 == 0:
            progressUpdate(progress_file_path,
                           int(time.time() - infer_time_start), frame_count,
                           video_len)
        frame = next_frame

        if key_pressed == 27:
            print("Attempting to stop background threads")
            break
    progressUpdate(progress_file_path, int(time.time() - infer_time_start),
                   frame_count, video_len)
    if args.output_dir is None:
        cv2.destroyAllWindows()
    else:
        total_time = round(time.time() - infer_time_start, 2)
        stats = {}
        stats["time"] = str(total_time)
        stats["frames"] = str(frame_count)
        stats["fps"] = str(round(frame_count / total_time, 2))
        with open(os.path.join(args.output_dir, str(job_id), 'stats.json'),
                  'w') as json_file:
            json.dump(stats, json_file)

    infer_network.clean()
    cap.release()
    cv2.destroyAllWindows()
Code Example #11
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    ie = IECore()
    #plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        log.info("Loading plugins for {} device...".format(args.device))
        ie.add_extension(args.cpu_extension, "CPU")


    #     log.info("Loading plugins for {} device...".format(args.device))
    #     plugin.add_cpu_extension(args.cpu_extension)

    # Read IR
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)

    if args.device == "CPU":
        #supported_layers = plugin.get_supported_layers(net)
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [
            layer for layer in net.layers.keys() if layer not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(args.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(
        net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    #exec_net = plugin.load(network=net, num_requests=2)
    exec_net = ie.load_network(network=net,
                               num_requests=2,
                               device_name=args.device)
    # Read and pre-process input image
    if isinstance(net.inputs[input_blob], list):
        n, c, h, w = net.inputs[input_blob]
    else:
        n, c, h, w = net.inputs[input_blob].shape
    del net

    job_id = os.environ['PBS_JOBID']
    job_id = job_id.rstrip().split('.')[0]
    out_file_name = os.path.splitext(os.path.basename(args.input))[0]

    if args.output_dir:
        out_path = os.path.join(args.output_dir, job_id,
                                out_file_name + '.mp4')

    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    cur_request_id = 0
    next_request_id = 1

    #job_id = os.environ['PBS_JOBID']
    #job_id = "12345"
    inf_progress_file_path = os.path.join(args.output_dir, job_id,
                                          'i_progress.txt')
    # Post Progress file update file
    ren_progress_file_path = os.path.join(args.output_dir, job_id,
                                          'post_progress.txt')

    is_async_mode = True
    fps_sum = 0
    frame_count = 0
    read_time = 0
    result_list = []
    try:
        # Set up the capture stream
        input_stream = args.input
        cap = cv2.VideoCapture(input_stream)
        if cap.isOpened():
            video_len = args.count
            log.info("Live stream running at {} fps".format(cv2.CAP_PROP_FPS))
            initial_w = cap.get(3)
            initial_h = cap.get(4)
            cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)

            log.info("Starting inference in async mode...")
            infer_time_start = time.time()
            while frame_count < video_len:
                ret, frame = cap.read()
                if not ret:
                    break

                in_frame = cv2.resize(frame, (w, h))
                in_frame = in_frame.transpose(
                    (2, 0, 1))  # Change data layout from HWC to CHW
                in_frame = in_frame.reshape((n, c, h, w))
                # Main sync point:
                # in the truly Async mode we start the NEXT infer request, while waiting for the CURRENT to complete
                # in the regular mode we start the CURRENT request and immediately wait for its completion
                inf_start = time.time()
                if is_async_mode:
                    exec_net.start_async(request_id=next_request_id,
                                         inputs={input_blob: in_frame})
                else:
                    exec_net.start_async(request_id=cur_request_id,
                                         inputs={input_blob: in_frame})

                read_time_0 = time.time()
                if exec_net.requests[cur_request_id].wait(-1) == 0:
                    inf_end = time.time()
                    det_time = inf_end - inf_start
                    #Parse detection results of the current request
                    res = exec_net.requests[cur_request_id].outputs[out_blob]
                    processBoxes(frame_count, res, labels_map,
                                 args.prob_threshold, frame, result_list,
                                 det_time)
                read_time += time.time() - read_time_0

                frame_count += 1
                #Write data to progress tracker
                if frame_count % 10 == 0:
                    progressUpdate(inf_progress_file_path,
                                   time.time() - infer_time_start, frame_count,
                                   video_len)

                key = cv2.waitKey(1)
                if key == 27:
                    break
                if key == 9:  # Tab
                    is_async_mode = not is_async_mode
                    log.info("Switched to {} mode".format(
                        "async" if is_async_mode else "sync"))
                if is_async_mode:
                    cur_request_id, next_request_id = next_request_id, cur_request_id

            # End while loop
            cap.release()
            log.info("{} seconds were spent reading".format(read_time))

        if args.output_dir is None:
            cv2.destroyAllWindows()
        else:
            total_time = time.time() - infer_time_start
            stats = {}
            stats["time"] = str(round(total_time, 2))
            stats["fps"] = str(round(frame_count / total_time, 2)),
            stats["frames"] = str(frame_count)
            with open(os.path.join(args.output_dir, job_id, 'stats.json'),
                      'w') as f:
                json.dump(stats, f)

        o_video = os.path.join(args.output_dir, job_id, 'output.mp4')
        post_process_t = time.time()
        postProcess(result_list, int(initial_w), int(initial_h), labels_map,
                    o_video, is_async_mode, ren_progress_file_path)
        log.info("Post processing time: {0} sec".format(time.time() -
                                                        post_process_t))

    finally:
        del exec_net
        del ie
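
Examples #11 and #13 keep the deprecated IEPlugin calls as comments next to their IECore replacements. The mapping between the two APIs, as it appears in these examples:

# Deprecated IEPlugin API                        IECore replacement (used above)
# plugin = IEPlugin(device=dev, plugin_dirs=d)   ie = IECore()
# plugin.add_cpu_extension(ext)                  ie.add_extension(ext, "CPU")
# plugin.get_supported_layers(net)               ie.query_network(net, "CPU")
# plugin.load(network=net, num_requests=2)       ie.load_network(network=net, num_requests=2, device_name=dev)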
Code Example #12
def main():
    """
    Load the network and parse the output.
    :return: None
    """
    global INFO
    global DELAY
    global POSE_CHECKED

    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = args_parser().parse_args()
    logger = log.getLogger()
    job_id = os.environ['PBS_JOBID']
    job_id = job_id.rstrip().split('.')[0]

    #if args.input == 'cam':
    # input_stream = 0
    #else:
    input_stream = args.input
    assert os.path.isfile(args.input), "Specified input file doesn't exist"

    cap = cv2.VideoCapture(input_stream)
    initial_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    initial_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    shopper = cv2.VideoWriter(
        os.path.join(args.output_dir, job_id, "shopper.mp4"),
        cv2.VideoWriter_fourcc(*"AVC1"), fps, (initial_w, initial_h), True)
    frame_count = 0
    progress_file_path = os.path.join(args.output_dir, job_id,
                                      'i_progress.txt')
    infer_time_start = time.time()

    if input_stream:
        cap.open(args.input)
        # Adjust DELAY to match the number of FPS of the video file
        DELAY = 1000 / cap.get(cv2.CAP_PROP_FPS)

    if not cap.isOpened():
        logger.error("ERROR! Unable to open video source")
        return

    # Initialise the class
    infer_network = Network()
    infer_network_pose = Network()
    # Load the network to IE plugin to get shape of input layer

    plugin, (n_fd, c_fd, h_fd,
             w_fd) = infer_network.load_model(args.model, args.device, 1, 1, 0,
                                              args.cpu_extension)
    n_hp, c_hp, h_hp, w_hp = infer_network_pose.load_model(
        args.posemodel, args.device, 1, 3, 0, args.cpu_extension, plugin)[1]

    ret, frame = cap.read()

    while ret:
        looking = 0
        ret, next_frame = cap.read()
        frame_count += 1
        if not ret:
            print("checkpoint *BREAKING")
            break

        if next_frame is None:
            log.error("checkpoint ERROR! blank FRAME grabbed")
            break

        initial_wh = [cap.get(3), cap.get(4)]
        in_frame_fd = cv2.resize(next_frame, (w_fd, h_fd))
        # Change data layout from HWC to CHW
        in_frame_fd = in_frame_fd.transpose((2, 0, 1))
        in_frame_fd = in_frame_fd.reshape((n_fd, c_fd, h_fd, w_fd))

        # Start asynchronous inference for specified request
        inf_start_fd = time.time()
        infer_network.exec_net(0, in_frame_fd)
        # Wait for the result
        infer_network.wait(0)
        det_time_fd = time.time() - inf_start_fd

        # Results of the output layer of the network
        res = infer_network.get_output(0)

        # Parse face detection output
        faces = face_detection(res, args, initial_wh)

        if len(faces) != 0:
            # Look for poses
            for res_hp in faces:
                xmin, ymin, xmax, ymax = res_hp
                head_pose = frame[ymin:ymax, xmin:xmax]
                in_frame_hp = cv2.resize(head_pose, (w_hp, h_hp))
                in_frame_hp = in_frame_hp.transpose((2, 0, 1))
                in_frame_hp = in_frame_hp.reshape((n_hp, c_hp, h_hp, w_hp))

                inf_start_hp = time.time()
                infer_network_pose.exec_net(0, in_frame_hp)
                infer_network_pose.wait(0)
                det_time_hp = time.time() - inf_start_hp

                # Parse head pose detection results
                angle_p_fc = infer_network_pose.get_output(0, "angle_p_fc")
                angle_y_fc = infer_network_pose.get_output(0, "angle_y_fc")
                if ((angle_y_fc > -22.5) & (angle_y_fc < 22.5) &
                    (angle_p_fc > -22.5) & (angle_p_fc < 22.5)):
                    looking += 1
                    POSE_CHECKED = True
                    INFO = INFO._replace(looker=looking)
                else:
                    INFO = INFO._replace(looker=looking)
        else:
            INFO = INFO._replace(looker=0)

        # Draw performance stats
        inf_time_message = "Face Inference time: {:.3f} ms.".format(
            det_time_fd * 1000)

        if POSE_CHECKED:
            cv2.putText(
                frame, "Head pose Inference time: {:.3f} ms.".format(
                    det_time_hp * 1000), (0, 35), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (255, 255, 255), 1)
        cv2.putText(frame, inf_time_message, (0, 15), cv2.FONT_HERSHEY_COMPLEX,
                    0.5, (255, 255, 255), 1)
        cv2.putText(frame, "Shopper: {}".format(INFO.shopper), (0, 90),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
        cv2.putText(frame, "Looker: {}".format(INFO.looker), (0, 110),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

        shopper.write(frame)
        if (frame_count % 10 == 0) or ((frame_count + 1) / video_len == 1):
            print("Frame Count: ", frame_count, "video length", video_len)
            progressUpdate(progress_file_path,
                           int(time.time() - infer_time_start),
                           frame_count + 1, video_len)
        frame = next_frame
    if args.output_dir:
        total_time = round(time.time() - infer_time_start, 2)
        stats = {}
        stats['time'] = str(total_time)
        stats['frames'] = str(video_len)
        stats['fps'] = str(round(video_len / total_time, 2))
        with open(os.path.join(args.output_dir, job_id, 'stats.json'),
                  'w') as json_file:
            json.dump(stats, json_file)

    infer_network.clean()
    infer_network_pose.clean()
    cap.release()
Code Example #13
def main():
    # Plugin initialization for specified device and load extensions library
    global rolling_log
    #defaultTarget = TARGET_DEVICE

    env_parser()
    args_parser()
    check_args()
    parse_conf_file()
    job_id = os.environ['PBS_JOBID']
    job_id = job_id.rstrip().split('.')[0]

    # if TARGET_DEVICE not in acceptedDevices:
    #     print ("Unsupporterd device " + TARGET_DEVICE + ". Defaulting to CPU")
    #     TARGET_DEVICE = 'CPU'

    print("Initializing plugin for {} device...".format(TARGET_DEVICE))
    #plugin = IEPlugin(device=TARGET_DEVICE)
    ie = IECore()
    if CPU_EXTENSION and 'CPU' == TARGET_DEVICE:
        #plugin.add_cpu_extension(CPU_EXTENSION)
        ie.add_extension(CPU_EXTENSION, "CPU")

    # Read IR
    print("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)
    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    # Load the IR
    print("Loading IR to the plugin...")
    #exec_net = plugin.load(network=net, num_requests=2)
    exec_net = ie.load_network(network=net, num_requests=2, device_name=TARGET_DEVICE)
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    del net

    minFPS = min([i.cap.get(cv2.CAP_PROP_FPS) for i in videoCaps])
    minlength = min([i.cap.get(cv2.CAP_PROP_FRAME_COUNT) for i in videoCaps])
    for vc in videoCaps:
        vc.rate = int(math.ceil(vc.length/minlength))
    print(minFPS)
    waitTime = int(round(1000 / minFPS / len(videoCaps))) # wait time in ms between showing frames
    frames_sum = 0
    for vc in videoCaps:
        vc.init_vw(h, w, minFPS)
        frames_sum += vc.length
    statsWidth = w if w > 345 else 345
    statsHeight = h if h > (len(videoCaps) * 20 + 15) else (len(videoCaps) * 20 + 15)
    statsVideo = cv2.VideoWriter(os.path.join(output_dir,'Statistics.mp4'), cv2.VideoWriter_fourcc(*"AVC1"), minFPS, (statsWidth, statsHeight), True)
    if not statsVideo.isOpened():
        print ("Couldn't open stats video for writing")
        sys.exit(4)

    # Read the labels file
    if labels_file:
        with open(labels_file, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    # Init a rolling log to store events
    rolling_log_size = int((h - 15) / 20)
    rolling_log = collections.deque(maxlen=rolling_log_size)

    # Init inference request IDs
    cur_request_id = 0
    next_request_id = 1
    # Start with async mode enabled
    is_async_mode = True

    if not UI_OUTPUT:
        # Arrange windows so they are not overlapping
        #arrange_windows(w, h)
        print("To stop the execution press Esc button")

    no_more_data = False

    for vc in videoCaps:
        vc.start_time = datetime.datetime.now()
    frame_count = 0
    progress_file_path = os.path.join(output_dir, job_id, 'i_progress.txt')
    infer_start_time = time.time()

    #Start while loop
    while True:
        # If all video captures are closed stop the loop
        if False not in [videoCap.closed for videoCap in videoCaps]:
            print("All video captures are closed")
            break

        no_more_data = False

        # loop over all video captures
        for idx, videoCapInfer in enumerate(videoCaps):
            # read the next frame
            #print("Video {0} has length {1} and fps {2}".format(idx, videoCapInfer.length,  videoCapInfer.fps))
            if not videoCapInfer.closed:
                 #print("ID {0}".format(idx))
                 vfps = int(round(videoCapInfer.cap.get(cv2.CAP_PROP_FPS)))
                 #for i in range(0, int(round(vfps / minFPS))):
                 for i in range(videoCapInfer.rate):
                     frame_count += 1
                     #print("i = {0}".format(i))
                     ret, frame = videoCapInfer.cap.read()
                     videoCapInfer.cur_frame_count += 1
                     # If the read failed close the program
                     if not ret:
                         videoCapInfer.closed = True
                         no_more_data = True
                         break


                 if videoCapInfer.closed:
                     print("Video {0} is done".format(idx))
                     print("Video has  {0} frames ".format(videoCapInfer.length))
                     break

                 # Copy the current frame for later use
                 videoCapInfer.cur_frame = frame.copy()
                 videoCapInfer.initial_w = videoCapInfer.cap.get(3)
                 videoCapInfer.initial_h = videoCapInfer.cap.get(4)
                 # Resize and change the data layout so it is compatible
                 in_frame = cv2.resize(videoCapInfer.cur_frame, (w, h))
                 in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
                 in_frame = in_frame.reshape((n, c, h, w))

                 infer_start = datetime.datetime.now()
                 if is_async_mode:
                     exec_net.start_async(request_id=next_request_id, inputs={input_blob: in_frame})
                     # Async enabled and only one video capture
                     if(len(videoCaps) == 1):
                         videoCapResult = videoCapInfer
                     # Async enabled and more than one video capture
                     else:
                         # Get previous index
                         videoCapResult = videoCaps[idx - 1 if idx - 1 >= 0 else len(videoCaps) - 1]
                 else:
                     # Async disabled
                     exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})
                     videoCapResult = videoCapInfer

                 if exec_net.requests[cur_request_id].wait(-1) == 0:
                     infer_end = datetime.datetime.now()
                     infer_duration = infer_end - infer_start
                     current_count = 0
                     # Parse detection results of the current request
                     res = exec_net.requests[cur_request_id].outputs[out_blob]
                     for obj in res[0][0]:
                         class_id = int(obj[1])
                         # Draw only objects when probability more than specified threshold
                         if (obj[2] > PROB_THRESHOLD and
                             videoCapResult.req_label in labels_map and
                             labels_map.index(videoCapResult.req_label) == class_id - 1):
                             current_count += 1
                             xmin = int(obj[3] * videoCapResult.initial_w)
                             ymin = int(obj[4] * videoCapResult.initial_h)
                             xmax = int(obj[5] * videoCapResult.initial_w)
                             ymax = int(obj[6] * videoCapResult.initial_h)
                             # Draw box
                             cv2.rectangle(videoCapResult.cur_frame, (xmin, ymin), (xmax, ymax), (0, 255, 0), 4, 16)

                     if videoCapResult.candidate_count == current_count:
                         videoCapResult.candidate_confidence += 1
                     else:
                         videoCapResult.candidate_confidence = 0
                         videoCapResult.candidate_count = current_count

                     if videoCapResult.candidate_confidence == FRAME_THRESHOLD:
                         videoCapResult.candidate_confidence = 0
                         if current_count > videoCapResult.last_correct_count:
                             videoCapResult.total_count += current_count - videoCapResult.last_correct_count

                         if current_count != videoCapResult.last_correct_count:
                             if UI_OUTPUT:
                                 currtime = datetime.datetime.now().strftime("%H:%M:%S")
                                 fr = FrameInfo(videoCapResult.frames, current_count, currtime)
                                 videoCapResult.countAtFrame.append(fr)
                             new_objects = current_count - videoCapResult.last_correct_count
                             for _ in range(new_objects):
                                 string = "{} - {} detected on {}".format(time.strftime("%H:%M:%S"), videoCapResult.req_label, videoCapResult.cap_name)
                                 rolling_log.append(string)

                         videoCapResult.frames+=1
                         videoCapResult.last_correct_count = current_count
                     else:
                         videoCapResult.frames+=1

                     videoCapResult.cur_frame = cv2.resize(videoCapResult.cur_frame, (w, h))

                     if not UI_OUTPUT:
                         # Add log text to each frame
                         log_message = "Async mode is on." if is_async_mode else \
                                       "Async mode is off."
                         cv2.putText(videoCapResult.cur_frame, log_message, (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
                         log_message = "Total {} count: {}".format(videoCapResult.req_label, videoCapResult.total_count)
                         cv2.putText(videoCapResult.cur_frame, log_message, (10, h - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
                         log_message = "Current {} count: {}".format(videoCapResult.req_label, videoCapResult.last_correct_count)
                         cv2.putText(videoCapResult.cur_frame, log_message, (10, h - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
                         cv2.putText(videoCapResult.cur_frame, 'Infer wait: %0.3fs' % (infer_duration.total_seconds()), (10, h - 70), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

                         # Display inferred frame and stats
                          stats = numpy.zeros((statsHeight, statsWidth, 1), dtype='uint8')
                          for i, log_msg in enumerate(rolling_log):
                              cv2.putText(stats, log_msg, (10, i * 20 + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
                          # cv2.imshow(STATS_WINDOW_NAME, stats)  # on-screen display disabled
                          if idx == 0:
                              # VideoWriter expects 3-channel BGR frames
                              stats = cv2.cvtColor(stats, cv2.COLOR_GRAY2BGR)
                              # Write the stats frame to the statistics video
                              statsVideo.write(stats)
                         end_time = datetime.datetime.now()
                          cv2.putText(videoCapResult.cur_frame, 'FPS: %0.2f' % (1 / (end_time - videoCapResult.start_time).total_seconds()), (10, h - 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
                          # cv2.imshow(videoCapResult.cap_name, videoCapResult.cur_frame)  # on-screen display disabled
                          videoCapResult.start_time = datetime.datetime.now()
                          # Write the annotated frame to this capture's output video
                          videoCapResult.video.write(videoCapResult.cur_frame)
                      
            if frame_count % 10 == 0:
                progressUpdate(progress_file_path, time.time() - infer_start_time, frame_count, frames_sum)


            # Wait if necessary for the required time
            #key = cv2.waitKey(waitTime)
            key = cv2.waitKey(1)
            
            # Esc key pressed
            if key == 27:
                cv2.destroyAllWindows()
                del exec_net
                #del plugin
                del ie
                print("Finished")
                return
            # Tab key pressed
            if key == 9:
                is_async_mode = not is_async_mode
                print("Switched to {} mode".format("async" if is_async_mode else "sync"))

            if is_async_mode:
                # Swap infer request IDs
                cur_request_id, next_request_id = next_request_id, cur_request_id

            # Loop video if LOOP_VIDEO = True and input isn't live from USB camera
            if LOOP_VIDEO and not videoCapInfer.is_cam:
                vfps = int(round(videoCapInfer.cap.get(cv2.CAP_PROP_FPS)))
                # If a video capture has ended restart it
                if (videoCapInfer.cur_frame_count > videoCapInfer.cap.get(cv2.CAP_PROP_FRAME_COUNT) - int(round(vfps / minFPS))):
                    videoCapInfer.cur_frame_count = 0
                    videoCapInfer.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)

        if no_more_data:
            progressUpdate(progress_file_path, time.time() - infer_start_time, frames_sum, frames_sum)
            break
# End of while loop --------------------
    no_more_data = True
    t2 = round(time.time() - infer_start_time, 2)
    for vc in videoCaps:
        print("Video length: {}".format(vc.length))
        print("Capture closed: {}".format(vc.closed))
    print("End loop")
    print("Total time {0}".format(t2))
    print("Total frame count {0}".format(frame_count))
    print("fps {0}".format(frame_count/t2))
    stats = {}
    stats['time'] = str(t2) # Total Time
    stats['frames'] = str(frame_count)
    stats['fps'] = str(round(frame_count / t2, 2))
    with open(os.path.join(output_dir, job_id, 'stats.json'), 'w') as json_file:
        json.dump(stats, json_file)

    for vc in videoCaps:
        print("Frames processed {}".format(vc.cur_frame_count))
        print("Frames count {}".format(vc.length))

    # Release every output writer and input capture
    for vc in videoCaps:
        vc.video.release()
        vc.cap.release()
Code Example #14
0
def intruder_detector():
    """
    Process the input source frame by frame and detects intruder, if any.

    :return status: 0 on success, negative value on failure
    """
    global CONF_CANDIDATE_CONFIDENCE
    global LOG_WIN_HEIGHT
    global LOG_WIN_WIDTH
    global CONF_FILE
    global video_caps
    global conf_labels_file_path

    parse_args()
    if not os.path.isfile(CONF_FILE):
        return -12, ""

    if not os.path.isfile(conf_labels_file_path):
        return -13, ""

    # Create a subdirectory to save output snapshots
    pathlib.Path(os.getcwd() + '/output/').mkdir(parents=True, exist_ok=True)

    # Read the configuration file
    ret, req_labels = get_input()
    if ret != 0:
        return ret, req_labels[0]

    if not video_caps:
        return -14, ''

    # Get the labels that are used in the application
    ret, label_names, used_labels = get_used_labels(req_labels)
    if ret != 0:
        return ret, ''
    if True not in used_labels:
        return -15, ''

    # Init a rolling log to store events
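    # Each entry is drawn 20 px apart starting at y = 15, so this is how
    # many lines fit in the log window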
    rolling_log_size = int((LOG_WIN_HEIGHT - 15) / 20)
    log_list = collections.deque(maxlen=rolling_log_size)

    # Open a file for intruder logs; open() raises on failure, so catch the
    # error instead of testing the (always truthy) file object
    try:
        log_file = open(LOG_FILE_PATH, 'w')
    except OSError:
        return -16, ''

    # Initializing VideoWriter for each source
    for video_cap in video_caps:

        ret, ret_value = video_cap.init_vw(int(video_cap.input_height),
                                           int(video_cap.input_width))
        if ret != 0:
            return ret, ret_value
    # Initialise the Network helper class
    infer_network = Network()
    # Load the network to IE plugin to get shape of input layer
    n, c, h, w = infer_network.load_model(model_xml, TARGET_DEVICE, 1, 1, 0,
                                          CPU_EXTENSION)

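    # The slowest source sets the pace: faster sources are read
    # round(vfps / min_fps) frames per iteration so all streams stay in sync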
    min_fps = min([i.vc.get(cv2.CAP_PROP_FPS) for i in video_caps])
    no_more_data = [False] * len(video_caps)
    start_time = time.time()
    inf_time = 0
    fourcc = cv2.VideoWriter_fourcc(*'avc1')
    job_id = os.environ['PBS_JOBID']
    job_id = job_id.rstrip().split('.')[0]
    statsVideo = cv2.VideoWriter(
        os.path.join(output_dir, str(job_id), 'Statistics.mp4'), fourcc,
        min_fps, (LOG_WIN_WIDTH, LOG_WIN_HEIGHT), True)
    progress_file_path = os.path.join(output_dir, str(job_id),
                                      'i_progress.txt')
    infer_start_time = time.time()
    # Main loop starts here. Loop over all the video captures
    while True:
        for idx, video_cap in enumerate(video_caps):
            # Get a new frame
            vfps = int(round(video_cap.vc.get(cv2.CAP_PROP_FPS)))
            for i in range(0, int(round(vfps / min_fps))):
                ret, video_cap.frame = video_cap.vc.read()
                video_cap.loop_frames += 1
                # If no new frame or error in reading a frame, exit the loop
                if not ret:
                    no_more_data[idx] = True
                    break
            if no_more_data[idx]:
                stream_end_frame = numpy.zeros((int(
                    video_cap.input_height), int(video_cap.input_width), 1),
                                               dtype='uint8')
                stream_end_message = "Stream from {} has ended.".format(
                    video_cap.cam_name)
                cv2.putText(stream_end_frame, stream_end_message,
                            (int(video_cap.input_width / 2) - 30,
                             int(video_cap.input_height / 2) - 30),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)
                # Write the end-of-stream notice to this capture's output
                # video (the frame is otherwise built and discarded)
                stream_end_frame = cv2.cvtColor(stream_end_frame,
                                                cv2.COLOR_GRAY2BGR)
                video_cap.vw.write(stream_end_frame)
                continue
            for i in range(video_cap.no_of_labels):
                video_cap.current_count[i] = 0
                video_cap.changed_count[i] = False

            # Resize to expected size (in model .xml file)
            # Input frame is resized to infer resolution
            in_frame = cv2.resize(video_cap.frame, (w, h))

            # PRE-PROCESS STAGE:
            # Convert image to format expected by inference engine
            # IE expects planar, convert from packed
            # Change data layout from HWC to CHW
            in_frame = in_frame.transpose((2, 0, 1))
            in_frame = in_frame.reshape((n, c, h, w))
            # Start asynchronous inference for specified request.
            inf_start = time.time()
            infer_network.exec_net(0, in_frame)
            # Wait for the result
            if infer_network.wait(0) == 0:
                inf_time = time.time() - inf_start
                # Results of the output layer of the network
                res = infer_network.get_output(0)
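                # res has shape [1, 1, N, 7]; each detection is
                # [image_id, label, confidence, xmin, ymin, xmax, ymax]
                # with box coordinates normalised to [0, 1]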
                for obj in res[0][0]:
                    label = int(obj[1]) - 1
                    # Draw the bounding box around the object when the probability is more than specified threshold
                    if obj[2] > CONF_THRESHOLD_VALUE and used_labels[label]:
                        video_cap.current_count[label] += 1
                        xmin = int(obj[3] * video_cap.input_width)
                        ymin = int(obj[4] * video_cap.input_height)
                        xmax = int(obj[5] * video_cap.input_width)
                        ymax = int(obj[6] * video_cap.input_height)
                        # Draw bounding box around the intruder detected
                        cv2.rectangle(video_cap.frame, (xmin, ymin),
                                      (xmax, ymax), (0, 255, 0), 4, 16)

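                # Debounce per label: a changed count must persist for
                # CONF_CANDIDATE_CONFIDENCE consecutive frames before it registers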
                for i in range(video_cap.no_of_labels):
                    if video_cap.candidate_count[i] == video_cap.current_count[i]:
                        video_cap.candidate_confidence[i] += 1
                    else:
                        video_cap.candidate_confidence[i] = 0
                        video_cap.candidate_count[i] = video_cap.current_count[i]

                    if video_cap.candidate_confidence[i] == CONF_CANDIDATE_CONFIDENCE:
                        video_cap.candidate_confidence[i] = 0
                        video_cap.changed_count[i] = True
                    else:
                        continue

                    if video_cap.current_count[i] > video_cap.last_correct_count[i]:
                        det_objs = (video_cap.current_count[i] -
                                    video_cap.last_correct_count[i])
                        video_cap.total_count[i] += det_objs
                        total_count = sum(video_cap.total_count)
                        for det_obj in range(det_objs):
                            current_time = time.strftime("%H:%M:%S")
                            log = "{} - Intruder {} detected on {}".format(
                                current_time, label_names[i],
                                video_cap.cam_name)
                            print(log)
                            log_list.append(log)
                            log_file.write(log + "\n")
                            event = Event(event_time=current_time,
                                          intruder=label_names[i],
                                          count=total_count,
                                          frame=video_cap.frame_count)
                            video_cap.events.append(event)

                        snapshot_name = "output/intruder_{}.png".format(
                            total_count)
                        cv2.imwrite(snapshot_name, video_cap.frame)
                    video_cap.last_correct_count[i] = video_cap.current_count[i]
            # Create intruder log window, add logs to the frame and display it
            log_window = numpy.zeros((LOG_WIN_HEIGHT, LOG_WIN_WIDTH, 1),
                                     dtype='uint8')
            for i, log in enumerate(log_list):
                cv2.putText(log_window, log, (10, 20 * i + 15),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
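            # Convert to 3-channel BGR, which the VideoWriter expects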
            log_window = cv2.cvtColor(log_window, cv2.COLOR_GRAY2BGR)
            statsVideo.write(log_window)
            video_cap.frame_count += 1

            # Video output
            inf_time_message = "Inference time: {:.3f} ms".format(inf_time *
                                                                  1000)
            cv2.putText(video_cap.frame, inf_time_message,
                        (10, int(video_cap.input_height) - 30),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            fps_time = time.time() - start_time
            fps_message = "FPS: {:.3f} fps".format(1 / fps_time)
            cv2.putText(video_cap.frame, fps_message,
                        (10, int(video_cap.input_height) - 10),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)

            # Display the video output
            video_cap.vw.write(video_cap.frame)
            if video_cap.frame_count % 10 == 0:
                progressUpdate(progress_file_path,
                               time.time() - infer_start_time,
                               video_cap.frame_count,
                               int(video_cap.vc.get(cv2.CAP_PROP_FRAME_COUNT)))
            start_time = time.time()

            # Loop video to mimic continuous input if LOOP_VIDEO flag is True
            if LOOP_VIDEO and not video_cap.is_cam:
                vfps = int(round(video_cap.vc.get(cv2.CAP_PROP_FPS)))
                # If a video capture has ended restart it
                if video_cap.loop_frames > video_cap.vc.get(
                        cv2.CAP_PROP_FRAME_COUNT) - int(round(vfps / min_fps)):
                    video_cap.loop_frames = 0
                    video_cap.vc.set(cv2.CAP_PROP_POS_FRAMES, 0)

        if False not in no_more_data:
            progressUpdate(progress_file_path,
                           time.time() - infer_start_time,
                           int(video_cap.vc.get(cv2.CAP_PROP_FRAME_COUNT)),
                           int(video_cap.vc.get(cv2.CAP_PROP_FRAME_COUNT)))
            break

    t2 = time.time() - infer_start_time
    # Aggregate the frame counts of all captures for the final statistics
    total_frames = sum(vc.frame_count for vc in video_caps)
    stats = {}
    stats['time'] = str(round(t2, 2))
    stats['fps'] = str(round(total_frames / t2, 2))
    stats['frames'] = str(total_frames)

    with open(os.path.join(output_dir, str(job_id), 'stats.json'), 'w') as f:
        json.dump(stats, f)

    infer_network.clean()
    log_file.close()
    return 0, ''