Example #1
def main(args, detector):
    video, video_writer, frame_count = init_video_file_capture(
        args.file, 'detector_demo')

    if not os.path.exists(args.labels[0]):
        labels = args.labels
    else:
        labels = load_labels(args.labels[0])

    frame_num = len(frame_count)
    times = []

    for _ in tqdm(frame_count, desc='Processing frames'):
        frame_present, frame = video.read()
        if not frame_present:
            continue

        start_time = time.time()
        results = detector.run(frame)
        elapsed_ms = (time.time() - start_time) * 1000

        detections = decode_yolov3(netout=results, threshold=args.threshold)

        draw_bounding_boxes(frame, detections, labels)

        times.append(elapsed_ms)
        video_writer.write(frame)

    print('Finished processing frames')
    video.release()
    video_writer.release()

    print("Average time(ms): ", sum(times) // frame_num)
    print("FPS: ",
          1000.0 / (sum(times) // frame_num))  # FPS = 1 / time to process loop
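Note: main() above expects a parsed argument namespace plus an already-constructed detector. A minimal driver sketch follows; the argument names (--file, --labels, --threshold) are inferred from their usage in the example, and YoloDetector is a hypothetical wrapper exposing a run(frame) method.

# Hypothetical driver for Example #1 (not part of the original sample).
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--file', required=True,
                        help='Path to the input video file')
    parser.add_argument('--labels', nargs='+', required=True,
                        help='Label names, or a single path to a labels file')
    parser.add_argument('--threshold', type=float, default=0.5,
                        help='Confidence threshold passed to decode_yolov3')
    args = parser.parse_args()

    detector = YoloDetector()  # Hypothetical detector with a run(frame) method
    main(args, detector)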
Example #2
    def detect(self, original_image):
        start_time = time.time()
        results = self.run(original_image)
        elapsed_ms = (time.time() - start_time) * 1000

        detections = decode_yolov3(netout=results, threshold=self._threshold)
        draw_bounding_boxes(original_image, detections, self.labels)

        fps = 1000.0 / elapsed_ms  # elapsed_ms is milliseconds per frame
        print("Estimated frames per second: {0:.2f} Inference time: {1:.2f} ms".format(
            fps, elapsed_ms))

        return cv2.imencode('.jpg', original_image)[1].tobytes()
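Note: detect() returns JPEG-encoded bytes, which fits an MJPEG-over-HTTP stream. A minimal sketch, assuming Flask and an already-constructed detector object with the detect() method above:

# Sketch only: the Flask app and camera source are assumptions, not part of the sample.
from flask import Flask, Response
import cv2

app = Flask(__name__)
detector = ...  # assumed: an instance of the class defining detect() above

def frame_generator(capture):
    while True:
        frame_present, frame = capture.read()
        if not frame_present:
            break
        jpeg_bytes = detector.detect(frame)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg_bytes + b'\r\n')

@app.route('/video')
def video():
    return Response(frame_generator(cv2.VideoCapture(0)),
                    mimetype='multipart/x-mixed-replace; boundary=frame')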
Example #3
def main(args):
    video, video_writer, frame_count = init_video_file_capture(
        args.video_file_path, args.output_video_file_path)

    executor = ArmnnNetworkExecutor(args.model_file_path,
                                    args.preferred_backends)
    process_output, resize_factor = get_model_processing(
        args.model_name, video, executor.input_binding_info)
    labels = dict_labels(args.label_path, include_rgb=True)

    for _ in tqdm(frame_count, desc='Processing frames'):
        frame_present, frame = video.read()
        if not frame_present:
            continue
        input_tensors = preprocess(frame, executor.input_binding_info)
        output_result = executor.run(input_tensors)
        detections = process_output(output_result)
        draw_bounding_boxes(frame, detections, resize_factor, labels)
        video_writer.write(frame)
    print('Finished processing frames')
    video.release()
    video_writer.release()
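Note: Examples #3 to #6 map class indices to names and drawing colours through dict_labels(label_path, include_rgb=True). The real helper ships with the ArmNN Python samples' utilities; a rough sketch, assuming one label per line in the file:

import random

def dict_labels(label_path, include_rgb=False):
    """Map each line index to its label, optionally paired with a random RGB colour."""
    labels = {}
    with open(label_path, 'r') as label_file:
        for idx, line in enumerate(label_file):
            if include_rgb:
                labels[idx] = (line.strip(), tuple(random.choices(range(256), k=3)))
            else:
                labels[idx] = line.strip()
    return labels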
Example #4
def main(args):
    video = init_video_stream_capture(args.video_source)
    executor = ArmnnNetworkExecutor(args.model_file_path,
                                    args.preferred_backends)

    process_output, resize_factor = get_model_processing(
        args.model_name, video, executor.input_binding_info)
    labels = dict_labels(args.label_path, include_rgb=True)

    while True:
        frame_present, frame = video.read()
        if not frame_present:
            raise RuntimeError('Error reading frame from video stream')
        frame = cv2.flip(frame, 1)  # Horizontally flip the frame
        input_tensors = preprocess(frame, executor.input_binding_info)
        print("Running inference...")
        output_result = executor.run(input_tensors)
        detections = process_output(output_result)
        draw_bounding_boxes(frame, detections, resize_factor, labels)
        cv2.imshow('PyArmNN Object Detection Demo', frame)
        if cv2.waitKey(1) == 27:
            print('\nExit key activated. Closing video...')
            break
    video.release()
    cv2.destroyAllWindows()
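Note: the exit check cv2.waitKey(1) == 27 tests for the ESC key. init_video_stream_capture comes from the samples' utilities; a minimal sketch of such a helper, assuming OpenCV:

import cv2

def init_video_stream_capture(video_source):
    # Open the capture device or stream URL and fail fast if it cannot be opened
    video = cv2.VideoCapture(video_source)
    if not video.isOpened():
        raise RuntimeError(
            'Failed to open video capture for source: {}'.format(video_source))
    print('Processing video stream. Press ESC to exit.')
    return video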
Example #5
def main(args):
    enable_profile = args.profiling_enabled == "true"
    action_profiler = Profiling(enable_profile)
    overall_profiler = Profiling(enable_profile)
    overall_profiler.profiling_start()
    action_profiler.profiling_start()

    if args.tflite_delegate_path is not None:
        from network_executor_tflite import TFLiteNetworkExecutor as NetworkExecutor
        exec_input_args = (args.model_file_path, args.preferred_backends,
                           args.tflite_delegate_path)
    else:
        from network_executor import ArmnnNetworkExecutor as NetworkExecutor
        exec_input_args = (args.model_file_path, args.preferred_backends)

    executor = NetworkExecutor(*exec_input_args)
    action_profiler.profiling_stop_and_print_us("Executor initialization")

    action_profiler.profiling_start()
    video, video_writer, frame_count = init_video_file_capture(
        args.video_file_path, args.output_video_file_path)
    process_output, resize_factor = get_model_processing(
        args.model_name, video, executor.get_shape())
    action_profiler.profiling_stop_and_print_us("Video initialization")

    labels = dict_labels(args.label_path, include_rgb=True)

    style_transfer_enabled = all(element is not None for element in [
        args.style_predict_model_file_path,
        args.style_transfer_model_file_path, args.style_image_path,
        args.style_transfer_class
    ])

    if style_transfer_enabled:
        style_image = cv2.imread(args.style_image_path)
        action_profiler.profiling_start()
        style_transfer_executor = style_transfer.StyleTransfer(
            args.style_predict_model_file_path,
            args.style_transfer_model_file_path, style_image,
            args.preferred_backends, args.tflite_delegate_path)
        action_profiler.profiling_stop_and_print_us(
            "Style Transfer Executor initialization")

    for _ in tqdm(frame_count, desc='Processing frames'):
        frame_present, frame = video.read()
        if not frame_present:
            continue
        # Only ssd_mobilenet_v1 needs the final preprocessing flag set
        input_data = preprocess(frame, executor.get_data_type(),
                                executor.get_shape(),
                                args.model_name == "ssd_mobilenet_v1")

        action_profiler.profiling_start()
        output_result = executor.run([input_data])
        action_profiler.profiling_stop_and_print_us("Running inference")

        detections = process_output(output_result)

        if style_transfer_enabled:
            action_profiler.profiling_start()
            frame = style_transfer.create_stylized_detection(
                style_transfer_executor, args.style_transfer_class, frame,
                detections, resize_factor, labels)
            action_profiler.profiling_stop_and_print_us(
                "Running Style Transfer")
        else:
            draw_bounding_boxes(frame, detections, resize_factor, labels)

        video_writer.write(frame)
    print('Finished processing frames')
    overall_profiler.profiling_stop_and_print_us("Total compute time")
    video.release()
    video_writer.release()
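Note: Examples #5 and #6 time each stage with a Profiling object. Only two methods are exercised, profiling_start() and profiling_stop_and_print_us(label); a compatible sketch, assuming microsecond reporting:

import time

class Profiling:
    def __init__(self, enabled):
        self.enabled = enabled
        self.start_time = None

    def profiling_start(self):
        # Record the start of the timed section
        if self.enabled:
            self.start_time = time.perf_counter()

    def profiling_stop_and_print_us(self, label):
        # Print and return the elapsed time in microseconds
        if not self.enabled or self.start_time is None:
            return 0
        elapsed_us = (time.perf_counter() - self.start_time) * 1_000_000
        print('Profiling: {} took {:.0f} us'.format(label, elapsed_us))
        return elapsed_us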
Example #6
def main(args):
    enable_profile = args.profiling_enabled == "true"
    action_profiler = Profiling(enable_profile)
    action_profiler.profiling_start()

    if args.tflite_delegate_path is not None:
        from network_executor_tflite import TFLiteNetworkExecutor as NetworkExecutor
        exec_input_args = (args.model_file_path, args.preferred_backends,
                           args.tflite_delegate_path)
    else:
        from network_executor import ArmnnNetworkExecutor as NetworkExecutor
        exec_input_args = (args.model_file_path, args.preferred_backends)

    executor = NetworkExecutor(*exec_input_args)
    action_profiler.profiling_stop_and_print_us("Executor initialization")

    action_profiler.profiling_start()
    video = init_video_stream_capture(args.video_source)
    action_profiler.profiling_stop_and_print_us("Video initialization")
    model_name = args.model_name
    process_output, resize_factor = get_model_processing(
        args.model_name, video, executor.get_shape())
    labels = dict_labels(args.label_path, include_rgb=True)

    style_transfer_enabled = all(element is not None for element in [
        args.style_predict_model_file_path,
        args.style_transfer_model_file_path, args.style_image_path,
        args.style_transfer_class
    ])

    if style_transfer_enabled:
        style_image = cv2.imread(args.style_image_path)
        action_profiler.profiling_start()
        style_transfer_executor = style_transfer.StyleTransfer(
            args.style_predict_model_file_path,
            args.style_transfer_model_file_path, style_image,
            args.preferred_backends, args.tflite_delegate_path)
        action_profiler.profiling_stop_and_print_us(
            "Style Transfer Executor initialization")

    while True:
        frame_present, frame = video.read()
        if not frame_present:
            raise RuntimeError('Error reading frame from video stream')
        frame = cv2.flip(frame, 1)  # Horizontally flip the frame

        action_profiler.profiling_start()
        # Only ssd_mobilenet_v1 needs the final preprocessing flag set
        input_data = preprocess(frame, executor.get_data_type(),
                                executor.get_shape(),
                                model_name == "ssd_mobilenet_v1")

        output_result = executor.run([input_data])
        if not enable_profile:
            print("Running inference...")
        action_profiler.profiling_stop_and_print_us("Running inference")
        detections = process_output(output_result)
        if style_transfer_enabled:
            action_profiler.profiling_start()
            frame = style_transfer.create_stylized_detection(
                style_transfer_executor, args.style_transfer_class, frame,
                detections, resize_factor, labels)
            action_profiler.profiling_stop_and_print_us(
                "Running Style Transfer")
        else:
            draw_bounding_boxes(frame, detections, resize_factor, labels)
        cv2.imshow('PyArmNN Object Detection Demo', frame)
        if cv2.waitKey(1) == 27:
            print('\nExit key activated. Closing video...')
            break
    video.release()
    cv2.destroyAllWindows()
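Note: Examples #5 and #6 duplicate the conditional import that selects between the Arm NN and TFLite-delegate executors. One way to factor it out, assuming the same module layout as the examples:

def make_executor(args):
    # Prefer the TFLite-delegate executor when a delegate path is supplied
    if args.tflite_delegate_path is not None:
        from network_executor_tflite import TFLiteNetworkExecutor as NetworkExecutor
        exec_input_args = (args.model_file_path, args.preferred_backends,
                           args.tflite_delegate_path)
    else:
        from network_executor import ArmnnNetworkExecutor as NetworkExecutor
        exec_input_args = (args.model_file_path, args.preferred_backends)
    return NetworkExecutor(*exec_input_args)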