Example 1
    def run(self, image):
        # Fall through with the original frame; resize only when it does not
        # already match the model input size (shape[:2] is (height, width))
        img = image
        if image.shape[:2] != (self.input_height, self.input_width):
            img = cv2.resize(image, (self.input_width, self.input_height))
        img = preprocess(img)
        self.interpreter.set_tensor(self.tensor_index, img)
        self.interpreter.invoke()
        return self.get_output_tensors()
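A minimal usage sketch, assuming the run method above belongs to a TFLite-style wrapper class (here called Detector, a hypothetical name) whose constructor sets up interpreter, tensor_index, input_height and input_width:

import cv2

# Detector is a hypothetical wrapper; only run() above comes from the example.
detector = Detector('detect.tflite')
frame = cv2.imread('frame.jpg')    # BGR frame, any resolution
outputs = detector.run(frame)      # resize + preprocess + invoke
print([out.shape for out in outputs])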
Example 2
def main(args):
    video, video_writer, frame_count = init_video_file_capture(
        args.video_file_path, args.output_video_file_path)

    executor = ArmnnNetworkExecutor(args.model_file_path,
                                    args.preferred_backends)
    process_output, resize_factor = get_model_processing(
        args.model_name, video, executor.input_binding_info)
    labels = dict_labels(args.label_path, include_rgb=True)

    # frame_count must be iterable (e.g. a range over the video's frame indices)
    for _ in tqdm(frame_count, desc='Processing frames'):
        frame_present, frame = video.read()
        if not frame_present:
            continue
        input_tensors = preprocess(frame, executor.input_binding_info)
        output_result = executor.run(input_tensors)
        detections = process_output(output_result)
        draw_bounding_boxes(frame, detections, resize_factor, labels)
        video_writer.write(frame)
    print('Finished processing frames')
    video.release()
    video_writer.release()
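Each main(args) in these examples expects an argparse-style namespace. A minimal sketch of the wiring, with flag names inferred from the attributes used above (assumptions, not the verbatim parser from the original script):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--video_file_path', required=True)
    parser.add_argument('--output_video_file_path', default='output.avi')
    parser.add_argument('--model_file_path', required=True)
    parser.add_argument('--model_name', required=True)
    parser.add_argument('--label_path', required=True)
    parser.add_argument('--preferred_backends', nargs='+',
                        default=['CpuAcc', 'CpuRef'])
    main(parser.parse_args())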
Example 3
    def run_style_predict(self, style_image):
        """
            Creates bottleneck tensor for a given style image.

            Args:
                style_image: an image to create the style bottleneck

            Returns:
                style bottleneck tensor
        """
        # The style image has to be preprocessed to (1, 256, 256, 3)
        preprocessed_style_image = cv_utils.preprocess(
            style_image,
            self.style_predict_executor.get_data_type(),
            self.style_predict_executor.get_shape(),
            True,
            keep_aspect_ratio=False)
        # output[0] is the style bottleneck tensor
        style_bottleneck = self.style_predict_executor.run(
            [preprocessed_style_image])[0]

        return style_bottleneck
Example 4
    def run_style_transfer(self, content_image):
        """
            Runs inference for given content_image and style bottleneck to create a stylized image.

            Args:
                content_image:a content image to stylize
        """
        # The content image has to be preprocessed to (1, 384, 384, 3)
        preprocessed_content_image = cv_utils.preprocess(
            content_image,
            np.float32,
            self.style_transfer_executor.get_shape(),
            True,
            keep_aspect_ratio=False)

        # Transform the content image. output[0] is the stylized image
        stylized_image = self.style_transfer_executor.run(
            [preprocessed_content_image, self.style_bottleneck])[0]

        post_stylized_image = style_transfer_postprocess(
            stylized_image, content_image.shape)

        return post_stylized_image
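run_style_predict and run_style_transfer form a two-model pipeline: the predict network distills a style image into a bottleneck tensor once, and the transfer network then applies that bottleneck to each content image. A hedged sketch of driving the surrounding StyleTransfer class, assuming its constructor (whose signature appears in Examples 6 and 7) computes and stores self.style_bottleneck via run_style_predict:

import cv2
from style_transfer import StyleTransfer  # module and class as referenced in Examples 6 and 7

style_image = cv2.imread('style.jpg')
# Constructor arguments mirror Examples 6 and 7; the delegate path may be None.
stylizer = StyleTransfer('style_predict.tflite', 'style_transfer.tflite',
                         style_image, ['CpuAcc', 'CpuRef'], None)

content = cv2.imread('content.jpg')
stylized = stylizer.run_style_transfer(content)  # reuses the stored style bottleneck
cv2.imwrite('stylized.jpg', stylized)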
Example 5
def main(args):
    video = init_video_stream_capture(args.video_source)
    executor = ArmnnNetworkExecutor(args.model_file_path,
                                    args.preferred_backends)

    process_output, resize_factor = get_model_processing(
        args.model_name, video, executor.input_binding_info)
    labels = dict_labels(args.label_path, include_rgb=True)

    while True:
        frame_present, frame = video.read()
        if not frame_present:
            raise RuntimeError('Error reading frame from video stream')
        # Flip only after confirming the read succeeded; frame is None otherwise
        frame = cv2.flip(frame, 1)  # Horizontally flip the frame
        input_tensors = preprocess(frame, executor.input_binding_info)
        print("Running inference...")
        output_result = executor.run(input_tensors)
        detections = process_output(output_result)
        draw_bounding_boxes(frame, detections, resize_factor, labels)
        cv2.imshow('PyArmNN Object Detection Demo', frame)
        if cv2.waitKey(1) == 27:  # 27 is the Esc key
            print('\nExit key activated. Closing video...')
            break
    video.release()
    cv2.destroyAllWindows()
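init_video_stream_capture is not shown in these examples. A plausible stand-in, assuming it simply wraps cv2.VideoCapture around a camera index (an assumption, not the original utility):

import cv2

def init_video_stream_capture(video_source):
    # video_source is typically an integer camera index (0 for the default webcam)
    video = cv2.VideoCapture(int(video_source))
    if not video.isOpened():
        raise RuntimeError(f'Could not open video source {video_source}')
    return video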
Example 6
def main(args):
    enable_profile = args.profiling_enabled == "true"
    action_profiler = Profiling(enable_profile)
    overall_profiler = Profiling(enable_profile)
    overall_profiler.profiling_start()
    action_profiler.profiling_start()

    if args.tflite_delegate_path is not None:
        from network_executor_tflite import TFLiteNetworkExecutor as NetworkExecutor
        exec_input_args = (args.model_file_path, args.preferred_backends,
                           args.tflite_delegate_path)
    else:
        from network_executor import ArmnnNetworkExecutor as NetworkExecutor
        exec_input_args = (args.model_file_path, args.preferred_backends)

    executor = NetworkExecutor(*exec_input_args)
    action_profiler.profiling_stop_and_print_us("Executor initialization")

    action_profiler.profiling_start()
    video, video_writer, frame_count = init_video_file_capture(
        args.video_file_path, args.output_video_file_path)
    process_output, resize_factor = get_model_processing(
        args.model_name, video, executor.get_shape())
    action_profiler.profiling_stop_and_print_us("Video initialization")

    labels = dict_labels(args.label_path, include_rgb=True)

    style_transfer_enabled = all(element is not None for element in [
        args.style_predict_model_file_path,
        args.style_transfer_model_file_path, args.style_image_path,
        args.style_transfer_class
    ])
    if style_transfer_enabled:
        style_image = cv2.imread(args.style_image_path)
        action_profiler.profiling_start()
        style_transfer_executor = style_transfer.StyleTransfer(
            args.style_predict_model_file_path,
            args.style_transfer_model_file_path, style_image,
            args.preferred_backends, args.tflite_delegate_path)
        action_profiler.profiling_stop_and_print_us(
            "Style Transfer Executor initialization")

    for _ in tqdm(frame_count, desc='Processing frames'):
        frame_present, frame = video.read()
        if not frame_present:
            continue
        # Only ssd_mobilenet_v1 needs the extra preprocessing flag set
        input_data = preprocess(frame, executor.get_data_type(),
                                executor.get_shape(),
                                args.model_name == "ssd_mobilenet_v1")

        action_profiler.profiling_start()
        output_result = executor.run([input_data])
        action_profiler.profiling_stop_and_print_us("Running inference")

        detections = process_output(output_result)

        if style_transfer_enabled:
            action_profiler.profiling_start()
            frame = style_transfer.create_stylized_detection(
                style_transfer_executor, args.style_transfer_class, frame,
                detections, resize_factor, labels)
            action_profiler.profiling_stop_and_print_us(
                "Running Style Transfer")
        else:
            draw_bounding_boxes(frame, detections, resize_factor, labels)

        video_writer.write(frame)
    print('Finished processing frames')
    overall_profiler.profiling_stop_and_print_us("Total compute time")
    video.release()
    video_writer.release()
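The Profiling helper is used only through profiling_start() and profiling_stop_and_print_us(label). A minimal compatible stand-in, assuming microsecond wall-clock timing (an assumption; the real class in the samples may differ):

import time

class Profiling:
    def __init__(self, enabled: bool):
        self.enabled = enabled
        self._start = None

    def profiling_start(self):
        # Record the start time only when profiling is enabled
        if self.enabled:
            self._start = time.monotonic()

    def profiling_stop_and_print_us(self, label: str):
        # Print the elapsed time in microseconds for the labelled action
        if self.enabled and self._start is not None:
            elapsed_us = (time.monotonic() - self._start) * 1_000_000
            print(f'Profiling: {label} took {elapsed_us:.0f} us')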
Example 7
def main(args):
    enable_profile = args.profiling_enabled == "true"
    action_profiler = Profiling(enable_profile)
    action_profiler.profiling_start()

    if args.tflite_delegate_path is not None:
        from network_executor_tflite import TFLiteNetworkExecutor as NetworkExecutor
        exec_input_args = (args.model_file_path, args.preferred_backends,
                           args.tflite_delegate_path)
    else:
        from network_executor import ArmnnNetworkExecutor as NetworkExecutor
        exec_input_args = (args.model_file_path, args.preferred_backends)

    executor = NetworkExecutor(*exec_input_args)
    action_profiler.profiling_stop_and_print_us("Executor initialization")

    action_profiler.profiling_start()
    video = init_video_stream_capture(args.video_source)
    action_profiler.profiling_stop_and_print_us("Video initialization")
    model_name = args.model_name
    process_output, resize_factor = get_model_processing(
        args.model_name, video, executor.get_shape())
    labels = dict_labels(args.label_path, include_rgb=True)

    style_transfer_enabled = all(element is not None for element in [
        args.style_predict_model_file_path,
        args.style_transfer_model_file_path, args.style_image_path,
        args.style_transfer_class
    ])
    if style_transfer_enabled:
        style_image = cv2.imread(args.style_image_path)
        action_profiler.profiling_start()
        style_transfer_executor = style_transfer.StyleTransfer(
            args.style_predict_model_file_path,
            args.style_transfer_model_file_path, style_image,
            args.preferred_backends, args.tflite_delegate_path)
        action_profiler.profiling_stop_and_print_us(
            "Style Transfer Executor initialization")

    while True:
        frame_present, frame = video.read()
        if not frame_present:
            raise RuntimeError('Error reading frame from video stream')
        # Flip only after confirming the read succeeded; frame is None otherwise
        frame = cv2.flip(frame, 1)  # Horizontally flip the frame

        action_profiler.profiling_start()
        # Only ssd_mobilenet_v1 needs the extra preprocessing flag set
        input_data = preprocess(frame, executor.get_data_type(),
                                executor.get_shape(),
                                model_name == "ssd_mobilenet_v1")

        output_result = executor.run([input_data])
        if not enable_profile:
            print("Running inference...")
        action_profiler.profiling_stop_and_print_us("Running inference")
        detections = process_output(output_result)
        if style_transfer_enabled:
            action_profiler.profiling_start()
            frame = style_transfer.create_stylized_detection(
                style_transfer_executor, args.style_transfer_class, frame,
                detections, resize_factor, labels)
            action_profiler.profiling_stop_and_print_us(
                "Running Style Transfer")
        else:
            draw_bounding_boxes(frame, detections, resize_factor, labels)
        cv2.imshow('PyArmNN Object Detection Demo', frame)
        if cv2.waitKey(1) == 27:  # 27 is the Esc key
            print('\nExit key activated. Closing video...')
            break
    video.release()
    cv2.destroyAllWindows()