Example #1
def run_seg_traced_webcam_demo():
    """

    :return:
    :rtype:"""

    import torch
    import io

    load_path = (PROJECT_APP_PATH.user_data / "penn_fudan_segmentation" /
                 "seg_skip_fis").with_suffix(".traced")

    with open(str(load_path),
              "rb") as f:  # Read the traced ScriptModule into an io.BytesIO buffer
        buffer = io.BytesIO(f.read())

    model = torch.jit.load(buffer)  # Load all tensors to the original device

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    from matplotlib.pyplot import imshow, show

    with TorchDeviceSession(device=global_torch_device("cpu"), model=model):
        with TorchEvalSession(model):
            for image in tqdm(frame_generator(cv2.VideoCapture(0))):
                result = model(
                    transform(image).unsqueeze(0).to(global_torch_device()))[0]

                imshow(result[0][0].numpy(), vmin=0.0, vmax=1.0)
                show()
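
The load-from-buffer pattern above is plain torch.jit API; a minimal, self-contained sketch (the file name is hypothetical, any previously saved ScriptModule works):

import io
import torch

with open("seg_skip_fis.traced", "rb") as f:
    buffer = io.BytesIO(f.read())

buffer.seek(0)
model = torch.jit.load(buffer, map_location="cpu")  # remap all tensors onto the CPU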
Example #2
    def main(model_name: str = "maskrcnn_pennfudanped", score_threshold=0.55):
        """Run a Mask R-CNN PennFudanPed model on webcam frames and display thresholded detections."""
        base_path = PROJECT_APP_PATH.user_data / 'maskrcnn'
        dataset_root = Path.home() / "Data"

        torch_seed(3825)

        dataset = PennFudanDataset(dataset_root / "PennFudanPed",
                                   Split.Training)
        categories = dataset.categories

        if True:  # flip to False to use a pretrained torchvision Mask R-CNN instead of a local checkpoint
            model = load_model(model_name=model_name,
                               model_directory=base_path / 'models')
        else:
            model = get_pretrained_instance_segmentation_maskrcnn(
                dataset.response_channels)

        model.to(global_torch_device())
        cpu_device = torch.device("cpu")

        with torch.no_grad():
            with TorchEvalSession(model):
                for image in tqdm(
                        to_tensor_generator(
                            frame_generator(cv2.VideoCapture(0)),
                            device=global_torch_device(),
                        )):
                    prediction = model(
                        # torch_vision_normalize_batch_nchw(
                        uint_hwc_to_chw_float_tensor(image).unsqueeze(0)
                        #    )
                    )[0]

                    (boxes, labels, scores) = (
                        prediction["boxes"].to(cpu_device).numpy(),
                        prediction["labels"].to(cpu_device).numpy(),
                        torch.sigmoid(
                            prediction["scores"]).to(cpu_device).numpy(),
                    )

                    indices = scores > score_threshold  # boolean mask of detections above the confidence threshold

                    cv2.namedWindow(model_name, cv2.WINDOW_NORMAL)
                    cv2.imshow(
                        model_name,
                        draw_bounding_boxes(
                            quick_to_pil_image(image),
                            boxes[indices],
                            labels=labels[indices],
                            scores=scores[indices],
                            categories=categories,
                        ))

                    if cv2.waitKey(1) == 27:
                        break  # esc to quit
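
The score filtering in this example is ordinary numpy boolean indexing; a standalone sketch with made-up detections:

import numpy

boxes = numpy.array([[0, 0, 10, 10], [5, 5, 20, 20], [1, 1, 3, 3]])
scores = numpy.array([0.9, 0.4, 0.7])

indices = scores > 0.55  # boolean mask, one flag per detection
print(boxes[indices])  # keeps rows 0 and 2; labels and scores are filtered the same way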
Example #3
    def asdasf():
        """ """
        from draugr.opencv_utilities import frame_generator, AsyncVideoStream
        from draugr.opencv_utilities import OpencvImageStream  # assumed to be exported from the same namespace; missing from the original imports
        from draugr.tqdm_utilities import progress_bar

        with AsyncVideoStream() as vc:
            with OpencvImageStream() as s:
                for i in progress_bar(
                    frame_generator(vc, coder=None), auto_total_generator=False
                ):
                    s.draw(i)
Example #4
    def asdasf():
        """ """
        import cv2
        from functools import partial
        from draugr.opencv_utilities import frame_generator, AsyncVideoStream
        from draugr.tqdm_utilities import progress_bar
        from draugr.visualisation import ImageStreamPlot  # assumed home of ImageStreamPlot; missing from the original imports

        with AsyncVideoStream() as vc:
            coder = partial(cv2.cvtColor, code=cv2.COLOR_BGR2RGB)
            with ImageStreamPlot(coder(next(vc))) as s:
                for i in progress_bar(frame_generator(vc, coder=coder),
                                      auto_total_generator=False):
                    s.draw(i)
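
The coder is just a unary callable applied to every frame; functools.partial bakes the conversion code into cv2.cvtColor. A sketch on a synthetic frame:

import cv2
import numpy
from functools import partial

coder = partial(cv2.cvtColor, code=cv2.COLOR_BGR2RGB)

bgr_frame = numpy.zeros((480, 640, 3), dtype=numpy.uint8)
bgr_frame[..., 0] = 255  # pure blue in BGR ordering
rgb_frame = coder(bgr_frame)  # blue ends up in channel 2 under RGB ordering
assert rgb_frame[..., 2].all()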
Example #5
def run_seg_traced_webcam_demo():
    """

:param categories:
:type categories:
:param cfg:
:type cfg:
:param model_ckpt:
:type model_ckpt:
:param score_threshold:
:type score_threshold:
:param window_name:
:type window_name:
:return:
:rtype:
"""

    import torch
    import io

    with open(
        "seg_skip_fis.traced", "rb"
    ) as f:  # Read the traced ScriptModule into an io.BytesIO buffer
        buffer = io.BytesIO(f.read())

    model = torch.jit.load(buffer)  # Load all tensors to the original device

    transform = transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )

    with TorchDeviceSession(
        device=global_torch_device(cuda_if_available=False), model=model
    ):
        with TorchEvalSession(model):
            for image in tqdm(frame_generator(cv2.VideoCapture(0))):
                result = model(transform(image).unsqueeze(0).to(global_torch_device()))[
                    0
                ]
                print(result)
                from matplotlib.pyplot import imshow, show

                imshow(result[0][0].numpy(), vmin=0.0, vmax=1.0)
                show()
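
The transform pipeline is the standard ImageNet mean/std normalization; a self-contained sketch on a synthetic image:

import numpy
from torchvision import transforms

transform = transforms.Compose([
    transforms.ToTensor(),  # HWC uint8 in [0, 255] -> CHW float in [0, 1]
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

image = numpy.random.randint(0, 256, (240, 320, 3), dtype=numpy.uint8)
batch = transform(image).unsqueeze(0)  # add the batch dimension the model expects
print(batch.shape)  # torch.Size([1, 3, 240, 320])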
Example #6
    def asd2():
        """ """
        import cv2
        import numpy
        import torch
        from PIL import Image
        from tqdm import tqdm

        from draugr.opencv_utilities import frame_generator
        from draugr.torch_utilities import global_torch_device, to_tensor_generator
        from draugr.torch_utilities import quick_to_pil_image  # assumed exported here; missing from the original imports

        with torch.no_grad():
            for image in tqdm(
                to_tensor_generator(
                    frame_generator(cv2.VideoCapture(0)),
                    device=global_torch_device(),
                )
            ):
                cv2.namedWindow("window_name", cv2.WINDOW_NORMAL)
                cv2.imshow("window_name", numpy.array(quick_to_pil_image(image)))

                if cv2.waitKey(1) == 27:
                    break  # esc to quit
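
Stripped of the tensor round-trip, the display part is the standard OpenCV webcam loop:

import cv2

capture = cv2.VideoCapture(0)
cv2.namedWindow("window_name", cv2.WINDOW_NORMAL)
while True:
    success, frame = capture.read()
    if not success:
        break
    cv2.imshow("window_name", frame)
    if cv2.waitKey(1) == 27:  # esc to quit
        break
capture.release()
cv2.destroyAllWindows()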
Example #7
def run_webcam_demo(
    cfg: NOD,
    input_cfg: NOD,
    categories: List,
    model_ckpt: Path,
    score_threshold: float = 0.7,
    window_name: str = "SSD",
):
    """

:param categories:
:type categories:
:param cfg:
:type cfg:
:param model_ckpt:
:type model_ckpt:
:param score_threshold:
:type score_threshold:
:param window_name:
:type window_name:
:return:
:rtype:
"""

    cpu_device = torch.device("cpu")
    transforms = SSDTransform(input_cfg.image_size,
                              input_cfg.pixel_mean,
                              split=Split.Testing)
    model = SingleShotDectectionNms(cfg)

    checkpointer = CheckPointer(model,
                                save_dir=ensure_existence(
                                    PROJECT_APP_PATH.user_data / "results"))
    checkpointer.load(model_ckpt, use_latest=model_ckpt is None)
    print(
        f"Loaded weights from {model_ckpt if model_ckpt else checkpointer.get_checkpoint_file()}"
    )

    model.post_init()
    model.to(global_torch_device())

    with TorchEvalSession(model):
        for image in tqdm(frame_generator(cv2.VideoCapture(0))):
            result = model(
                transforms(image)[0].unsqueeze(0).to(global_torch_device()))
            height, width, *_ = image.shape

            # rescale boxes from the model's input resolution to the captured frame's resolution
            result.boxes[:, 0::2] *= width / result.img_width.cpu().item()
            result.boxes[:, 1::2] *= height / result.img_height.cpu().item()
            (boxes, labels, scores) = (
                result.boxes.to(cpu_device).numpy(),
                result.labels.to(cpu_device).numpy(),
                result.scores.to(cpu_device).numpy(),
            )

            indices = scores > score_threshold

            cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
            cv2.imshow(
                window_name,
                draw_bounding_boxes(
                    image,
                    boxes[indices],
                    labels=labels[indices],
                    scores=scores[indices],
                    categories=categories,
                    score_font=ImageFont.truetype(
                        PACKAGE_DATA_PATH / "Lato-Regular.ttf",
                        24,
                    ),
                ).astype(numpy.uint8),
            )
            if cv2.waitKey(1) == 27:
                break  # esc to quit
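
The box rescaling relies on strided slicing: columns 0 and 2 hold x coordinates, columns 1 and 3 hold y. A standalone sketch with hypothetical resolutions:

import numpy

boxes = numpy.array([[30.0, 60.0, 150.0, 240.0]])  # (x1, y1, x2, y2) at model resolution
model_w, model_h = 300, 300
frame_w, frame_h = 640, 480

boxes[:, 0::2] *= frame_w / model_w  # scale x1 and x2
boxes[:, 1::2] *= frame_h / model_h  # scale y1 and y2
print(boxes)  # [[ 64.  96. 320. 384.]]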
Example #8
def export_detection_model(
    cfg: NOD,
    model_ckpt: Path,
    model_export_path: Path = Path("torch_model"),
    verbose: bool = True,
    onnx_export: bool = False,
    strict_jit: bool = False,
    ) -> None:
  """

:param verbose:
:type verbose:
:param cfg:
:type cfg:
:param model_ckpt:
:type model_ckpt:
:param model_export_path:
:type model_export_path:
:return:
:rtype:
"""
  model = SingleShotDectectionNms(cfg)

  checkpointer = CheckPointer(
      model, save_dir=ensure_existence(PROJECT_APP_PATH.user_data / "results")
      )
  checkpointer.load(model_ckpt, use_latest=model_ckpt is None)
  print(
      f"Loaded weights from {model_ckpt if model_ckpt else checkpointer.get_checkpoint_file()}"
      )

  model.post_init()
  model.to(global_torch_device())

  transforms = SSDTransform(
      cfg.input.image_size, cfg.input.pixel_mean, split=Split.Testing
      )
  model.eval()

  pre_quantize_model = False
  if pre_quantize_model:  # Accuracy may drop!
    model = quantization.quantize_dynamic(model, dtype=torch.qint8)

  frame_g = frame_generator(cv2.VideoCapture(0))
  for image in tqdm(frame_g):
    example_input = (transforms(image)[0].unsqueeze(0).to(global_torch_device()),)
    # example_input is unused by scripting; kept for parity with the tracing variant
    try:
      traced_script_module = torch.jit.script(
          model,
          # example_input,
          )
      exp_path = model_export_path.with_suffix(".compiled")
      traced_script_module.save(str(exp_path))
      print(f"Traced Ops used {torch.jit.export_opnames(traced_script_module)}")
      sprint(
          f"Successfully exported JIT Traced model at {exp_path}", color="green"
          )
    except Exception as e_i:
      sprint(f"Torch JIT Trace export does not work!, {e_i}", color="red")

    break
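
Dynamic quantization as toggled above works out of the box on modules with supported layers (chiefly torch.nn.Linear); a minimal sketch on a toy model:

import torch
from torch import nn, quantization

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 2)).eval()
quantized = quantization.quantize_dynamic(model, dtype=torch.qint8)

x = torch.randn(1, 8)
print(quantized(x).shape)  # torch.Size([1, 2]); Linear weights are now stored as int8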
Example #9
def export_detection_model(
    cfg: NOD,
    model_checkpoint: Path,
    model_export_path: Path = Path("torch_model"),
    verbose: bool = True,
    onnx_export: bool = False,
    strict_jit: bool = False,
) -> None:
    """

    :param verbose:
    :type verbose:
    :param cfg:
    :type cfg:
    :param model_checkpoint:
    :type model_checkpoint:
    :param model_export_path:
    :type model_export_path:
    :return:
    :rtype:"""
    model = SingleShotDetection(cfg)

    checkpointer = CheckPointer(model,
                                save_dir=ensure_existence(
                                    PROJECT_APP_PATH.user_data / "results"))
    checkpointer.load(model_checkpoint, use_latest=model_checkpoint is None)
    print(
        f"Loaded weights from {model_checkpoint if model_checkpoint else checkpointer.get_checkpoint_file()}"
    )

    model.post_init()
    model.to(global_torch_device())

    transforms = SSDTransform(cfg.input.image_size,
                              cfg.input.pixel_mean,
                              split=SplitEnum.testing)
    model.eval()  # Important!

    fuse_quantize_model = False
    if fuse_quantize_model:
        modules_to_fuse = [
            ["conv", "bn", "relu"]
        ]  # Names of modules to fuse, maybe supply directly for architecture class/declaration
        model = torch.quantization.fuse_modules(
            model, modules_to_fuse=modules_to_fuse, inplace=False)

    pre_quantize_model = False
    if pre_quantize_model:  # Accuracy may drop!
        model = quantization.quantize_dynamic(model, dtype=torch.qint8)

    frame_g = frame_generator(cv2.VideoCapture(0))
    for image in tqdm(frame_g):
        example_input = (transforms(image)[0].unsqueeze(0).to(
            global_torch_device()), )
        try:
            if onnx_export:
                exp_path = model_export_path.with_suffix(".onnx")
                onnx.export(  # `onnx` here is expected to be torch.onnx; its export returns None
                    model,
                    example_input,
                    str(exp_path),
                    verbose=verbose,
                    # export_params=True,  # store the trained parameter weights inside the model file
                    # opset_version=10,  # the onnx version to export the model to
                    # do_constant_folding=True,  # whether to execute constant folding for optimization
                    # input_names=["input"],  # the model's input names
                    # output_names=["output"],  # the model's output names
                    # dynamic_axes={
                    #  "input": {0: "batch_size"},  # variable length axes
                    #  "output": {0: "batch_size"},
                    #  }
                )
                sprint(f"Successfully exported ONNX model at {exp_path}",
                       color="blue")
            else:
                raise Exception("Just trace instead, ignore exception")
        except Exception as e:
            sprint(f"Torch ONNX export does not work, {e}", color="red")
            try:
                traced_script_module = torch.jit.trace(
                    model,
                    example_input,
                    # strict=strict_jit,
                    check_inputs=(
                        transforms(next(frame_g))[0].unsqueeze(0).to(
                            global_torch_device()),
                        transforms(next(frame_g))[0].unsqueeze(0).to(
                            global_torch_device()),
                    ),
                )
                exp_path = model_export_path.with_suffix(".traced")
                traced_script_module.save(str(exp_path))
                print(
                    f"Traced Ops used {torch.jit.export_opnames(traced_script_module)}"
                )
                sprint(
                    f"Successfully exported JIT Traced model at {exp_path}",
                    color="green",
                )
            except Exception as e_i:
                sprint(f"Torch JIT Trace export does not work!, {e_i}",
                       color="red")

        break
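
torch.jit.trace records a single concrete execution, so check_inputs re-runs the trace on extra samples to catch data-dependent control flow; a minimal sketch on a toy module:

import torch
from torch import nn

model = nn.Sequential(nn.Conv2d(3, 4, 3, padding=1), nn.ReLU()).eval()
example = torch.randn(1, 3, 32, 32)

traced = torch.jit.trace(
    model,
    (example,),
    check_inputs=[(torch.randn(1, 3, 32, 32),), (torch.randn(1, 3, 32, 32),)],
)
traced.save("toy_model.traced")
print(torch.jit.export_opnames(traced))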
    """
Example #10
def run_traced_webcam_demo(
    input_cfg: NOD,
    categories: List,
    score_threshold: float = 0.7,
    window_name: str = "SSD",
    onnx_exported: bool = False,
):
    """

:param onnx_exported:
:type onnx_exported:
:param input_cfg:
:type input_cfg:
:param categories:
:type categories:
:param score_threshold:
:type score_threshold:
:param window_name:
:type window_name:
:return:
:rtype:
"""

    import torch

    cpu_device = torch.device("cpu")
    transforms = SSDTransform(input_cfg.image_size,
                              input_cfg.pixel_mean,
                              split=Split.Testing)
    model = None

    if onnx_exported:
        import onnx

        onnx_model = onnx.load("torch_model.onnx")
        onnx.checker.check_model(onnx_model)

        import onnxruntime

        ort_session = onnxruntime.InferenceSession("torch_model.onnx")

        def to_numpy(tensor):
            return (tensor.detach().cpu().numpy()
                    if tensor.requires_grad else tensor.cpu().numpy())

        x = None  # TODO: supply an example input tensor matching the exported model's input shape

        # compute onnxruntime output prediction
        ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}
        ort_outs = ort_session.run(None, ort_inputs)
    else:
        import torch
        import io

        with open("torch_model.traced",
                  "rb") as f:  # Read the traced ScriptModule into an io.BytesIO buffer
            buffer = io.BytesIO(f.read())

        model = torch.jit.load(
            buffer)  # Load all tensors to the original device
        """

buffer.seek(0)
torch.jit.load(buffer, map_location=torch.device('cpu'))     # Load all tensors onto CPU, using a device


buffer.seek(0)
model = torch.jit.load(buffer, map_location='cpu')     # Load all tensors onto CPU, using a string

# Load with extra files.
extra_files = torch._C.ExtraFilesMap()
extra_files['foo.txt'] = 'bar'
torch.jit.load('torch_model.traced', _extra_files=extra_files)
print(extra_files['foo.txt'])
#exit(0)
"""

    with TorchDeviceSession(
            device=global_torch_device(cuda_if_available=False), model=model):
        with TorchEvalSession(model):
            for image in tqdm(frame_generator(cv2.VideoCapture(0))):
                result = SSDOut(*model(
                    transforms(image)[0].unsqueeze(0).to(
                        global_torch_device())))
                height, width, *_ = image.shape

                result.boxes[:, 0::2] *= width / result.img_width.cpu().item()
                result.boxes[:, 1::2] *= height / result.img_height.cpu().item()
                (boxes, labels, scores) = (
                    result.boxes.to(cpu_device).numpy(),
                    result.labels.to(cpu_device).numpy(),
                    result.scores.to(cpu_device).numpy(),
                )

                indices = scores > score_threshold

                cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
                cv2.imshow(
                    window_name,
                    draw_bounding_boxes(
                        image,
                        boxes[indices],
                        labels=labels[indices],
                        scores=scores[indices],
                        categories=categories,
                        score_font=ImageFont.truetype(
                            PACKAGE_DATA_PATH / "Lato-Regular.ttf",
                            24,
                        ),
                    ).astype(numpy.uint8),
                )
                if cv2.waitKey(1) == 27:
                    break  # esc to quit
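
The onnxruntime branch follows the standard InferenceSession pattern; a sketch assuming a torch_model.onnx with a single NCHW float input (the shape is hypothetical):

import numpy
import onnxruntime

ort_session = onnxruntime.InferenceSession("torch_model.onnx")
input_name = ort_session.get_inputs()[0].name

x = numpy.random.rand(1, 3, 300, 300).astype(numpy.float32)
ort_outs = ort_session.run(None, {input_name: x})  # None -> return every model output
print([o.shape for o in ort_outs])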
Example #11
def export_detection_model(
    model_export_path: Path = ensure_existence(
        PROJECT_APP_PATH.user_data / "penn_fudan_segmentation"
    )
    / "seg_skip_fis",
    SEED: int = 87539842,
) -> None:
    """

    :param model_export_path:
    :type model_export_path:
    :return:
    :rtype:"""

    model = OutputActivationModule(
        SkipHourglassFission(input_channels=3, output_heads=(1,), encoding_depth=1)
    )

    with TorchDeviceSession(device=global_torch_device("cpu"), model=model):
        with TorchEvalSession(model):

            seed_stack(SEED)

            # standard PyTorch mean-std input image normalization
            transform = transforms.Compose(
                [
                    transforms.ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
                ]
            )

            frame_g = frame_generator(cv2.VideoCapture(0))

            for image in tqdm(frame_g):
                example_input = (
                    transform(image).unsqueeze(0).to(global_torch_device()),
                )

                try:
                    traced_script_module = torch.jit.trace(
                        model,
                        example_input,
                        # strict=strict_jit,
                        check_inputs=(
                            transform(next(frame_g))
                            .unsqueeze(0)
                            .to(global_torch_device()),
                            transform(next(frame_g))
                            .unsqueeze(0)
                            .to(global_torch_device()),
                        ),
                    )
                    exp_path = model_export_path.with_suffix(".traced")
                    traced_script_module.save(str(exp_path))
                    print(
                        f"Traced Ops used {torch.jit.export_opnames(traced_script_module)}"
                    )
                    sprint(
                        f"Successfully exported JIT Traced model at {exp_path}",
                        color="green",
                    )
                except Exception as e_i:
                    sprint(f"Torch JIT Trace export does not work!, {e_i}", color="red")

                break
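
A useful follow-up to such an export is a round-trip check: reload the saved module and confirm it reproduces the original output (a sketch on a toy network, not the project's model):

import torch
from torch import nn

model = nn.Sequential(nn.Conv2d(3, 1, 3, padding=1), nn.Sigmoid()).eval()
x = torch.randn(1, 3, 64, 64)

traced = torch.jit.trace(model, (x,))
traced.save("roundtrip.traced")

reloaded = torch.jit.load("roundtrip.traced")
assert torch.allclose(model(x), reloaded(x), atol=1e-6)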