Code Example #1
    def __init__(self,
                 *,
                 cfg,
                 dataset_type: callable,
                 data_root: Path,
                 sub_datasets: Tuple,
                 split: Split = Split.Training):
        """

:param data_root:
:type data_root:
:param sub_datasets:
:type sub_datasets:
:param transform:
:type transform:
:param target_transform:
:type target_transform:
:param split:
:type split:
:return:
:rtype:
"""
        assert len(sub_datasets) > 0, "No data found!"

        img_transform = SSDTransform(
            image_size=cfg.input.image_size,
            pixel_mean=cfg.input.pixel_mean,
            split=split,
        )

        if split == Split.Training:
            annotation_transform = SSDAnnotationTransform(
                image_size=cfg.input.image_size,
                priors_cfg=cfg.model.box_head.priors,
                center_variance=cfg.model.box_head.center_variance,
                size_variance=cfg.model.box_head.size_variance,
                iou_threshold=cfg.model.box_head.iou_threshold,
            )
        else:
            annotation_transform = None

        datasets = []

        for dataset_name in sub_datasets:
            datasets.append(
                dataset_type(
                    data_root=data_root,
                    dataset_name=dataset_name,
                    split=split,
                    img_transform=img_transform,
                    annotation_transform=annotation_transform,
                ))

        # for testing, return a list of datasets
        if split != Split.Training:
            self.sub_datasets = datasets
        else:
            dataset = datasets[0]
            if len(datasets) > 1:
                dataset = ConcatDataset(datasets)

            self.sub_datasets = [dataset]
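
A minimal usage sketch of the constructor above. The enclosing class name (SSDDatasets) and the concrete dataset class (VocLikeDataset) are hypothetical placeholders; cfg and Split come from the surrounding project.

# Hypothetical usage -- SSDDatasets and VocLikeDataset are placeholder names,
# not part of the original source.
from pathlib import Path

wrapper = SSDDatasets(
    cfg=cfg,                              # project configuration node
    dataset_type=VocLikeDataset,          # callable building one sub dataset
    data_root=Path("/data/voc"),          # hypothetical data location
    sub_datasets=("VOC2007", "VOC2012"),  # at least one name is required
    split=Split.Training,                 # training concatenates multiple sub datasets
)
train_set = wrapper.sub_datasets[0]       # a single (possibly concatenated) dataset
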
Code Example #2
File: ssd_webcam.py  Project: sintefneodroid/vision
def run_webcam_demo(
    cfg: NOD,
    input_cfg: NOD,
    categories: List,
    model_ckpt: Path,
    score_threshold: float = 0.7,
    window_name: str = "SSD",
):
    """

:param categories:
:type categories:
:param cfg:
:type cfg:
:param model_ckpt:
:type model_ckpt:
:param score_threshold:
:type score_threshold:
:param window_name:
:type window_name:
:return:
:rtype:
"""

    cpu_device = torch.device("cpu")
    transforms = SSDTransform(input_cfg.image_size,
                              input_cfg.pixel_mean,
                              split=Split.Testing)
    model = SingleShotDectectionNms(cfg)

    checkpointer = CheckPointer(model,
                                save_dir=ensure_existence(
                                    PROJECT_APP_PATH.user_data / "results"))
    checkpointer.load(model_ckpt, use_latest=model_ckpt is None)
    print(
        f"Loaded weights from {model_ckpt if model_ckpt else checkpointer.get_checkpoint_file()}"
    )

    model.post_init()
    model.to(global_torch_device())

    with TorchEvalSession(model):
        for image in tqdm(frame_generator(cv2.VideoCapture(0))):
            result = model(
                transforms(image)[0].unsqueeze(0).to(global_torch_device()))
            height, width, *_ = image.shape

            result.boxes[:, 0::2] *= width / result.img_width.cpu().item()
            result.boxes[:, 1::2] *= height / result.img_height.cpu().item()
            (boxes, labels, scores) = (
                result.boxes.to(cpu_device).numpy(),
                result.labels.to(cpu_device).numpy(),
                result.scores.to(cpu_device).numpy(),
            )

            indices = scores > score_threshold

            cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
            cv2.imshow(
                window_name,
                draw_bounding_boxes(
                    image,
                    boxes[indices],
                    labels=labels[indices],
                    scores=scores[indices],
                    categories=categories,
                    score_font=ImageFont.truetype(
                        PACKAGE_DATA_PATH / "Lato-Regular.ttf",
                        24,
                    ),
                ).astype(numpy.uint8),
            )
            if cv2.waitKey(1) == 27:
                break  # esc to quit
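
A hedged invocation sketch for run_webcam_demo. The config object, category list and checkpoint path are placeholders for whatever the surrounding project provides; passing model_ckpt=None makes the CheckPointer fall back to the latest saved checkpoint.

# Hypothetical call -- cfg, the category names and the checkpoint path are placeholders.
from pathlib import Path

run_webcam_demo(
    cfg=cfg,                              # full model configuration (NOD)
    input_cfg=cfg.input,                  # sub-config providing image_size / pixel_mean
    categories=["background", "person"],  # example category list
    model_ckpt=Path("results/ssd.pth"),   # or None to load the latest checkpoint
    score_threshold=0.7,
)
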
Code Example #3
def export_detection_model(
    cfg: NOD,
    model_ckpt: Path,
    model_export_path: Path = Path("torch_model"),
    verbose: bool = True,
    onnx_export: bool = False,
    strict_jit: bool = False,
    ) -> None:
  """

:param verbose:
:type verbose:
:param cfg:
:type cfg:
:param model_ckpt:
:type model_ckpt:
:param model_export_path:
:type model_export_path:
:return:
:rtype:
"""
  model = SingleShotDectectionNms(cfg)

  checkpointer = CheckPointer(
      model, save_dir=ensure_existence(PROJECT_APP_PATH.user_data / "results")
      )
  checkpointer.load(model_ckpt, use_latest=model_ckpt is None)
  print(
      f"Loaded weights from {model_ckpt if model_ckpt else checkpointer.get_checkpoint_file()}"
      )

  model.post_init()
  model.to(global_torch_device())

  transforms = SSDTransform(
      cfg.input.image_size, cfg.input.pixel_mean, split=Split.Testing
      )
  model.eval()

  pre_quantize_model = False
  if pre_quantize_model:  # Accuracy may drop!
    if True:
      model = quantization.quantize_dynamic(model, dtype=torch.qint8)
    else:
      pass
      # model = quantization.quantize(model)

  frame_g = frame_generator(cv2.VideoCapture(0))
  for image in tqdm(frame_g):
    example_input = (transforms(image)[0].unsqueeze(0).to(global_torch_device()),)
    try:
      traced_script_module = torch.jit.script(
          model,
          # example_input,
          )
      exp_path = model_export_path.with_suffix(".compiled")
      traced_script_module.save(str(exp_path))
      print(f"Traced Ops used {torch.jit.export_opnames(traced_script_module)}")
      sprint(
          f"Successfully exported JIT Traced model at {exp_path}", color="green"
          )
    except Exception as e_i:
      sprint(f"Torch JIT Trace export does not work!, {e_i}", color="red")

    break
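
The scripted module written above can be reloaded with torch.jit.load. A minimal sketch, assuming the default "torch_model" stem (giving "torch_model.compiled") and an example_input shaped like the preprocessed batch used during export:

import torch

scripted = torch.jit.load("torch_model.compiled")  # path follows the default export stem
scripted.eval()
with torch.no_grad():
    output = scripted(example_input)  # example_input: a (1, C, H, W) tensor from SSDTransform
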
Code Example #4
File: ssd_traced_export.py  Project: aivclab/vision
def export_detection_model(
    cfg: NOD,
    model_checkpoint: Path,
    model_export_path: Path = Path("torch_model"),
    verbose: bool = True,
    onnx_export: bool = False,
    strict_jit: bool = False,
) -> None:
    """

    :param verbose:
    :type verbose:
    :param cfg:
    :type cfg:
    :param model_checkpoint:
    :type model_checkpoint:
    :param model_export_path:
    :type model_export_path: Path
    :param onnx_export:
    :type onnx_export: bool
    :param strict_jit:
    :type strict_jit: bool
    :return:
    :rtype:"""
    model = SingleShotDetection(cfg)

    checkpointer = CheckPointer(model,
                                save_dir=ensure_existence(
                                    PROJECT_APP_PATH.user_data / "results"))
    checkpointer.load(model_checkpoint, use_latest=model_checkpoint is None)
    print(
        f"Loaded weights from {model_checkpoint if model_checkpoint else checkpointer.get_checkpoint_file()}"
    )

    model.post_init()
    model.to(global_torch_device())

    transforms = SSDTransform(cfg.input.image_size,
                              cfg.input.pixel_mean,
                              split=SplitEnum.testing)
    model.eval()  # Important!

    fuse_quantize_model = False
    if fuse_quantize_model:
        modules_to_fuse = [
            ["conv", "bn", "relu"]
        ]  # Names of modules to fuse, maybe supply directly for architecture class/declaration
        model = torch.quantization.fuse_modules(
            model, modules_to_fuse=modules_to_fuse, inplace=False)

    pre_quantize_model = False
    if pre_quantize_model:  # Accuracy may drop!
        if True:
            model = quantization.quantize_dynamic(model, dtype=torch.qint8)
        else:
            pass
            # model = quantization.quantize(model)

    frame_g = frame_generator(cv2.VideoCapture(0))
    for image in tqdm(frame_g):
        example_input = (transforms(image)[0].unsqueeze(0).to(
            global_torch_device()), )
        try:
            if onnx_export:
                exp_path = model_export_path.with_suffix(".onnx")
                output = onnx.export(
                    model,
                    example_input,
                    str(exp_path),
                    verbose=verbose,
                    # export_params=True,  # store the trained parameter weights inside the model file
                    # opset_version=10,  # the onnx version to export the model to
                    # do_constant_folding=True,  # whether to execute constant folding for optimization
                    # input_names=["input"],  # the model's input names
                    # output_names=["output"],  # the model's output names
                    # dynamic_axes={
                    #  "input": {0: "batch_size"},  # variable lenght axes
                    #  "output": {0: "batch_size"},
                    #  }
                )
                sprint(f"Successfully exported ONNX model at {exp_path}",
                       color="blue")
            else:
                raise Exception("Just trace instead, ignore exception")
        except Exception as e:
            sprint(f"Torch ONNX export does not work, {e}", color="red")
            try:
                traced_script_module = torch.jit.trace(
                    model,
                    example_input,
                    # strict=strict_jit,
                    check_inputs=(
                        transforms(next(frame_g))[0].unsqueeze(0).to(
                            global_torch_device()),
                        transforms(next(frame_g))[0].unsqueeze(0).to(
                            global_torch_device()),
                    ),
                )
                exp_path = model_export_path.with_suffix(".traced")
                traced_script_module.save(str(exp_path))
                print(
                    f"Traced Ops used {torch.jit.export_opnames(traced_script_module)}"
                )
                sprint(
                    f"Successfully exported JIT Traced model at {exp_path}",
                    color="green",
                )
            except Exception as e_i:
                sprint(f"Torch JIT Trace export does not work!, {e_i}",
                       color="red")

        break
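
The modules_to_fuse list above only has an effect when the names match real submodules of the model. A small, self-contained illustration of how torch.quantization.fuse_modules resolves those names; the Block class is illustrative, not part of the SSD code:

import torch
from torch import nn


class Block(nn.Module):
    """Toy module whose attribute names line up with the fuse list."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 8, 3)
        self.bn = nn.BatchNorm2d(8)
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))


block = Block().eval()  # conv+bn+relu fusion requires eval mode
fused = torch.quantization.fuse_modules(block, [["conv", "bn", "relu"]], inplace=False)
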
    """
Code Example #5
def run_webcam_demo(
    cfg: NOD,
    categories: Sequence[str],
    model_checkpoint: Path,
    score_threshold: float = 0.5,
    window_name: str = "SSD",
):
    """

    :param categories:
    :type categories:
    :param cfg:
    :type cfg:
    :param model_checkpoint:
    :type model_checkpoint:
    :param score_threshold:
    :type score_threshold:
    :param window_name:
    :type window_name:
    :return:
    :rtype:"""

    cpu_device = torch.device("cpu")
    transforms = SSDTransform(cfg.input.image_size,
                              cfg.input.pixel_mean,
                              split=SplitEnum.testing)
    model = SingleShotDetection(cfg)

    checkpointer = CheckPointer(model,
                                save_dir=ensure_existence(
                                    PROJECT_APP_PATH.user_data / "results"))
    checkpointer.load(model_checkpoint, use_latest=model_checkpoint is None)
    print(
        f"Loaded weights from {model_checkpoint if model_checkpoint else checkpointer.get_checkpoint_file()}"
    )

    model.post_init()
    model.to(global_torch_device())

    with TorchEvalSession(model):
        for infos in tqdm(DictUnityEnvironment(connect_to_running=True)):
            info = next(iter(infos.values()))
            new_images = extract_all_cameras(info)
            image = next(iter(new_images.values()))[..., :3][..., ::-1]
            image = gamma_correct_float_to_byte(image)
            result = model(
                transforms(image)[0].unsqueeze(0).to(global_torch_device()))[0]
            height, width, *_ = image.shape

            result["boxes"][:, 0::2] *= width / result["img_width"]
            result["boxes"][:, 1::2] *= height / result["img_height"]
            (boxes, labels, scores) = (
                result["boxes"].to(cpu_device).numpy(),
                result["labels"].to(cpu_device).numpy(),
                result["scores"].to(cpu_device).numpy(),
            )

            indices = scores > score_threshold

            if show_image(
                    draw_bounding_boxes(
                        image,
                        boxes[indices],
                        labels=labels[indices],
                        scores=scores[indices],
                        categories=categories,
                    ).astype(numpy.uint8),
                    window_name,
                    wait=1,
            ):
                break  # esc to quit
Code Example #6
def run_traced_webcam_demo(
    input_cfg: NOD,
    categories: List,
    score_threshold: float = 0.7,
    window_name: str = "SSD",
    onnx_exported: bool = False,
):
    """

:param onnx_exported:
:type onnx_exported:
:param input_cfg:
:type input_cfg:
:param categories:
:type categories:
:param score_threshold:
:type score_threshold:
:param window_name:
:type window_name:
:return:
:rtype:
"""

    import torch

    cpu_device = torch.device("cpu")
    transforms = SSDTransform(input_cfg.image_size,
                              input_cfg.pixel_mean,
                              split=Split.Testing)
    model = None

    if onnx_exported:
        import onnx

        onnx_model = onnx.load("torch_model.onnx")
        onnx.checker.check_model(onnx_model)

        import onnxruntime

        ort_session = onnxruntime.InferenceSession("torch_model.onnx")

        def to_numpy(tensor):
            return (tensor.detach().cpu().numpy()
                    if tensor.requires_grad else tensor.cpu().numpy())

        x = None  # placeholder in the original snippet; a real example input tensor is required for the ONNX runtime call below

        # compute onnxruntime output prediction
        ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}
        ort_outs = ort_session.run(None, ort_inputs)
    else:
        import torch
        import io

        torch.jit.load("torch_model.traced")

        with open("torch_model.traced",
                  "rb") as f:  # Load ScriptModule from io.BytesIO object
            buffer = io.BytesIO(f.read())

        model = torch.jit.load(
            buffer)  # Load all tensors to the original device
        """

buffer.seek(0)
torch.jit.load(buffer, map_location=torch.device('cpu'))     # Load all tensors onto CPU, using a device


buffer.seek(0)
model = torch.jit.load(buffer, map_location='cpu')     # Load all tensors onto CPU, using a string

# Load with extra files.
extra_files = torch._C.ExtraFilesMap()
extra_files['foo.txt'] = 'bar'
torch.jit.load('torch_model.traced', _extra_files=extra_files)
print(extra_files['foo.txt'])
#exit(0)
"""

    with TorchDeviceSession(
            device=global_torch_device(cuda_if_available=False), model=model):
        with TorchEvalSession(model):
            for image in tqdm(frame_generator(cv2.VideoCapture(0))):
                result = SSDOut(*model(
                    transforms(image)[0].unsqueeze(0).to(
                        global_torch_device())))
                height, width, *_ = image.shape

                result.boxes[:, 0::2] *= width / result.img_width.cpu().item()
                result.boxes[:,
                             1::2] *= height / result.img_height.cpu().item()
                (boxes, labels, scores) = (
                    result.boxes.to(cpu_device).numpy(),
                    result.labels.to(cpu_device).numpy(),
                    result.scores.to(cpu_device).numpy(),
                )

                indices = scores > score_threshold

                cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
                cv2.imshow(
                    window_name,
                    draw_bounding_boxes(
                        image,
                        boxes[indices],
                        labels=labels[indices],
                        scores=scores[indices],
                        categories=categories,
                        score_font=ImageFont.truetype(
                            PACKAGE_DATA_PATH / "Lato-Regular.ttf",
                            24,
                        ),
                    ).astype(numpy.uint8),
                )
                if cv2.waitKey(1) == 27:
                    break  # esc to quit
Code Example #7
File: ssd_demo.py  Project: sintefneodroid/vision
def run_demo(cfg, class_names, model_ckpt, score_threshold, images_dir,
             output_dir):
    model = SingleShotDectection(cfg)

    checkpointer = CheckPointer(model,
                                save_dir=ensure_existence(
                                    PROJECT_APP_PATH.user_data / "results"))
    checkpointer.load(model_ckpt, use_latest=model_ckpt is None)
    print(
        f"Loaded weights from {model_ckpt if model_ckpt else checkpointer.get_checkpoint_file()}"
    )

    model.post_init()
    model.to(global_torch_device())

    image_paths = list(images_dir.iterdir())

    cpu_device = torch.device("cpu")
    transforms = SSDTransform(cfg.input.image_size,
                              cfg.input.pixel_mean,
                              split=Split.Testing)
    model.eval()

    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)

        image = numpy.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        load_time = time.time() - start

        start = time.time()
        result = model(images.to(global_torch_device()))[0]
        inference_time = time.time() - start

        result.boxes[:, 0::2] *= width / result.img_width
        result.boxes[:, 1::2] *= height / result.img_height
        (boxes, labels, scores) = (
            result.boxes.to(cpu_device).numpy(),
            result.labels.to(cpu_device).numpy(),
            result.scores.to(cpu_device).numpy(),
        )

        indices = scores > score_threshold
        boxes, labels, scores = boxes[indices], labels[indices], scores[
            indices]
        meters = " | ".join([
            f"objects {len(boxes):02d}",
            f"load {round(load_time * 1000):03d}ms",
            f"inference {round(inference_time * 1000):03d}ms",
            f"FPS {round(1.0 / inference_time)}",
        ])
        print(f"({i + 1:04d}/{len(image_paths):04d}) {image_name}: {meters}")

        drawn_image = draw_bounding_boxes(
            image,
            boxes,
            labels,
            scores,
            class_names,
            score_font=ImageFont.truetype(
                PACKAGE_DATA_PATH / "Lato-Regular.ttf",
                24,
            ),
        ).astype(numpy.uint8)
        Image.fromarray(drawn_image).save(os.path.join(output_dir, image_name))
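
A hedged example of driving run_demo over a directory of images; the paths and class list are placeholders, and output_dir must already exist because results are written via os.path.join(output_dir, image_name).

# Hypothetical call -- cfg, the class names and the paths are placeholders.
from pathlib import Path

run_demo(
    cfg,
    class_names=["background", "person"],
    model_ckpt=None,                  # None -> CheckPointer loads the latest checkpoint
    score_threshold=0.5,
    images_dir=Path("demo/images"),   # directory of input images
    output_dir=Path("demo/results"),  # must exist; annotated images are saved here
)
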