Example #1
def stest_one_versus_many(model, data_dir, img_size):
    """ """
    data_iterator = iter(
        DataLoader(
            PairDataset(
                data_dir,
                transform=transforms.Compose([
                    transforms.Grayscale(),
                    transforms.Resize(img_size),
                    transforms.ToTensor(),
                ]),
                split=SplitEnum.testing,
            ),
            num_workers=0,
            batch_size=1,
            shuffle=True,
        ))
    x0, *_ = next(data_iterator)
    for i in range(10):
        _, x1, _ = next(data_iterator)
        dis = (torch.pairwise_distance(*model(
            to_tensor(x0, device=global_torch_device()),
            to_tensor(x1, device=global_torch_device()),
        )).cpu().item())
        boxed_text_overlay_plot(
            torchvision.utils.make_grid(torch.cat((x0, x1), 0)),
            f"Dissimilarity: {dis:.2f}",
        )
Example #2
def run_seg_traced_webcam_demo():
    """

    :return:
    :rtype:"""

    import torch
    import io

    load_path = (PROJECT_APP_PATH.user_data / "penn_fudan_segmentation" /
                 "seg_skip_fis").with_suffix(".traced")
    # print(load_path)
    # torch.jit.load(str(load_path))

    with open(str(load_path),
              "rb") as f:  # Load ScriptModule from io.BytesIO object
        buffer = io.BytesIO(f.read())

    model = torch.jit.load(buffer)  # Load all tensors to the original device

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    from matplotlib.pyplot import imshow, show

    with TorchDeviceSession(device=global_torch_device("cpu"), model=model):
        with TorchEvalSession(model):
            for image in tqdm(frame_generator(cv2.VideoCapture(0))):
                result = model(
                    transform(image).unsqueeze(0).to(global_torch_device()))[0]

                imshow(result[0][0].numpy(), vmin=0.0, vmax=1.0)
                show()
Example #3
def main():
    """ """
    from configs.mobilenet_v2_ssd320_voc0712 import base_cfg

    # from configs.efficient_net_b3_ssd300_voc0712 import base_cfg
    # from configs.vgg_ssd300_coco_trainval35k import base_cfg
    # from .configs.vgg_ssd512_coco_trainval35k import base_cfg

    global_torch_device(override=global_torch_device("cpu"))

    parser = argparse.ArgumentParser(description="SSD Demo.")
    parser.add_argument(
        "--ckpt",
        type=str,
        default=PROJECT_APP_PATH.user_data / "ssd" / "models" /
        "mobilenet_v2_ssd320_voc0712.pth",
        # alternative checkpoints:
        # "vgg_ssd300_coco_trainval35k.pth"
        # "vgg_ssd512_coco_trainval35k.pth"
        help="Trained weights.",
    )
    args = parser.parse_args()

    export_detection_model(cfg=base_cfg, model_checkpoint=Path(args.ckpt))
Example #4
def main():
    from configs.mobilenet_v2_ssd320_voc0712 import base_cfg

    # from configs.efficient_net_b3_ssd300_voc0712 import base_cfg
    # from configs.vgg_ssd300_coco_trainval35k import base_cfg
    # from .configs.vgg_ssd512_coco_trainval35k import base_cfg

    global_torch_device(override=global_torch_device(cuda_if_available=False))

    parser = argparse.ArgumentParser(description="SSD Demo.")
    parser.add_argument(
        "--ckpt",
        type=str,
        default=PROJECT_APP_PATH.user_data / "ssd" / "models" /
        "mobilenet_v2_ssd320_voc0712.pth",
        # alternative checkpoints:
        # "vgg_ssd300_coco_trainval35k.pth"
        # "vgg_ssd512_coco_trainval35k.pth"
        help="Use weights from path",
    )
    parser.add_argument("--score_threshold", type=float, default=0.7)
    args = parser.parse_args()

    run_traced_webcam_demo(
        input_cfg=base_cfg.input,
        categories=base_cfg.dataset_type.category_sizes,
        score_threshold=args.score_threshold,
    )
Example #5
def stest_many_versus_many(model, data_dir, img_size, threshold=0.5):
    """ """
    data_iterator = iter(
        DataLoader(
            PairDataset(
                data_dir,
                transform=transforms.Compose([
                    transforms.Grayscale(),
                    transforms.Resize(img_size),
                    transforms.ToTensor(),
                ]),
            ),
            num_workers=0,
            batch_size=1,
            shuffle=True,
        ))
    for i in range(10):
        x0, x1, is_diff = next(data_iterator)
        distance = (torch.pairwise_distance(*model(
            to_tensor(x0, device=global_torch_device()),
            to_tensor(x1, device=global_torch_device()),
        )).cpu().item())
        boxed_text_overlay_plot(
            torchvision.utils.make_grid(torch.cat((x0, x1), 0)),
            f"Truth: {'Different' if is_diff.cpu().item() else 'Alike'},"
            f" Dissimilarity: {distance:.2f},"
            f" Verdict: {'Different' if distance > threshold else 'Alike'}",
        )
Example #6
    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._no_side_effect:
            device = global_torch_device(override=self.prev_dev)
        else:
            device = global_torch_device(override=torch.device("cuda"))
        if self._model:
            self._model.to(device)
        return False
Example #7
def test_cpu():
    print(
        global_torch_device(override=global_torch_device(
            device_preference=True)))
    print(global_torch_device())
    with TorchCpuSession():
        print(global_torch_device())
    print(global_torch_device())
Example #8
    def main(model_name: str = "maskrcnn_pennfudanped", score_threshold=0.55):
        base_path = PROJECT_APP_PATH.user_data / 'maskrcnn'
        dataset_root = Path.home() / "Data"

        torch_seed(3825)

        dataset = PennFudanDataset(dataset_root / "PennFudanPed",
                                   Split.Training)
        categories = dataset.categories

        if True:  # toggle: load a saved checkpoint instead of the pretrained baseline
            model = load_model(model_name=model_name,
                               model_directory=base_path / 'models')
        else:
            model = get_pretrained_instance_segmentation_maskrcnn(
                dataset.response_channels)

        model.to(global_torch_device())
        cpu_device = torch.device("cpu")

        with torch.no_grad():
            with TorchEvalSession(model):
                for image in tqdm(
                        to_tensor_generator(
                            frame_generator(cv2.VideoCapture(0)),
                            device=global_torch_device(),
                        )):
                    prediction = model(
                        # torch_vision_normalize_batch_nchw(
                        uint_hwc_to_chw_float_tensor(image).unsqueeze(0)
                        #    )
                    )[0]

                    (boxes, labels, scores) = (
                        prediction["boxes"].to(cpu_device).numpy(),
                        prediction["labels"].to(cpu_device).numpy(),
                        torch.sigmoid(
                            prediction["scores"]).to(cpu_device).numpy(),
                    )

                    indices = scores > score_threshold

                    cv2.namedWindow(model_name, cv2.WINDOW_NORMAL)
                    cv2.imshow(
                        model_name,
                        draw_bounding_boxes(
                            quick_to_pil_image(image),
                            boxes[indices],
                            labels=labels[indices],
                            scores=scores[indices],
                            categories=categories,
                        ))

                    if cv2.waitKey(1) == 27:
                        break  # esc to quit
Example #9
    def main():
        """

"""
        data_dir = Path.home() / "Data" / "mnist_png"
        train_batch_size = 64
        train_number_epochs = 100
        save_path = PROJECT_APP_PATH.user_data / "models"
        model_name = "pair_siamese_mnist"
        load_prev = True
        train = False
        img_size = (28, 28)
        model = PairRankingSiamese(img_size).to(global_torch_device())
        optimiser = optim.Adam(model.parameters(), lr=3e-4)

        if train:
            if load_prev:
                model, optimiser = load_model_parameters(
                    model,
                    optimiser=optimiser,
                    model_name=model_name,
                    model_directory=save_path,
                )

            with TensorBoardPytorchWriter(PROJECT_APP_PATH.user_log /
                                          model_name /
                                          str(time.time())) as writer:
                # with CaptureEarlyStop() as _:
                with suppress(KeyboardInterrupt):
                    model = train_siamese(
                        model,
                        optimiser,
                        nn.BCELoss().to(global_torch_device()),
                        train_number_epochs=train_number_epochs,
                        data_dir=data_dir,
                        train_batch_size=train_batch_size,
                        model_name=model_name,
                        save_path=save_path,
                        writer=writer,
                        img_size=img_size,
                    )
            save_model_parameters(
                model,
                optimiser=optimiser,
                model_name=f"{model_name}",
                save_directory=save_path,
            )
        else:
            model = load_model_parameters(model,
                                          model_name=model_name,
                                          model_directory=save_path)
            print("loaded best val")
            stest_many_versus_many2(model, data_dir, img_size)
Example #10
def main():
    """ """
    from configs.vgg_ssd300_coco_trainval35k import base_cfg

    parser = argparse.ArgumentParser(
        description="SSD Evaluation on VOC and COCO dataset."
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "--ckpt",
        help="The path to the checkpoint for test, default is the latest checkpoint.",
        default=PROJECT_APP_PATH.user_data
        / "ssd"
        / "models"
        / "mobilenet_v2_ssd320_voc0712.pth",
        # alternative checkpoints:
        # "vgg_ssd300_coco_trainval35k.pth"
        # "vgg_ssd512_coco_trainval35k.pth"
        type=str,
    )

    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    distributed = num_gpus > 1

    set_benchmark_device_dist(distributed, args.local_rank)

    logger = setup_distributed_logger(
        "SSD", global_distribution_rank(), PROJECT_APP_PATH.user_data / "results"
    )
    logger.info(f"Using {num_gpus} GPUs")
    logger.info(args)

    device = torch.device(base_cfg.MODEL.DEVICE)
    global_torch_device(override=device)

    with TorchCacheSession():
        model = SingleShotDetection(base_cfg)
        checkpointer = CheckPointer(
            model,
            save_dir=PROJECT_APP_PATH.user_data / "results",
            logger=logging.getLogger("SSD.inference"),
        )

        checkpointer.load(args.ckpt, use_latest=args.ckpt is None)
        do_ssd_evaluation(
            base_cfg.data_dir,
            base_cfg,
            model.to(torch.device(base_cfg.MODEL.DEVICE)),
            distributed,
        )
Example #11
def main(test_cuda=False):
    print("-" * 80)
    device = torch.device("cuda" if test_cuda else "cpu")  # NOTE: unused; the tensors below go to the global device
    ct = CenterLoss(10, 2, size_average=True).to(global_torch_device())
    y = torch.Tensor([0, 0, 2, 1]).to(global_torch_device())
    feat = torch.zeros(4, 2).to(global_torch_device()).requires_grad_()
    print(list(ct.parameters()))
    print(ct.centers.grad)
    out = ct(y, feat)
    print(out.item())
    out.backward()
    print(ct.centers.grad)
    print(feat.grad)
Example #12
    def main():
        """ """
        data_dir = Path.home() / "Data" / "mnist_png"
        train_batch_size = 64
        train_number_epochs = 100
        save_path = PROJECT_APP_PATH.user_data / "models"
        model_name = "triplet_siamese_mnist"
        load_prev = True
        train = False

        img_size = (28, 28)
        model = NLetConvNet(img_size).to(global_torch_device())
        optimiser = optim.Adam(model.parameters(), lr=3e-4)

        if train:
            if load_prev:
                model, optimiser = load_model_parameters(
                    model,
                    optimiser=optimiser,
                    model_name=model_name,
                    model_directory=save_path,
                )

            with TensorBoardPytorchWriter():
                # from draugr.stopping import CaptureEarlyStop

                # with CaptureEarlyStop() as _:
                with IgnoreInterruptSignal():
                    model = train_siamese(
                        model,
                        optimiser,
                        TripletMarginLoss().to(global_torch_device()),
                        train_number_epochs=train_number_epochs,
                        data_dir=data_dir,
                        train_batch_size=train_batch_size,
                        model_name=model_name,
                        save_path=save_path,
                        img_size=img_size,
                    )
            save_model_parameters(
                model,
                optimiser=optimiser,
                model_name=f"{model_name}",
                save_directory=save_path,
            )
        else:
            model = load_model_parameters(model,
                                          model_name=model_name,
                                          model_directory=save_path)
            print("loaded best val")
            stest_many_versus_many(model, data_dir, img_size)
Example #13
    def main_binary(p=Path.home() / "Data" / "Datasets" / "PennFudanPed"):

        dataset = PennFudanDataset(p, SplitEnum.training)

        global_torch_device(override=global_torch_device("cpu"))

        idx = -2
        img, mask = dataset[idx]
        print(img)
        print(img.shape, mask.shape)
        pyplot.imshow(float_chw_to_hwc_uint_tensor(img))
        pyplot.show()
        pyplot.imshow(mask.squeeze(0))
        pyplot.show()
Example #14
def run_seg_traced_webcam_demo():
    """

:param categories:
:type categories:
:param cfg:
:type cfg:
:param model_ckpt:
:type model_ckpt:
:param score_threshold:
:type score_threshold:
:param window_name:
:type window_name:
:return:
:rtype:
"""

    import torch
    import io

    torch.jit.load("seg_skip_fis.traced")

    with open(
        "seg_skip_fis.traced", "rb"
    ) as f:  # Load ScriptModule from io.BytesIO object
        buffer = io.BytesIO(f.read())

    model = torch.jit.load(buffer)  # Load all tensors to the original device

    transform = transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )

    with TorchDeviceSession(
        device=global_torch_device(cuda_if_available=False), model=model
    ):
        with TorchEvalSession(model):
            for image in tqdm(frame_generator(cv2.VideoCapture(0))):
                result = model(transform(image).unsqueeze(0).to(global_torch_device()))[
                    0
                ]
                print(result)
                from matplotlib.pyplot import imshow, show

                imshow(result[0][0].numpy(), vmin=0.0, vmax=1.0)
                show()
Example #15
    def main():
        """ """
        from draugr.torch_utilities import global_torch_device

        torch.manual_seed(999)
        print("-" * 80)
        ct = CenterLoss(10, 2, size_average=True).to(global_torch_device())
        y = torch.Tensor([0, 0, 2, 1]).to(global_torch_device())
        feat = torch.zeros(4, 2).to(global_torch_device()).requires_grad_()
        print(list(ct.parameters()))
        print(ct.centers.grad)
        out = ct(y, feat)
        print(out.item())
        out.backward()
        print(ct.centers.grad)
        print(feat.grad)
Example #16
def train_model(
    model,
    optimiser,
    epoch_i: int,
    metric_writer: Writer,
    loader: DataLoader,
    log_interval=10,
):
    with TorchTrainSession(model):
        train_accum_loss = 0
        generator = tqdm(enumerate(loader))
        for batch_idx, (original, *_) in generator:
            original = original.to(global_torch_device())

            optimiser.zero_grad()
            reconstruction, mean, log_var = model(original)
            loss = loss_function(reconstruction, original, mean, log_var)
            loss.backward()
            optimiser.step()

            train_accum_loss += loss.item()
            metric_writer.scalar("train_loss", loss.item())

            if batch_idx % log_interval == 0:
                generator.set_description(
                    f"Train Epoch: {epoch_i}"
                    f" [{batch_idx * len(original)}/"
                    f"{len(loader.dataset)}"
                    f" ({100. * batch_idx / len(loader):.0f}%)]\t"
                    f"Loss: {loss.item() / len(original):.6f}")
            break  # NOTE: only the first batch is processed
        print(f"====> Epoch: {epoch_i}"
              f" Average loss: {train_accum_loss / len(loader.dataset):.4f}")
Example #17
def inference(model: Module, data_iterator: Iterator, denoise: bool = True):
    """

:param model:
:type model:
:param data_iterator:
:type data_iterator:
"""
    with torch.no_grad():
        with TorchEvalSession(model):
            img, target = next(data_iterator)
            if denoise:
                model_input = img + torch.normal(mean=0.0,
                                                 std=0.1,
                                                 size=img.shape,
                                                 device=global_torch_device())
            else:
                model_input = img
            pred, *_ = model(torch.clamp(model_input, 0.0, 1.0))
            for i, (s, j) in enumerate(
                    zip(pred.cpu().numpy(),
                        model_input.cpu().numpy())):
                pyplot.imshow(j[0])
                pyplot.show()
                pyplot.imshow(s[0])
                pyplot.title(i)
                pyplot.show()
                break
Example #18
def plot_manifold(
        model: torch.nn.Module,
        *,
        out_path: Path = None,
        n_img_x: int = 20,
        n_img_y: int = 20,
        img_h: int = 28,
        img_w: int = 28,
        sample_range: Number = 1,
        device: Optional[torch.device] = global_torch_device(),
) -> numpy.ndarray:
    """

    :param model:
    :param out_path:
    :param n_img_x:
    :param n_img_y:
    :param img_h:
    :param img_w:
    :param sample_range:
    :param device:
    :return: the compiled grid of decoded images"""
    vectors = sample_2d_latent_vectors(sample_range, n_img_x,
                                       n_img_y).to(device)
    encodings = torch.sigmoid(model(vectors)).to("cpu")
    images = encodings.reshape(n_img_x * n_img_y, img_h, img_w, -1).numpy()
    images *= 255
    images = numpy.uint8(images)
    compiled = compile_encoding_image(images, (n_img_y, n_img_x))
    if out_path:
        from imageio import imwrite

        imwrite(str(out_path), compiled)
    return compiled
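`sample_2d_latent_vectors` is a helper not shown in this listing. A plausible minimal sketch, assuming an evenly spaced grid over [-sample_range, sample_range] in both latent dimensions (the actual implementation may differ):

import torch


def sample_2d_latent_vectors(sample_range, n_img_x, n_img_y):
    """Evenly spaced (n_img_x * n_img_y, 2) grid of 2-D latent coordinates."""
    xs = torch.linspace(-sample_range, sample_range, n_img_x)
    ys = torch.linspace(-sample_range, sample_range, n_img_y)
    grid = torch.stack(torch.meshgrid(ys, xs, indexing="ij"), dim=-1)
    return grid.reshape(-1, 2)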
Example #19
def single_epoch_fitting(
    model: torch.nn.Module,
    optimiser,
    train_loader_,
    *,
    epoch: int = None,
    writer: Writer = None,
    device_: torch.device = global_torch_device()) -> None:
    accum_loss = 0
    num_batches = len(train_loader_)

    with TorchTrainSession(model):
        for batch_idx, (data, target) in tqdm(enumerate(train_loader_),
                                              desc='train batch #',
                                              total=num_batches):
            loss = nll_loss(
                model(data.to(device_)).squeeze(), target.to(device_)
            )  # negative log-likelihood for a tensor of size (batch x 1 x n_output)
            optimiser.zero_grad()
            loss.backward()
            optimiser.step()
            accum_loss += loss.item()

    if writer:
        writer.scalar('loss', accum_loss / num_batches, epoch)
Example #20
    def __init__(
        self,
        *,
        device: str = global_torch_device(True),
        gradient_clipping: TogglableLowHigh = TogglableLowHigh(False, -1.0, 1.0),
        gradient_norm_clipping: TogglableLowHigh = TogglableLowHigh(False, -1.0, 1.0),
        intrinsic_signal_provider_arch: IntrinsicSignalProvider = BraindeadIntrinsicSignalProvider,
        **kwargs,
    ):
        """

@param device:
@param gradient_clipping:
@param grad_clip_low:
@param grad_clip_high:
@param kwargs:
"""
        super().__init__(
            intrinsic_signal_provider_arch=intrinsic_signal_provider_arch, **kwargs
        )
        self._gradient_clipping = gradient_clipping
        self._gradient_norm_clipping = gradient_norm_clipping
        self._device = torch.device(
            device if torch.cuda.is_available() and device != "cpu" else "cpu"
        )
Example #21
def maskrcnn_evaluate(
        model: Module,
        data_loader: DataLoader,
        *,
        device=global_torch_device(),
        writer: Writer = None,
) -> CocoEvaluator:
    """

    Args:
      model:
      data_loader:
      device:
      writer:

    Returns:

    """
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    coco_evaluator = CocoEvaluator(
        get_coco_api_from_dataset(data_loader.dataset), get_iou_types(model))

    with torch.no_grad():
        with TorchEvalSession(model):

            for image, targets in tqdm.tqdm(data_loader):
                image = [img.to(device) for img in image]
                targets = [{k: v.to(device)
                            for k, v in t.items()} for t in targets]

                torch.cuda.synchronize(device)
                model_time = time.time()
                outputs = model(image)

                outputs = [{k: v.to(cpu_device)
                            for k, v in t.items()} for t in outputs]
                model_time = time.time() - model_time

                res = {
                    target["image_id"].item(): output
                    for target, output in zip(targets, outputs)
                }
                evaluator_time = time.time()
                coco_evaluator.update(res)
                evaluator_time = time.time() - evaluator_time
                if writer:
                    writer.scalar("model_time", model_time)
                    writer.scalar("evaluator_time", evaluator_time)

            coco_evaluator.synchronize_between_processes()
            coco_evaluator.accumulate()
            coco_evaluator.summarize()

    torch.set_num_threads(n_threads)

    return coco_evaluator
Example #22
def validate_model(model, valid_loader):
    with TorchDeviceSession(global_torch_device("cpu"), model):
        with torch.no_grad():
            with TorchCacheSession():
                with TorchEvalSession(model):
                    valid_masks = []
                    out_data = []
                    a = (256, 256)
                    tr = min(len(valid_loader.dataset) * 4, 2000)
                    probabilities = []
                    for sample_i, (data,
                                   target) in enumerate(tqdm(valid_loader)):
                        data = data.to(global_torch_device())
                        output, *_ = model(data)
                        for m, d, p in zip(
                                target.cpu().detach().numpy(),
                                data.cpu().detach().numpy(),
                                torch.sigmoid(output).cpu().detach().numpy(),
                        ):
                            out_data.append(cv2_resize(chw_to_hwc(d), a))
                            valid_masks.append(cv2_resize(m[0], a))
                            probabilities.append(cv2_resize(p[0], a))
                            sample_i += 1

                            if sample_i >= tr - 1:
                                break

                        if sample_i >= tr - 1:
                            break

                    min_a = min(3, len(out_data))
                    f, ax = pyplot.subplots(3, min_a, figsize=(24, 12))  # 3 rows (original/target/prediction) x min_a columns

                    # assert len(valid_masks)>2, f'{len(valid_masks), tr}'
                    for i in range(min_a):
                        ax[0, i].imshow(out_data[i], vmin=0, vmax=1)
                        ax[0, i].set_title("Original", fontsize=14)

                        ax[1, i].imshow(valid_masks[i], vmin=0, vmax=1)
                        ax[1, i].set_title("Target", fontsize=14)

                        ax[2, i].imshow(probabilities[i], vmin=0, vmax=1)
                        ax[2, i].set_title("Prediction", fontsize=14)

                    pyplot.show()
Example #23
    def __init__(self,
                 device: torch.device,
                 model: Module = None,
                 no_side_effect: bool = True):
        self._model = model
        self._no_side_effect = no_side_effect
        self._device = device
        if no_side_effect:
            self.prev_dev = global_torch_device()
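Paired with the `__exit__` from Example #6, this constructor completes the `TorchDeviceSession` context manager seen in Examples #2 and #22. A usage sketch (`model` and `batch` are placeholders):

# Run the model on the CPU for the duration of the block; with the default
# no_side_effect=True the previously active global device is restored on exit.
with TorchDeviceSession(device=global_torch_device("cpu"), model=model):
    prediction = model(batch.to(global_torch_device()))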
Example #24
def threshold_grid_search(model, valid_loader, max_samples=2000):
    """ Grid Search for best Threshold """

    valid_masks = []
    count = 0
    tr = min(valid_loader.dataset.__len__(), max_samples)
    probabilities = numpy.zeros((tr, *CloudSegmentationDataset.image_size_T),
                                dtype=numpy.float32)
    for data, targets in tqdm(valid_loader):
        data = data.to(global_torch_device(), dtype=torch.float)
        predictions, *_ = model(data)
        predictions = torch.sigmoid(predictions)
        predictions = predictions.cpu().detach().numpy()
        targets = targets.cpu().detach().numpy()
        for p in range(data.shape[0]):
            pred, target = predictions[p], targets[p]
            for mask_ in target:
                valid_masks.append(mask_)
            for probability in pred:
                probabilities[count, :, :] = probability
                count += 1
            if count >= tr - 1:
                break
        if count >= tr - 1:
            break

    class_params = {}

    for class_id in CloudSegmentationDataset.categories.keys():
        print(CloudSegmentationDataset.categories[class_id])
        attempts = []
        for t in range(0, 100, 5):
            t /= 100
            for ms in [0, 100, 1200, 5000, 10000, 30000]:
                masks, d = [], []
                for i in range(class_id, len(probabilities), 4):
                    probability_ = probabilities[i]
                    predict, num_predict = threshold_mask(probability_, t, ms)
                    masks.append(predict)
                for i, j in zip(masks, valid_masks[class_id::4]):
                    if (i.sum() == 0) & (j.sum() == 0):
                        d.append(1)
                    else:
                        d.append(intersection_over_union(i, j))
                attempts.append((t, ms, numpy.mean(d)))

        attempts_df = pandas.DataFrame(attempts,
                                       columns=["threshold", "size", "dice"])
        attempts_df = attempts_df.sort_values("dice", ascending=False)
        print(attempts_df.head())
        best_threshold = attempts_df["threshold"].values[0]
        best_size = attempts_df["size"].values[0]
        class_params[class_id] = (best_threshold, best_size)

    return class_params
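A hypothetical wiring of this grid search into the submission writer from Example #29 (the loader names are placeholders, not a documented API):

class_params = threshold_grid_search(model, valid_loader, max_samples=2000)
prepare_submission(model, class_params, test_loader, "submission.csv")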
Example #25
    def main_instanced(p=Path.home() / "Data" / "Datasets" / "PennFudanPed"):

        dataset = PennFudanDataset(
            p,
            SplitEnum.training,
            return_variant=PennFudanDataset.PennFudanReturnVariantEnum.instanced,
        )

        global_torch_device(override=global_torch_device("cpu"))

        idx = -2
        img, mask = dataset[idx]
        print(img)
        print(img.shape, mask.shape)
        pyplot.imshow(float_chw_to_hwc_uint_tensor(img))
        pyplot.show()
        for m in mask:
            pyplot.imshow(m.squeeze(0))
            pyplot.show()
Example #26
def stest_many_versus_many2(model: Module,
                            data_dir: Path,
                            img_size: Tuple[int, int],
                            threshold=0.5):
    """

:param model:
:type model:
:param data_dir:
:type data_dir:
:param img_size:
:type img_size:
:param threshold:
:type threshold:
"""
    dataiter = iter(
        DataLoader(
            PairDataset(
                data_dir,
                transform=transforms.Compose([
                    transforms.Grayscale(),
                    transforms.Resize(img_size),
                    transforms.ToTensor(),
                ]),
            ),
            num_workers=4,
            batch_size=1,
            shuffle=True,
        ))
    for i in range(10):
        x0, x1, is_diff = next(dataiter)
        distance = (model(
            to_tensor(x0, device=global_torch_device()),
            to_tensor(x1, device=global_torch_device()),
        ).cpu().item())
        boxed_text_overlay_plot(
            torchvision.utils.make_grid(torch.cat((x0, x1), 0)),
            f"Truth: {'Different' if is_diff.cpu().item() else 'Alike'},"
            f" Dissimilarity: {distance:.2f},"
            f" Verdict: {'Different' if distance > threshold else 'Alike'}",
        )
Example #27
def neodroid_env_classification_generator(env, batch_size=64) -> Tuple:
    """

    :param env:
    :param batch_size:
    """
    while True:
        predictors = []
        class_responses = []
        while len(predictors) < batch_size:
            state = env.update()
            rgb_arr = state.sensor("RGB").value
            rgb_arr = Image.open(rgb_arr).convert("RGB")
            a_class = state.sensor("Class").value

            predictors.append(default_torch_transform(rgb_arr))
            class_responses.append(int(a_class))

        a = torch.stack(predictors).to(global_torch_device())
        b = torch.LongTensor(class_responses).to(global_torch_device())
        yield a, b
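A usage sketch for the generator above (environment construction is omitted; `env` is a placeholder):

batch_generator = neodroid_env_classification_generator(env, batch_size=64)
predictors, class_responses = next(batch_generator)  # tensors on the global device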
Example #28
def discount_rollout_signal_torch(signal: torch.Tensor,
                                  discounting_factor: float,
                                  *,
                                  device=global_torch_device(),
                                  non_terminal=None,
                                  batch_normalised=False,
                                  epsilon=1e-3) -> Any:
    """

x = [r1, r2, r3, ..., rN]
returns [r1 + r2*gamma + r3*gamma^2 + ...,
     r2 + r3*gamma + r4*gamma^2 + ...,
       r3 + r4*gamma + r5*gamma^2 + ...,
          ..., ..., rN]


# See https://docs.scipy.org/doc/scipy/reference/tutorial/signal.html#difference-equation-filtering
# Here, we have y[t] - discount*y[t+1] = x[t]
# or rev(y)[t] - discount*rev(y)[t-1] = rev(x)[t]


C[i] = R[i] + discount * C[i+1]
signal.lfilter(b, a, x, axis=-1, zi=None)
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M]
              - a[1]*y[n-1] - ... - a[N]*y[n-N]

non_terminal if supplied lets you define a mask for masking terminal state signals.


@param signal:
@param discounting_factor:
@param device:
@param non_terminal:
@return:
"""

    discounted = torch.zeros_like(signal, device=device)
    R = torch.zeros(signal.shape[-1], device=device)
    NT = torch.ones_like(signal, device=device)
    if non_terminal is not None:
        NT = to_tensor(non_terminal, device=device)

    for i in reversed(range(signal.shape[0])):
        R = signal[i] + discounting_factor * R * NT[i]
        discounted[i] = R

    if batch_normalised:
        # WARNING! Sometimes causes NANs!
        discounted = (discounted - discounted.mean()) / (discounted.std() +
                                                         epsilon)

    return discounted
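As the docstring notes, for a 1-D signal with no terminal masking the same discounted sum can be computed with scipy's difference-equation filter. A minimal cross-check sketch (not part of the library):

import numpy
from scipy.signal import lfilter


def discount_rollout_signal_numpy(signal, discounting_factor):
    """C[i] = R[i] + discount * C[i+1] via y[n] = x[n] + discount * y[n-1] on the reversed signal."""
    reversed_signal = numpy.asarray(signal, dtype=numpy.float64)[::-1]
    return lfilter([1.0], [1.0, -discounting_factor], reversed_signal)[::-1]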
Example #29
def prepare_submission(model,
                       class_params,
                       test_loader,
                       submission_file_path="submission.csv"):
    """

    Args:
      model:
      class_params:
      test_loader:
      submission_file_path:
    """
    # encoded_pixels = []
    submission_i = 0
    number_of_pixels_saved = 0
    df: pandas.DataFrame = test_loader.dataset.data_frame

    with open(submission_file_path, mode="w") as f:
        f.write("Image_Label,EncodedPixels\n")
        for data, target, black_mask in tqdm(test_loader):
            data = data.to(global_torch_device(), dtype=torch.float)
            output, *_ = model(data)
            del data
            output = torch.sigmoid(output)
            output = output.cpu().detach().numpy()
            black_mask = black_mask.cpu().detach().numpy()
            a = df["Image_Label"]
            for category in output:
                for probability in category:
                    thr, min_size = (
                        class_params[submission_i % 4][0],
                        class_params[submission_i % 4][1],
                    )
                    predict, num_predict = threshold_mask(
                        probability, thr, min_size)
                    if num_predict == 0:
                        rle = ""
                        # encoded_pixels.append('')
                    else:
                        number_of_pixels_saved += numpy.sum(predict)
                        predict_masked = numpy.multiply(predict, black_mask)
                        number_of_pixels_saved -= numpy.sum(predict_masked)
                        rle = mask_to_run_length(predict_masked)
                        # encoded_pixels.append(rle)

                    f.write(f"{a[submission_i]},{rle}\n")
                    submission_i += 1

        # df['EncodedPixels'] = encoded_pixels
        # df.to_csv(submission_file_path, columns=['Image_Label', 'EncodedPixels'], index=False)

    print(f"Number of pixel saved {number_of_pixels_saved}")
Example #30
def maskrcnn_train_single_epoch(
    *,
    model: Module,
    optimiser: torch.optim.Optimizer,
    data_loader: DataLoader,
    device: torch.device = global_torch_device(),
    writer: Writer = None,
) -> None:
    """

    :param model:
    :param optimiser:
    :param data_loader:
    :param epoch_i:
    :param log_frequency:
    :param device:
    :param writer:
    :return:
    """
    model.to(device)
    with TorchTrainSession(model):

        for images, targets in tqdm.tqdm(data_loader, desc="Batch #"):
            images = [img.to(device) for img in images]
            targets = [{k: v.to(device)
                        for k, v in t.items()} for t in targets]

            # torch.cuda.synchronize(device)
            loss_dict = model(images, targets=targets)
            losses = sum(loss for loss in loss_dict.values())

            loss_dict_reduced = reduce_dict(
                loss_dict)  # reduce losses over all GPUs for logging purposes
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            loss_value = losses_reduced.item()

            if not math.isfinite(loss_value):
                print(f"Loss is {loss_value}, stopping training")
                print(loss_dict_reduced)
                sys.exit(1)

            optimiser.zero_grad()
            losses.backward()
            optimiser.step()

            if writer:
                for k, v in {
                        "loss": losses_reduced,
                        "lr": torch.optim.Optimizer.param_groups[0]["lr"],
                        **loss_dict_reduced,
                }.items():
                    writer.scalar(k, v)