Example #1
    def segment(self, image):
        # don't track tensors with autograd during prediction
        with torch.no_grad():
            mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]

            transform = Compose(
                [ImageToTensor(),
                 Normalize(mean=mean, std=std)])
            image = transform(image)

            batch = image.unsqueeze(0).to(self.device)

            output = self.net(batch)

            output = output.cpu().data.numpy()
            output = output.squeeze(0)

            mask = output.argmax(axis=0).astype(np.uint8)

            mask = Image.fromarray(mask, mode="P")

            palette = make_palette(*self.config["common"]["colors"])
            mask.putpalette(palette)

            return mask
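A minimal usage sketch for the method above; the `Predictor` wrapper and the file names are hypothetical, only `segment` itself comes from the example. Any object exposing `self.net`, `self.device`, and `self.config` alongside this method would behave the same way.

from PIL import Image

# hypothetical wrapper exposing .net, .device, .config and segment()
predictor = Predictor(checkpoint="checkpoint.pth", config="model.toml")

image = Image.open("tile.png")
mask = predictor.segment(image)  # palettized PIL image in mode "P"
mask.save("mask.png")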
Example #2
    def segment(self, image):
        # don't track tensors with autograd during prediction
        with torch.no_grad():
            mean, std = self.dataset['stats']['mean'], self.dataset['stats']['std']

            transform = Compose([
                ConvertImageMode(mode='RGB'),
                ImageToTensor(),
                Normalize(mean=mean, std=std)
            ])
            image = transform(image)

            batch = image.unsqueeze(0).to(self.device)

            output = self.net(batch)

            output = output.cpu().data.numpy()
            output = output.squeeze(0)

            mask = output.argmax(axis=0).astype(np.uint8)

            mask = Image.fromarray(mask, mode='P')

            palette = make_palette(*self.dataset['common']['colors'])
            mask.putpalette(palette)

            return mask
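Compared to Example #1, this version pulls `mean`/`std` from the dataset config and adds a `ConvertImageMode(mode='RGB')` step, so grayscale or palettized tiles are expanded to three channels before normalization. A plausible sketch of such a transform (an assumption about its behavior; robosat ships its own implementation):

class ConvertImageMode:
    """Converts a PIL image to a target mode, e.g. 'RGB' or 'P'."""

    def __init__(self, mode):
        self.mode = mode

    def __call__(self, image):
        # PIL handles palette expansion and channel conversion
        return image.convert(self.mode)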
Example #3
def get_dataset_loaders(model, dataset, workers):
    target_size = (model["common"]["image_size"],) * 2
    batch_size = model["common"]["batch_size"]
    path = dataset["common"]["dataset"]

    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]

    transform = JointCompose(
        [
            JointTransform(ConvertImageMode("RGB"), ConvertImageMode("P")),
            JointTransform(Resize(target_size, Image.BILINEAR), Resize(target_size, Image.NEAREST)),
            JointTransform(CenterCrop(target_size), CenterCrop(target_size)),
            JointRandomHorizontalFlip(0.5),
            JointRandomRotation(0.5, 90),
            JointRandomRotation(0.5, 90),
            JointRandomRotation(0.5, 90),
            JointTransform(ImageToTensor(), MaskToTensor()),
            JointTransform(Normalize(mean=mean, std=std), None),
        ]
    )

    train_dataset = SlippyMapTilesConcatenation(
        [os.path.join(path, "training", "images")], os.path.join(path, "training", "labels"), transform
    )

    val_dataset = SlippyMapTilesConcatenation(
        [os.path.join(path, "validation", "images")], os.path.join(path, "validation", "labels"), transform
    )

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=workers)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, drop_last=True, num_workers=workers)

    return train_loader, val_loader
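The `Joint*` wrappers exist because geometric augmentations (flips, rotations) must hit the image and its mask identically, while photometric steps such as `Normalize` apply to the image only, hence the `None` in the last `JointTransform`. A sketch of the assumed semantics:

class JointTransform:
    """Applies one transform to the image and another to the mask."""

    def __init__(self, image_transform, mask_transform):
        self.image_transform = image_transform
        self.mask_transform = mask_transform

    def __call__(self, image, mask):
        if self.image_transform is not None:
            image = self.image_transform(image)
        if self.mask_transform is not None:
            mask = self.mask_transform(mask)
        return image, mask


class JointCompose:
    """Chains joint transforms, threading (image, mask) pairs through."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, mask):
        for transform in self.transforms:
            image, mask = transform(image, mask)
        return image, mask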
Example #4
    def test_len(self):
        path = "tests/fixtures"
        target = "tests/fixtures/labels/"
        channels = [{"sub": "images", "bands": [1, 2, 3]}]

        transform = JointCompose(
            [JointTransform(ImageToTensor(), MaskToTensor())])
        dataset = SlippyMapTilesConcatenation(path, channels, target,
                                              transform)

        self.assertEqual(len(dataset), 3)
Example #5
    def test_getitem(self):
        path = "tests/fixtures"
        target = "tests/fixtures/labels/"
        channels = [{"sub": "images", "bands": [1, 2, 3]}]

        transform = JointCompose(
            [JointTransform(ImageToTensor(), MaskToTensor())])
        dataset = SlippyMapTilesConcatenation(path, channels, target,
                                              transform)

        images, mask, tiles = dataset[0]
        self.assertEqual(tiles, mercantile.Tile(69105, 105093, 18))
        self.assertEqual(type(images), torch.Tensor)
        self.assertEqual(type(mask), torch.Tensor)
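Both tests assume a slippy-map layout under `tests/fixtures`, where each tile is stored as `{zoom}/{x}/{y}.png`. Given the asserted tile `mercantile.Tile(69105, 105093, 18)` and `len(dataset) == 3`, the fixtures presumably look like the tree below (the names of the two sibling tiles are not shown in the tests and are left elided):

tests/fixtures/
├── images/
│   └── 18/69105/105093.png   (plus two more tiles)
└── labels/
    └── 18/69105/105093.png   (matching masks)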
Example #6
def get_dataset_loaders(model, dataset):
    target_size = (model["common"]["image_size"], ) * 2
    batch_size = model["common"]["batch_size"]
    path = dataset["common"]["dataset"]

    mean, std = dataset["stats"]["mean"], dataset["stats"]["std"]

    image_transform = Compose([
        ConvertImageMode("RGB"),
        Resize(target_size, Image.BILINEAR),
        CenterCrop(target_size),
        ImageToTensor(),
        Normalize(mean=mean, std=std),
    ])

    target_transform = Compose([
        ConvertImageMode("P"),
        Resize(target_size, Image.NEAREST),
        CenterCrop(target_size),
        MaskToTensor()
    ])

    train_dataset = SlippyMapTilesConcatenation(
        [os.path.join(path, "training", "images")],
        [image_transform],
        os.path.join(path, "training", "labels"),
        target_transform,
    )

    val_dataset = SlippyMapTilesConcatenation(
        [os.path.join(path, "validation", "images")],
        [image_transform],
        os.path.join(path, "validation", "labels"),
        target_transform,
    )

    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              drop_last=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, drop_last=True)

    return train_loader, val_loader
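Note the split into `image_transform` and `target_transform` instead of the joint pipeline of Example #3: with no random geometric augmentation there is nothing to synchronize between image and mask. `ImageToTensor` and `MaskToTensor` also differ in one important way: image tensors become floats that are then normalized, while masks must stay integer class indices for the loss. A sketch of the assumed behavior:

import numpy as np
import torch


class MaskToTensor:
    """Converts a PIL mask of class indices into a long tensor."""

    def __call__(self, mask):
        # keep integer class ids; do not scale to [0, 1] like image tensors
        return torch.from_numpy(np.array(mask, dtype=np.uint8)).long()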
Example #7
def main(args):
    dataset = load_config(args.dataset)
    path = dataset["common"]["dataset"]

    train_transform = Compose([ConvertImageMode(mode="RGB"), ImageToTensor()])

    train_dataset = SlippyMapTiles(os.path.join(path, "training", "images"),
                                   transform=train_transform)

    n = 0
    mean = np.zeros(3, dtype=np.float64)

    loader = DataLoader(train_dataset, batch_size=1)
    for images, tile in tqdm(loader, desc="Loading", unit="image", ascii=True):
        image = torch.squeeze(images)
        assert image.size(0) == 3, "channel first"

        image = np.array(image, dtype=np.float64)
        n += image.shape[1] * image.shape[2]

        mean += np.sum(image, axis=(1, 2))

    mean /= n
    mean.round(decimals=6, out=mean)
    print("mean: {}".format(mean.tolist()))

    std = np.zeros(3, dtype=np.float64)

    loader = DataLoader(train_dataset, batch_size=1)
    for images, tile in tqdm(loader, desc="Loading", unit="image", ascii=True):
        image = torch.squeeze(images)
        assert image.size(0) == 3, "channel first"

        image = np.array(image, dtype=np.float64)
        difference = np.transpose(image, (1, 2, 0)) - mean
        std += np.sum(np.square(difference), axis=(0, 1))

    std = np.sqrt(std / (n - 1))
    std.round(decimals=6, out=std)
    print("std: {}".format(std.tolist()))
Example #8
def get_dataset_loaders(path, config, workers):

    # Values computed on ImageNet DataSet
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]

    transform = JointCompose([
        JointResize(config["model"]["image_size"]),
        JointRandomFlipOrRotate(config["model"]["data_augmentation"]),
        JointTransform(ImageToTensor(), MaskToTensor()),
        JointTransform(Normalize(mean=mean, std=std), None),
    ])

    train_dataset = SlippyMapTilesConcatenation(
        os.path.join(path, "training"),
        config["channels"],
        os.path.join(path, "training", "labels"),
        joint_transform=transform,
    )

    val_dataset = SlippyMapTilesConcatenation(
        os.path.join(path, "validation"),
        config["channels"],
        os.path.join(path, "validation", "labels"),
        joint_transform=transform,
    )

    batch_size = config["model"]["batch_size"]
    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              drop_last=True,
                              num_workers=workers)
    val_loader = DataLoader(val_dataset,
                            batch_size=batch_size,
                            shuffle=False,
                            drop_last=True,
                            num_workers=workers)

    return train_loader, val_loader
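`JointRandomFlipOrRotate` presumably draws one random geometric operation per sample and applies it to image and mask together; the essential point is a single random draw shared by both inputs. A minimal sketch of that pattern, shown for a horizontal flip (the body is an assumption):

import random

from PIL import Image


class JointRandomHorizontalFlip:
    """Flips image and mask together with probability p."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, image, mask):
        # one coin flip decides for both inputs, keeping them aligned
        if random.random() < self.p:
            return image.transpose(Image.FLIP_LEFT_RIGHT), mask.transpose(Image.FLIP_LEFT_RIGHT)
        return image, mask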
Example #9
def get_dataset_loaders(model, dataset):
    target_size = (model['common']['image_size'],) * 2
    batch_size = model['common']['batch_size']
    path = dataset['common']['dataset']

    mean, std = dataset['stats']['mean'], dataset['stats']['std']

    image_transform = Compose([
        ConvertImageMode('RGB'),
        Resize(target_size, Image.BILINEAR),
        CenterCrop(target_size),
        ImageToTensor(),
        Normalize(mean=mean, std=std)])

    target_transform = Compose([
        ConvertImageMode('P'),
        Resize(target_size, Image.NEAREST),
        CenterCrop(target_size),
        MaskToTensor()])

    train_dataset = SlippyMapTilesConcatenation(
        [os.path.join(path, 'training', 'images')],
        [image_transform],
        os.path.join(path, 'training', 'labels'),
        target_transform)

    val_dataset = SlippyMapTilesConcatenation(
        [os.path.join(path, 'validation', 'images')],
        [image_transform],
        os.path.join(path, 'validation', 'labels'),
        target_transform)

    train_sampler = RandomSubsetSampler(train_dataset, dataset['samples']['training'])
    val_sampler = RandomSubsetSampler(val_dataset, dataset['samples']['validation'])

    train_loader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler, drop_last=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, sampler=val_sampler, drop_last=True)

    return train_loader, val_loader
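`RandomSubsetSampler` caps how many tiles each epoch visits, which keeps epochs short on very large tile sets. The class is project-specific; a plausible sketch on top of `torch.utils.data.Sampler` (the body is an assumption):

import torch
from torch.utils.data import Sampler


class RandomSubsetSampler(Sampler):
    """Yields a fresh random subset of dataset indices every epoch."""

    def __init__(self, dataset, num_samples):
        self.total = len(dataset)
        self.num_samples = min(num_samples, self.total)

    def __iter__(self):
        # new permutation each epoch, truncated to the subset size
        return iter(torch.randperm(self.total)[:self.num_samples].tolist())

    def __len__(self):
        return self.num_samples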
Example #10
def main(args):
    model = load_config(args.model)
    dataset = load_config(args.dataset)

    cuda = model["common"]["cuda"]

    device = torch.device("cuda" if cuda else "cpu")

    def map_location(storage, _):
        return storage.cuda() if cuda else storage.cpu()

    if cuda and not torch.cuda.is_available():
        sys.exit("Error: CUDA requested but not available")

    num_classes = len(dataset["common"]["classes"])

    # https://github.com/pytorch/pytorch/issues/7178
    chkpt = torch.load(args.checkpoint, map_location=map_location)

    net = UNet(num_classes).to(device)
    net = nn.DataParallel(net)

    if cuda:
        torch.backends.cudnn.benchmark = True

    net.load_state_dict(chkpt["state_dict"])
    net.eval()

    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]

    transform = Compose([
        ConvertImageMode(mode="RGB"),
        ImageToTensor(),
        Normalize(mean=mean, std=std)
    ])

    directory = BufferedSlippyMapDirectory(args.tiles,
                                           transform=transform,
                                           size=args.tile_size,
                                           overlap=args.overlap)
    loader = DataLoader(directory,
                        batch_size=args.batch_size,
                        num_workers=args.workers)

    # don't track tensors with autograd during prediction
    with torch.no_grad():
        for images, tiles in tqdm(loader,
                                  desc="Eval",
                                  unit="batch",
                                  ascii=True):
            images = images.to(device)
            outputs = net(images)

            # manually compute segmentation mask class probabilities per pixel
            probs = nn.functional.softmax(outputs, dim=1).data.cpu().numpy()

            for tile, prob in zip(tiles, probs):
                x, y, z = list(map(int, tile))

                # we predicted on buffered tiles; now get back probs for original image
                prob = directory.unbuffer(prob)

                # Quantize the floating point probabilities in [0,1] to [0,255] and store
                # a single-channel `.png` file with a continuous color palette attached.

                assert prob.shape[0] == 2, "single channel requires binary model"
                assert np.allclose(np.sum(prob, axis=0), 1.0), "single channel requires probabilities to sum up to one"
                foreground = prob[1:, :, :]

                anchors = np.linspace(0, 1, 256)
                quantized = np.digitize(foreground, anchors).astype(np.uint8)

                palette = continuous_palette_for_color("pink", 256)

                out = Image.fromarray(quantized.squeeze(), mode="P")
                out.putpalette(palette)

                os.makedirs(os.path.join(args.probs, str(z), str(x)),
                            exist_ok=True)
                path = os.path.join(args.probs, str(z), str(x),
                                    str(y) + ".png")

                out.save(path, optimize=True)
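The quantization step relies on `np.digitize(x, bins)`, which returns the index of the bin each value falls into, so with 256 evenly spaced anchors every probability in [0, 1) lands on an integer index from 1 through 255. A small worked check (illustrative values only):

import numpy as np

anchors = np.linspace(0, 1, 256)           # 0, 1/255, 2/255, ..., 1
probs = np.array([0.0, 0.004, 0.5, 0.999])
print(np.digitize(probs, anchors))         # -> [  1   2 128 255]

# caveat: a value of exactly 1.0 falls past the last anchor and digitize
# returns 256, which wraps to 0 after the uint8 cast; softmax outputs stay
# strictly below 1.0, so the code above is safe in practice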
Example #11
def main(args):
    config = load_config(args.config)
    num_classes = len(config["classes"]["titles"])

    if torch.cuda.is_available():
        device = torch.device("cuda")
        torch.backends.cudnn.benchmark = True
    else:
        device = torch.device("cpu")

    def map_location(storage, _):
        return storage.cuda() if torch.cuda.is_available() else storage.cpu()

    # https://github.com/pytorch/pytorch/issues/7178
    chkpt = torch.load(args.checkpoint, map_location=map_location)

    net = UNet(num_classes).to(device)
    net = nn.DataParallel(net)

    net.load_state_dict(chkpt["state_dict"])
    net.eval()

    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]

    transform = Compose([ImageToTensor(), Normalize(mean=mean, std=std)])

    directory = BufferedSlippyMapDirectory(args.tiles,
                                           transform=transform,
                                           size=args.tile_size,
                                           overlap=args.overlap)
    loader = DataLoader(directory,
                        batch_size=args.batch_size,
                        num_workers=args.workers)

    if args.masks_output:
        palette = make_palette(config["classes"]["colors"][0],
                               config["classes"]["colors"][1])
    else:
        palette = continuous_palette_for_color("pink", 256)

    # don't track tensors with autograd during prediction
    with torch.no_grad():
        for images, tiles in tqdm(loader,
                                  desc="Eval",
                                  unit="batch",
                                  ascii=True):
            images = images.to(device)
            outputs = net(images)

            # manually compute segmentation mask class probabilities per pixel
            probs = nn.functional.softmax(outputs, dim=1).data.cpu().numpy()

            for tile, prob in zip(tiles, probs):
                x, y, z = list(map(int, tile))

                # we predicted on buffered tiles; now get back probs for original image
                prob = directory.unbuffer(prob)

                assert prob.shape[0] == 2, "single channel requires binary model"
                assert np.allclose(np.sum(prob, axis=0), 1.0), "single channel requires probabilities to sum up to one"

                if args.masks_output:
                    image = np.around(prob[1:, :, :]).astype(np.uint8).squeeze()
                else:
                    image = (prob[1:, :, :] * 255).astype(np.uint8).squeeze()

                out = Image.fromarray(image, mode="P")
                out.putpalette(palette)

                os.makedirs(os.path.join(args.probs, str(z), str(x)),
                            exist_ok=True)
                path = os.path.join(args.probs, str(z), str(x),
                                    str(y) + ".png")

                out.save(path, optimize=True)

    if args.web_ui:
        template = "leaflet.html" if not args.web_ui_template else args.web_ui_template
        tiles = [tile for tile, _ in tiles_from_slippy_map(args.tiles)]
        web_ui(args.probs, args.web_ui, tiles, tiles, "png", template)