Example no. 1
0
def main(args):
    """Compute per-class weights over the training label tiles and print them.

    Walks every label tile in the dataset's training split, accumulates
    per-class pixel counts, and derives class weights with the
    `w = 1 / ln(c + p)` scheme (see references below).

    Args:
        args: parsed CLI arguments; `args.dataset` is the dataset config path.

    Raises:
        ValueError: if the training split contains no label pixels.
    """
    dataset = load_config(args.dataset)

    path = dataset["common"]["dataset"]
    num_classes = len(dataset["common"]["classes"])

    # Labels are palette ("P") images; MaskToTensor keeps the raw class ids.
    train_transform = Compose([ConvertImageMode(mode="P"), MaskToTensor()])

    train_dataset = SlippyMapTiles(os.path.join(path, "training", "labels"), transform=train_transform)

    n = 0  # total number of pixels seen across all tiles
    counts = np.zeros(num_classes, dtype=np.int64)  # per-class pixel counts

    loader = DataLoader(train_dataset, batch_size=1)
    for images, tile in tqdm(loader, desc="Loading", unit="image", ascii=True):
        # batch_size=1: squeeze away the batch dimension to get a single mask.
        image = torch.squeeze(images)

        image = np.array(image, dtype=np.uint8)
        n += image.shape[0] * image.shape[1]
        counts += np.bincount(image.ravel(), minlength=num_classes)

    # Guard against an empty dataset: without this the division below
    # silently produces NaN weights instead of a clear error.
    if n == 0:
        raise ValueError("no label pixels found in training dataset; can not compute class weights")

    # Class weighting scheme `w = 1 / ln(c + p)` see:
    # - https://arxiv.org/abs/1707.03718
    #     LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation
    # - https://arxiv.org/abs/1606.02147
    #     ENet: A Deep Neural Network Architecture for Real-Time Semantic Segmentation

    probs = counts / n
    weights = 1 / np.log(1.02 + probs)

    weights.round(6, out=weights)
    print(weights.tolist())
Example no. 2
0
def get_dataset_loaders(model, dataset, workers):
    """Build the training and validation DataLoaders for segmentation.

    Args:
        model: model config; provides image size and batch size.
        dataset: dataset config; provides the dataset root path.
        workers: number of DataLoader worker processes.

    Returns:
        A `(train_loader, val_loader)` tuple.
    """
    size = (model["common"]["image_size"],) * 2
    batch = model["common"]["batch_size"]
    root = dataset["common"]["dataset"]

    # ImageNet normalization statistics.
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]

    # Joint transforms keep image and mask geometrically in sync; the mask
    # side uses NEAREST resizing and gets no normalization.
    transform = JointCompose(
        [
            JointTransform(ConvertImageMode("RGB"), ConvertImageMode("P")),
            JointTransform(Resize(size, Image.BILINEAR), Resize(size, Image.NEAREST)),
            JointTransform(CenterCrop(size), CenterCrop(size)),
            JointRandomHorizontalFlip(0.5),
            JointRandomRotation(0.5, 90),
            JointRandomRotation(0.5, 90),
            JointRandomRotation(0.5, 90),
            JointTransform(ImageToTensor(), MaskToTensor()),
            JointTransform(Normalize(mean=mean, std=std), None),
        ]
    )

    def tiles(split):
        # One images directory plus the matching labels directory for a split.
        return SlippyMapTilesConcatenation(
            [os.path.join(root, split, "images")], os.path.join(root, split, "labels"), transform
        )

    train_loader = DataLoader(tiles("training"), batch_size=batch, shuffle=True, drop_last=True, num_workers=workers)
    val_loader = DataLoader(tiles("validation"), batch_size=batch, shuffle=False, drop_last=True, num_workers=workers)

    return train_loader, val_loader
Example no. 3
0
    def test_len(self):
        """The dataset over the fixture tiles reports three samples."""
        dataset = SlippyMapTilesConcatenation(
            ["tests/fixtures/images/"],
            [ToTensor()],
            "tests/fixtures/labels/",
            MaskToTensor(),
        )

        self.assertEqual(len(dataset), 3)
Example no. 4
0
    def test_len(self):
        """The channel-based fixture dataset exposes all three tiles."""
        transform = JointCompose([JointTransform(ImageToTensor(), MaskToTensor())])

        dataset = SlippyMapTilesConcatenation(
            "tests/fixtures",
            [{"sub": "images", "bands": [1, 2, 3]}],
            "tests/fixtures/labels/",
            transform,
        )

        self.assertEqual(len(dataset), 3)
Example no. 5
0
    def test_getitem(self):
        """The first item yields tensors and the expected tile coordinate."""
        dataset = SlippyMapTilesConcatenation(
            ["tests/fixtures/images/"],
            [ToTensor()],
            "tests/fixtures/labels/",
            MaskToTensor(),
        )

        images, mask, tiles = dataset[0]

        # tiles holds one tile per input directory; check the first.
        self.assertEqual(tiles[0], mercantile.Tile(69105, 105093, 18))
        self.assertEqual(type(images), torch.Tensor)
        self.assertEqual(type(mask), torch.Tensor)
Example no. 6
0
    def test_getitem(self):
        """The first item of the channel-based dataset is a (images, mask, tile) triple."""
        transform = JointCompose([JointTransform(ImageToTensor(), MaskToTensor())])

        dataset = SlippyMapTilesConcatenation(
            "tests/fixtures",
            [{"sub": "images", "bands": [1, 2, 3]}],
            "tests/fixtures/labels/",
            transform,
        )

        images, mask, tiles = dataset[0]

        self.assertEqual(tiles, mercantile.Tile(69105, 105093, 18))
        self.assertEqual(type(images), torch.Tensor)
        self.assertEqual(type(mask), torch.Tensor)
Example no. 7
0
def get_dataset_loaders(model, dataset):
    """Create training and validation loaders from the dataset config.

    Args:
        model: model config; provides image size and batch size.
        dataset: dataset config; provides root path and normalization stats.

    Returns:
        A `(train_loader, val_loader)` tuple.
    """
    size = (model["common"]["image_size"],) * 2
    batch_size = model["common"]["batch_size"]
    root = dataset["common"]["dataset"]

    mean = dataset["stats"]["mean"]
    std = dataset["stats"]["std"]

    # Images are resized bilinearly; masks use NEAREST so class indices
    # are never interpolated into invalid values.
    image_transform = Compose([
        ConvertImageMode("RGB"),
        Resize(size, Image.BILINEAR),
        CenterCrop(size),
        ImageToTensor(),
        Normalize(mean=mean, std=std),
    ])

    target_transform = Compose([
        ConvertImageMode("P"),
        Resize(size, Image.NEAREST),
        CenterCrop(size),
        MaskToTensor(),
    ])

    def make_loader(split):
        # Pair the split's images directory with its labels directory.
        tiles = SlippyMapTilesConcatenation(
            [os.path.join(root, split, "images")],
            [image_transform],
            os.path.join(root, split, "labels"),
            target_transform,
        )
        return DataLoader(tiles, batch_size=batch_size, drop_last=True)

    return make_loader("training"), make_loader("validation")
Example no. 8
0
def get_dataset_loaders(path, config, workers):
    """Return `(train_loader, val_loader)` built from the on-disk dataset.

    Args:
        path: dataset root containing `training/` and `validation/` splits.
        config: configuration dict with `model` and `channels` sections.
        workers: number of DataLoader worker processes.
    """
    # ImageNet statistics, used to normalize the input bands.
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]

    transform = JointCompose([
        JointResize(config["model"]["image_size"]),
        JointRandomFlipOrRotate(config["model"]["data_augmentation"]),
        JointTransform(ImageToTensor(), MaskToTensor()),
        JointTransform(Normalize(mean=mean, std=std), None),
    ])

    def build(split, shuffle):
        tiles = SlippyMapTilesConcatenation(
            os.path.join(path, split),
            config["channels"],
            os.path.join(path, split, "labels"),
            joint_transform=transform,
        )
        return DataLoader(
            tiles,
            batch_size=config["model"]["batch_size"],
            shuffle=shuffle,
            drop_last=True,
            num_workers=workers,
        )

    # Only the training split is shuffled.
    return build("training", True), build("validation", False)
Example no. 9
0
def get_dataset_loaders(model, dataset):
    """Build subsampled training and validation loaders.

    Each split is drawn through a RandomSubsetSampler sized by the
    dataset config's `samples` section.

    Args:
        model: model config; provides image size and batch size.
        dataset: dataset config; provides paths, stats and sample counts.

    Returns:
        A `(train_loader, val_loader)` tuple.
    """
    size = (model['common']['image_size'], ) * 2
    batch_size = model['common']['batch_size']
    root = dataset['common']['dataset']

    mean, std = dataset['stats']['mean'], dataset['stats']['std']

    # Masks use NEAREST resizing so class indices stay valid.
    image_transform = Compose([
        ConvertImageMode('RGB'),
        Resize(size, Image.BILINEAR),
        CenterCrop(size),
        ImageToTensor(),
        Normalize(mean=mean, std=std)])

    target_transform = Compose([
        ConvertImageMode('P'),
        Resize(size, Image.NEAREST),
        CenterCrop(size),
        MaskToTensor()])

    def build(split, samples):
        tiles = SlippyMapTilesConcatenation(
            [os.path.join(root, split, 'images')],
            [image_transform],
            os.path.join(root, split, 'labels'),
            target_transform)
        sampler = RandomSubsetSampler(tiles, samples)
        return DataLoader(tiles, batch_size=batch_size, sampler=sampler, drop_last=True)

    train_loader = build('training', dataset['samples']['training'])
    val_loader = build('validation', dataset['samples']['validation'])

    return train_loader, val_loader