Example #1
def main(dataset_path, workers):
    transform = T.Compose([
        ApplyTo(
            ['image'],
            T.Compose([
                SplitInSites(),
                T.Lambda(
                    lambda xs: torch.stack([ToTensor()(x) for x in xs], 0)),
            ])),
        Extract(['image']),
    ])

    train_data = pd.read_csv(os.path.join(dataset_path, 'train.csv'))
    train_data['root'] = os.path.join(dataset_path, 'train')
    test_data = pd.read_csv(os.path.join(dataset_path, 'test.csv'))
    test_data['root'] = os.path.join(dataset_path, 'test')
    data = pd.concat([train_data, test_data])

    stats = {}
    for (exp, plate), group in tqdm(data.groupby(['experiment', 'plate'])):
        dataset = TestDataset(group, transform=transform)
        data_loader = torch.utils.data.DataLoader(dataset,
                                                  batch_size=32,
                                                  num_workers=workers)

        with torch.no_grad():
            # Extract(['image']) yields 1-tuples, hence the trailing comma in
            # the unpacking below.
            images = [images for images, in data_loader]
            images = torch.cat(images, 0)
            # images is a 5-D tensor; reducing over every dim except dim 2
            # gives per-channel mean/std for this (experiment, plate) group.
            mean = images.mean((0, 1, 3, 4))
            std = images.std((0, 1, 3, 4))
            stats[(exp, plate)] = mean, std

            del images, mean, std
            gc.collect()

    torch.save(stats, 'plate_stats.pth')
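
ApplyTo and Extract are project-specific helpers that these snippets use but never define. A minimal sketch of what they might look like, assuming samples are dicts keyed by field name (hypothetical implementations, not the project's actual code):

class ApplyTo:
    # Apply a transform only to the selected keys of a dict sample.
    def __init__(self, keys, transform):
        self.keys = keys
        self.transform = transform

    def __call__(self, sample):
        sample = dict(sample)
        for key in self.keys:
            sample[key] = self.transform(sample[key])
        return sample


class Extract:
    # Turn a dict sample into a tuple of the requested fields, in order.
    def __init__(self, keys):
        self.keys = keys

    def __call__(self, sample):
        return tuple(sample[key] for key in self.keys)
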
Example #2
    SplitInSites(),
    T.Lambda(lambda xs: torch.stack([to_tensor(x) for x in xs], 0)),
])
train_transform = T.Compose([
    ApplyTo(['image'],
            T.Compose([
                RandomSite(),
                Resize(config.resize_size),
                random_crop,
                RandomFlip(),
                RandomTranspose(),
                to_tensor,
                ChannelReweight(config.aug.channel_reweight),
            ])),
    normalize,
    Extract(['image', 'exp', 'label', 'id']),
])
eval_transform = T.Compose([
    ApplyTo(['image'], infer_image_transform),
    normalize,
    Extract(['image', 'exp', 'label', 'id']),
])
unsup_transform = T.Compose([
    ApplyTo(['image'],
            T.Compose([
                Resize(config.resize_size),
                random_crop,
                RandomFlip(),
                RandomTranspose(),
                SplitInSites(),
                T.Lambda(lambda xs: torch.stack([
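
SplitInSites and RandomSite are also project helpers. One plausible reading, assuming the 'image' field carries a sequence of per-site images (e.g. two imaging sites per well) and the surrounding resize/crop/flip transforms operate on that container (hypothetical sketches, not the project's code):

import random

class SplitInSites:
    # Return the per-site images as a list, so the following T.Lambda can
    # stack them into one tensor of shape [num_sites, C, H, W].
    def __call__(self, images):
        return list(images)


class RandomSite:
    # Keep a single randomly chosen site, used by the training pipelines.
    def __call__(self, images):
        return random.choice(images)
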
Example #3
train_transform = T.Compose([
    ApplyTo(
        ['image'],
        T.Compose([
            RandomSite(),
            Resize(config.resize_size),
            random_crop,
            RandomFlip(),
            RandomTranspose(),
            RandomRotation(180),  # FIXME:
            ToTensor(),
            ChannelReweight(config.aug.channel_weight),
        ])),
    # NormalizeByRefStats(),
    Extract(['image', 'feat', 'label', 'id']),
])
eval_transform = T.Compose([
    ApplyTo(
        ['image'],
        T.Compose([
            RandomSite(),  # FIXME:
            Resize(config.resize_size),
            center_crop,
            ToTensor(),
        ])),
    # NormalizeByRefStats(),
    Extract(['image', 'feat', 'label', 'id']),
])
test_transform = T.Compose([
    ApplyTo(
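
ChannelReweight is another undocumented augmentation in these pipelines. A hedged sketch of one way such a channel-wise reweighting could work, assuming a [C, H, W] float tensor and a single strength parameter (an assumption, not the project's implementation):

import torch

class ChannelReweight:
    # Scale each channel by an independent random factor near 1; `strength`
    # controls how far the factors may deviate from 1.
    def __init__(self, strength):
        self.strength = strength

    def __call__(self, image):
        c = image.size(0)
        weights = 1. + (torch.rand(c) * 2. - 1.) * self.strength
        return image * weights.view(c, 1, 1)
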
Example #4
else:
    raise AssertionError('invalid normalization {}'.format(config.normalize))

train_transform = T.Compose([
    ApplyTo(['image', 'ref'],
            T.Compose([
                RandomSite(),
                Resize(config.resize_size),
                random_crop,
                RandomFlip(),
                RandomTranspose(),
                to_tensor,
                ChannelReweight(config.aug.channel_weight),
            ])),
    normalize,
    Extract(['image', 'ref', 'feat', 'exp', 'label', 'id']),
])
eval_transform = T.Compose([
    ApplyTo(
        ['image', 'ref'],
        T.Compose([
            RandomSite(),  # FIXME:
            Resize(config.resize_size),
            center_crop,
            to_tensor,
        ])),
    normalize,
    Extract(['image', 'ref', 'feat', 'exp', 'label', 'id']),
])
test_transform = T.Compose([
    ApplyTo(
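
The `normalize` step used in several of these pipelines is constructed elsewhere (the `else` branch above rejects unknown values of config.normalize). One way the per-plate statistics saved in Example #1 could back such a step, assuming dict samples with 'image', 'exp' and 'plate' fields (hypothetical sketch):

import torch

class NormalizeByPlateStats:
    # Normalize each image with the per-(experiment, plate) channel statistics
    # computed in Example #1 and saved to plate_stats.pth.
    def __init__(self, stats_path='plate_stats.pth'):
        self.stats = torch.load(stats_path)  # {(exp, plate): (mean, std)}

    def __call__(self, sample):
        sample = dict(sample)
        mean, std = self.stats[(sample['exp'], sample['plate'])]
        sample['image'] = (sample['image'] - mean.view(-1, 1, 1)) / std.view(-1, 1, 1)
        return sample
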
Example #5
eval_image_transform = T.Compose([
    RandomSite(),
    Resize(config.resize_size),
    center_crop,
    to_tensor,
])
test_image_transform = T.Compose([
    Resize(config.resize_size),
    center_crop,
    SplitInSites(),
    T.Lambda(lambda xs: torch.stack([to_tensor(x) for x in xs], 0)),
])
test_transform = T.Compose([
    ApplyTo(['image'], infer_image_transform),
    normalize,
    Extract(['image', 'feat', 'exp', 'id']),
])


def update_transforms(p):
    if not config.progressive_resize:
        p = 1.

    assert 0. <= p <= 1.

    crop_size = round(224 + (config.crop_size - 224) * p)
    print('update transforms p: {:.2f}, crop_size: {}'.format(p, crop_size))
    random_crop.reset(crop_size)
    center_crop.reset(crop_size)
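
random_crop and center_crop must be stateful objects, since update_transforms resizes them in place via reset(). A minimal sketch of such a resettable crop, wrapping the standard torchvision crops (hypothetical; the initial size of 512 is only illustrative):

import torchvision.transforms as T

class ResettableCrop:
    # Wrap a torchvision crop so its size can be changed mid-training.
    def __init__(self, crop_cls, size):
        self.crop_cls = crop_cls
        self.transform = crop_cls(size)

    def reset(self, size):
        self.transform = self.crop_cls(size)

    def __call__(self, image):
        return self.transform(image)


random_crop = ResettableCrop(T.RandomCrop, 512)
center_crop = ResettableCrop(T.CenterCrop, 512)
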

Example #6
    T.Lambda(lambda xs: torch.stack([to_tensor(x) for x in xs], 0)),
])

train_transform = T.Compose([
    ApplyTo(['image'],
            T.Compose([
                RandomSite(),
                Resize(config.resize_size),
                random_crop,
                RandomFlip(),
                RandomTranspose(),
                to_tensor,
                ChannelReweight(config.aug.channel_weight),
            ])),
    normalize,
    Extract(['image', 'feat', 'exp', 'plate', 'label', 'id']),
])
eval_transform = T.Compose([
    ApplyTo(['image'], infer_image_transform),
    normalize,
    Extract(['image', 'feat', 'exp', 'plate', 'label', 'id']),
])
test_transform = T.Compose([
    ApplyTo(['image'], infer_image_transform),
    normalize,
    Extract(['image', 'feat', 'exp', 'plate', 'id']),
])


def update_transforms(p):
    if not config.progressive_resize:
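
update_transforms takes a progress value p in [0, 1] and ramps the crop size from 224 up to config.crop_size. A hedged sketch of how a training loop might drive that schedule (the loop is not part of these snippets; the epoch count is illustrative):

epochs = 30  # illustrative

for epoch in range(epochs):
    # progressive resize: small crops early, full config.crop_size at the end
    update_transforms((epoch + 1) / epochs)
    # ... train one epoch with train_transform, evaluate with eval_transform ...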