    def __init__(self, name, split):
        self.name = name
        self.split = split
        self.path = os.path.join(settings.checkpoints,
                                 name + '-split_{}'.format(split))
        # RefineNet decoder over an SE-ResNeXt-101 (32x4d) encoder, with SCSE
        # attention in the refine blocks.
        self.net = RefineNet(SCSENoPoolResNextBase(se_resnext101_32x4d()),
                             num_features=128,
                             classifier=lambda c: RefineNetUpsampleClassifier(
                                 c, scale_factor=2),
                             block=SCSERefineNetBlock)
        # Pad 101x101 inputs to 128x128 (13 + 101 + 14 = 128); the second
        # pipeline stacks flip TTA on top of the same padding.
        self.tta = [
            tta.Pipeline([tta.Pad((13, 14, 13, 14))]),
            tta.Pipeline([tta.Pad((13, 14, 13, 14)),
                          tta.Flip()])
        ]
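# Note: 128 is a size the encoder's power-of-two strides divide evenly, which
# is why every model below pads with the same (13, 14, 13, 14). A minimal
# inference sketch, assuming tta.Pipeline exposes the same
# transform_forward/transform_backward pair that tta.Pad is used with in the
# training loops further down:
import torch

def predict_tta(net, images, pipelines):
    # Run each pipeline (pad, optionally flip), undo the transform on the
    # logits, and average the resulting probability maps.
    net.eval()
    with torch.no_grad():
        probs = [
            torch.sigmoid(p.transform_backward(net(p.transform_forward(images))))
            for p in pipelines
        ]
    return torch.stack(probs, dim=0).mean(dim=0)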
    def __init__(self, name, split):
        self.name = name
        self.split = split
        self.path = os.path.join(settings.checkpoints, name + '-split_{}'.format(split))
        # Hypercolumn variant over an SE-ResNeXt-50 encoder: the classifier
        # receives concatenated features, hence 2*c input channels. Chained
        # residual pooling is disabled (identity) at the first decoder level.
        self.net = AuxDualHypercolumnCatRefineNet(
            SCSENoPoolResNextBase(se_resnext50_32x4d()),
            num_features=128,
            classifier=lambda c: SmallDropoutRefineNetUpsampleClassifier(2*c, scale_factor=2, dropout=0.1),
            block=SCSERefineNetBlock,
            crp=[IdentityCRP, CRP, CRP, CRP]
        )
        self.tta = [
            tta.Pipeline([tta.Pad((13, 14, 13, 14))]),
            tta.Pipeline([tta.Pad((13, 14, 13, 14)), tta.Flip()])
        ]
    def __init__(self, name, split):
        self.name = name
        self.split = split
        self.path = os.path.join(settings.checkpoints, name + '-split_{}'.format(split))
        # DPN-92 encoder; the per-stage widths are written as sums of the
        # network's two paths (see the note below).
        self.net = DualHypercolumnCatRefineNet(
            NoPoolDPN92Base(dpn92()),
            num_features=128,
            block_multiplier=1,
            num_features_base=[256 + 80, 512 + 192, 1024 + 528, 2048 + 640],
            classifier=lambda c: SmallDropoutRefineNetUpsampleClassifier(2 * c, scale_factor=2),
        )
        self.tta = [
            tta.Pipeline([tta.Pad((13, 14, 13, 14))]),
            tta.Pipeline([tta.Pad((13, 14, 13, 14)), tta.Flip()])
        ]
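# Note: the summed entries in num_features_base presumably spell out the two
# paths of a Dual Path Network stage (a residual path plus a densely
# connected one); the decoder only consumes the totals. Checking the
# arithmetic:
dpn92_stages = [(256, 80), (512, 192), (1024, 528), (2048, 640)]
print([r + d for r, d in dpn92_stages])  # [336, 704, 1552, 2688]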
Example 4
    def __init__(self, name, split):
        self.name = name
        self.split = split
        self.path = os.path.join(settings.checkpoints,
                                 name + '-split_{}'.format(split))
        # DPN-107 encoder; the last stage width is again a two-path sum.
        self.net = RefineNet(NoPoolDPN107Base(dpn107()),
                             num_features=128,
                             block_multiplier=1,
                             num_features_base=[376, 1152, 2432, 2048 + 640],
                             classifier=lambda c: RefineNetUpsampleClassifier(
                                 c, scale_factor=2))
        self.tta = [
            tta.Pipeline([tta.Pad((13, 14, 13, 14))]),
            tta.Pipeline([tta.Pad((13, 14, 13, 14)),
                          tta.Flip()])
        ]
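# Note: every model above shares one checkpoint naming scheme, the
# settings.checkpoints root joined with '<name>-split_<split>'. A sketch with
# hypothetical values:
import os

checkpoints = '/data/checkpoints'  # stand-in for settings.checkpoints
name, split = 'dpn107', 0          # hypothetical constructor arguments
print(os.path.join(checkpoints, name + '-split_{}'.format(split)))
# -> /data/checkpoints/dpn107-split_0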
Example 5
    def __init__(self, name, split):
        self.name = name
        self.split = split
        self.path = os.path.join(settings.checkpoints, name + '-split_{}'.format(split))
        self.net = DualHypercolumnCatRefineNet(
            SCSENoPoolResNextBase(se_resnet152()),
            num_features=128,
            classifier=lambda c: RefineNetUpsampleClassifier(2*c, scale_factor=2),
            block=SCSERefineNetBlock
        )
        self.tta = [
            tta.Pipeline([tta.Pad((13, 14, 13, 14))]),
            tta.Pipeline([tta.Pad((13, 14, 13, 14)), tta.Flip()])
        ]

        # Ensemble predictions for the test set, loaded as pseudo-label
        # targets for semi-supervised training.
        self.test_predictions = utils.TestPredictions('ensemble-{}'.format(split)).load()
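# Note: the 2*c classifier input matches the DualHypercolumnCat naming: two
# c-channel feature maps are concatenated along the channel axis before
# classification. The doubling itself is plain torch.cat; exactly which two
# maps are joined is an assumption read off the class name.
import torch

decoder = torch.randn(1, 128, 128, 128)      # decoder output (N, C, H, W)
hypercolumn = torch.randn(1, 128, 128, 128)  # fused multi-level features
print(torch.cat([decoder, hypercolumn], dim=1).shape)  # (1, 256, 128, 128)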
    def __init__(self, name, split):
        self.name = name
        self.split = split
        self.path = os.path.join(settings.checkpoints, name + '-split_{}'.format(split))
        self.net = DualHypercolumnCatRefineNet(
            NoPoolDPN107Base(dpn107()),
            num_features=128,
            block_multiplier=1,
            num_features_base=[376, 1152, 2432, 2048 + 640],
            classifier=lambda c: SmallDropoutRefineNetUpsampleClassifier(2 * c, scale_factor=2),
        )
        self.tta = [
            tta.Pipeline([tta.Pad((13, 14, 13, 14))]),
            tta.Pipeline([tta.Pad((13, 14, 13, 14)), tta.Flip()])
        ]

        # Post-processed predictions of a top-6 ensemble, used as the
        # pseudo-label targets in the train method below.
        self.test_predictions = utils.TestPredictions('ensemble_top_6_postprocessed-split_{}'.format(split)).load()
Example 7
    def __init__(self, name, split):
        self.name = name
        self.split = split
        self.path = os.path.join(settings.checkpoints,
                                 name + '-split_{}'.format(split))
        self.net = RefineNet(NoPoolDPN98Base(dpn98()),
                             num_features=128,
                             block_multiplier=1,
                             num_features_base=[336, 768, 1728, 2688],
                             classifier=lambda c: RefineNetUpsampleClassifier(
                                 c, scale_factor=2))
        self.optimizer = Adam(self.net.parameters(),
                              lr=1e-4,
                              weight_decay=1e-4)
        self.tta = [
            tta.Pipeline([tta.Pad((13, 14, 13, 14))]),
            tta.Pipeline([tta.Pad((13, 14, 13, 14)),
                          tta.Flip()])
        ]

        self.batch_size = 16
    def train(self, net, samples, optimizer, e):
        # Loss-term weights anneal linearly: alpha runs 2 -> 0 over the first
        # 50 epochs, so the pair (alpha, 2 - alpha) goes (2, 0) -> (0, 2).
        alpha = 2 * max(0, ((50 - e) / 50))
        criterion = losses.ELULovaszFocalWithLogitsLoss(alpha, 2 - alpha)

        transforms = generator.TransformationsGenerator([
            random.RandomFlipLr(),
            # Affine jitter on the native 101x101 grid: +/-20 px translation
            # and scale jitter in [0.85, 1.15] on one axis.
            random.RandomAffine(
                image_size=101,
                translation=lambda rs: (rs.randint(-20, 20), rs.randint(-20, 20)),
                scale=lambda rs: (rs.uniform(0.85, 1.15), 1),
                **utils.transformations_options
            )
        ])

        dataset = datasets.ImageDataset(samples, settings.train, transforms)
        dataloader = DataLoader(
            dataset,
            num_workers=10,
            batch_size=16,
            shuffle=True
        )

        average_meter_train = meters.AverageMeter()

        with tqdm(total=len(dataloader), leave=False, ascii=True) as pbar, torch.enable_grad():
            net.train()

            # Same 101 -> 128 padding as the TTA pipelines; transform_backward
            # crops the logits back to 101x101 before the loss.
            padding = tta.Pad((13, 14, 13, 14))

            for images, masks_targets in dataloader:
                masks_targets = masks_targets.to(gpu)
                masks_predictions = padding.transform_backward(net(padding.transform_forward(images))).contiguous()

                loss = criterion(masks_predictions, masks_targets)
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

                average_meter_train.add('loss', loss.item())
                self.update_pbar(
                    torch.sigmoid(masks_predictions),
                    masks_targets,
                    pbar,
                    average_meter_train,
                    'Training epoch {}'.format(e)
                )

        train_stats = {'train_' + k: v for k, v in average_meter_train.get_all().items()}
        return train_stats
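# Note: the two arguments to ELULovaszFocalWithLogitsLoss always sum to 2 and
# alpha reaches 0 at epoch 50, so all of the weight shifts from the first
# argument to the second over the first 50 epochs. The ramp, as plain
# arithmetic lifted from the method above:
def loss_weights(e, horizon=50):
    # e=0 -> (2.0, 0.0); e=25 -> (1.0, 1.0); e >= 50 -> (0, 2)
    alpha = 2 * max(0, (horizon - e) / horizon)
    return alpha, 2 - alpha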
    def train(self, net, samples, optimizer, e):
        alpha = 2 * max(0, ((50 - e) / 50))
        criterion = losses.ELULovaszFocalWithLogitsLoss(alpha, 2 - alpha)

        transforms = generator.TransformationsGenerator([
            random.RandomFlipLr(),
            random.RandomAffine(image_size=101,
                                translation=lambda rs:
                                (rs.randint(-20, 20), rs.randint(-20, 20)),
                                scale=lambda rs: (rs.uniform(0.85, 1.15), 1),
                                **utils.transformations_options)
        ])

        samples_aux = list(
            set(samples).intersection(set(utils.get_aux_samples())))
        dataset_aux = datasets.ImageDataset(samples_aux, settings.train,
                                            transforms)

        # `samples_test` is presumably defined at module scope in the original
        # script: the list of test-set sample ids being pseudo-labelled.
        dataset_pseudo = datasets.SemiSupervisedImageDataset(
            samples_test,
            settings.test,
            transforms,
            size=len(samples_test),
            test_predictions=self.test_predictions,
            momentum=0.0)

        dataset = datasets.ImageDataset(samples, settings.train, transforms)
        # weight_train pins the training set's total draw mass at twice the
        # pseudo-labelled set's; aux samples get half the per-sample weight
        # of real training samples, pseudo samples weight 1.
        weight_train = len(dataset_pseudo) / len(dataset) * 2
        weight_aux = weight_train / 2
        weights = ([weight_train] * len(dataset)
                   + [weight_aux] * len(dataset_aux)
                   + [1] * len(dataset_pseudo))
        dataloader = DataLoader(
            ConcatDataset([dataset, dataset_aux, dataset_pseudo]),
            num_workers=10,
            batch_size=16,
            sampler=WeightedRandomSampler(weights=weights, num_samples=3200))

        average_meter_train = meters.AverageMeter()

        with tqdm(total=len(dataloader), leave=False,
                  ascii=True) as pbar, torch.enable_grad():
            net.train()

            padding = tta.Pad((13, 14, 13, 14))

            for images, masks_targets in dataloader:
                masks_targets = masks_targets.to(gpu)
                masks_predictions = padding.transform_backward(
                    net(padding.transform_forward(images))).contiguous()

                loss = criterion(masks_predictions, masks_targets)
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

                average_meter_train.add('loss', loss.item())
                self.update_pbar(torch.sigmoid(masks_predictions),
                                 masks_targets, pbar, average_meter_train,
                                 'Training epoch {}'.format(e))

        train_stats = {
            'train_' + k: v
            for k, v in average_meter_train.get_all().items()
        }
        return train_stats
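# Note: with the weighting above, len(dataset) * weight_train equals
# 2 * len(dataset_pseudo) by construction, so real training images carry
# exactly twice the pseudo-labelled set's sampling mass whatever the dataset
# sizes. A quick check with hypothetical sizes (WeightedRandomSampler draws
# with replacement by default, so these are expected fractions of the 3200
# draws per epoch):
def expected_fractions(n_train, n_aux, n_pseudo):
    # Per-subset total mass divided by the grand total.
    w_train = n_pseudo / n_train * 2
    masses = (n_train * w_train, n_aux * w_train / 2, n_pseudo * 1.0)
    total = sum(masses)
    return tuple(round(m / total, 3) for m in masses)

print(expected_fractions(3200, 600, 1800))  # -> (0.627, 0.059, 0.314)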