def train(self, net, samples, optimizer, e):
    """Run one supervised training epoch with PAM/CAM auxiliary heads.

    The focal/Lovasz mixing weight is annealed linearly from 2 down to 0
    over the first 50 epochs. Returns a dict of averaged epoch metrics
    keyed with a 'train_' prefix.
    """
    # Linear annealing: focal weight starts at 2 and reaches 0 at epoch 50.
    alpha = 2 * max(0, ((50 - e) / 50))
    criterion = losses.ELULovaszFocalWithLogitsLoss(alpha, 2 - alpha)

    # Geometric augmentations (flip/affine), then pad 101 -> 128.
    affine = random.RandomAffine(
        image_size=101,
        translation=lambda rs: (rs.randint(-10, 10), rs.randint(-10, 10)),
        scale=lambda rs: (rs.uniform(0.85, 1.15), 1),
        rotation=lambda rs: rs.randint(-5, 5),
        **utils.transformations_options
    )
    transforms = generator.TransformationsGenerator([
        random.RandomFlipLr(),
        affine,
        transformations.Padding(((13, 14), (13, 14), (0, 0)))
    ])
    # Photometric augmentation applied to the image only (not the mask).
    transforms_image = generator.TransformationsGenerator([
        random.RandomColorPerturbation(std=1)
    ])

    dataset = datasets.ImageDataset(
        samples, settings.train, transforms, transforms_image=transforms_image)
    dataloader = DataLoader(
        dataset,
        num_workers=10,
        batch_size=16,
        shuffle=True
    )

    meter = meters.AverageMeter()
    description = 'Training epoch {}'.format(e)

    with tqdm(total=len(dataloader), leave=False) as pbar, torch.enable_grad():
        net.train()
        for images, masks_targets in dataloader:
            masks_targets = masks_targets.to(gpu)
            # NOTE(review): images are not moved to `gpu` here — presumably
            # the network (e.g. DataParallel) handles placement; confirm.
            masks_predictions, aux_pam, aux_cam = net(images)

            # Auxiliary heads emit lower-resolution logits; upsample them to
            # the 128x128 padded target size before scoring.
            loss_pam = criterion(
                F.interpolate(aux_pam, size=128, mode='bilinear'), masks_targets)
            loss_cam = criterion(
                F.interpolate(aux_cam, size=128, mode='bilinear'), masks_targets)
            loss_segmentation = criterion(masks_predictions, masks_targets)
            loss = loss_segmentation + loss_pam + loss_cam

            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            meter.add('loss', loss.item())
            self.update_pbar(
                torch.sigmoid(masks_predictions),
                masks_targets,
                pbar,
                meter,
                description
            )

    return {'train_' + k: v for k, v in meter.get_all().items()}
def train(self, net, samples, optimizer, e):
    """Run one semi-supervised training epoch.

    Mixes the labelled training set with a pseudo-labelled test set
    (labels taken from `self.test_predictions`) through a weighted
    sampler. The focal/Lovasz weight anneals linearly from 2 to 0 over
    the first 100 epochs. Returns averaged metrics keyed 'train_*'.
    """
    # Linear annealing of the focal term over the first 100 epochs.
    alpha = 2 * max(0, ((100 - e) / 100))
    criterion = losses.ELULovaszFocalWithLogitsLoss(alpha, 2 - alpha)

    # Shared geometric augmentations; pad 101 -> 128 at the end.
    transforms = generator.TransformationsGenerator([
        random.RandomFlipLr(),
        random.RandomAffine(
            image_size=101,
            translation=lambda rs: (rs.randint(-20, 20), rs.randint(-20, 20)),
            scale=lambda rs: (rs.uniform(0.85, 1.15), 1),
            **utils.transformations_options
        ),
        transformations.Padding(((13, 14), (13, 14), (0, 0)))
    ])

    # Pseudo-labels come from the current test predictions; momentum=0.0
    # means they are taken as-is rather than exponentially averaged.
    pseudo_dataset = datasets.SemiSupervisedImageDataset(
        samples_test, settings.test, transforms,
        size=len(samples_test),
        test_predictions=self.test_predictions,
        momentum=0.0)
    dataset = datasets.ImageDataset(samples, settings.train, transforms)

    # Weight labelled samples so they are drawn roughly twice as often as
    # pseudo-labelled ones, independently of the two set sizes. Order of
    # the weight segments matches ConcatDataset([dataset, pseudo_dataset]).
    labelled_weight = len(pseudo_dataset) / len(dataset) * 2
    weights = [labelled_weight] * len(dataset) + [1] * len(pseudo_dataset)

    dataloader = DataLoader(
        ConcatDataset([dataset, pseudo_dataset]),
        num_workers=10,
        batch_size=16,
        sampler=WeightedRandomSampler(weights=weights, num_samples=3200))

    meter = meters.AverageMeter()
    description = 'Training epoch {}'.format(e)

    with tqdm(total=len(dataloader), leave=False) as pbar, torch.enable_grad():
        net.train()
        for images, masks_targets in dataloader:
            masks_targets = masks_targets.to(gpu)
            # NOTE(review): images stay on CPU here — presumably the net
            # (e.g. DataParallel) moves them; confirm against the model.
            masks_predictions = net(images)

            loss = criterion(masks_predictions, masks_targets)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            meter.add('loss', loss.item())
            self.update_pbar(
                torch.sigmoid(masks_predictions),
                masks_targets,
                pbar,
                meter,
                description
            )

    return {'train_' + k: v for k, v in meter.get_all().items()}
def train(self, net, samples, optimizer, e):
    """Run one training epoch over four mixed data sources.

    Sources: the labelled training set, auxiliary samples, mosaic crops,
    and a pseudo-labelled test set, combined via ConcatDataset and drawn
    through a WeightedRandomSampler. The focal/Lovasz weight anneals
    linearly from 2 to 0 over the first 50 epochs. Returns averaged
    epoch metrics keyed with a 'train_' prefix.
    """
    # Linear annealing: focal weight goes from 2 at epoch 0 to 0 at epoch 50.
    alpha = 2 * max(0, ((50 - e) / 50))
    criterion = losses.ELULovaszFocalWithLogitsLoss(alpha, 2 - alpha)

    # Shared geometric augmentations; pad 101 -> 128 at the end.
    transforms = generator.TransformationsGenerator([
        random.RandomFlipLr(),
        random.RandomAffine(
            image_size=101,
            translation=lambda rs: (rs.randint(-20, 20), rs.randint(-20, 20)),
            scale=lambda rs: (rs.uniform(0.85, 1.15), 1),
            **utils.transformations_options
        ),
        transformations.Padding(((13, 14), (13, 14), (0, 0)))
    ])

    # Auxiliary set: only aux samples that also belong to this split.
    samples_aux = list(set(samples).intersection(set(utils.get_aux_samples())))
    dataset_aux = datasets.ImageDataset(samples_aux, settings.train, transforms)

    # Mosaic set: keep a stitched mosaic only when both of its source
    # images are in this split, to avoid leaking validation data.
    transforms_mosaic = generator.TransformationsGenerator([
        random.RandomCrop(128)
    ])
    pairs_mosaic = utils.get_mosaic_pairs()
    samples_mosaic = utils.get_mosaic_samples()
    samples_mosaic = [sample for sample, pair in zip(samples_mosaic, pairs_mosaic)
                      if pair[0] in samples and pair[1] in samples]
    dataset_mosaic = datasets.ImageDataset(samples_mosaic, './data/mosaic_pairs', transforms_mosaic)

    # Pseudo-labelled test set built from the current test predictions;
    # momentum=0.0 takes the predictions as-is (no exponential averaging).
    dataset_pseudo = datasets.SemiSupervisedImageDataset(
        samples_test, settings.test, transforms,
        size=len(samples_test),
        test_predictions=self.test_predictions,
        momentum=0.0)

    dataset = datasets.ImageDataset(samples, settings.train, transforms)

    weight_train = len(dataset_pseudo) / len(dataset) * 2
    weight_mosaic = weight_train
    weight_aux = weight_train / 2
    # BUGFIX: WeightedRandomSampler weights index positionally into the
    # concatenated dataset, so the segments must follow the ConcatDataset
    # order (dataset, dataset_aux, dataset_mosaic, dataset_pseudo).
    # Previously the aux and mosaic segments were swapped, so whenever the
    # two sets differed in size both received the wrong weights.
    weights = ([weight_train] * len(dataset)
               + [weight_aux] * len(dataset_aux)
               + [weight_mosaic] * len(dataset_mosaic)
               + [1] * len(dataset_pseudo))

    dataloader = DataLoader(
        ConcatDataset([dataset, dataset_aux, dataset_mosaic, dataset_pseudo]),
        num_workers=10,
        batch_size=16,
        sampler=WeightedRandomSampler(weights=weights, num_samples=3200)
    )

    average_meter_train = meters.AverageMeter()

    with tqdm(total=len(dataloader), leave=False, ascii=True) as pbar, torch.enable_grad():
        net.train()
        # Undo the 101 -> 128 padding so the loss is computed only on the
        # original image region.
        padding = tta.Pad((13, 14, 13, 14))
        for images, masks_targets in dataloader:
            masks_targets = masks_targets.to(gpu)
            masks_targets = padding.transform_backward(masks_targets).contiguous()
            # NOTE(review): images are not moved to `gpu` here — presumably
            # the network handles device placement; confirm.
            masks_predictions = padding.transform_backward(net(images)).contiguous()

            loss = criterion(masks_predictions, masks_targets)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            average_meter_train.add('loss', loss.item())
            self.update_pbar(
                torch.sigmoid(masks_predictions),
                masks_targets,
                pbar,
                average_meter_train,
                'Training epoch {}'.format(e)
            )

    return {'train_' + k: v for k, v in average_meter_train.get_all().items()}