def train_boundaries(args):
    model = UNet2d(in_channels=1, out_channels=2, initial_features=64, final_activation="Sigmoid")
    patch_shape = (512, 512)

    # use the first 5 images for validation
    train_loader = get_covid_if_loader(
        args.input, patch_shape, sample_range=(5, None),
        download=True, boundaries=True, batch_size=args.batch_size
    )
    val_loader = get_covid_if_loader(
        args.input, patch_shape, sample_range=(0, 5),
        boundaries=True, batch_size=args.batch_size
    )
    loss = torch_em.loss.DiceLoss()

    trainer = torch_em.default_segmentation_trainer(
        name="covid-if-boundary-model", model=model,
        train_loader=train_loader, val_loader=val_loader,
        loss=loss, metric=loss, learning_rate=1e-4,
        device=torch.device("cuda"), mixed_precision=True,
        log_image_interval=50
    )
    trainer.fit(iterations=args.n_iterations)
def train_contrastive(args):
    model = get_model(args.embed_dim)
    patch_shape = [1, 736, 688]

    # can train with larger batch sizes for the scatter implementation
    batch_size = 4 if args.impl == "scatter" else 1
    train_loader = get_loader(split="train", patch_shape=patch_shape, batch_size=batch_size, n_samples=2500)
    val_loader = get_loader(split="val", patch_shape=patch_shape, batch_size=1, n_samples=100)

    loss = torch_em.loss.ContrastiveLoss(delta_var=0.75, delta_dist=2.0, impl=args.impl)
    name = f"embedding_model2d_{args.impl}_d{args.embed_dim}"

    trainer = torch_em.default_segmentation_trainer(
        name=name, model=model,
        train_loader=train_loader, val_loader=val_loader,
        loss=loss, metric=loss, learning_rate=5e-5,
        mixed_precision=True, log_image_interval=50
    )
    if args.from_checkpoint:
        trainer.fit(args.iterations, "latest")
    else:
        trainer.fit(args.iterations)
def train_affinities(args):
    model = get_model()
    train_loader = get_loader(args.input, True, n_samples=1000)
    val_loader = get_loader(args.input, False, n_samples=100)

    loss = torch_em.loss.LossWrapper(
        loss=torch_em.loss.DiceLoss(),
        transform=torch_em.loss.ApplyAndRemoveMask()
    )

    name = "affinity_model"
    trainer = torch_em.default_segmentation_trainer(
        name=name, model=model,
        train_loader=train_loader, val_loader=val_loader,
        loss=loss, metric=loss, learning_rate=1e-4,
        mixed_precision=True, log_image_interval=50,
        optimizer_kwargs={"weight_decay": 0.0005}
    )
    if args.from_checkpoint:
        trainer.fit(args.n_iterations, "latest")
    else:
        trainer.fit(args.n_iterations)
def train_affinities(args):
    model = get_model(len(OFFSETS))
    patch_shape = [1, 736, 688]
    batch_size = 4

    train_loader = get_loader(split="train", patch_shape=patch_shape, batch_size=batch_size, n_samples=2500)
    val_loader = get_loader(split="val", patch_shape=patch_shape, batch_size=1, n_samples=100)

    loss = torch_em.loss.LossWrapper(
        torch_em.loss.DiceLoss(),
        transform=torch_em.loss.ApplyAndRemoveMask()
    )

    name = "affinity_model2d"
    trainer = torch_em.default_segmentation_trainer(
        name=name, model=model,
        train_loader=train_loader, val_loader=val_loader,
        loss=loss, metric=loss, learning_rate=1e-4,
        mixed_precision=True, log_image_interval=50
    )
    if args.from_checkpoint:
        trainer.fit(args.n_iterations, "latest")
    else:
        trainer.fit(args.n_iterations)
def train_shallow2deep(args):
    name = "isbi2d"

    # check if we need to train the random forests (rfs) for preparation
    rf_folder = os.path.join("checkpoints", name, "rfs")
    have_rfs = len(glob(os.path.join(rf_folder, "*.pkl"))) == args.n_rfs
    if not have_rfs:
        prepare_shallow2deep_isbi(args, rf_folder)
    assert os.path.exists(rf_folder)

    model = UNet2d(in_channels=1, out_channels=1, final_activation="Sigmoid")
    train_loader = get_isbi_loader(args, "train", rf_folder)
    val_loader = get_isbi_loader(args, "val", rf_folder)

    dice_loss = torch_em.loss.DiceLoss()
    trainer = torch_em.default_segmentation_trainer(
        name, model, train_loader, val_loader,
        loss=dice_loss, metric=dice_loss,
        learning_rate=1.0e-4, device=args.device, log_image_interval=50
    )
    trainer.fit(args.n_iterations)
def train_affinities(args):
    model = get_model()
    patch_shape = [32, 320, 320]

    train_loader = get_loader(
        args.input, train=True, patch_shape=patch_shape, n_samples=1000
    )
    val_loader = get_loader(
        args.input, train=False, patch_shape=patch_shape, n_samples=50
    )
    loss = LossWrapper(loss=DiceLoss(), transform=ApplyAndRemoveMask())

    name = "affinity_model"
    trainer = torch_em.default_segmentation_trainer(
        name=name, model=model,
        train_loader=train_loader, val_loader=val_loader,
        loss=loss, metric=loss, learning_rate=1e-4,
        mixed_precision=True, log_image_interval=50
    )
    if args.from_checkpoint:
        trainer.fit(args.n_iterations, "latest")
    else:
        trainer.fit(args.n_iterations)
def train_boundaries(args):
    n_out = 2
    model = UNet2d(in_channels=1, out_channels=n_out, initial_features=64, final_activation="Sigmoid")
    patch_shape = (512, 512)

    train_loader = get_livecell_loader(
        args.input, patch_shape, "train",
        download=True, boundaries=True, batch_size=args.batch_size
    )
    val_loader = get_livecell_loader(
        args.input, patch_shape, "val",
        boundaries=True, batch_size=args.batch_size
    )
    loss = torch_em.loss.DiceLoss()

    trainer = torch_em.default_segmentation_trainer(
        name="livecell-boundary-model", model=model,
        train_loader=train_loader, val_loader=val_loader,
        loss=loss, metric=loss, learning_rate=1e-4,
        device=torch.device("cuda"), mixed_precision=True,
        log_image_interval=50
    )
    trainer.fit(iterations=args.n_iterations)
def train_boundaries(args): model = UNet2d(in_channels=1, out_channels=2, initial_features=64, final_activation="Sigmoid") patch_shape = (1, 256, 256) train_loader = get_dsb_loader(args.input, patch_shape, split="train", download=True, boundaries=True, batch_size=args.batch_size) val_loader = get_dsb_loader(args.input, patch_shape, split="test", boundaries=True, batch_size=args.batch_size) loss = torch_em.loss.DiceLoss() # the trainer object that handles the training details # the model checkpoints will be saved in "checkpoints/dsb-boundary-model" # the tensorboard logs will be saved in "logs/dsb-boundary-model" trainer = torch_em.default_segmentation_trainer( name="dsb-boundary-model", model=model, train_loader=train_loader, val_loader=val_loader, loss=loss, metric=loss, learning_rate=1e-4, device=torch.device('cuda'), mixed_precision=True, log_image_interval=50) trainer.fit(iterations=args.n_iterations)
def train_affinities(args):
    n_out = len(OFFSETS) + 1
    model = UNet2d(in_channels=3, out_channels=n_out, initial_features=64, final_activation="Sigmoid")
    patch_shape = (1, 512, 512)

    train_loader = get_monuseg_loader(
        args.input, patch_shape, roi=slice(0, 27),
        download=True, offsets=OFFSETS, batch_size=args.batch_size
    )
    val_loader = get_monuseg_loader(
        args.input, patch_shape, roi=slice(27, None),
        offsets=OFFSETS, batch_size=args.batch_size
    )
    loss = torch_em.loss.LossWrapper(
        torch_em.loss.DiceLoss(),
        transform=torch_em.loss.ApplyAndRemoveMask()
    )

    # the trainer object that handles the training details:
    # the model checkpoints will be saved in "checkpoints/monuseg-affinity-model"
    # the tensorboard logs will be saved in "logs/monuseg-affinity-model"
    trainer = torch_em.default_segmentation_trainer(
        name="monuseg-affinity-model", model=model,
        train_loader=train_loader, val_loader=val_loader,
        loss=loss, metric=loss, learning_rate=1e-4,
        device=torch.device("cuda"), mixed_precision=True,
        log_image_interval=50
    )
    trainer.fit(iterations=args.n_iterations)
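# The affinity training functions rely on a module-level OFFSETS list that defines the
# pixel offsets for which affinities are predicted (the extra "+ 1" output channel is a
# foreground channel). The exact offsets live in the respective scripts; a typical 2d
# multi-scale choice, given here only as an illustrative assumption, would be:
OFFSETS = [
    [-1, 0], [0, -1],    # direct neighbors
    [-3, 0], [0, -3],    # short-range offsets
    [-9, 0], [0, -9],    # mid-range offsets
    [-27, 0], [0, -27],  # long-range offsets
]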
def train_model():
    patch_shape = (512, 512)
    batch_size = 4
    loader = get_loader(patch_shape, batch_size)
    model = ResizeUNet(in_channels=1, out_channels=1, depth=3, initial_features=16)

    name = "diff-output-shape"
    trainer = torch_em.default_segmentation_trainer(name, model, loader, loader, logger=None)
    iterations = 5000
    trainer.fit(iterations)
def train_boundaries(input_path, n_iterations, device):
    model = get_model()
    batch_size = 1

    # shape of input patches (blocks) used for training
    patch_shape = [1, 512, 512]
    normalization = partial(
        torch_em.transform.raw.normalize, minval=0, maxval=255
    )

    roi_train = np.s_[:28, :, :]
    train_loader = get_isbi_loader(
        input_path, download=True, boundaries=True,
        patch_shape=patch_shape, rois=roi_train,
        batch_size=batch_size, raw_transform=normalization,
        num_workers=8*batch_size, shuffle=True
    )
    roi_val = np.s_[28:, :, :]
    val_loader = get_isbi_loader(
        input_path, boundaries=True,
        patch_shape=patch_shape, rois=roi_val,
        batch_size=batch_size, raw_transform=normalization,
        num_workers=8*batch_size, shuffle=True
    )
    loss = torch_em.loss.DiceLoss()

    name = "boundary-model"
    trainer = torch_em.default_segmentation_trainer(
        name=name, model=model,
        train_loader=train_loader, val_loader=val_loader,
        loss=loss, metric=loss, learning_rate=1e-4,
        mixed_precision=True, log_image_interval=50,
        device=device
    )
    trainer.fit(n_iterations)
def train_affinities(input_path, n_iterations, device, use_diagonal_offsets):
    model = get_model(use_diagonal_offsets)
    offsets = get_offsets(use_diagonal_offsets)

    # shape of input patches (blocks) used for training
    patch_shape = [1, 512, 512]
    batch_size = 1
    normalization = partial(torch_em.transform.raw.normalize, minval=0, maxval=255)

    roi_train = np.s_[:28, :, :]
    train_loader = get_isbi_loader(
        input_path, download=True, offsets=offsets,
        patch_shape=patch_shape, rois=roi_train,
        batch_size=batch_size, raw_transform=normalization,
        num_workers=8 * batch_size, shuffle=True
    )
    roi_val = np.s_[28:, :, :]
    val_loader = get_isbi_loader(
        input_path, download=False, offsets=offsets,
        patch_shape=patch_shape, rois=roi_val,
        batch_size=batch_size, raw_transform=normalization,
        num_workers=8 * batch_size, shuffle=True
    )
    loss = torch_em.loss.LossWrapper(
        torch_em.loss.DiceLoss(),
        transform=torch_em.loss.ApplyAndRemoveMask()
    )

    name = "affinity-model"
    if use_diagonal_offsets:
        name += "_diagonal_offsets"
    trainer = torch_em.default_segmentation_trainer(
        name=name, model=model,
        train_loader=train_loader, val_loader=val_loader,
        loss=loss, metric=loss, learning_rate=1e-4,
        mixed_precision=True, log_image_interval=50,
        device=device
    )
    trainer.fit(n_iterations)
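# `get_offsets` is defined elsewhere in the corresponding script. A plausible sketch of
# what it could look like, assuming the diagonal variant replaces two of the mid-range
# axis-aligned offsets with diagonal ones while keeping the number of channels fixed;
# this is an illustrative assumption, not the repository's actual definition:
def get_offsets(use_diagonal_offsets):
    if use_diagonal_offsets:
        return [[-1, 0], [0, -1], [-9, -9], [9, -9], [-9, 0], [0, -9], [-27, 0], [0, -27]]
    else:
        return [[-1, 0], [0, -1], [-3, 0], [0, -3], [-9, 0], [0, -9], [-27, 0], [0, -27]]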
def train_affinities(args, samples):
    large_model = bool(args.large_model)
    model = get_model(large_model)

    # patch shapes:
    if large_model:
        # largest possible shape for an A100 with mixed precision training and the large model
        patch_shape = [32, 256, 256]
    else:
        # largest possible shape for a 2080Ti with mixed precision training
        # patch_shape = [32, 320, 320]
        patch_shape = [32, 256, 256]

    splits = ["train", "val"] if args.train_on_val else ["train"]
    train_loader = get_loader(args.input, samples, splits, patch_shape,
                              batch_size=args.batch_size, n_samples=1000)
    splits = ["val"]
    val_loader = get_loader(args.input, samples, splits, patch_shape,
                            batch_size=args.batch_size, n_samples=100)

    loss = torch_em.loss.LossWrapper(
        loss=torch_em.loss.DiceLoss(),
        transform=torch_em.loss.ApplyAndRemoveMask())

    tag = "large" if large_model else "default"
    if args.train_on_val:
        tag += "_train_on_val"
    name = f"affinity_model_{tag}_{'_'.join(samples)}"
    trainer = torch_em.default_segmentation_trainer(
        name=name, model=model,
        train_loader=train_loader, val_loader=val_loader,
        loss=loss, metric=loss, learning_rate=1e-4,
        mixed_precision=True, log_image_interval=50)
    if args.from_checkpoint:
        trainer.fit(args.n_iterations, "latest")
    else:
        trainer.fit(args.n_iterations)
def train_embeddings(args, datasets):
    large_model = bool(args.large_model)
    model = get_model(large_model)

    # patch shapes:
    if large_model:
        # largest possible shape for an A100 with mixed precision training and the large model
        # patch_shape = [32, 320, 320]
        patch_shape = [32, 256, 256]
    else:
        # largest possible shape for a 2080Ti with mixed precision training
        patch_shape = [24, 192, 192]

    train_sets = [f"{ds}_train" for ds in datasets]
    val_sets = [f"{ds}_val" for ds in datasets]
    if args.train_on_val:
        train_sets += val_sets

    train_loader = get_loader(datasets=train_sets, patch_shape=patch_shape, n_samples=1000)
    val_loader = get_loader(datasets=val_sets, patch_shape=patch_shape, n_samples=100)
    loss = torch_em.loss.ContrastiveLoss(delta_var=0.75, delta_dist=2.0, impl="scatter")

    tag = "large" if large_model else "default"
    if args.train_on_val:
        tag += "_train_on_val"
    name = f"embedding_model_{tag}_{'_'.join(datasets)}"
    trainer = torch_em.default_segmentation_trainer(
        name=name, model=model,
        train_loader=train_loader, val_loader=val_loader,
        loss=loss, metric=loss, learning_rate=5e-5,
        mixed_precision=True, log_image_interval=50)
    if args.from_checkpoint:
        trainer.fit(args.iterations, "latest")
    else:
        trainer.fit(args.iterations)
def train_boundaries(args):
    model = UNet2d(in_channels=1, out_channels=8, initial_features=64)
    patch_shape = (1, 256, 256)

    train_loader = get_dsb_loader(
        args.input, patch_shape, split="train", download=True,
        batch_size=args.batch_size, label_dtype=torch.int64,
        label_transform=torch_em.transform.label_consecutive, num_workers=4,
    )
    val_loader = get_dsb_loader(
        args.input, patch_shape, split="test",
        batch_size=args.batch_size, label_dtype=torch.int64,
        label_transform=torch_em.transform.label_consecutive, num_workers=4,
    )

    delta_var = 0.75
    delta_dist = 2.0
    pmaps_threshold = 0.9
    aux_loss = "dice"
    loss = spoco.SPOCOLoss(delta_var, delta_dist, aux_loss=aux_loss)
    metric = spoco.SPOCOMetric(delta_dist, pmaps_threshold=pmaps_threshold)

    trainer = torch_em.default_segmentation_trainer(
        name="dsb-spoco-model", model=model,
        train_loader=train_loader, val_loader=val_loader,
        loss=loss, metric=metric, learning_rate=1e-4,
        device=args.device, mixed_precision=True,
        log_image_interval=50, trainer_class=SPOCOTrainer,
    )
    trainer.fit(iterations=args.n_iterations)
def train_model():
    patch_shape = (512, 512)
    batch_size = 4
    loader = get_covid_if_loader(DATA_FOLDER, patch_shape, batch_size=batch_size, download=True, binary=True)
    model = UNet2d(in_channels=1, out_channels=1, depth=3, initial_features=16)

    name = "diff-output-shape"
    trainer = torch_em.default_segmentation_trainer(name, model, loader, loader, logger=None)
    iterations = 5000
    trainer.fit(iterations)
def train_pseudo_label(args):
    name = "cremi2d-pseudo-label-isbi"
    model = UNet2d(in_channels=1, out_channels=1, final_activation="Sigmoid")

    train_loader = get_pseudolabel_loader(args, "train", "isbi2d")
    val_loader = get_pseudolabel_loader(args, "val", "isbi2d")

    dice_loss = torch_em.loss.DiceLoss()
    trainer = torch_em.default_segmentation_trainer(
        name, model, train_loader, val_loader,
        loss=dice_loss, metric=dice_loss,
        learning_rate=1.0e-4, device=args.device, log_image_interval=50
    )
    trainer.fit(args.n_iterations)
def train_affinities(args):
    large_model = bool(args.large_model)
    model = get_model(large_model)

    # patch shapes:
    if large_model:
        # largest possible shape for an A100 with mixed precision training and the large model
        # patch_shape = [32, 320, 320]
        patch_shape = [32, 256, 256]
    else:
        patch_shape = [32, 360, 360]

    samples, prefix = normalize_samples(args.samples)
    train_loader = get_loader(args.input, samples, is_train=True, patch_shape=patch_shape, n_samples=1000)
    val_loader = get_loader(args.input, samples, is_train=False, patch_shape=patch_shape, n_samples=100)
    loss = LossWrapper(loss=DiceLoss(), transform=ApplyAndRemoveMask())

    tag = "large" if large_model else "default"
    if prefix is not None:
        tag += f"_{prefix}"
    name = f"affinity_model_{tag}"
    trainer = torch_em.default_segmentation_trainer(
        name=name, model=model,
        train_loader=train_loader, val_loader=val_loader,
        loss=loss, metric=loss, learning_rate=1e-4,
        mixed_precision=True, log_image_interval=50)
    if args.from_checkpoint:
        trainer.fit(args.n_iterations, "latest")
    else:
        trainer.fit(args.n_iterations)
def train_boundaries(args):
    model = get_model()
    train_loader = get_loader(args.input, True, n_samples=1000)
    val_loader = get_loader(args.input, False, n_samples=100)

    name = "boundary_model"
    trainer = torch_em.default_segmentation_trainer(
        name=name, model=model,
        train_loader=train_loader, val_loader=val_loader,
        learning_rate=1e-4, mixed_precision=True,
        log_image_interval=50,
        optimizer_kwargs={"weight_decay": 0.0005})
    if args.from_checkpoint:
        trainer.fit(args.n_iterations, "latest")
    else:
        trainer.fit(args.n_iterations)
def train_affinities(args):
    model = get_model()
    patch_shape = [256, 256]

    train_loader = get_loader(args, "train", patch_shape=patch_shape)
    val_loader = get_loader(args, "val", patch_shape=patch_shape)
    loss = torch_em.loss.LossWrapper(
        torch_em.loss.DiceLoss(),
        transform=torch_em.loss.ApplyAndRemoveMask())

    name = "affinity-model"
    trainer = torch_em.default_segmentation_trainer(
        name=name, model=model,
        train_loader=train_loader, val_loader=val_loader,
        loss=loss, metric=loss, learning_rate=1e-4,
        mixed_precision=True, log_image_interval=50,
        device=args.device)
    trainer.fit(args.n_iterations)
def train_affinities(input_path, use_diagonal_offsets):
    model = get_model(use_diagonal_offsets)

    # shape of input patches (blocks) used for training
    patch_shape = [4, 384, 384]
    train_loader = get_loader(
        input_path, patch_shape=patch_shape,
        roi=np.s_[:26, :, :], use_diagonal_offsets=use_diagonal_offsets
    )
    val_loader = get_loader(
        input_path, patch_shape=patch_shape,
        roi=np.s_[26:, :, :], use_diagonal_offsets=use_diagonal_offsets
    )
    loss = torch_em.loss.LossWrapper(
        torch_em.loss.DiceLoss(),
        transform=torch_em.loss.ApplyAndRemoveMask()
    )

    name = "affinity-model-3d"
    if use_diagonal_offsets:
        name += "_diagonal_offsets"
    trainer = torch_em.default_segmentation_trainer(
        name=name, model=model,
        train_loader=train_loader, val_loader=val_loader,
        loss=loss, metric=loss, learning_rate=5e-5,
        mixed_precision=True, log_image_interval=50
    )
    trainer.fit(int(1e4))
def train_affinities():
    model = get_model()

    # shape of input patches (blocks) used for training
    patch_shape = [128, 128, 128]
    train_loader = get_loader("train", patch_shape=patch_shape, n_samples=500)
    val_loader = get_loader("val", patch_shape=patch_shape, n_samples=50)

    loss = torch_em.loss.LossWrapper(
        torch_em.loss.DiceLoss(),
        transform=torch_em.loss.ApplyAndRemoveMask())

    trainer = torch_em.default_segmentation_trainer(
        name="affinity-model", model=model,
        train_loader=train_loader, val_loader=val_loader,
        loss=loss, metric=loss, learning_rate=5e-5,
        mixed_precision=True, log_image_interval=50)
    trainer.fit(int(5e4))
def train_affinities(args):
    n_out = len(OFFSETS) + 1
    model = UNet2d(in_channels=1, out_channels=n_out, initial_features=64, final_activation="Sigmoid")
    patch_shape = (512, 512)

    # use the first 5 images for validation
    train_loader = get_covid_if_loader(
        args.input, patch_shape, sample_range=(5, None),
        download=True, offsets=OFFSETS, batch_size=args.batch_size
    )
    val_loader = get_covid_if_loader(
        args.input, patch_shape, sample_range=(0, 5),
        offsets=OFFSETS, batch_size=args.batch_size
    )
    loss = torch_em.loss.LossWrapper(
        torch_em.loss.DiceLoss(),
        transform=torch_em.loss.ApplyAndRemoveMask())

    trainer = torch_em.default_segmentation_trainer(
        name="covid-if-affinity-model", model=model,
        train_loader=train_loader, val_loader=val_loader,
        loss=loss, metric=loss, learning_rate=1e-4,
        device=torch.device("cuda"), mixed_precision=True,
        log_image_interval=50)
    trainer.fit(iterations=args.n_iterations)