Example #1
from SynthSeg.training import training
import numpy as np

# (the path and label variables used in the training() call below are assumed to be defined earlier, as in Example #2)

# generation parameters
target_res = 1  # resolution of the output segmentations
output_shape = 160  # size of the generated patches (tune this to your GPU memory)
data_res = np.array([6, 1, 1])  # acquisition resolution of the data we want to mimic
thickness = np.array([4, 1, 1])  # slice thickness of the data we want to mimic
downsample = True  # downsample to acquisition resolution before resampling to target resolution
blur_range = 1.5

# training parameters
wl2_epochs = 5
dice_epochs = 100
steps_per_epoch = 1000
include_background = True

training(labels_dir=path_label_map,
         model_dir=path_model_dir,
         path_generation_labels=generation_labels,
         path_segmentation_labels=segmentation_labels,
         target_res=target_res,
         output_shape=output_shape,
         path_generation_classes=generation_classes,
         data_res=data_res,
         thickness=thickness,
         downsample=downsample,
         blur_range=blur_range,
         wl2_epochs=wl2_epochs,
         dice_epochs=dice_epochs,
         steps_per_epoch=steps_per_epoch,
         include_background=include_background)
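
With data_res = [6, 1, 1] and thickness = [4, 1, 1], the generator mimics an anisotropic acquisition (6 mm slice spacing and 4 mm slice thickness along the first axis) while still resampling the synthetic images to the 1 mm isotropic target_res. Before launching such a run it can help to inspect the label arrays that the path_* arguments point to. Below is a minimal sketch assuming the file layout of Example #2; the generation_classes.npy filename is an assumption and may differ in your setup.

import numpy as np

# paths follow the layout of Example #2; generation_classes.npy is an assumed filename
generation_labels = np.load('../../data/labels_classes_priors/generation_labels.npy')
generation_classes = np.load('../../data/labels_classes_priors/generation_classes.npy')

print('generation labels:', generation_labels)
print('generation classes:', generation_classes)

# generation_classes is expected to assign one class index per generation label,
# so both arrays should have the same length
assert generation_classes.shape == generation_labels.shape
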
Example #2
from SynthSeg.training import training

# path to the directory containing the training label maps
path_training_label_maps = '../../data/training_label_maps'
# path of the directory where the models will be saved during training
path_model_dir = '../../models/SynthSeg_training'

# set path to generation labels
generation_labels = '../../data/labels_classes_priors/generation_labels.npy'
# set path to segmentation labels (i.e. the ROI to segment and to compute the loss on)
segmentation_labels = '../../data/labels_classes_priors/segmentation_labels.npy'

# generation parameters
target_res = 1  # resolution of the output segmentations
output_shape = 160  # size of the generated patches (tune this to your GPU memory)

# training parameters
wl2_epochs = 5
dice_epochs = 150
steps_per_epoch = 1000

training(labels_dir=path_training_label_maps,
         model_dir=path_model_dir,
         path_generation_labels=generation_labels,
         path_segmentation_labels=segmentation_labels,
         target_res=target_res,
         output_shape=output_shape,
         wl2_epochs=wl2_epochs,
         dice_epochs=dice_epochs,
         steps_per_epoch=steps_per_epoch)
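
training() first runs wl2_epochs of pre-training with a weighted L2 loss, then dice_epochs trained with the Dice loss, writing a Keras checkpoint into model_dir at each epoch; with steps_per_epoch = 1000 this amounts to (5 + 150) * 1000 = 155,000 iterations in total. The sketch below only assumes that the checkpoints end up as .h5 files inside path_model_dir, and shows one way to pick up the most recent one afterwards.

import glob
import os

path_model_dir = '../../models/SynthSeg_training'

# collect the Keras checkpoints written during training and keep the most recent one
checkpoints = sorted(glob.glob(os.path.join(path_model_dir, '*.h5')), key=os.path.getmtime)
if checkpoints:
    print('most recent checkpoint:', checkpoints[-1])
else:
    print('no checkpoints found yet in', path_model_dir)
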
Example #3
# (continuation of an earlier parser.add_argument call; its flag toggles return_gradients)
                    action='store_true',
                    dest="return_gradients")

# -------------------------------------------- UNet architecture parameters --------------------------------------------
parser.add_argument("--n_levels", type=int, dest="n_levels", default=5)
parser.add_argument("--conv_per_level",
                    type=int,
                    dest="nb_conv_per_level",
                    default=2)
parser.add_argument("--conv_size", type=int, dest="conv_size", default=3)
parser.add_argument("--unet_feat",
                    type=int,
                    dest="unet_feat_count",
                    default=24)
parser.add_argument("--feat_mult", type=int, dest="feat_multiplier", default=2)
parser.add_argument("--activation", type=str, dest="activation", default='elu')

# ------------------------------------------------- Training parameters ------------------------------------------------
parser.add_argument("--lr", type=float, dest="lr", default=1e-4)
parser.add_argument("--lr_decay", type=float, dest="lr_decay", default=0)
parser.add_argument("--wl2_epochs", type=int, dest="wl2_epochs", default=5)
parser.add_argument("--dice_epochs", type=int, dest="dice_epochs", default=300)
parser.add_argument("--steps_per_epoch",
                    type=int,
                    dest="steps_per_epoch",
                    default=1000)
parser.add_argument("--checkpoint", type=str, dest="checkpoint", default=None)

args = parser.parse_args()
training(**vars(args))
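
The last two lines are what make this pattern work: every dest in the parser matches a keyword argument of training(), so vars(args) turns the parsed namespace into a dict that unpacks straight into the call. Here is a self-contained sketch of the same idea with a toy function; run() and the explicit argv list are illustrative, not part of SynthSeg.

import argparse

# toy stand-in for training(); only the keyword names matter for the pattern
def run(lr=1e-4, dice_epochs=300, steps_per_epoch=1000):
    print('lr:', lr, 'dice_epochs:', dice_epochs, 'steps_per_epoch:', steps_per_epoch)

parser = argparse.ArgumentParser()
parser.add_argument("--lr", type=float, dest="lr", default=1e-4)
parser.add_argument("--dice_epochs", type=int, dest="dice_epochs", default=300)
parser.add_argument("--steps_per_epoch", type=int, dest="steps_per_epoch", default=1000)

# each dest matches a keyword of run(), so the namespace unpacks directly into the call
args = parser.parse_args(["--lr", "5e-5", "--dice_epochs", "150"])
run(**vars(args))  # equivalent to run(lr=5e-5, dice_epochs=150, steps_per_epoch=1000)
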
Example #4
training(path_training_label_maps,
         path_model_dir,
         generation_labels=path_generation_labels,
         segmentation_labels=path_segmentation_labels,
         n_neutral_labels=n_neutral_labels,
         batchsize=batchsize,
         n_channels=n_channels,
         target_res=target_res,
         output_shape=output_shape,
         prior_distributions=prior_distributions,
         generation_classes=path_generation_classes,
         flipping=flipping,
         scaling_bounds=scaling_bounds,
         rotation_bounds=rotation_bounds,
         shearing_bounds=shearing_bounds,
         translation_bounds=translation_bounds,
         nonlin_std=nonlin_std,
         randomise_res=randomise_res,
         blur_range=blur_range,
         bias_field_std=bias_field_std,
         n_levels=n_levels,
         nb_conv_per_level=nb_conv_per_level,
         conv_size=conv_size,
         unet_feat_count=unet_feat_count,
         feat_multiplier=feat_multiplier,
         activation=activation,
         lr=lr,
         lr_decay=lr_decay,
         wl2_epochs=wl2_epochs,
         dice_epochs=dice_epochs,
         steps_per_epoch=steps_per_epoch)
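
When the call grows to this many arguments, the **-unpacking trick from Example #3 can keep it manageable: collect the settings in a plain dict (or load them from a config file) and expand it into training(). A minimal sketch with a handful of values; the numbers below are placeholders taken from the earlier examples, not recommendations.

# placeholder values for illustration; see Examples #1 and #2 for typical settings
train_kwargs = dict(target_res=1,
                    output_shape=160,
                    wl2_epochs=5,
                    dice_epochs=150,
                    steps_per_epoch=1000)

training(path_training_label_maps,
         path_model_dir,
         **train_kwargs)
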