def __init__(self):
    """Experiment config: 2D U-Net + Transformer on the TCIA pancreas dataset (run 109)."""
    # ID and Name
    self.id = 109
    self.experiment_name = "pancreas_2D_utrans_{}".format(self.id)
    self.debug = False
    # System
    self.checkpointsBasePath = "./checkpoints/"
    self.checkpointsBasePathMod = self.checkpointsBasePath + self.experiment_name + '/'
    self.datapath = "/local/DEEPLEARNING/TCIA/"
    self.split = 0
    # GPU — the env var must be set before any CUDA context is created
    self.gpu = '0'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu
    # Model
    self.channels = [64, 128, 256, 512, 1024]
    self.channels = [int(x) for x in self.channels]  # no-op cast here; mirrors configs that rescale channels
    self.net = UNetTransformer(filters=self.channels, n_classes=2, in_channels=1,
                               n_heads=1, dim='2d', bn=True, up_mode='deconv')
    self.n_parameters = count_parameters(self.net)
    print("N PARAMS : {}".format(self.n_parameters))
    self.model_path = './checkpoints/models/pancreas2D_utrans_kaiming_deconv.pth'
    self.load_model()  # presumably loads weights from self.model_path — set the path first
    self.n_classes = 2
    # self.transform = tf.Compose([
    #     tf.RandomAffine(degrees = 10,
    #                     scale = (0.9,1.1),
    #                     translate = (0.1, 0.1)),
    #     tf.ToTensor(),
    # ])
    self.transform = True
    # Training
    self.start_epoch = 0
    self.epoch = 25
    self.loss = torch.nn.CrossEntropyLoss()
    self.batchsize = 3
    self.lr_rate = 1e-4
    self.final_lr_rate = 1e-5
    self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr_rate)
    self.optimizer.zero_grad()
    self.validate_every_k_epochs = 1
    # self.lr_scheduler = get_scheduler(self.optimizer, "constant", self.lr_rate)
    # self.lr_scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, 0.99)
    # decay factor taking lr from lr_rate down to final_lr_rate over `epoch` epochs
    self.decay = (self.lr_rate / self.final_lr_rate - 1) / self.epoch
    # NOTE(review): scheduler key "po" is also used in the unetr config while others use
    # "poly" — confirm get_scheduler treats "po" as a distinct (decay-based) mode.
    self.lr_scheduler = get_scheduler(self.optimizer, "po", self.lr_rate, self.decay)
    # Other
    self.classes_name = ['background', 'pancreas']
def __init__(self):
    """Experiment config: quarter-width 3D U-Net for BraTS tumour segmentation (run 1)."""
    # ID and Name
    self.experiment_name = "unet_3D"
    self.id = 1
    # System
    self.checkpointsBasePath = "./models/checkpoints"
    self.labelpath = "/local/SSD_DEEPLEARNING/brats/processed/data_3D_size_240_240_155_res_1.0_1.0_1.0.hdf5"
    self.datapath = "/local/SSD_DEEPLEARNING/brats/processed/data_3D_size_160_192_155_res_1.0_1.0_1.0.hdf5"
    # GPU — must be set before CUDA initialisation
    self.gpu = '1'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu
    # Model
    self.channels = [64, 128, 256, 512, 1024]
    self.channels = [int(x / 4) for x in self.channels]  # quarter-width network
    self.net = unet_3D(
        self.channels,
        n_classes=3,
        in_channels=4,
        interpolation=(
            240, 240, 155))
    #self.channels, 3, interpolation = (240,240,155))
    # Data — augmentation switches, all disabled for this run
    self.nn_augmentation = False
    self.soft_augmentation = False
    self.do_rotate = False
    self.rot_degrees = 20
    self.do_scale = False
    self.scale_factor = False
    self.do_flip = False
    self.do_elastic_aug = False
    self.sigma = 10
    self.do_intensity_shift = False
    self.max_intensity_shift = 0.1
    # Training
    self.train_original_classes = False
    self.epoch = 1000

    def loss(outputs, labels):
        # BraTS multi-label Dice loss (closure used as the training criterion)
        return bratsUtils.bratsDiceLoss(outputs, labels, nonSquared=True)
    self.loss = loss
    self.batchsize = 2
    # self.optimizer = optim.SGD(self.net.parameters(),
    #                            lr= 0.01, #to do
    #                            momentum=0.9,
    #                            nesterov=True,
    #                            weight_decay=1e-5) #todo
    self.optimizer = optim.Adam(self.net.parameters(), lr=0.0001, weight_decay=1e-5)
    self.validate_every_k_epochs = 1
    # Scheduler list : [lambdarule_1]
    # self.lr_scheduler = get_scheduler(self.optimizer, "lambdarule_e1000")
    self.lr_scheduler = get_scheduler(self.optimizer, "multistep")
def __init__(self):
    """Experiment config: reversible 3D U-Net (depth 3, 1 group) on TCIA pancreas 160x160x64 (run 206)."""
    # ID and Name
    self.id = 206
    self.experiment_name = "tcia_revunet_03_d3_e1000_CE_adam_wd0_da_f1_lr5_gr1_id{}".format(self.id)
    self.debug = False
    # System
    self.checkpointsBasePath = "./checkpoints/"
    self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
    self.labelpath = "/local/SSD_DEEPLEARNING/PANCREAS_MULTI_RES/160_160_64/"
    self.datapath = self.labelpath
    self.im_dim = (160,160,64)
    # GPU — must be set before CUDA initialisation
    self.gpu = '2'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu
    # Model
    self.channels = [64, 128, 256, 512, 1024]
    self.channels = [int(x) for x in self.channels]  # no-op cast; mirrors configs that rescale channels
    self.n_classes = 2
    self.n_groups = 1
    self.net = RevUnet3D(1, self.channels, self.n_classes, depth = 3, interpolation = None, groups = self.n_groups)#(512,512,198))
    # self.net = RevUnet3D(1, self.channels, 12, interpolation = (256,256,99))
    self.n_parameters = count_parameters(self.net)
    print("N PARAMS : {}".format(self.n_parameters))
    self.model_path = './checkpoints/models/revunet_tcia_160_160_64_d3_gr1.pth'
    self.load_model()  # presumably loads weights from self.model_path
    self.split = 1
    # Data augmentation: elastic deformation + small rotations (torchio pipeline)
    max_displacement = 5,5,5
    deg = (0,5,10)
    scales = 0
    self.transform = tio.Compose([
        tio.RandomElasticDeformation(max_displacement=max_displacement),
        tio.RandomAffine(scales=scales, degrees=deg)
    ])
    # Training
    self.train_original_classes = False
    self.start_epoch = 0
    self.epoch = 1000
    self.loss = torch.nn.CrossEntropyLoss()
    # self.loss = SoftDiceLoss(self.n_classes)
    self.hot = 0
    self.batchsize = 2
    self.lr_rate = 5e-5 #5e-4 # 1e-2 #5e-5
    self.optimizer = optim.Adam(self.net.parameters(), lr = self.lr_rate, weight_decay=0)
    self.optimizer.zero_grad()
    self.validate_every_k_epochs = 1
    # Scheduler list : [lambdarule_1]
    # self.lr_scheduler = get_scheduler(self.optimizer, "multistep")
    self.lr_scheduler = get_scheduler(self.optimizer, "constant", self.lr_rate)
    # self.lr_scheduler = get_scheduler(self.optimizer, "lambdarule_1", self.lr_rate)
    # Other
    self.classes_name = ['background','pancreas']
    self.look_small = False
def __init__(self):
    """Experiment config: cross-patch 3D transformer on nnU-Net-preprocessed BCV (run 506b95).

    Resumes from a checkpoint at epoch 1000, then re-initialises the decoder and the
    cross-transformer before continuing training with deep supervision
    (Dice+CE wrapped in MultipleOutputLoss2).
    """
    # ID and Name
    self.id = "506b95"
    self.experiment_name = "ma_crosstr_v{}".format(self.id)
    self.debug = False
    # System
    self.checkpointsBasePath = "./checkpoints/"
    self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
    self.labelpath = '/local/DEEPLEARNING/MULTI_ATLAS/MULTI_ATLAS/nnUNet_preprocessed/Task017_BCV/nnUNetData_plans_v2.1_stage1/'
    self.datapath = self.labelpath
    self.input_shape = [512,512,256]
    # self.filters = [16, 32, 64, 128]
    # self.filters = [64, 192, 448, 704]
    # self.filters = [16, 32, 64, 128, 256]
    self.filters = [32, 64, 128, 256, 512]
    d_model = self.filters[-1]  # transformer embedding dim = deepest encoder width
    # skip_idx = [1,3,5,6]
    # self.patch_size=(128,128,128)
    self.patch_size=(192,192,48)
    # n_layers=6
    self.clip = False
    self.patched = True
    # GPU — must be set before CUDA initialisation
    self.gpu = '1'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu
    # torch.backends.cudnn.benchmark = False
    # Model
    number_of_cross_heads = 8
    number_of_self_heads = 8
    number_of_self_layer = 6
    self.n_classes = 14
    self.net = CrossPatch3DTr(filters=self.filters, patch_size=[1,1,1],
                              d_model=d_model, n_classes=self.n_classes,
                              n_cheads=1, n_sheads=number_of_self_heads,
                              bn=True, up_mode='deconv',
                              n_strans=number_of_self_layer, do_cross=True, enc_grad=False)
    self.net.inference_apply_nonlin = softmax_helper
    self.n_parameters = count_parameters(self.net)
    print("N PARAMS : {}".format(self.n_parameters))
    # self.model_path = './checkpoints/models/deep_crosstr.pth'
    self.model_path = './checkpoints/models/506/modlast.pt'
    # Data augmentation: elastic deformation + small rotations (torchio pipeline)
    max_displacement = 5,5,5
    deg = (0,5,10)
    scales = 0
    self.transform = tio.Compose([
        tio.RandomElasticDeformation(max_displacement=max_displacement),
        tio.RandomAffine(scales=scales, degrees=deg)
    ])
    # Training
    self.start_epoch = 1000
    self.epoch = 2000
    # self.loss = torch.nn.CrossEntropyLoss()
    self.loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'do_bg': False}, {})
    self.ds_scales = ((1, 1, 1), (0.5, 0.5, 0.5), (0.25, 0.25, 0.25), (0.125,0.125,0.125))
    ################# Here we wrap the loss for deep supervision ############
    # we need to know the number of outputs of the network
    net_numpool = 4
    # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
    # this gives higher resolution outputs more weight in the loss
    weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
    # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
    mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
    weights[~mask] = 0
    weights = weights / weights.sum()
    self.ds_loss_weights = weights
    # now wrap the loss
    self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
    ################# END ###################
    self.batchsize = 2
    self.lr_rate = 1e-3
    self.load_lr = False  # presumably: do not restore the checkpointed lr — confirm in load_model
    self.load_model()
    # Decoder and cross-transformer are deliberately re-initialised after loading the checkpoint
    self.net.reinit_decoder()
    self.net.reinit_crostrans(dim=d_model, depth=1, heads=number_of_cross_heads,
                              dim_head=1024, mlp_dim=1024, dropout = 0.1)
    self.optimizer = optim.SGD(self.net.parameters(), lr = self.lr_rate,
                               weight_decay=3e-5, momentum=0.99, nesterov=True)
    self.optimizer.zero_grad()
    self.validate_every_k_epochs = 1
    # self.decay = (self.lr_rate/self.final_lr_rate - 1)/self.epoch
    self.lr_scheduler = get_scheduler(self.optimizer, "poly", self.lr_rate, max_epochs=self.epoch)
    # Other
    self.classes_name = ['background','spleen','right kidney','left kidney','gallbladder','esophagus','liver','stomach','aorta','inferior vena cava','portal vein and splenic vein','pancreas','right adrenal gland','left adrenal gland']
def __init__(self):
    """Experiment config: reversible 3D U-Net (depth 1) on low-res 80x80x32 multi-atlas (run 40)."""
    # ID and Name
    self.experiment_name = "atlas_revunet_3D_016_with_tcia_conf"
    self.id = 40
    self.debug = False
    # System
    self.checkpointsBasePath = "./checkpoints/"
    self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
    # self.labelpath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_512_512_198_res_1.0_1.0_1.0.hdf5"
    # self.labelpath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_256_256_99_res_0.5_0.5.hdf5"
    self.labelpath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_80_80_32_res_0.16.hdf5"
    self.datapath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_80_80_32_res_0.16.hdf5"
    # GPU — must be set before CUDA initialisation
    self.gpu = '2'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu
    # Model
    self.channels = [64, 128, 256, 512, 1024]
    self.channels = [int(x) for x in self.channels]  # no-op cast; mirrors configs that rescale channels
    self.net = RevUnet3D(1, self.channels, 14, depth = 1, interpolation = None)#(512,512,198))
    # self.net = RevUnet3D(1, self.channels, 12, interpolation = (256,256,99))
    self.n_parameters = count_parameters(self.net)
    self.model_path = './checkpoints/models/atlas_80_80_32_d1.pth'
    self.load_model()  # presumably loads weights from self.model_path
    self.n_classes = 14
    # Data augmentation switches (all disabled for this run)
    self.nn_augmentation = False
    self.soft_augmentation = False
    self.do_rotate = False
    self.rot_degrees = 20
    self.do_scale = False
    self.scale_factor = False
    self.do_flip = False
    self.do_elastic_aug = False
    self.sigma = 10
    self.do_intensity_shift = False
    self.max_intensity_shift = 0.1
    # Training
    self.train_original_classes = False
    self.epoch = 300

    def loss(outputs, labels):
        # Multi-class Dice loss over self.n_classes (closure used as the criterion)
        return atlasUtils.atlasDiceLoss(outputs, labels, n_classe = self.n_classes)
    self.loss = loss
    self.hot = 1
    # self.loss = SoftDiceLoss(self.n_classes)
    self.batchsize = 1
    # self.optimizer = optim.Ada(self.net.parameters(),
    #                            lr= 0.01, #to do
    #                            momentum=0.9,
    #                            nesterov=True,
    #                            weight_decay=1e-5) #todo
    # self.optimizer = optim.Adam(self.net.parameters(), lr = 5e-4, weight_decay=1e-5)
    self.lr_rate = 0.01#5e-3
    # self.optimizer = optim.SGD(self.net.parameters(),
    #                            lr=self.lr_rate)
    self.optimizer = optim.SGD(self.net.parameters(),
                               lr=self.lr_rate,
                               momentum=0.9,
                               nesterov=True,
                               weight_decay=5e-4)
    self.optimizer.zero_grad()
    self.validate_every_k_epochs = 1
    # Scheduler list : [lambdarule_1]
    # self.lr_scheduler = get_scheduler(self.optimizer, "multistep")
    self.lr_scheduler = get_scheduler(self.optimizer, "multistep", self.lr_rate)
    # self.lr_scheduler = get_scheduler(self.optimizer, "lambdarule_1", self.lr_rate)
    # Other
    self.classes_name = ['background','spleen','right kidney','left kidney','gallbladder','esophagus','liver','stomach','aorta','inferior vena cava','portal vein and splenic vein','pancreas','right adrenal gland','left adrenal gland']
    self.look_small = False
def set_scheduler(self, opts, epoch=-1):
    """Attach a fresh LR scheduler to both the generator and discriminator optimizers."""
    # One scheduler per optimizer, built from the same shared options.
    self.scheduler_G, self.scheduler_D = (
        get_scheduler(opt, opts) for opt in (self.optimizer_G, self.optimizer_D)
    )
def __init__(self):
    """Experiment config: CoTr (ResTranUnet) on the VP multi-organ dataset, 8 classes (run 601)."""
    # ID and Name
    self.id = 601
    self.experiment_name = "ma_cotr_v{}".format(self.id)
    self.debug = False
    # System
    self.checkpointsBasePath = "./checkpoints/"
    self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
    # self.labelpath = "/local/DEEPLEARNING/MULTI_ATLAS/multi_atlas//512_512_256/"
    self.labelpath = '/local/DEEPLEARNING/VP_multiorgan_v2/'
    self.datapath = self.labelpath
    self.input_shape = [512,512,256]
    # filters = [4, 8, 16, 32]
    # skip_idx = [1,3,5,6]
    # self.patch_size=(128,128,128)
    self.patch_size=(192,192,48)
    # n_layers=6
    self.clip = True
    self.patched = True
    # GPU — must be set before CUDA initialisation
    self.gpu = '0'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu
    # Model
    self.n_classes = 8
    self.net = ResTranUnet(norm_cfg='IN', activation_cfg='LeakyReLU', img_size=self.patch_size,
                           num_classes=self.n_classes, weight_std=False, deep_supervision=True)
    self.net.inference_apply_nonlin = softmax_helper
    self.n_parameters = count_parameters(self.net)
    print("N PARAMS : {}".format(self.n_parameters))
    self.model_path = './checkpoints/models/vp_cotr.pth'
    # self.model_path = './checkpoints/models/403/mod.pt'
    # Data augmentation: elastic deformation + small rotations (torchio pipeline)
    max_displacement = 5,5,5
    deg = (0,5,10)
    scales = 0
    self.transform = tio.Compose([
        tio.RandomElasticDeformation(max_displacement=max_displacement),
        tio.RandomAffine(scales=scales, degrees=deg)
    ])
    # Training
    self.start_epoch = 0
    self.epoch = 1000
    # self.loss = torch.nn.CrossEntropyLoss()
    self.loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'do_bg': False}, {})
    self.ds_scales = ((1, 1, 1), (0.5, 0.5, 1), (0.25, 0.25, 0.5))
    ################# Here we wrap the loss for deep supervision ############
    # we need to know the number of outputs of the network
    # NOTE(review): 4 deep-supervision weights but only 3 ds_scales above —
    # confirm the network really emits 4 outputs here.
    net_numpool = 4
    # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
    # this gives higher resolution outputs more weight in the loss
    weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
    # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
    mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
    weights[~mask] = 0
    weights = weights / weights.sum()
    self.ds_loss_weights = weights
    # now wrap the loss
    self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
    ################# END ###################
    self.batchsize = 2
    self.lr_rate = 2e-2
    # self.final_lr_rate = 1e-5
    # self.optimizer = optim.Adam(self.net.parameters(), lr = self.lr_rate)
    self.optimizer = optim.SGD(self.net.parameters(), lr = self.lr_rate, weight_decay=3e-5, momentum=0.99)
    self.optimizer.zero_grad()
    self.validate_every_k_epochs = 10
    # self.decay = (self.lr_rate/self.final_lr_rate - 1)/self.epoch
    self.lr_scheduler = get_scheduler(self.optimizer, "poly", self.lr_rate, max_epochs=self.epoch)
    self.load_model()  # presumably loads weights from self.model_path
    # Other
    self.classes_name = ["Background", "Liver","Gallbladder","Spleen","Left_Kidney","Right_Kidney","Pancreas","Stomach"]
def __init__(self):
    """Experiment config: reversible 3D U-Net (depth 3) on multi-atlas 160x160x64, resumed at epoch 986 (run 130).

    Fix: the original assigned ``self.n_classes = 14`` twice in a row; the
    duplicate dead store is removed (single assignment, same value).
    """
    # ID and Name
    self.id = 130
    self.experiment_name = "multi_atlas_revunet_03_d3_e1000_CE_adam_wd0_bs1_da_id{}".format(
        self.id)
    self.debug = False
    # System
    self.checkpointsBasePath = "./checkpoints/"
    self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
    # self.labelpath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_512_512_198_res_1.0_1.0_1.0.hdf5"
    # self.labelpath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_256_256_99_res_0.5_0.5.hdf5"
    self.labelpath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_160_160_64_res_0.3.hdf5"
    self.datapath = self.labelpath
    # GPU — must be set before CUDA initialisation
    self.gpu = '1'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu
    # Model
    self.start_epoch = 986
    self.epoch = 2000
    self.channels = [64, 128, 256, 512, 1024]
    self.channels = [int(x) for x in self.channels]  # no-op cast; mirrors configs that rescale channels
    self.net = RevUnet3D(1, self.channels, 14, depth=3, interpolation=None)  #(512,512,198))
    # self.net = RevUnet3D(1, self.channels, 12, interpolation = (256,256,99))
    self.n_parameters = count_parameters(self.net)
    print("N PARAMS : {}".format(self.n_parameters))
    # self.model_path = './checkpoints/models/revunet_atlas_160_160_64_d3.pth'
    self.model_path = './checkpoints/models/130/mod.pt'
    self.n_classes = 14
    # Data augmentation: elastic deformation + small rotations (torchio pipeline)
    max_displacement = 5, 5, 5
    deg = (0, 5, 10)
    scales = 0
    self.transform = tio.Compose([
        tio.RandomElasticDeformation(max_displacement=max_displacement),
        tio.RandomAffine(scales=scales, degrees=deg)
    ])
    # Training
    self.train_original_classes = False
    # def loss(outputs, labels):
    #     return atlasUtils.atlasDiceLoss(outputs, labels, n_classe = self.n_classes)
    # self.loss = loss
    self.loss = torch.nn.CrossEntropyLoss()
    self.hot = 0
    # self.loss = SoftDiceLoss(self.n_classes)
    self.batchsize = 1
    # self.optimizer = optim.Ada(self.net.parameters(),
    #                            lr= 0.01, #to do
    #                            momentum=0.9,
    #                            nesterov=True,
    #                            weight_decay=1e-5) #todo
    # self.optimizer = optim.Adam(self.net.parameters(), lr = 5e-4, weight_decay=1e-5)
    self.lr_rate = 5e-5
    # self.optimizer = optim.SGD(self.net.parameters(),
    #                            lr=self.lr_rate)
    self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr_rate, weight_decay=0)
    # self.optimizer = optim.SGD(self.net.parameters(),
    #                            lr=self.lr_rate,
    #                            momentum=0.9,
    #                            nesterov=True,
    #                            weight_decay=5e-4)
    self.validate_every_k_epochs = 1
    # Scheduler list : [lambdarule_1]
    # self.lr_scheduler = get_scheduler(self.optimizer, "multistep")
    self.lr_scheduler = get_scheduler(self.optimizer, "constant", self.lr_rate)
    # self.lr_scheduler = get_scheduler(self.optimizer, "lambdarule_1", self.lr_rate)
    # Other
    self.classes_name = [
        'background', 'spleen', 'right kidney', 'left kidney', 'gallbladder',
        'esophagus', 'liver', 'stomach', 'aorta', 'inferior vena cava',
        'portal vein and splenic vein', 'pancreas', 'right adrenal gland',
        'left adrenal gland'
    ]
    self.look_small = False
    self.load_model()  # presumably loads weights from self.model_path
    self.optimizer.zero_grad()
def __init__(self):
    """Experiment config: full-width 3D U-Net on multi-atlas, 13 classes (run 5)."""
    # ID and Name
    self.experiment_name = "atlas_unet_3D"
    self.id = 5
    # System
    self.checkpointsBasePath = "./models/checkpoints"
    self.labelpath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_512_512_198_res_1.0_1.0_1.0.hdf5"
    self.datapath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_256_256_99_res_0.5_0.5.hdf5"
    # GPU — must be set before CUDA initialisation
    self.gpu = '1'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu
    # Model
    self.channels = [64, 128, 256, 512, 1024]
    self.channels = [int(x / 1) for x in self.channels]  # full width (divisor 1)
    self.net = unet_3D(
        self.channels,
        n_classes=13,
        in_channels=1,
        interpolation=(
            512, 512, 198))
    #1, self.channels, 12, interpolation = (512,512,198))
    # Data — augmentation switches, all disabled for this run
    self.nn_augmentation = False
    self.soft_augmentation = False
    self.do_rotate = False
    self.rot_degrees = 20
    self.do_scale = False
    self.scale_factor = False
    self.do_flip = False
    self.do_elastic_aug = False
    self.sigma = 10
    self.do_intensity_shift = False
    self.max_intensity_shift = 0.1
    # Training
    self.train_original_classes = False
    self.epoch = 1000

    def loss(outputs, labels):
        # Multi-organ Dice loss (closure used as the criterion)
        return atlasUtils.atlasDiceLoss(outputs, labels, nonSquared=True)
    self.loss = loss
    self.batchsize = 1
    # self.optimizer = optim.Ada(self.net.parameters(),
    #                            lr= 0.01, #to do
    #                            momentum=0.9,
    #                            nesterov=True,
    #                            weight_decay=1e-5) #todo
    self.optimizer = optim.Adam(self.net.parameters(), lr=5e-4, weight_decay=1e-5)
    self.validate_every_k_epochs = 1
    # Scheduler list : [lambdarule_1]
    self.lr_scheduler = get_scheduler(self.optimizer, "multistep")
    # Other
    # NOTE(review): 13 names with no 'background' entry, matching n_classes=13 —
    # unlike the 14-class configs; confirm class indexing is consistent downstream.
    self.classes_name = [
        'spleen', 'right kidney', 'left kidney', 'gallbladder', 'esophagus',
        'liver', 'stomach', 'aorta', 'inferior vena cava',
        'portal vein and splenic vein', 'pancreas', 'right adrenal gland',
        'left adrenal gland'
    ]
    self.look_small = False
def __init__(self):
    """Experiment config: CoTr (ResTranUnet) inference/prediction on multi-atlas 512x512x256 (run -1).

    start_epoch == epoch == 1000, so no further training occurs — this config
    exists to load the run-400 checkpoint for prediction.

    Fix: the original assigned ``self.loss = torch.nn.CrossEntropyLoss()`` and
    immediately overwrote it with ``DC_and_CE_loss`` — the dead store is removed
    (final value unchanged).
    """
    # ID and Name
    self.id = -1
    self.experiment_name = "ma_cotr_pred_v{}".format(self.id)
    self.debug = False
    # System
    self.checkpointsBasePath = "./checkpoints/"
    self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
    self.labelpath = "/local/DEEPLEARNING/MULTI_ATLAS/multi_atlas//512_512_256/"
    self.datapath = self.labelpath
    self.input_shape = [512,512,256]
    # filters = [4, 8, 16, 32]
    # skip_idx = [1,3,5,6]
    self.patch_size=(128,128,128)
    # self.patch_size=(192,192,48)
    # n_layers=6
    self.clip = True
    self.patched = True
    # GPU — must be set before CUDA initialisation
    self.gpu = '0'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu
    # Model
    self.n_classes = 14
    self.net = ResTranUnet(norm_cfg='IN', activation_cfg='LeakyReLU', img_size=self.patch_size,
                           num_classes=self.n_classes, weight_std=False, deep_supervision=False)
    self.net.inference_apply_nonlin = softmax_helper
    self.n_parameters = count_parameters(self.net)
    print("N PARAMS : {}".format(self.n_parameters))
    # self.model_path = './checkpoints/models/cotr.pth'
    self.model_path = './checkpoints/models/400/mod.pt'
    # Data augmentation: elastic deformation + small rotations (torchio pipeline)
    max_displacement = 5,5,5
    deg = (0,5,10)
    scales = 0
    self.transform = tio.Compose([
        tio.RandomElasticDeformation(max_displacement=max_displacement),
        tio.RandomAffine(scales=scales, degrees=deg)
    ])
    # Training
    self.start_epoch = 1000
    self.epoch = 1000
    self.loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'do_bg': False}, {})
    self.batchsize = 2
    self.lr_rate = 2e-2
    # self.final_lr_rate = 1e-5
    # self.optimizer = optim.Adam(self.net.parameters(), lr = self.lr_rate)
    self.optimizer = optim.SGD(self.net.parameters(), lr = self.lr_rate, weight_decay=3e-5, momentum=0.99)
    self.optimizer.zero_grad()
    self.validate_every_k_epochs = 10
    # self.decay = (self.lr_rate/self.final_lr_rate - 1)/self.epoch
    self.lr_scheduler = get_scheduler(self.optimizer, "poly", self.lr_rate, max_epochs=self.epoch)
    self.load_model()  # loads the run-400 checkpoint for prediction
    # Other
    self.classes_name = ['background','spleen','right kidney','left kidney','gallbladder','esophagus','liver','stomach','aorta','inferior vena cava','portal vein and splenic vein','pancreas','right adrenal gland','left adrenal gland']
def __init__(self):
    """Experiment config: small reversible 3D U-Net (depth 2) on TCIA pancreas 80x80x32, split 3 (run 301)."""
    # ID and Name
    self.id = 301
    self.experiment_name = "tcia_revunet_small_3D_016_split3_wd0_CE_adam_lr5_id{}".format(self.id)
    self.debug = False
    # System
    self.checkpointsBasePath = "./checkpoints/"
    self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
    self.datapath ="/local/SSD_DEEPLEARNING/PANCREAS_MULTI_RES/80_80_32/"
    self.im_dim = (80,80,32)
    # GPU — must be set before CUDA initialisation
    self.gpu = '2'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu
    # Model
    self.n_classes = 2
    self.channels = [64, 128, 256, 512, 1024]
    self.channels = [int(x) for x in self.channels]  # no-op cast; mirrors configs that rescale channels
    self.net = RevUnet3D(1, self.channels, self.n_classes, depth = 2, interpolation = None)#(512,512,198))
    # self.net = RevUnet3D(1, self.channels, 12, interpolation = (256,256,99))
    self.n_parameters = count_parameters(self.net)
    # Data augmentation switches (all disabled for this run)
    self.nn_augmentation = False
    self.soft_augmentation = False
    self.do_rotate = False
    self.rot_degrees = 20
    self.do_scale = False
    self.scale_factor = False
    self.do_flip = False
    self.do_elastic_aug = False
    self.sigma = 10
    self.do_intensity_shift = False
    self.max_intensity_shift = 0.1
    self.split = 3
    self.hot = 0
    # Training
    self.train_original_classes = False
    self.epoch = 1000
    # def loss(outputs, labels):
    #     return atlasUtils.atlasDiceLoss(outputs, labels, nonSquared=True, n_classe = self.n_classes)
    # self.loss = loss
    self.loss = torch.nn.CrossEntropyLoss()
    # self.loss = SoftDiceLoss(self.n_classes)
    self.batchsize = 2
    # self.optimizer = optim.Ada(self.net.parameters(),
    #                            lr= 0.01, #to do
    #                            momentum=0.9,
    #                            nesterov=True,
    #                            weight_decay=1e-5) #todo
    # self.optimizer = optim.Adam(self.net.parameters(), lr = 5e-4, weight_decay=1e-5)
    self.lr_rate = 5e-5
    # self.optimizer = optim.SGD(self.net.parameters(),
    #                            lr=self.lr_rate)
    # NOTE(review): optimizer uses a hard-coded lr = 5e-4 while self.lr_rate = 5e-5
    # (and the experiment name says "lr5") — the scheduler below gets self.lr_rate.
    # Confirm which learning rate is intended.
    self.optimizer = optim.Adam(self.net.parameters(), lr = 5e-4, weight_decay=0)
    self.optimizer.zero_grad()
    self.validate_every_k_epochs = 1
    # Scheduler list : [lambdarule_1]
    # self.lr_scheduler = get_scheduler(self.optimizer, "multistep")
    # self.lr_scheduler = get_scheduler(self.optimizer, "multistep", self.lr_rate)
    self.lr_scheduler = get_scheduler(self.optimizer, "lambdarule_1", self.lr_rate)
    # Other
    self.classes_name = ['background','pancreas']#,'right kidney','left kidney','gallbladder','esophagus','liver','stomach','aorta','inferior vena cava','portal vein and splenic vein','pancreas','right adrenal gland','left adrenal gland']
    self.look_small = False
def __init__(self):
    """Experiment config: UNETR (ViT-based 3D U-Net) on TCIA pancreas, 128^3 patches (run 301)."""
    # ID and Name
    self.id = 301
    self.experiment_name = "pancreas_unetr_v{}".format(self.id)
    self.debug = False
    # System
    self.checkpointsBasePath = "./checkpoints/"
    self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
    self.labelpath = "/local/DEEPLEARNING/PANCREAS_MULTI_RES/512_512_256/"
    self.datapath = self.labelpath
    self.input_shape = [512,512,256]
    # Transformer/decoder hyper-parameters
    filters = [64, 128, 256, 512]
    skip_idx = [3,6,9,12]  # transformer layers tapped for skip connections
    patch_size=(16,16,16)  # ViT token patch size
    self.data_patch = [128,128,128]  # training crop fed to the network
    n_layers=12
    self.patched = True
    # GPU — must be set before CUDA initialisation
    self.gpu = '0'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu
    # Model
    self.n_classes = 2
    self.net = UNETR(input_shape=self.data_patch, filters=filters, patch_size=patch_size,
                     n_layers=n_layers, skip_idx=skip_idx)
    self.n_parameters = count_parameters(self.net)
    print("N PARAMS : {}".format(self.n_parameters))
    # Data augmentation: elastic deformation + small rotations (torchio pipeline)
    max_displacement = 5,5,5
    deg = (0,5,10)
    scales = 0
    self.transform = tio.Compose([
        tio.RandomElasticDeformation(max_displacement=max_displacement),
        tio.RandomAffine(scales=scales, degrees=deg)
    ])
    # Training
    self.start_epoch = 0
    self.train_original_classes = False
    self.epoch = 25
    # self.model_path = './checkpoints/models/unetr.pth'
    self.model_path = './checkpoints/models/unetr.pth'
    self.load_model()  # presumably loads weights from self.model_path
    self.split = 1
    self.loss = torch.nn.CrossEntropyLoss()
    self.hot = 0
    self.batchsize = 1
    self.lr_rate = 1e-4
    self.final_lr_rate = 1e-5
    self.optimizer = optim.Adam(self.net.parameters(), lr = self.lr_rate)
    self.optimizer.zero_grad()
    self.validate_every_k_epochs = 1
    # decay factor taking lr from lr_rate down to final_lr_rate over `epoch` epochs
    self.decay = (self.lr_rate/self.final_lr_rate - 1)/self.epoch
    # NOTE(review): scheduler key "po" (not "poly") — confirm get_scheduler accepts it.
    self.lr_scheduler = get_scheduler(self.optimizer, "po", self.lr_rate, self.decay)
    # Other
    self.classes_name = ['background','pancreas']
    self.look_small = False
def set_scheduler(self, opts, epoch=-1):
    """Create one LR scheduler per optimizer in self.optimizers, resuming from `epoch`."""
    schedulers = []
    for opt in self.optimizers:
        schedulers.append(get_scheduler(opt, opts, last_epoch=epoch))
    self.schedulers = schedulers
def __init__(self):
    """Experiment config: invertible U-Net (iUNet) on full-res 512x512x208 multi-atlas (run 700)."""
    # ID and Name
    self.id = 700
    self.experiment_name = "multi_atlas_iunet_r1_d3_e2000_CE_adam_wd0_bs1_da_lr5_grdef_id{}".format(
        self.id)
    self.debug = False
    # System
    self.checkpointsBasePath = "./checkpoints/"
    self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
    # self.labelpath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_512_512_198_res_1.0_1.0_1.0.hdf5"
    # self.labelpath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_256_256_99_res_0.5_0.5.hdf5"
    self.labelpath = "/local/DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_512_512_208_res_1.hdf5"
    self.datapath = self.labelpath
    # GPU — must be set before CUDA initialisation
    self.gpu = '1'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu
    # Model
    self.archi = [2, 2, 2, 2]  # blocks per resolution level
    self.n_classes = 14
    self.in_channels = 2
    self.net = iUNet_3D(self.in_channels, self.n_classes, self.archi)
    self.n_parameters = count_parameters(self.net)
    print("N PARAMS : {}".format(self.n_parameters))
    self.model_path = './checkpoints/models/iunet_atlas_512_512_208_ar2_ic2.pth'
    self.load_model()  # presumably loads weights from self.model_path
    self.n_classes = 14  # re-asserted after load_model (redundant with the value above)
    # Data augmentation: elastic deformation + small rotations (torchio pipeline)
    max_displacement = 5, 5, 5
    deg = (0, 5, 10)
    scales = 0
    self.transform = tio.Compose([
        tio.RandomElasticDeformation(max_displacement=max_displacement),
        tio.RandomAffine(scales=scales, degrees=deg)
    ])
    # Training
    self.train_original_classes = False
    self.start_epoch = 0
    self.epoch = 2000
    self.loss = torch.nn.CrossEntropyLoss()
    self.hot = 0
    self.batchsize = 1
    self.lr_rate = 5e-5
    self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr_rate, weight_decay=0)
    self.optimizer.zero_grad()
    self.validate_every_k_epochs = 1
    self.lr_scheduler = get_scheduler(self.optimizer, "constant", self.lr_rate)
    # Other
    self.classes_name = [
        'background', 'spleen', 'right kidney', 'left kidney', 'gallbladder',
        'esophagus', 'liver', 'stomach', 'aorta', 'inferior vena cava',
        'portal vein and splenic vein', 'pancreas', 'right adrenal gland',
        'left adrenal gland'
    ]
    self.look_small = False
def __init__(self):
    """Experiment config: UNETR on multi-atlas, 14 classes, 192x192x48 patches (run 305)."""
    # ID and Name
    self.id = 305
    self.experiment_name = "ma_unetr_v{}".format(self.id)
    self.debug = False
    # System
    self.checkpointsBasePath = "./checkpoints/"
    self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
    self.labelpath = "/local/DEEPLEARNING/MULTI_ATLAS/multi_atlas//512_512_256/"
    self.datapath = self.labelpath
    self.input_shape = [512, 512, 256]
    # Transformer/decoder hyper-parameters
    filters = [64, 128, 256, 512]
    skip_idx = [3, 6, 9, 12]  # transformer layers tapped for skip connections
    self.patch_size = (192, 192, 48)  # training crop fed to the network
    n_layers = 12
    # GPU — must be set before CUDA initialisation
    self.gpu = '1'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu
    # Model
    self.n_classes = 14
    self.net = UNETR(input_shape=self.patch_size, n_classes=self.n_classes, filters=filters,
                     patch_size=(16, 16, 16), n_layers=n_layers, skip_idx=skip_idx)
    self.n_parameters = count_parameters(self.net)
    print("N PARAMS : {}".format(self.n_parameters))
    self.model_path = './checkpoints/models/ma_unetr.pth'
    # self.model_path = './checkpoints/models/300/mod.pth'
    # Data augmentation: elastic deformation + small rotations (torchio pipeline)
    max_displacement = 5, 5, 5
    deg = (0, 5, 10)
    scales = 0
    self.transform = tio.Compose([
        tio.RandomElasticDeformation(max_displacement=max_displacement),
        tio.RandomAffine(scales=scales, degrees=deg)
    ])
    # Training
    self.start_epoch = 0
    self.epoch = 1000
    self.loss = torch.nn.CrossEntropyLoss()
    self.batchsize = 2
    self.lr_rate = 2e-2
    # self.final_lr_rate = 1e-5
    # self.optimizer = optim.Adam(self.net.parameters(), lr = self.lr_rate)
    self.optimizer = optim.SGD(self.net.parameters(), lr=self.lr_rate,
                               weight_decay=3e-5, momentum=0.99)
    self.optimizer.zero_grad()
    self.validate_every_k_epochs = 10
    # self.decay = (self.lr_rate/self.final_lr_rate - 1)/self.epoch
    self.lr_scheduler = get_scheduler(self.optimizer, "poly", self.lr_rate, max_epochs=self.epoch)
    self.load_model()  # presumably loads weights from self.model_path
    # Other
    self.classes_name = [
        'background', 'spleen', 'right kidney', 'left kidney', 'gallbladder',
        'esophagus', 'liver', 'stomach', 'aorta', 'inferior vena cava',
        'portal vein and splenic vein', 'pancreas', 'right adrenal gland',
        'left adrenal gland'
    ]
def __init__(self, img_size=64, nz=100, lr_D=2e-4, lr_G=1e-4, beta1=0.5, beta2=0.999,
             use_gpu=True, loss_criterion="BCE", use_schedulerD=False, use_schedulerG=False,
             meterD=AverageValueMeter(), meterG=AverageValueMeter(), add_noise=True,
             use_label_smoothing=False, **kwargs):
    """Initialise a GAN trainer: config, meters, G/D networks, optimizers, loss, optional schedulers.

    Required kwargs: ``smooth_label`` when use_label_smoothing is True;
    ``schedulerD_name``/``sD_c`` when use_schedulerD; ``schedulerG_name``/``sG_c``
    when use_schedulerG.

    NOTE(review): the ``meterD``/``meterG`` defaults are constructed once at
    function-definition time, so all instances built with defaults share the
    same two meter objects — confirm this sharing is intended.
    NOTE(review): ``use_gpu`` is accepted but never read here; the device is
    chosen purely from torch.cuda.is_available().
    """
    # Only create a config container if a subclass has not already set one.
    if "config" not in vars(self):
        self.config = edict()
    # Running metrics for discriminator and generator.
    self.meterD = meterD
    self.meterG = meterG
    self.meterD2 = MovingAverageValueMeter(10)
    self.meterG2 = MovingAverageValueMeter(10)
    self.device = torch.device("cuda:0" if (
        torch.cuda.is_available()) else "cpu")
    # Target labels for the discriminator loss (optionally smoothed real label).
    self.config.use_label_smoothing = use_label_smoothing
    if self.config.use_label_smoothing:
        self.real_label = kwargs["smooth_label"]
    else:
        self.real_label = 1
    self.fake_label = 0
    self.fake_real_label = 1  # label used when training G to fool D
    self.epochs_trained = 0
    # Core hyper-parameters recorded in the config.
    self.config.img_size = img_size
    self.config.nz = nz
    self.config.loss_criterion = loss_criterion
    self.config.add_noise = add_noise
    self.config.lr_D = float(lr_D)
    self.config.lr_G = float(lr_G)
    self.config.beta1 = beta1
    self.config.beta2 = beta2
    # Networks and their optimizers come from factory methods (may read self.config).
    self.netG = self._get_netG()
    self.netD = self._get_netD()
    self.optimizerG = self._get_optimizerG()
    self.optimizerD = self._get_optimizerD()
    self.loss_criterion = get_loss_criterion(self.config.loss_criterion)
    # self.loss_criterion = WGAN_GP(self.netD, use_gp=False)
    # Schedulers configuration (each side is optional and configured via kwargs).
    self.config.use_schedulerD = use_schedulerD
    if self.config.use_schedulerD:
        self.config.schedulerD_name = kwargs["schedulerD_name"]
        self.config.sD_c = kwargs["sD_c"]
        self.schedulerD = get_scheduler(self.config.schedulerD_name, self.optimizerD, **self.config.sD_c)
    self.config.use_schedulerG = use_schedulerG
    if self.config.use_schedulerG:
        self.config.schedulerG_name = kwargs["schedulerG_name"]
        self.config.sG_c = kwargs["sG_c"]
        self.schedulerG = get_scheduler(self.config.schedulerG_name, self.optimizerG, **self.config.sG_c)
    self._update_device()