def __init__(self):
    """Experiment configuration: 3D U-Net (quarter-width) on the BraTS dataset."""
    # ID and Name
    self.experiment_name = "unet_3D"
    self.id = 1

    # System
    self.checkpointsBasePath = "./models/checkpoints"
    self.labelpath = "/local/SSD_DEEPLEARNING/brats/processed/data_3D_size_240_240_155_res_1.0_1.0_1.0.hdf5"
    self.datapath = "/local/SSD_DEEPLEARNING/brats/processed/data_3D_size_160_192_155_res_1.0_1.0_1.0.hdf5"

    # GPU — must be set before any CUDA context is created
    self.gpu = '1'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu

    # Model: base channel widths scaled down by a factor of 4
    self.channels = [64, 128, 256, 512, 1024]
    self.channels = [int(width / 4) for width in self.channels]
    self.net = unet_3D(self.channels,
                       n_classes=3,
                       in_channels=4,
                       interpolation=(240, 240, 155))

    # Data augmentation switches (all disabled for this experiment)
    self.nn_augmentation = False
    self.soft_augmentation = False
    self.do_rotate = False
    self.rot_degrees = 20
    self.do_scale = False
    self.scale_factor = False
    self.do_flip = False
    self.do_elastic_aug = False
    self.sigma = 10
    self.do_intensity_shift = False
    self.max_intensity_shift = 0.1

    # Training
    self.train_original_classes = False
    self.epoch = 1000

    def dice_loss(outputs, labels):
        # Non-squared soft Dice loss from the BraTS utilities.
        return bratsUtils.bratsDiceLoss(outputs, labels, nonSquared=True)

    self.loss = dice_loss
    self.batchsize = 2
    self.optimizer = optim.Adam(self.net.parameters(),
                                lr=0.0001,
                                weight_decay=1e-5)
    self.validate_every_k_epochs = 1
    # Scheduler list : [lambdarule_1]
    self.lr_scheduler = get_scheduler(self.optimizer, "multistep")
def main():
    """Estimate GPU memory consumption of a 3D U-Net forward/backward pass.

    Builds a half-width U-Net, runs shape inference on a random dummy volume,
    and prints a running total of the memory footprint (model weights, input,
    labels, forward activations, backward buffers) in human-readable units.
    """
    inchan = 1
    chanscale = 2
    chans = [i // chanscale for i in [64, 128, 256, 512, 1024]]
    outsize = 14
    mod = unet_3D(chans, n_classes=outsize, in_channels=inchan,
                  interpolation=None)
    layers = get_mod_details(mod)

    # Dummy volume size used for the estimate.
    s = (256, 256, 112)
    voxels = np.prod(s)  # hoisted: reused in several byte-count formulas

    x = torch.from_numpy(np.random.rand(1, 1, s[0], s[1], s[2])).float()
    y = torch.from_numpy(np.random.rand(1, outsize, s[0], s[1], s[2])).float()
    argmax = torch.from_numpy(
        np.random.rand(1, outsize, s[0], s[1], s[2])).float()

    acts = get_activations_shapes_as_dict(layers, x)
    mod_m = model_memory(mod)
    lab_m = labels_mem(y)
    argm_m = validat_arg_memory(argmax)  # NOTE(review): unused below; kept in case the call has side effects
    inp_m = labels_mem(x)
    # Peak forward-pass memory, excluding the input (counted separately).
    cur_m = forward_memory_cosumption_with_peak(acts) - inp_m
    # Extra memory attributed to the backward pass.
    # NOTE(review): the '-' here looks suspicious for a sum of buffer sizes —
    # confirm against the derivation before relying on this estimate.
    back_m = chans[0] * voxels * 4 * 2 - outsize * voxels * 4 * 3

    print('model :', convert_byte(mod_m))
    print('input :', convert_byte(mod_m + inp_m))
    print('label :', convert_byte(mod_m + inp_m + lab_m))
    print('forwa :', convert_byte(mod_m + inp_m + lab_m + cur_m))
    print('backw :', convert_byte(mod_m + inp_m + lab_m + cur_m + back_m))
    print('max :', convert_byte(max(
        mod_m + inp_m + lab_m + cur_m,
        mod_m + inp_m + lab_m + cur_m + back_m - 2 * chans[0] * voxels * 4)))
def __init__(self):
    """Experiment configuration: full-width 3D U-Net, multi-atlas, CE loss, Adam."""
    # ID and Name
    self.id = 100
    self.experiment_name = "multi_atlas_unet_016_e1000_CE_adam_wd6_da_id{}".format(self.id)
    self.debug = False

    # System
    self.checkpointsBasePath = "./checkpoints/"
    self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
    self.labelpath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_80_80_32_res_0.16.hdf5"
    self.datapath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_80_80_32_res_0.16.hdf5"

    # GPU — must be set before any CUDA context is created
    self.gpu = '1'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu

    # Model: full channel widths, batch norm disabled
    self.channels = [64, 128, 256, 512, 1024]
    self.channels = [int(width) for width in self.channels]
    self.net = unet_3D(self.channels,
                       n_classes=14,
                       is_batchnorm=False,
                       in_channels=1,
                       interpolation=None)
    self.n_parameters = count_parameters(self.net)
    print("N PARAMS : {}".format(self.n_parameters))

    self.n_classes = 14

    # TorchIO augmentation: elastic deformation then a random affine.
    displacement = 5, 5, 5
    rotation_deg = (0, 5, 10)
    scale_range = 0
    self.transform = tio.Compose([
        tio.RandomElasticDeformation(max_displacement=displacement),
        tio.RandomAffine(scales=scale_range, degrees=rotation_deg)
    ])

    # Training
    self.train_original_classes = False
    self.epoch = 1000
    self.loss = torch.nn.CrossEntropyLoss()
    self.hot = 0
    self.batchsize = 1
    self.lr_rate = 5e-4
    self.optimizer = optim.Adam(self.net.parameters(),
                                lr=self.lr_rate,
                                weight_decay=1e-6)
    self.optimizer.zero_grad()
    self.validate_every_k_epochs = 1
    # Scheduler list : [lambdarule_1]
    self.lr_scheduler = get_scheduler(self.optimizer, "multistep", self.lr_rate)

    # Other
    self.classes_name = ['background', 'spleen', 'right kidney', 'left kidney',
                         'gallbladder', 'esophagus', 'liver', 'stomach',
                         'aorta', 'inferior vena cava',
                         'portal vein and splenic vein', 'pancreas',
                         'right adrenal gland', 'left adrenal gland']
    self.look_small = False
def __init__(self):
    """Experiment configuration: full-width 3D U-Net on the multi-atlas dataset (Dice loss)."""
    # ID and Name
    self.experiment_name = "atlas_unet_3D"
    self.id = 5

    # System
    self.checkpointsBasePath = "./models/checkpoints"
    self.labelpath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_512_512_198_res_1.0_1.0_1.0.hdf5"
    self.datapath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_256_256_99_res_0.5_0.5.hdf5"

    # GPU — must be set before any CUDA context is created
    self.gpu = '1'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu

    # Model: channel widths kept at full size (scale factor 1)
    self.channels = [64, 128, 256, 512, 1024]
    self.channels = [int(width / 1) for width in self.channels]
    self.net = unet_3D(self.channels,
                       n_classes=13,
                       in_channels=1,
                       interpolation=(512, 512, 198))

    # Data augmentation switches (all disabled for this experiment)
    self.nn_augmentation = False
    self.soft_augmentation = False
    self.do_rotate = False
    self.rot_degrees = 20
    self.do_scale = False
    self.scale_factor = False
    self.do_flip = False
    self.do_elastic_aug = False
    self.sigma = 10
    self.do_intensity_shift = False
    self.max_intensity_shift = 0.1

    # Training
    self.train_original_classes = False
    self.epoch = 1000

    def dice_loss(outputs, labels):
        # Non-squared soft Dice loss from the atlas utilities.
        return atlasUtils.atlasDiceLoss(outputs, labels, nonSquared=True)

    self.loss = dice_loss
    self.batchsize = 1
    self.optimizer = optim.Adam(self.net.parameters(),
                                lr=5e-4,
                                weight_decay=1e-5)
    self.validate_every_k_epochs = 1
    # Scheduler list : [lambdarule_1]
    self.lr_scheduler = get_scheduler(self.optimizer, "multistep")

    # Other — 13 foreground organ labels (no background class here)
    self.classes_name = ['spleen', 'right kidney', 'left kidney',
                         'gallbladder', 'esophagus', 'liver', 'stomach',
                         'aorta', 'inferior vena cava',
                         'portal vein and splenic vein', 'pancreas',
                         'right adrenal gland', 'left adrenal gland']
    self.look_small = False
def __init__(self):
    """Experiment configuration: 1/16-width 3D U-Net on TCIA pancreas, Dice + SGD."""
    # ID and Name
    self.id = 211
    self.experiment_name = "tcia_unet_03_e1000_dice_sgd_wd6_da_f16_id{}".format(
        self.id)
    self.debug = False

    # System
    self.checkpointsBasePath = "./checkpoints/"
    self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
    self.labelpath = "/local/SSD_DEEPLEARNING/PANCREAS_MULTI_RES/160_160_64/"
    self.datapath = self.labelpath
    self.im_dim = (160, 160, 64)

    # GPU — must be set before any CUDA context is created
    self.gpu = '1'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu

    # Model: channel widths reduced by a factor of 16, batch norm disabled
    self.n_classes = 2
    self.channels = [64, 128, 256, 512, 1024]
    self.channels = [int(width / 16) for width in self.channels]
    self.net = unet_3D(self.channels,
                       n_classes=self.n_classes,
                       is_batchnorm=False,
                       in_channels=1,
                       interpolation=None)
    self.n_parameters = count_parameters(self.net)
    print("N PARAMS : {}".format(self.n_parameters))

    # Resume from a pretrained checkpoint.
    self.model_path = './checkpoints/models/unet_tcia_160_160_64_d3_f16.pth'
    self.load_model()
    self.split = 1

    # TorchIO augmentation: elastic deformation then a random affine.
    displacement = 5, 5, 5
    rotation_deg = (0, 5, 10)
    scale_range = 0
    self.transform = tio.Compose([
        tio.RandomElasticDeformation(max_displacement=displacement),
        tio.RandomAffine(scales=scale_range, degrees=rotation_deg)
    ])

    # Training
    self.train_original_classes = False
    self.epoch = 1000
    self.loss = SoftDiceLoss(self.n_classes)
    self.hot = 0
    self.batchsize = 2
    self.lr_rate = 1e-2
    self.optimizer = optim.SGD(self.net.parameters(),
                               lr=self.lr_rate,
                               momentum=0.9,
                               nesterov=True,
                               weight_decay=5e-4)
    self.optimizer.zero_grad()
    self.validate_every_k_epochs = 1
    self.lr_scheduler = get_scheduler(self.optimizer, "multistep",
                                      self.lr_rate)

    # Other
    self.classes_name = ['background', 'pancreas']
    self.look_small = False
def __init__(self):
    """Experiment configuration: full-width 3D U-Net, multi-atlas, Dice loss, plain SGD."""
    # ID and Name
    self.id = 46
    self.experiment_name = "atlas_unet_3D_016_id{}".format(self.id)
    self.debug = False

    # System
    self.checkpointsBasePath = "./checkpoints/"
    self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
    self.labelpath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_80_80_32_res_0.16.hdf5"
    self.datapath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_80_80_32_res_0.16.hdf5"

    # GPU — must be set before any CUDA context is created
    self.gpu = '1'
    os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu

    # Model: full channel widths, batch norm disabled
    self.channels = [64, 128, 256, 512, 1024]
    self.channels = [int(width) for width in self.channels]
    self.net = unet_3D(self.channels,
                       n_classes=14,
                       is_batchnorm=False,
                       in_channels=1,
                       interpolation=None)
    self.n_parameters = count_parameters(self.net)

    self.n_classes = 14

    # Data augmentation switches (all disabled for this experiment)
    self.nn_augmentation = False
    self.soft_augmentation = False
    self.do_rotate = False
    self.rot_degrees = 20
    self.do_scale = False
    self.scale_factor = False
    self.do_flip = False
    self.do_elastic_aug = False
    self.sigma = 10
    self.do_intensity_shift = False
    self.max_intensity_shift = 0.1

    # Training
    self.train_original_classes = False
    self.epoch = 300

    def dice_loss(outputs, labels):
        # Multi-class atlas Dice loss; n_classe is captured from this config.
        return atlasUtils.atlasDiceLoss(outputs, labels,
                                        n_classe=self.n_classes)

    self.loss = dice_loss
    self.hot = 1
    self.batchsize = 1
    self.lr_rate = 5e-3
    self.optimizer = optim.SGD(self.net.parameters(), lr=self.lr_rate)
    self.optimizer.zero_grad()
    self.validate_every_k_epochs = 1
    # Scheduler list : [lambdarule_1]
    self.lr_scheduler = get_scheduler(self.optimizer, "multistep",
                                      self.lr_rate)

    # Other
    self.classes_name = ['background', 'spleen', 'right kidney', 'left kidney',
                         'gallbladder', 'esophagus', 'liver', 'stomach',
                         'aorta', 'inferior vena cava',
                         'portal vein and splenic vein', 'pancreas',
                         'right adrenal gland', 'left adrenal gland']
    self.look_small = False