def main():
    args = get_arguments()
    utils.make_dirs(args.save)
    seed = 1777777  # same module-level seed as the other entry scripts
    name_model = args.model + "_" + args.dataset_name + "_" + utils.datestr()

    # TODO visual3D_temp.Basewriter package
    writer = SummaryWriter(log_dir='../runs/' + name_model, comment=name_model)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=11, skip_index_after=args.classes)

    if args.cuda:
        torch.cuda.manual_seed(seed)
        model = model.cuda()
        print("Model transferred to GPU.....")

    print("START TRAINING...")
    for epoch in range(1, args.nEpochs + 1):
        train_stats = train.train_dice(args, epoch, model, training_generator, optimizer, criterion)
        val_stats = train.test_dice(args, epoch, model, val_generator, criterion)
        # old: utils.write_train_val_score(writer, epoch, train_stats, val_stats)
        model.save_checkpoint(args.save, epoch, val_stats[0], optimizer=optimizer)
def main():
    args = get_arguments()
    seed = 1777777  # same module-level seed as the other entry scripts
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='/home/mulns/My_project/VV/MedicalZooPytorch/datasets')
    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=args.classes)
    if args.cuda:
        # model = torch.nn.DataParallel(model)
        model = model.cuda()
        print("Model transferred to GPU.....")

    trainer = train.Trainer(args, model, criterion, optimizer,
                            train_data_loader=training_generator,
                            valid_data_loader=val_generator,
                            lr_scheduler=None)
    print("START TRAINING...")
    trainer.training()
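# Several of these entry scripts call utils.reproducibility(args, seed) without
# showing its body. A minimal sketch of such a helper, assuming it performs the
# same seeding steps that one of the scripts below does inline (torch, numpy,
# cuDNN flags); this is illustrative, not the project's actual implementation.
import numpy as np
import torch
import torch.backends.cudnn as cudnn


def reproducibility(args, seed):
    torch.manual_seed(seed)
    if args.cuda:
        torch.cuda.manual_seed(seed)
    np.random.seed(seed)
    cudnn.deterministic = True  # prefer deterministic cuDNN kernels over speed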
def get_viz_set(self, test_subject=0):
    """
    Returns total 3D input volumes (T1, T1ce, T2, FLAIR) and segmentation maps.
    3D total vol shape: torch.Size([1, 144, 192, 256])
    """
    TEST_SUBJECT = test_subject
    path_T1 = self.list_IDsT1[TEST_SUBJECT]
    path_T1ce = self.list_IDsT1ce[TEST_SUBJECT]
    path_T2 = self.list_IDsT2[TEST_SUBJECT]
    path_flair = self.list_IDsFlair[TEST_SUBJECT]
    label_path = self.labels[TEST_SUBJECT]

    segmentation_map = img_loader.load_medical_image(label_path, viz3d=True)
    img_t1_tensor = img_loader.load_medical_image(path_T1, type="T1", viz3d=True)
    img_t1ce_tensor = img_loader.load_medical_image(path_T1ce, type="T1ce", viz3d=True)
    img_t2_tensor = img_loader.load_medical_image(path_T2, type="T2", viz3d=True)
    img_flair_tensor = img_loader.load_medical_image(path_flair, type="FLAIR", viz3d=True)

    # TODO: save full volume as numpy
    if self.save:
        self.full_volume = []
        img_t1_tensor = self.find_reshaped_vol(img_t1_tensor)
        img_t1ce_tensor = self.find_reshaped_vol(img_t1ce_tensor)
        img_t2_tensor = self.find_reshaped_vol(img_t2_tensor)
        img_flair_tensor = self.find_reshaped_vol(img_flair_tensor)

        self.sub_vol_path = self.root + '/MICCAI_BraTS_2018_Data_Training/generated/visualize/'
        utils.make_dirs(self.sub_vol_path)

        for i in range(len(img_t1_tensor)):
            filename = self.sub_vol_path + 'id_' + str(TEST_SUBJECT) + '_VIZ_' + str(i) + '_'
            f_t1 = filename + 'T1.npy'
            f_t1ce = filename + 'T1CE.npy'
            f_t2 = filename + 'T2.npy'
            f_flair = filename + 'FLAIR.npy'
            f_seg = filename + 'seg.npy'

            np.save(f_t1, img_t1_tensor[i])
            np.save(f_t1ce, img_t1ce_tensor[i])
            np.save(f_t2, img_t2_tensor[i])
            np.save(f_flair, img_flair_tensor[i])
            np.save(f_seg, segmentation_map[i])
            # note: only the T1, T2 and seg paths are kept in full_volume here
            self.full_volume.append((f_t1, f_t2, f_seg))
        print("Full validation volume has been generated")
    else:
        self.full_volume = (img_t1_tensor, img_t2_tensor, segmentation_map)
def main():
    args = get_arguments()
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    ## FOR REPRODUCIBILITY OF RESULTS
    seed = 1777777
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)
    utils.save_arguments(args, args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    # criterion = create_loss('CrossEntropyLoss')  # dead assignment: immediately overwritten below
    criterion = DiceLoss(classes=args.classes, weight=torch.tensor([0.1, 1, 1, 1]).cuda())

    if args.cuda:
        model = model.cuda()
        print("Model transferred to GPU.....")

    trainer = Trainer(args, model, criterion, optimizer,
                      train_data_loader=training_generator,
                      valid_data_loader=val_generator,
                      lr_scheduler=None)
    print("START TRAINING...")
    trainer.training()
def main():
    args = get_arguments()
    seed = 1777777  # same module-level seed as the other entry scripts
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
def main():
    args = get_arguments()
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"

    ## FOR REPRODUCIBILITY OF RESULTS
    seed = 1777777
    torch.manual_seed(seed)
    if args.cuda:
        torch.cuda.manual_seed(seed)
    np.random.seed(seed)
    cudnn.deterministic = True
    # FOR FASTER GPU TRAINING WHEN INPUT SIZE DOESN'T VARY
    # cudnn.benchmark = True

    utils.make_dirs(args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=args.classes)
    if args.cuda:
        model = model.cuda()
        print("Model transferred to GPU.....")

    trainer = Trainer(args, model, criterion, optimizer,
                      train_data_loader=training_generator,
                      valid_data_loader=val_generator,
                      lr_scheduler=None)
    print("START TRAINING...")
    trainer.training()
def __init__(self, mode, sub_task='lung', split=0.2, fold=0, n_classes=2, samples=10,
             dataset_path='../datasets', crop_dim=(32, 32, 32)):
    print("MANDIBLE SEGMENTATION DATASET")
    self.CLASSES = n_classes
    self.fold = int(fold)
    self.crop_size = crop_dim
    self.full_vol_dim = (256, 256, 192)  # width, height, slice
    self.mode = mode
    self.full_volume = None
    self.affine = None
    self.list = []
    self.samples = samples

    subvol = '_vol_' + str(crop_dim[0]) + 'x' + str(crop_dim[1]) + 'x' + str(crop_dim[2])
    self.sub_vol_path = dataset_path + '/MRBrainS18/subvol_generated/' + mode + subvol + '/'
    utils.make_dirs(self.sub_vol_path)

    self.train_images, self.train_labels, self.val_labels, self.val_images = [], [], [], []
    list_images = sorted(glob.glob(os.path.join(dataset_path, 'MRBrainS18/images/*')))
    list_labels = sorted(glob.glob(os.path.join(dataset_path, 'MRBrainS18/labels/*')))
    len_of_data = len(list_images)

    # pick the fold-th window of size split*len as training, the rest as validation
    for i in range(len_of_data):
        fold_start = self.fold * int(split * len_of_data)
        if fold_start <= i < fold_start + int(split * len_of_data):
            self.train_images.append(list_images[i])
            self.train_labels.append(list_labels[i])
        else:
            self.val_images.append(list_images[i])
            self.val_labels.append(list_labels[i])

    if mode == 'train':
        self.list_IDs = self.train_images
        self.list_labels = self.train_labels
    elif mode == 'val':
        self.list_IDs = self.val_images
        self.list_labels = self.val_labels

    self.list = create_sub_volumes(self.list_IDs, self.list_labels,
                                   dataset_name='covid19seg', mode=mode, samples=samples,
                                   full_vol_dim=self.full_vol_dim, crop_size=self.crop_size,
                                   sub_vol_path=self.sub_vol_path)

    print("{} SAMPLES = {}".format(mode, len(self.list)))
def main():
    args = get_arguments()
    seed = 1777777  # same module-level seed as the other entry scripts
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=args.classes)
    if args.cuda:
        model = model.cuda()
        print("Model transferred to GPU.....")

    trainer = train.Trainer(args, model, criterion, optimizer,
                            train_data_loader=training_generator,
                            valid_data_loader=val_generator,
                            lr_scheduler=None)
    print("START TRAINING...")
    trainer.training()

    visualize_3D_no_overlap_new(args, full_volume, affine, model, 10, args.dim)
def main():
    args = get_arguments()
    seed = 1777777
    torch.manual_seed(seed)
    if args.cuda:
        torch.cuda.manual_seed(seed)
    np.random.seed(seed)
    cudnn.deterministic = True
    # NOTE: benchmark=True lets cuDNN pick non-deterministic algorithms,
    # which can undermine the deterministic flag set above.
    cudnn.benchmark = True

    utils.make_dirs(args.save)
    name_model = args.model + "_" + args.dataset_name + "_" + utils.datestr()
    # TODO visual3D_temp.Basewriter package
    writer = SummaryWriter(log_dir='../runs/' + name_model, comment=name_model)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    if args.cuda:
        model = model.cuda()
        print("Model transferred to GPU.....")

    print("START TRAINING...")
    for epoch in range(1, args.nEpochs + 1):
        train(args, model, training_generator, optimizer, epoch, writer)
        val_metrics, confusion_matrix = validation(args, model, val_generator, epoch, writer)
def main():
    args = get_arguments()
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,2"

    ## FOR REPRODUCIBILITY OF RESULTS
    seed = 1777777
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)
    name_model = args.model + "_" + args.dataset_name + "_" + utils.datestr()
    # TODO visual3D_temp.Basewriter package
    writer = SummaryWriter(log_dir='../runs/' + name_model, comment=name_model)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    if args.cuda:
        model = model.cuda()
        print("Model transferred to GPU.....")

    print("START TRAINING...")
    for epoch in range(1, args.nEpochs + 1):
        train(args, model, training_generator, optimizer, epoch, writer)
        val_metrics, confusion_matrix = validation(args, model, val_generator, epoch, writer)
def __init__(self, dataset_path='./data', voxels_space=(2, 2, 2), modalities=2,
             to_canonical=False, save=True):
    """
    :param dataset_path: the extracted path that contains the desired images
    :param voxels_space: for resampling the voxel space
    :param modalities: 1 for T1 only, 2 for T1 and T2
    :param to_canonical: convert the coordinates to RAS orientation; for more info see
        https://www.slicer.org/wiki/Coordinate_systems
    :param save: save the generated data offline for faster reading and lower RAM usage
    """
    self.root = str(dataset_path)
    self.pathT1 = self.root + '/ixi/T1/'
    self.pathT2 = self.root + '/ixi/T2/'
    self.save = save
    self.CLASSES = 4
    self.full_vol_dim = (150, 256, 256)  # slice, width, height
    self.voxels_space = voxels_space
    self.modalities = str(modalities)
    self.list = []
    self.full_volume = None
    self.to_canonical = to_canonical
    self.affine = None

    subvol = '_vol_' + str(self.voxels_space[0]) + 'x' + str(self.voxels_space[1]) + 'x' + str(
        self.voxels_space[2])
    if self.save:
        self.sub_vol_path = self.root + '/ixi/generated/' + subvol + '/'
        utils.make_dirs(self.sub_vol_path)

    print(self.pathT1)
    self.list_IDsT1 = sorted(glob.glob(os.path.join(self.pathT1, '*T1.nii.gz')))
    self.list_IDsT2 = sorted(glob.glob(os.path.join(self.pathT2, '*T2.nii.gz')))
    self.affine = img_loader.load_affine_matrix(self.list_IDsT1[0])
    self.create_input_data()
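# The to_canonical flag above refers to reorienting volumes to RAS axes.
# A minimal sketch of how that is done with nibabel; the file name is a
# hypothetical example, not a path from this dataset.
import nibabel as nib

img = nib.load('example_T1.nii.gz')        # hypothetical input file
canonical = nib.as_closest_canonical(img)  # reorient to the closest RAS orientation
print(nib.aff2axcodes(canonical.affine))   # ('R', 'A', 'S')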
def __init__(self, args, mode, dataset_path='../datasets', classes=4, dim=(32, 32, 32),
             split_id=0, samples=1000, load=False):
    self.mode = mode
    self.root = dataset_path
    self.classes = classes
    dataset_name = "mrbrains" + str(classes)
    self.training_path = self.root + '/mrbrains_2018/training'
    self.dirs = os.listdir(self.training_path)
    self.samples = samples
    self.list = []
    self.full_vol_size = (240, 240, 48)
    self.threshold = 0.1
    self.crop_dim = dim
    self.list_flair = []
    self.list_ir = []
    self.list_reg_ir = []
    self.list_reg_t1 = []
    self.labels = []
    self.full_volume = None
    self.save_name = self.root + '/mrbrains_2018/training/mrbrains_2018-classes-' + str(
        classes) + '-list-' + mode + '-samples-' + str(samples) + '.txt'

    if load:
        ## load pre-generated data
        self.list = utils.load_list(self.save_name)
        return

    subvol = '_vol_' + str(dim[0]) + 'x' + str(dim[1]) + 'x' + str(dim[2])
    self.sub_vol_path = self.root + '/mrbrains_2018/generated/' + mode + subvol + '/'
    utils.make_dirs(self.sub_vol_path)

    list_reg_t1 = sorted(glob.glob(os.path.join(self.training_path, '*/pr*/*g_T1.nii.gz')))
    list_reg_ir = sorted(glob.glob(os.path.join(self.training_path, '*/pr*/*g_IR.nii.gz')))
    list_flair = sorted(glob.glob(os.path.join(self.training_path, '*/pr*/*AIR.nii.gz')))
    labels = sorted(glob.glob(os.path.join(self.training_path, '*/*egm.nii.gz')))
    self.affine = img_loader.load_affine_matrix(list_reg_t1[0])

    split_id = int(split_id)
    if mode == 'val':
        # keep only the held-out subject for validation/visualization
        labels = [labels[split_id]]
        list_reg_t1 = [list_reg_t1[split_id]]
        list_reg_ir = [list_reg_ir[split_id]]
        list_flair = [list_flair[split_id]]
        self.full_volume = get_viz_set(list_reg_t1, list_reg_ir, list_flair, labels,
                                       dataset_name=dataset_name)
    else:
        # train on everything except the held-out subject
        labels.pop(split_id)
        list_reg_t1.pop(split_id)
        list_reg_ir.pop(split_id)
        list_flair.pop(split_id)

    self.list = create_sub_volumes(list_reg_t1, list_reg_ir, list_flair, labels,
                                   dataset_name=dataset_name, mode=mode, samples=samples,
                                   full_vol_dim=self.full_vol_size, crop_size=self.crop_dim,
                                   sub_vol_path=self.sub_vol_path, th_percent=self.threshold)
    utils.save_list(self.save_name, self.list)
def __init__(self, mode, dataset_path='.././datasets', split_idx=150, crop_dim=(512, 512),
             samples=100, classes=7, save=True):
    """
    :param mode: 'train', 'val'
    :param dataset_path: root folder that holds the image and label datasets
    :param crop_dim: 2-element tuple that decides the crop values
    :param samples: number of sub-grids (patches of the input image) to create
    """
    image_paths = sorted(glob.glob(
        dataset_path + "/MICCAI_2019_pathology_challenge/Train Imgs/Train Imgs/*.jpg"))
    label_paths = sorted(glob.glob(
        dataset_path + "/MICCAI_2019_pathology_challenge/Labels/*.png"))

    # TODO problem with random shuffle in val and train
    # TODO cope with 3d affine and full volume for better generalization
    image_paths, label_paths = utils.shuffle_lists(image_paths, label_paths)

    self.full_volume = None
    self.affine = None
    self.slices = 244  # dataset instances
    self.mode = mode
    self.crop_dim = crop_dim
    self.sample_list = []
    self.samples = samples
    self.save = save
    self.root = dataset_path

    self.per_image_sample = int(self.samples / self.slices)
    if self.per_image_sample < 1:
        self.per_image_sample = 1
    print("per-image samples:", self.per_image_sample)

    sub_grid = '_2dgrid_' + str(crop_dim[0]) + 'x' + str(crop_dim[1])
    if self.save:
        self.sub_vol_path = self.root + '/MICCAI_2019_pathology_challenge/generated/' + mode + sub_grid + '/'
        utils.make_dirs(self.sub_vol_path)

    if self.mode == 'train':
        self.list_imgs = image_paths[0:split_idx]
        self.list_labels = label_paths[0:split_idx]
    elif self.mode == 'val':
        self.list_imgs = image_paths[split_idx:]
        self.list_labels = label_paths[split_idx:]

    self.generate_samples()
def __init__(self, mode, dataset_path='../datasets', classes=4, dim=(32, 32, 32),
             fold_id=0, samples=1000, save=True):
    self.mode = mode
    self.root = dataset_path
    self.classes = classes
    self.training_path = self.root + '/mrbrains_2018/training'
    self.dirs = os.listdir(self.training_path)
    self.samples = samples
    self.save = save
    self.list = []
    self.full_vol_size = (240, 240, 48)
    self.crop_dim = dim
    self.fold = str(fold_id)
    self.list_flair = []
    self.list_ir = []
    self.list_reg_ir = []
    self.list_reg_t1 = []
    self.labels = []
    self.full_volume = None

    if self.save:
        subvol = '_vol_' + str(dim[0]) + 'x' + str(dim[1]) + 'x' + str(dim[2])
        self.sub_vol_path = self.root + '/mrbrains_2018/generated/' + mode + subvol + '/'
        utils.make_dirs(self.sub_vol_path)

    self.list_reg_t1 = sorted(glob.glob(os.path.join(self.training_path, '*/pr*/*g_T1.nii.gz')))
    self.list_reg_ir = sorted(glob.glob(os.path.join(self.training_path, '*/pr*/*g_IR.nii.gz')))
    self.list_flair = sorted(glob.glob(os.path.join(self.training_path, '*/pr*/*AIR.nii.gz')))
    self.labels = sorted(glob.glob(os.path.join(self.training_path, '*/*egm.nii.gz')))
    self.affine = img_loader.load_affine_matrix(self.list_reg_t1[0])

    fold_id = int(fold_id)
    if mode == 'val':
        self.labels = [self.labels[fold_id]]
        self.list_reg_t1 = [self.list_reg_t1[fold_id]]
        self.list_reg_ir = [self.list_reg_ir[fold_id]]
        self.list_flair = [self.list_flair[fold_id]]
        self.get_viz_set()
    else:
        self.labels.pop(fold_id)
        self.list_reg_t1.pop(fold_id)
        self.list_reg_ir.pop(fold_id)
        self.list_flair.pop(fold_id)

    self.get_samples()
def get_viz_set(self, test_subject=0):
    """
    Returns total 3D input volumes (T1 and T2 or more) and segmentation maps.
    3D total vol shape: torch.Size([1, 144, 192, 256])
    """
    TEST_SUBJECT = test_subject
    path_T1 = self.list_IDsT1[TEST_SUBJECT]
    path_T2 = self.list_IDsT2[TEST_SUBJECT]
    label_path = self.labels[TEST_SUBJECT]

    segmentation_map = img_loader.load_medical_image(label_path, viz3d=True)
    img_t1_tensor = img_loader.load_medical_image(path_T1, type="T1", viz3d=True)
    img_t2_tensor = img_loader.load_medical_image(path_T2, type="T2", viz3d=True)
    segmentation_map = self.fix_seg_map(segmentation_map)

    # TODO: save full volume as numpy
    if self.save:
        self.full_volume = []
        segmentation_map = segmentation_map.reshape(-1, self.crop_size[0], self.crop_size[1],
                                                    self.crop_size[2])
        img_t1_tensor = img_t1_tensor.reshape(-1, self.crop_size[0], self.crop_size[1],
                                              self.crop_size[2])
        # bug fix: the original reshaped img_t1_tensor here, silently overwriting the T2 volume
        img_t2_tensor = img_t2_tensor.reshape(-1, self.crop_size[0], self.crop_size[1],
                                              self.crop_size[2])

        self.sub_vol_path = self.root + '/iseg_2017/generated/visualize/'
        utils.make_dirs(self.sub_vol_path)

        for i in range(len(img_t1_tensor)):
            filename = self.sub_vol_path + 'id_' + str(TEST_SUBJECT) + '_VIZ_' + str(i) + '_'
            f_t1 = filename + 'T1.npy'
            f_t2 = filename + 'T2.npy'
            f_seg = filename + 'seg.npy'
            np.save(f_t1, img_t1_tensor[i])
            np.save(f_t2, img_t2_tensor[i])
            np.save(f_seg, segmentation_map[i])
            self.full_volume.append((f_t1, f_t2, f_seg))
        print("Full validation volume has been generated")
    else:
        self.full_volume = (img_t1_tensor, img_t2_tensor, segmentation_map)
def __init__(self, args):
    name_model = args.model + "_" + args.dataset_name + "_" + utils.datestr()
    # bug fix: the original also prefixed name_model with args.log_dir,
    # which duplicated the log directory in the SummaryWriter path
    self.writer = SummaryWriter(log_dir=args.log_dir + name_model, comment=name_model)

    utils.make_dirs(args.save)
    self.csv_train, self.csv_val = self.create_stats_files(args.save)
    self.dataset_name = args.dataset_name
    self.classes = args.classes
    self.label_names = dict_class_names[args.dataset_name]
    self.data = self.create_data_structure()
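# A minimal sketch of how per-epoch statistics might be pushed through a
# SummaryWriter like the one created above; the tag names and the dummy dice
# values are illustrative assumptions, not the project's actual logging keys.
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir='../runs/demo')
for epoch, (train_dice, val_dice) in enumerate([(0.62, 0.58), (0.71, 0.66)], start=1):
    writer.add_scalar('dice/train', train_dice, epoch)  # dummy values for illustration
    writer.add_scalar('dice/val', val_dice, epoch)
writer.close()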
def main():
    args = get_arguments()
    seed = 1777777  # same module-level seed as the other entry scripts
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=args.classes)
    if args.cuda:
        model = model.cuda()

    trainer = train.Trainer(args, model, criterion, optimizer,
                            train_data_loader=training_generator,
                            valid_data_loader=val_generator)
    trainer.training()
def main():
    args = get_arguments()
    seed = 1777777  # same module-level seed as the other entry scripts
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=args.classes)
    # criterion = DiceLoss(classes=args.classes, skip_index_after=2,
    #                      weight=torch.tensor([0.00001, 1, 1, 1]).cuda())
    if args.cuda:
        model = model.cuda()
        print("Model transferred to GPU.....")

    trainer = train.Trainer(args, model, criterion, optimizer,
                            train_data_loader=training_generator,
                            valid_data_loader=val_generator)
    print("START TRAINING...")
    trainer.training()
def main():
    args = get_arguments()
    seed = 1777777  # same module-level seed as the other entry scripts
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='./datasets')
    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=args.classes)
    # print("training_generator shape:", training_generator.dim())
    # print("val_generator shape:", val_generator.dim())
    if args.cuda:
        model = model.cuda()

    print("start training...")
    # torch.save(training_generator, "training_generator.tch")
    # torch.save(val_generator, "val_generator.tch")
    trainer = train.Trainer(args, model, criterion, optimizer,
                            train_data_loader=training_generator,
                            valid_data_loader=val_generator)
    trainer.training()
def main():
    args = get_arguments()
    utils.make_dirs(args.save)
    train_f, val_f = utils.create_stats_files(args.save)
    name_model = args.model + "_" + args.dataset_name + "_" + utils.datestr()
    writer = SummaryWriter(log_dir='../runs/' + name_model, comment=name_model)
    best_prec1 = 100.
    seed = 1777777  # same module-level seed as the other entry scripts

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=args.classes)

    if args.cuda:
        torch.cuda.manual_seed(seed)
        model = model.cuda()
        print("Model transferred to GPU.....")

    print("START TRAINING...")
    for epoch in range(1, args.nEpochs + 1):
        train_stats = train.train_dice(args, epoch, model, training_generator, optimizer,
                                       criterion, train_f, writer)
        val_stats = train.test_dice(args, epoch, model, val_generator, criterion, val_f, writer)
        utils.write_train_val_score(writer, epoch, train_stats, val_stats)
        model.save_checkpoint(args.save, epoch, val_stats[0], optimizer=optimizer)

        # if epoch % 5 == 0:
        #     utils.visualize_no_overlap(args, full_volume, affine, model, epoch, DIM, writer)
        # utils.save_model(model, args, val_stats[0], epoch, best_prec1)

    train_f.close()
    val_f.close()
def main():
    args = get_arguments()
    utils.make_dirs(args.save)
    train_f, val_f = utils.create_stats_files(args.save)
    name_model = args.model + "_" + args.dataset_name + "_" + utils.datestr()
    writer = SummaryWriter(log_dir='../runs/' + name_model, comment=name_model)

    best_pred = 1.01
    samples_train = 200
    samples_val = 200
    seed = 1777777  # same module-level seed as the other entry scripts

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets', samples_train=samples_train, samples_val=samples_val)
    model, optimizer = medzoo.create_model(args)
    criterion = medzoo.DiceLoss2D(args.classes)

    if args.cuda:
        torch.cuda.manual_seed(seed)
        model = model.cuda()

    for epoch in range(1, args.nEpochs + 1):
        train_stats = train.train_dice(args, epoch, model, training_generator, optimizer,
                                       criterion, train_f, writer)
        val_stats = train.test_dice(args, epoch, model, val_generator, criterion, val_f, writer)
        utils.write_train_val_score(writer, epoch, train_stats, val_stats)
        best_pred = utils.save_model(model=model, args=args, dice_loss=val_stats[0],
                                     epoch=epoch, best_pred_loss=best_pred)

    train_f.close()
    val_f.close()
def main():
    args = get_arguments()
    if args.distributed:
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        args.world_size = torch.distributed.get_world_size()

    assert torch.backends.cudnn.enabled, "Amp requires cudnn backend to be enabled."
    torch.backends.cudnn.benchmark = True

    seed = 1777777  # same module-level seed as the other entry scripts
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=11, skip_index_after=args.classes)

    if args.sync_bn:
        model = apex.parallel.convert_syncbn_model(model)
    if args.cuda:
        model = model.cuda()
        print("Model transferred to GPU.....")
    if args.distributed:
        # DDP here is apex.parallel.DistributedDataParallel
        model = DDP(model, delay_allreduce=True)

    trainer = train.Trainer(args, model, criterion, optimizer,
                            train_data_loader=training_generator,
                            valid_data_loader=val_generator,
                            lr_scheduler=None)
    print("START TRAINING...")
    trainer.training()
    # start trial (tail of objective_func; the full definition appears below)
    trainer.train(nets, criterions, optimizers, train_loader, test_loader, logs,
                  trial=trial, **kwargs)
    acc = 100 - logs[0]["epoch_log"][config.trainer.epochs]["test_accuracy"]
    return acc


# ## Create study object

# In[ ]:

utils.make_dirs(args.optuna_dir)
sampler = optuna.samplers.RandomSampler()
pruner = optuna.pruners.SuccessiveHalvingPruner(min_resource=1, reduction_factor=2,
                                                min_early_stopping_rate=0)
db_path = os.path.join(args.optuna_dir, "optuna.db")
study = optuna.create_study(storage=f"sqlite:///{db_path}",
                            study_name='experiment01',
                            sampler=sampler,
                            pruner=pruner,
                            direction="minimize",
                            load_if_exists=True)

# ## Start optimization
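# With the study created above and objective_func defined later in this section,
# optimization would be launched along these lines; n_trials is an illustrative
# assumption, not a value from the original notebook.
study.optimize(objective_func, n_trials=100)
print("best value:", study.best_value)
print("best params:", study.best_trial.params)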
def __init__(self, mode, dataset_path='./datasets', crop_dim=(32, 32, 32), split_id=1,
             samples=1000, load=False):
    """
    :param mode: 'train','val','test'
    :param dataset_path: root dataset folder
    :param crop_dim: subvolume tuple
    :param split_id: index that splits training from validation subjects
    :param samples: number of sub-volumes that you want to create
    """
    self.mode = mode
    self.root = str(dataset_path)
    self.training_path = self.root + '/iseg_2017/iSeg-2017-Training/'
    self.testing_path = self.root + '/iseg_2017/iSeg-2017-Testing/'
    self.CLASSES = 4
    self.full_vol_dim = (144, 192, 256)  # slice, width, height
    self.crop_size = crop_dim
    self.list = []
    self.samples = samples
    self.full_volume = None
    self.save_name = self.root + '/iseg_2017/iSeg-2017-Training/iseg2017-list-' + mode + \
                     '-samples-' + str(samples) + '.txt'

    if load:
        self.list = utils.load_list(self.save_name)
        list_IDsT1 = sorted(glob.glob(os.path.join(self.training_path, '*T1.img')))
        self.affine = img_loader.load_affine_matrix(list_IDsT1[0])
        return

    subvol = '_vol_' + str(crop_dim[0]) + 'x' + str(crop_dim[1]) + 'x' + str(crop_dim[2])
    self.sub_vol_path = self.root + '/iseg_2017/generated/' + mode + subvol + '/'
    utils.make_dirs(self.sub_vol_path)

    list_IDsT1 = sorted(glob.glob(os.path.join(self.training_path, '*T1.img')))
    list_IDsT2 = sorted(glob.glob(os.path.join(self.training_path, '*T2.img')))
    labels = sorted(glob.glob(os.path.join(self.training_path, '*label.img')))
    self.affine = img_loader.load_affine_matrix(list_IDsT1[0])

    if self.mode == 'train':
        list_IDsT1 = list_IDsT1[:split_id]
        list_IDsT2 = list_IDsT2[:split_id]
        labels = labels[:split_id]
        self.list = create_sub_volumes(list_IDsT1, list_IDsT2, labels,
                                       dataset_name="iseg2017", mode=mode, samples=samples,
                                       full_vol_dim=self.full_vol_dim, crop_size=self.crop_size,
                                       sub_vol_path=self.sub_vol_path, threshold=10)
    elif self.mode == 'val':
        list_IDsT1 = list_IDsT1[split_id:]
        # bug fix: the original sliced list_IDsT2[:split_id:], keeping the training subjects
        list_IDsT2 = list_IDsT2[split_id:]
        labels = labels[split_id:]
        self.list = create_sub_volumes(list_IDsT1, list_IDsT2, labels,
                                       dataset_name="iseg2017", mode=mode, samples=samples,
                                       full_vol_dim=self.full_vol_dim, crop_size=self.crop_size,
                                       sub_vol_path=self.sub_vol_path, threshold=10)
        self.full_volume = get_viz_set(list_IDsT1, list_IDsT2, labels)
    elif self.mode == 'test':
        self.list_IDsT1 = sorted(glob.glob(os.path.join(self.testing_path, '*T1.img')))
        self.list_IDsT2 = sorted(glob.glob(os.path.join(self.testing_path, '*T2.img')))
        self.labels = None

    utils.save_list(self.save_name, self.list)
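# Hypothetical usage of the loader above: assuming the __init__ belongs to a
# torch Dataset subclass (named ISEG2017Dataset purely for illustration), it
# would typically be wrapped in a DataLoader for batched training.
from torch.utils.data import DataLoader

train_set = ISEG2017Dataset(mode='train', dataset_path='./datasets',
                            crop_dim=(32, 32, 32), split_id=1, samples=1000)
train_loader = DataLoader(train_set, batch_size=4, shuffle=True, num_workers=2)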
def __init__(self, args, mode, dataset_path='./datasets', classes=5, crop_dim=(32, 32, 32),
             split_idx=10, samples=10, load=False):
    """
    :param mode: 'train','val','test'
    :param dataset_path: root dataset folder
    :param crop_dim: subvolume tuple
    :param split_idx: 1 to 10 values
    :param samples: number of sub-volumes that you want to create
    """
    self.mode = mode
    self.root = str(dataset_path)
    self.training_path = self.root + '/MICCAI_BraTS_2018_Data_Training/'
    self.testing_path = self.root + ' '  # placeholder path, kept as in the original
    self.CLASSES = 4
    self.full_vol_dim = (240, 240, 155)  # slice, width, height
    self.crop_size = crop_dim
    self.threshold = args.threshold
    self.normalization = args.normalization
    self.augmentation = args.augmentation
    self.list = []
    self.samples = samples
    self.full_volume = None
    self.classes = classes
    self.save_name = self.root + '/MICCAI_BraTS_2018_Data_Training/brats2018-list-' + mode + \
                     '-samples-' + str(samples) + '.txt'

    if self.augmentation:
        self.transform = augment3D.RandomChoice(
            transforms=[augment3D.GaussianNoise(mean=0, std=0.01),
                        augment3D.RandomFlip(),
                        augment3D.ElasticTransform()], p=0.5)

    if load:
        ## load pre-generated data
        list_IDsT1 = sorted(glob.glob(os.path.join(self.training_path, '*GG/*/*t1.nii.gz')))
        self.affine = img_loader.load_affine_matrix(list_IDsT1[0])
        self.list = utils.load_list(self.save_name)
        return

    subvol = '_vol_' + str(crop_dim[0]) + 'x' + str(crop_dim[1]) + 'x' + str(crop_dim[2])
    self.sub_vol_path = self.root + '/MICCAI_BraTS_2018_Data_Training/generated/' + mode + subvol + '/'
    utils.make_dirs(self.sub_vol_path)

    list_IDsT1 = sorted(glob.glob(os.path.join(self.training_path, '*GG/*/*t1.nii.gz')))
    list_IDsT1ce = sorted(glob.glob(os.path.join(self.training_path, '*GG/*/*t1ce.nii.gz')))
    list_IDsT2 = sorted(glob.glob(os.path.join(self.training_path, '*GG/*/*t2.nii.gz')))
    list_IDsFlair = sorted(glob.glob(os.path.join(self.training_path, '*GG/*/*_flair.nii.gz')))
    labels = sorted(glob.glob(os.path.join(self.training_path, '*GG/*/*_seg.nii.gz')))
    # print(len(list_IDsT1), len(list_IDsT2), len(list_IDsFlair), len(labels))
    self.affine = img_loader.load_affine_matrix(list_IDsT1[0])

    if self.mode == 'train':
        list_IDsT1 = list_IDsT1[:split_idx]
        list_IDsT1ce = list_IDsT1ce[:split_idx]
        list_IDsT2 = list_IDsT2[:split_idx]
        list_IDsFlair = list_IDsFlair[:split_idx]
        labels = labels[:split_idx]
        self.list = create_sub_volumes(list_IDsT1, list_IDsT1ce, list_IDsT2, list_IDsFlair,
                                       labels, dataset_name="brats2018", mode=mode,
                                       samples=samples, full_vol_dim=self.full_vol_dim,
                                       crop_size=self.crop_size,
                                       sub_vol_path=self.sub_vol_path,
                                       normalization=self.normalization,
                                       th_percent=self.threshold)
    elif self.mode == 'val':
        list_IDsT1 = list_IDsT1[split_idx:]
        list_IDsT1ce = list_IDsT1ce[split_idx:]
        list_IDsT2 = list_IDsT2[split_idx:]
        list_IDsFlair = list_IDsFlair[split_idx:]
        labels = labels[split_idx:]
        self.list = create_sub_volumes(list_IDsT1, list_IDsT1ce, list_IDsT2, list_IDsFlair,
                                       labels, dataset_name="brats2018", mode=mode,
                                       samples=samples, full_vol_dim=self.full_vol_dim,
                                       crop_size=self.crop_size,
                                       sub_vol_path=self.sub_vol_path,
                                       normalization=self.normalization,
                                       th_percent=self.threshold)
    elif self.mode == 'test':
        self.list_IDsT1 = sorted(glob.glob(os.path.join(self.testing_path, '*GG/*/*t1.nii.gz')))
        self.list_IDsT1ce = sorted(glob.glob(os.path.join(self.testing_path, '*GG/*/*t1ce.nii.gz')))
        self.list_IDsT2 = sorted(glob.glob(os.path.join(self.testing_path, '*GG/*/*t2.nii.gz')))
        self.list_IDsFlair = sorted(glob.glob(os.path.join(self.testing_path, '*GG/*/*_flair.nii.gz')))
        self.labels = None

    utils.save_list(self.save_name, self.list)
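# augment3D.RandomChoice is used above but not shown. A minimal sketch of what
# such a wrapper could look like, assuming it applies one randomly chosen
# transform with probability p and is otherwise a no-op; the call signature is
# an assumption, not the project's actual API.
import random


class RandomChoice:
    def __init__(self, transforms, p=0.5):
        self.transforms = transforms
        self.p = p

    def __call__(self, img_tensors, label):
        if random.random() < self.p:
            transform = random.choice(self.transforms)
            return transform(img_tensors, label)
        return img_tensors, label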
def test_net(args, dataset, model, kernel_dim=(32, 32, 32)):
    # import ipdb; ipdb.set_trace()
    num_imgs = len(dataset)
    public_id_list_all = []
    label_id_list_all = []
    confidence_list_all = []
    label_code_list_all = []
    utils.make_dirs(args.nii_save_path)

    for i in range(num_imgs):
        print('%d/%d is tested' % (i, num_imgs))
        x, target, img_path, img_affine, img_hdr = dataset.__getitem__(i)
        if args.cuda:
            x = x.unsqueeze(0).cuda()
            # target = target.cuda()

        # x = full_volume[:-1, ...].detach()
        # target = full_volume[-1, ...].unsqueeze(0).detach()
        # target = full_volume[-1, ...].detach()
        h, w, d = target.shape
        # target_new = torch.zeros(6, h, w, d)
        # target_new[0][target == 0] = 1
        # target_new[1][target == 1] = 1
        # target_new[2][target == 2] = 1
        # target_new[3][target == 3] = 1
        # target_new[4][target == 4] = 1
        # target_new[5][target == 5] = 1
        # target = target_new

        modalities, D, H, W = x.shape
        kernel_dim = (512, 512, 48)  # NOTE: overrides the function argument
        kc, kh, kw = kernel_dim
        dc, dh, dw = kernel_dim  # stride

        # Pad to multiples of kernel_dim
        a = ((roundup(W, kw) - W) // 2 + W % 2, (roundup(W, kw) - W) // 2,
             (roundup(H, kh) - H) // 2 + H % 2, (roundup(H, kh) - H) // 2,
             (roundup(D, kc) - D) // 2 + D % 2, (roundup(D, kc) - D) // 2)
        x = F.pad(x, a)
        assert x.size(3) % kw == 0
        assert x.size(2) % kh == 0
        assert x.size(1) % kc == 0

        patches = x.unfold(1, kc, dc).unfold(2, kh, dh).unfold(3, kw, dw)
        unfold_shape = list(patches.size())
        patches = patches.contiguous().view(-1, modalities, kc, kh, kw)

        ## TODO torch stack
        # with torch.no_grad():
        #     output = model.inference(patches)
        number_of_volumes = patches.shape[0]
        predictions = []
        for vol_idx in range(number_of_volumes):  # renamed from i to avoid shadowing the image index
            input_tensor = patches[vol_idx, ...].unsqueeze(0)
            predictions.append(model.inference(input_tensor))
        output = torch.stack(predictions, dim=0).squeeze(1).detach()

        N, Classes, _, _, _ = output.shape
        output_unfold_shape = unfold_shape[1:]
        output_unfold_shape.insert(0, Classes)
        output = output.view(output_unfold_shape)

        output_c = output_unfold_shape[1] * output_unfold_shape[4]
        output_h = output_unfold_shape[2] * output_unfold_shape[5]
        output_w = output_unfold_shape[3] * output_unfold_shape[6]
        output = output.permute(0, 1, 4, 2, 5, 3, 6).contiguous()
        output = output.view(-1, output_c, output_h, output_w)
        # crop the padding away
        y = output[:, a[4]:output_c - a[5], a[2]:output_h - a[3], a[0]:output_w - a[1]]

        # x = x.unsqueeze(0)
        # y = model(x).squeeze(0).cpu().detach()
        # _, h, w, d = y.shape
        y = torch.nn.Softmax(dim=0)(y)
        y[y < 0.5] = 0
        # y = torch.nn.Sigmoid()(y)
        index_ = np.where(y != 0)
        # confidence_list = y[index_]
        # y_new = torch.zeros(h, w, d)
        # y[index_] = torch.from_numpy(index_[0]).float()

        pred = torch.zeros(h, w, d)
        pred_out = torch.zeros(h, w, d)
        # pred_out = morphology.erosion(pred_out)
        conf = torch.zeros(h, w, d)
        pred[(index_[1], index_[2], index_[3])] = torch.from_numpy(index_[0]).float()
        conf[(index_[1], index_[2], index_[3])] = y[index_]
        # pred = segmentation.clear_border(pred)
        # pred = morphology.erosion(pred)

        # DBSCAN-based small-cluster removal, kept commented out as in the original:
        # for i in range(1, 6):
        #     index_i = np.where(pred == i)
        #     if index_i[0].size == 0:
        #         continue
        #     fea_1 = np.vstack((index_i[0], index_i[1], index_i[2])).T
        #     label_pred = DBSCAN(eps=5).fit_predict(fea_1)
        #     label_counter = Counter(label_pred)
        #     labelId_toRm = []
        #     num_thresh = 10
        #     for label_id, num in label_counter.items():
        #         if num < num_thresh:
        #             labelId_toRm.append(label_id)
        #     if -1 not in labelId_toRm:
        #         labelId_toRm.append(-1)
        #     index_toRm_list = []
        #     for label_id in labelId_toRm:
        #         index_toRm_list.extend(np.where(label_pred == label_id)[0].tolist())
        #     xyz_toRm = fea_1[np.array(index_toRm_list)].T
        #     xyz_toRm_org = (xyz_toRm[0], xyz_toRm[1], xyz_toRm[2])
        #     pred[xyz_toRm_org] = 0.
        # pred = morphology.dilation(morphology.erosion(pred))

        # fig = plt.figure()
        # ax = fig.gca(projection='3d')
        label_id_cnt = 0
        label_code_list = []
        confidence_list = []
        public_id_list = []
        label_id_list = []
        for cls_id in range(1, 6):  # renamed from i to avoid shadowing the image index
            pred_i = torch.zeros(h, w, d)
            pred_i[np.where(pred == cls_id)] = 1
            pred_i = morphology.erosion(pred_i)
            pred_i = morphology.erosion(pred_i)
            # pred_i = morphology.dilation(morphology.erosion(pred_i))
            labels = measure.label(pred_i, connectivity=3)
            areas = [region.area for region in measure.regionprops(labels)]
            areas.sort()
            # keep only the two largest connected components per class
            if len(areas) > 2:
                for region in measure.regionprops(labels):
                    if region.area < areas[-2]:
                        coords_ = region.coords.T
                        coords_org = (coords_[0], coords_[1], coords_[2])
                        pred_i[coords_org] = 0
                labels = measure.label(pred_i, connectivity=3)

            for region in measure.regionprops(labels):
                if len(region.coords) < 10000:
                    continue
                label_code = int(pred[tuple(region.coords[0].tolist())])
                label_code = -1 if label_code == 5 else label_code
                label_code_list.append(label_code)
                label_id_cnt += 1
                label_id_list.append(label_id_cnt)
                coords_ = region.coords.T
                # ax.scatter(coords_[0], coords_[1], coords_[2],
                #            c=np.tile(np.array([label_id_cnt * 10]), len(coords_[2])))
                coords_org = (coords_[0], coords_[1], coords_[2])
                confidence = float(torch.sum(conf[coords_org]) / len(coords_org[0]))
                pred_out[coords_org] = label_id_cnt
                confidence_list.append(confidence)

        # plt.savefig('scatter.png')
        label_code_list.insert(0, 0)
        label_id_list.insert(0, 0)
        confidence_list.insert(0, 1)
        pred_cls_num = len(label_code_list)
        public_id = os.path.basename(img_path).split('-')[0]
        public_id_list = [public_id] * pred_cls_num

        public_id_list_all.extend(public_id_list)
        label_id_list_all.extend(label_id_list)
        confidence_list_all.extend(confidence_list)
        label_code_list_all.extend(label_code_list)

        test_nii_save_path = os.path.join(args.nii_save_path, public_id + '.nii.gz')
        # nib.Nifti1Image(pred, img_affine).to_filename(test_nii_save_path)
        newLabelImg = nib.Nifti1Image(pred_out.numpy(), img_affine)
        newLabelImg.set_data_dtype(np.dtype(np.float32))
        dimsImgToSave = len(pred_out.shape)
        newZooms = list(img_hdr.get_zooms()[:dimsImgToSave])
        if len(newZooms) < dimsImgToSave:
            # e.g. if the original image was 3D but a multi-channel image must be saved
            newZooms = newZooms + [1.0] * (dimsImgToSave - len(newZooms))
        newLabelImg.header.set_zooms(newZooms)
        nib.save(newLabelImg, test_nii_save_path)

    dataframe = pd.DataFrame({'public_id': public_id_list_all,
                              'label_id': label_id_list_all,
                              'confidence': confidence_list_all,
                              'label_code': label_code_list_all})
    dataframe.to_csv(args.nii_save_path + "/ribfrac-val-pred.csv", index=False, sep=',')
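# The pad/unfold/permute bookkeeping in test_net above is easy to get wrong.
# Below is a self-contained round trip on a toy single-channel volume showing
# how non-overlapping unfold patches are stitched back with a permute + view;
# it mirrors the class-dimension version above, minus the padding step.
import torch

D = H = W = 4  # toy volume, already divisible by the patch edge
k = 2          # cubic patch edge; stride == patch size, so no overlap
vol = torch.arange(D * H * W, dtype=torch.float32).reshape(D, H, W)

# tile into patches: shape (D//k, H//k, W//k, k, k, k)
patches = vol.unfold(0, k, k).unfold(1, k, k).unfold(2, k, k)
# ... per-patch inference would run here ...

# stitch back: interleave block indices with within-block offsets, then flatten
rec = patches.permute(0, 3, 1, 4, 2, 5).contiguous().view(D, H, W)
assert torch.equal(rec, vol)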
def __init__(self, mode, dataset_path='./datasets', crop_dim=(32, 32, 32), fold_id=1,
             samples=1000, save=True):
    """
    :param mode: 'train','val','test'
    :param dataset_path: root dataset folder
    :param crop_dim: subvolume tuple
    :param fold_id: 1 to 10 values
    :param samples: number of sub-volumes that you want to create
    """
    self.mode = mode
    self.root = str(dataset_path)
    self.training_path = self.root + '/iseg_2017/iSeg-2017-Training/'
    self.testing_path = self.root + '/iseg_2017/iSeg-2017-Testing/'
    self.save = save
    self.CLASSES = 4
    self.full_vol_dim = (144, 192, 256)  # slice, width, height
    self.crop_size = crop_dim
    self.fold = str(fold_id)
    self.list = []
    self.samples = samples
    self.full_volume = None

    subvol = '_vol_' + str(crop_dim[0]) + 'x' + str(crop_dim[1]) + 'x' + str(crop_dim[2])
    if self.save:
        self.sub_vol_path = self.root + '/iseg_2017/generated/' + mode + subvol + '/'
        utils.make_dirs(self.sub_vol_path)

    list_IDsT1 = sorted(glob.glob(os.path.join(self.training_path, '*T1.img')))
    list_IDsT2 = sorted(glob.glob(os.path.join(self.training_path, '*T2.img')))
    labels = sorted(glob.glob(os.path.join(self.training_path, '*label.img')))
    self.affine = img_loader.load_affine_matrix(list_IDsT1[0])

    training_labels, training_list_IDsT1, training_list_IDsT2 = [], [], []
    val_list_IDsT1, val_list_IDsT2, val_labels = [], [], []
    print("SELECT subject with ID {} for Validation".format(self.fold))

    # leave-one-subject-out split based on the subject id encoded in the filename
    for i in range(len(labels)):
        subject_id = (labels[i].split('/')[-1]).split('-')[1]
        if subject_id == self.fold:
            val_list_IDsT1.append(list_IDsT1[i])
            val_list_IDsT2.append(list_IDsT2[i])
            val_labels.append(labels[i])
        else:
            training_list_IDsT1.append(list_IDsT1[i])
            training_list_IDsT2.append(list_IDsT2[i])
            training_labels.append(labels[i])

    if self.mode == 'train':
        self.list_IDsT1 = training_list_IDsT1
        self.list_IDsT2 = training_list_IDsT2
        self.labels = training_labels
        # Generates datasets with non-empty sub-volumes!
        self.create_sub_volumes()
    elif self.mode == 'val':
        self.list_IDsT1 = val_list_IDsT1
        self.list_IDsT2 = val_list_IDsT2
        self.labels = val_labels
        self.create_sub_volumes()
        self.get_viz_set()
    elif self.mode == 'test':
        self.list_IDsT1 = sorted(glob.glob(os.path.join(self.testing_path, '*T1.img')))
        self.list_IDsT2 = sorted(glob.glob(os.path.join(self.testing_path, '*T2.img')))
        self.labels = None
def __init__(self, mode, dataset_path='./datasets', classes=5, crop_dim=(200, 200, 150),
             split_idx=260, samples=10, load=False):
    """
    :param mode: 'train','val','test'
    :param dataset_path: root dataset folder
    :param crop_dim: subvolume tuple
    :param split_idx: index that splits training from validation subjects
    :param samples: number of sub-volumes that you want to create
    """
    self.mode = mode
    self.root = str(dataset_path)
    self.training_path = self.root + '/brats2019/MICCAI_BraTS_2019_Data_Training/'
    self.testing_path = self.root + '/brats2019/MICCAI_BraTS_2019_Data_Validation/'
    self.full_vol_dim = (240, 240, 155)  # slice, width, height
    self.crop_size = crop_dim
    self.list = []
    self.samples = samples
    self.full_volume = None
    self.classes = classes
    self.save_name = self.root + '/brats2019/brats2019-list-' + mode + '-samples-' + str(
        samples) + '.txt'

    if load:
        self.list = utils.load_list(self.save_name)
        list_IDsT1 = sorted(glob.glob(os.path.join(self.training_path, '*GG/*/*t1.nii.gz')))
        self.affine = img_loader.load_affine_matrix(list_IDsT1[0])
        return

    subvol = '_vol_' + str(crop_dim[0]) + 'x' + str(crop_dim[1]) + 'x' + str(crop_dim[2])
    self.sub_vol_path = self.root + '/brats2019/MICCAI_BraTS_2019_Data_Training/generated/' + mode + subvol + '/'
    utils.make_dirs(self.sub_vol_path)

    list_IDsT1 = sorted(glob.glob(os.path.join(self.training_path, '*GG/*/*t1.nii.gz')))
    list_IDsT1ce = sorted(glob.glob(os.path.join(self.training_path, '*GG/*/*t1ce.nii.gz')))
    list_IDsT2 = sorted(glob.glob(os.path.join(self.training_path, '*GG/*/*t2.nii.gz')))
    list_IDsFlair = sorted(glob.glob(os.path.join(self.training_path, '*GG/*/*_flair.nii.gz')))
    labels = sorted(glob.glob(os.path.join(self.training_path, '*GG/*/*_seg.nii.gz')))
    list_IDsT1, list_IDsT1ce, list_IDsT2, list_IDsFlair, labels = utils.shuffle_lists(
        list_IDsT1, list_IDsT1ce, list_IDsT2, list_IDsFlair, labels, seed=17)
    self.affine = img_loader.load_affine_matrix(list_IDsT1[0])

    if self.mode == 'train':
        print('Brats2019, Total data:', len(list_IDsT1))
        list_IDsT1 = list_IDsT1[:split_idx]
        list_IDsT1ce = list_IDsT1ce[:split_idx]
        list_IDsT2 = list_IDsT2[:split_idx]
        list_IDsFlair = list_IDsFlair[:split_idx]
        labels = labels[:split_idx]
        self.list = create_sub_volumes(list_IDsT1, list_IDsT1ce, list_IDsT2, list_IDsFlair,
                                       labels, dataset_name="brats2019", mode=mode,
                                       samples=samples, full_vol_dim=self.full_vol_dim,
                                       crop_size=self.crop_size,
                                       sub_vol_path=self.sub_vol_path)
    elif self.mode == 'val':
        list_IDsT1 = list_IDsT1[split_idx:]
        list_IDsT1ce = list_IDsT1ce[split_idx:]
        list_IDsT2 = list_IDsT2[split_idx:]
        list_IDsFlair = list_IDsFlair[split_idx:]
        labels = labels[split_idx:]
        self.list = create_sub_volumes(list_IDsT1, list_IDsT1ce, list_IDsT2, list_IDsFlair,
                                       labels, dataset_name="brats2019", mode=mode,
                                       samples=samples, full_vol_dim=self.full_vol_dim,
                                       crop_size=self.crop_size,
                                       sub_vol_path=self.sub_vol_path)
    elif self.mode == 'test':
        self.list_IDsT1 = sorted(glob.glob(os.path.join(self.testing_path, '*GG/*/*t1.nii.gz')))
        self.list_IDsT1ce = sorted(glob.glob(os.path.join(self.testing_path, '*GG/*/*t1ce.nii.gz')))
        self.list_IDsT2 = sorted(glob.glob(os.path.join(self.testing_path, '*GG/*/*t2.nii.gz')))
        self.list_IDsFlair = sorted(glob.glob(os.path.join(self.testing_path, '*GG/*/*_flair.nii.gz')))
        self.labels = None
        # TODO inference code here

    utils.save_list(self.save_name, self.list)
def __init__(self, args, mode, dataset_path='./datasets', crop_dim=(32, 32, 32), split_id=1,
             samples=1000, load=False):
    """
    :param mode: 'train','val','test'
    :param dataset_path: root dataset folder
    :param crop_dim: subvolume tuple
    :param split_id: index that splits training from validation subjects
    :param samples: number of sub-volumes that you want to create
    """
    self.mode = mode
    self.root = str(dataset_path)
    self.training_path = self.root + '/iseg_2019/iSeg-2019-Training/'
    self.testing_path = self.root + '/iseg_2019/iSeg-2019-Validation/'
    self.CLASSES = 4
    self.full_vol_dim = (144, 192, 256)  # slice, width, height
    self.crop_size = crop_dim
    self.threshold = args.threshold
    self.normalization = args.normalization
    self.augmentation = args.augmentation
    self.list = []
    self.samples = samples
    self.full_volume = None
    self.save_name = self.root + '/iseg_2019/iseg2019-list-' + mode + '-samples-' + str(
        samples) + '.txt'

    if self.augmentation:
        self.transform = augment3D.RandomChoice(
            transforms=[augment3D.GaussianNoise(mean=0, std=0.01),
                        augment3D.RandomFlip(),
                        augment3D.ElasticTransform()], p=0.5)

    if load:
        ## load pre-generated data
        self.list = utils.load_list(self.save_name)
        list_IDsT1 = sorted(glob.glob(os.path.join(self.training_path, '*T1.img')))
        self.affine = img_loader.load_affine_matrix(list_IDsT1[0])
        return

    subvol = '_vol_' + str(crop_dim[0]) + 'x' + str(crop_dim[1]) + 'x' + str(crop_dim[2])
    self.sub_vol_path = self.root + '/iseg_2019/generated/' + mode + subvol + '/'
    utils.make_dirs(self.sub_vol_path)

    list_IDsT1 = sorted(glob.glob(os.path.join(self.training_path, '*T1.img')))
    list_IDsT2 = sorted(glob.glob(os.path.join(self.training_path, '*T2.img')))
    labels = sorted(glob.glob(os.path.join(self.training_path, '*label.img')))
    self.affine = img_loader.load_affine_matrix(list_IDsT1[0])

    if self.mode == 'train':
        list_IDsT1 = list_IDsT1[:split_id]
        list_IDsT2 = list_IDsT2[:split_id]
        labels = labels[:split_id]
        self.list = create_sub_volumes(list_IDsT1, list_IDsT2, labels,
                                       dataset_name="iseg2019", mode=mode, samples=samples,
                                       full_vol_dim=self.full_vol_dim, crop_size=self.crop_size,
                                       sub_vol_path=self.sub_vol_path,
                                       th_percent=self.threshold)
    elif self.mode == 'val':
        list_IDsT1 = list_IDsT1[split_id:]
        # bug fix: the original sliced list_IDsT2[:split_id:], keeping the training subjects
        list_IDsT2 = list_IDsT2[split_id:]
        labels = labels[split_id:]
        self.list = create_sub_volumes(list_IDsT1, list_IDsT2, labels,
                                       dataset_name="iseg2019", mode=mode, samples=samples,
                                       full_vol_dim=self.full_vol_dim, crop_size=self.crop_size,
                                       sub_vol_path=self.sub_vol_path,
                                       th_percent=self.threshold)
        self.full_volume = get_viz_set(list_IDsT1, list_IDsT2, labels, dataset_name="iseg2019")
    elif self.mode == 'test':
        self.list_IDsT1 = sorted(glob.glob(os.path.join(self.testing_path, '*T1.img')))
        self.list_IDsT2 = sorted(glob.glob(os.path.join(self.testing_path, '*T2.img')))
        self.labels = None
        # TODO inference here

    utils.save_list(self.save_name, self.list)
def objective_func(trial):
    global config
    if isinstance(args.num_trial, int):
        if trial.number > args.num_trial:
            import sys
            sys.exit()

    # make dirs
    config.trainer.base_dir = os.path.join(args.optuna_dir, f"{trial.number:04}/")
    utils.make_dirs(config.trainer.base_dir + "log")
    utils.make_dirs(config.trainer.base_dir + "checkpoint")

    # change config: set loss funcs & gates
    for source_id, model_losses in enumerate(config.losses):
        for target_id, _ in enumerate(model_losses):
            loss_name = trial.suggest_categorical(
                f'{source_id:02}_{target_id:02}_loss', LOSS_LISTS[source_id][target_id])
            loss_args = copy.deepcopy(args_factory.losses[loss_name])
            if "gate" in loss_args.args:
                gate_name = trial.suggest_categorical(
                    f'{source_id:02}_{target_id:02}_gate', GATE_LIST[source_id][target_id])
                loss_args.args.gate = copy.deepcopy(args_factory.gates[gate_name])
            config.losses[source_id][target_id] = loss_args

    for model_id in range(len(config.models)):
        # set model
        model_name = trial.suggest_categorical(f"model_{model_id}_name", MODEL_LISTS[model_id])
        model = copy.deepcopy(args_factory.models[model_name])
        config.models[model_id].name = model.name
        config.models[model_id].args = model.args

        # set model weight
        is_cutoff = all(loss.args.gate.name == "CutoffGate"
                        for loss in config.losses[model_id])
        is_ensemble = config.models[model_id].name == "Ensemble"
        if is_cutoff and not is_ensemble:
            config.models[model_id].load_weight.path = model.load_weight.path
        else:
            config.models[model_id].load_weight.path = None

    config = copy.deepcopy(config)

    # save config
    utils.save_json(config, config.trainer.base_dir + r"log/config.json")

    # create objects
    trainer, nets, criterions, optimizers, train_loader, test_loader, logs = create_object(config)

    # make kwargs
    kwargs = {"_trial": trial, "_callback": inform_optuna}

    # set seed
    trial.set_user_attr("seed", config.manualSeed)

    # raise an exception if the target model is pretrained
    if config.models[0].load_weight.path is not None:
        class BlacklistError(optuna.structs.OptunaError):
            pass
        raise BlacklistError()

    # start trial
    trainer.train(nets, criterions, optimizers, train_loader, test_loader, logs,
                  trial=trial, **kwargs)
    acc = 100 - logs[0]["epoch_log"][config.trainer.epochs]["test_accuracy"]
    return acc