def main():
    """Train a segmentation model on GPU 0 with a class-weighted Dice loss.

    Sets a fixed seed for reproducibility, builds the data generators and the
    model, then hands everything to the Trainer.
    """
    args = get_arguments()
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    # FOR REPRODUCIBILITY OF RESULTS
    seed = 1777777
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)
    utils.save_arguments(args, args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)

    # BUG FIX: the original assigned create_loss('CrossEntropyLoss') to
    # `criterion` and immediately overwrote it with DiceLoss below, so the
    # first assignment was dead code — it has been removed.
    # Weight 0.1 on index 0 down-weights the (presumed) background class
    # relative to the three foreground classes — TODO confirm class layout.
    criterion = DiceLoss(classes=args.classes,
                         weight=torch.tensor([0.1, 1, 1, 1]).cuda())

    if args.cuda:
        model = model.cuda()
        print("Model transferred in GPU.....")

    trainer = Trainer(args, model, criterion, optimizer,
                      train_data_loader=training_generator,
                      valid_data_loader=val_generator,
                      lr_scheduler=None)
    print("START TRAINING...")
    trainer.training()
def main():
    """Run non-overlapping sliding-window inference with a trained model.

    Builds the dataset/model pair, moves them to the GPU, and feeds a random
    volume through non_overlap_padding with a (32, 32, 32) kernel.
    """
    args = get_arguments()
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    # FOR REPRODUCIBILITY OF RESULTS
    seed = 1777777
    utils.reproducibility(args, seed)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)

    # BUG FIX: `criterion` was commented out in the original but is still
    # passed to non_overlap_padding() below, which raised NameError at
    # runtime. It is now defined before use.
    criterion = DiceLoss(classes=args.classes)

    # TODO LOAD PRETRAINED MODEL
    print(affine.shape)
    # model.restore_checkpoint(args.pretrained)

    if args.cuda:
        model = model.cuda()
        full_volume = full_volume.cuda()
        print("Model transferred in GPU.....")

    # Random (3, 156, 240, 240) input stands in for a real volume here.
    x = torch.randn(3, 156, 240, 240).cuda()
    output = non_overlap_padding(args, x, model, criterion,
                                 kernel_dim=(32, 32, 32))
def __init__(self, model, exp_name, device=0, class_num=2, optim=torch.optim.Adam,
             optim_args={}, loss_args={}, model_name='arcnet', labels=None,
             num_epochs=10, log_nth=5, lr_scheduler_step_size=3,
             lr_scheduler_gamma=0.5, use_last_checkpoint=True,
             exp_dir='experiments', log_dir='logs'):
    """Set up a training solver: loss, optimizer, LR schedule, dirs, logging.

    Args:
        model: network to train.
        exp_name: experiment name; used for the experiment directory and logs.
        device: CUDA device index the loss is moved to (when CUDA is available).
        class_num: number of segmentation classes for the loss.
        optim: optimizer class, instantiated with model.parameters().
        optim_args: kwargs forwarded to the optimizer constructor.
            NOTE(review): mutable default argument ({}); harmless here since it
            is only unpacked, never mutated — but consider a None sentinel.
        loss_args: must contain key "vae_loss"; when truthy, must also contain
            "loss_k1_weight" and "loss_k2_weight" (raises KeyError otherwise).
        model_name: label stored for checkpoint/logging purposes.
        labels: class labels forwarded to the LogWriter.
        num_epochs: number of epochs the caller intends to train for.
        log_nth: log every n-th iteration.
        lr_scheduler_step_size: StepLR period (in epochs).
        lr_scheduler_gamma: StepLR multiplicative decay factor.
        use_last_checkpoint: when True, resume from the latest checkpoint
            at the end of this constructor.
        exp_dir: root directory for experiments.
        log_dir: root directory for logs.
    """
    self.device = device
    self.model = model
    self.model_name = model_name
    self.num_epochs = num_epochs
    self.vae = loss_args["vae_loss"]

    # get the customized loss function: combined (VAE) loss when requested,
    # otherwise plain Dice.
    if self.vae:
        loss_func = CombinedLoss(k1=loss_args["loss_k1_weight"],
                                 k2=loss_args["loss_k2_weight"],
                                 classes=class_num)
    else:
        loss_func = DiceLoss(classes=class_num)
    if torch.cuda.is_available():
        self.loss_func = loss_func.cuda(device)
    else:
        self.loss_func = loss_func

    self.optim = optim(model.parameters(), **optim_args)
    self.scheduler = lr_scheduler.StepLR(self.optim,
                                         step_size=lr_scheduler_step_size,
                                         gamma=lr_scheduler_gamma)

    # Create experiment/checkpoint directories if they do not exist yet.
    exp_dir_path = os.path.join(exp_dir, exp_name)
    common_utils.create_if_not(exp_dir_path)
    common_utils.create_if_not(os.path.join(exp_dir_path, CHECKPOINT_DIR))
    self.exp_dir_path = exp_dir_path

    self.log_nth = log_nth
    self.logWriter = LogWriter(class_num, log_dir, exp_name,
                               use_last_checkpoint, labels)
    self.use_last_checkpoint = use_last_checkpoint

    # Bookkeeping defaults; load_checkpoint() below may overwrite them.
    self.start_epoch = 1
    self.start_iteration = 1
    self.best_ds_mean = 0
    self.best_ds_mean_epoch = 1

    if use_last_checkpoint:
        self.load_checkpoint()
def main():
    """Train a model on the configured dataset with a plain Dice loss."""
    args = get_arguments()
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    loaders = medical_loaders.generate_datasets(
        args, path='/home/mulns/My_project/VV/MedicalZooPytorch/datasets')
    training_generator, val_generator, full_volume, affine = loaders

    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=args.classes)

    if args.cuda:
        # model=torch.nn.DataParallel(model)
        model = model.cuda()
        print("Model transferred in GPU.....")

    trainer = train.Trainer(args, model, criterion, optimizer,
                            train_data_loader=training_generator,
                            valid_data_loader=val_generator,
                            lr_scheduler=None)
    print("START TRAINING...")
    trainer.training()
def main():
    """Train with a weighted 2-class Dice loss and on-the-fly augmentation."""
    args = get_arguments()
    utils.reproducibility(args, seed)
    # utils.make_dirs(args.save)
    if not os.path.exists(args.save):
        os.makedirs(args.save)

    training_generator, val_generator, full_volume, affine, dataset = \
        medical_loaders.generate_datasets(args, path='/data/hejy/MedicalZooPytorch_2cls/datasets')
    model, optimizer = medzoo.create_model(args)

    criterion = DiceLoss(classes=2,
                         skip_index_after=args.classes,
                         weight=torch.tensor([1, 1]).cuda(),
                         sigmoid_normalization=True)
    # criterion = WeightedCrossEntropyLoss()

    if args.cuda:
        model = model.cuda()
        # model.restore_checkpoint(args.pretrained)

    # Augmented training stream: patch loader wrapped in a single-threaded
    # batchgenerators augmenter.
    dataloader_train = MICCAI2020_RIBFRAC_DataLoader3D(
        dataset, args.batchSz, args.dim, num_threads_in_multithreaded=2)
    tr_transforms = get_train_transform(args.dim)
    training_generator_aug = SingleThreadedAugmenter(dataloader_train, tr_transforms)

    trainer = train.Trainer(args, model, criterion, optimizer,
                            train_data_loader=training_generator,
                            valid_data_loader=val_generator,
                            lr_scheduler=None,
                            dataset=dataset,
                            train_data_loader_aug=training_generator_aug)
    trainer.training()
def main():
    """Train a 2-class model with weighted Dice loss and sigmoid normalization."""
    args = get_arguments()
    utils.reproducibility(args, seed)
    # utils.make_dirs(args.save)
    if not os.path.exists(args.save):
        os.makedirs(args.save)

    training_generator, val_generator, full_volume, affine, dataset = \
        medical_loaders.generate_datasets(args, path='/data/hejy/MedicalZooPytorch_2cls/datasets')
    model, optimizer = medzoo.create_model(args)

    criterion = DiceLoss(classes=2,
                         skip_index_after=args.classes,
                         weight=torch.tensor([1, 1]).cuda(),
                         sigmoid_normalization=True)
    # criterion = WeightedCrossEntropyLoss()

    if args.cuda:
        model = model.cuda()
        # model.restore_checkpoint(args.pretrained)

    trainer = train.Trainer(args, model, criterion, optimizer,
                            train_data_loader=training_generator,
                            valid_data_loader=val_generator,
                            lr_scheduler=None)
    trainer.training()
def main():
    """Per-epoch training loop with TensorBoard logging and checkpointing."""
    args = get_arguments()
    utils.make_dirs(args.save)

    name_model = args.model + "_" + args.dataset_name + "_" + utils.datestr()
    # TODO visual3D_temp.Basewriter package
    writer = SummaryWriter(log_dir='../runs/' + name_model, comment=name_model)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=args.classes)

    if args.cuda:
        torch.cuda.manual_seed(seed)
        model = model.cuda()
        print("Model transferred in GPU.....")

    print("START TRAINING...")
    for epoch in range(1, args.nEpochs + 1):
        train_stats = train.train_dice(args, epoch, model, training_generator,
                                       optimizer, criterion)
        val_stats = train.test_dice(args, epoch, model, val_generator, criterion)
        # old: utils.write_train_val_score(writer, epoch, train_stats, val_stats)
        model.save_checkpoint(args.save, epoch, val_stats[0], optimizer=optimizer)
def main():
    """Train a model, then render a 3-D visualization of the full volume."""
    args = get_arguments()
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=args.classes)

    if args.cuda:
        model = model.cuda()
        print("Model transferred in GPU.....")

    trainer = train.Trainer(args, model, criterion, optimizer,
                            train_data_loader=training_generator,
                            valid_data_loader=val_generator,
                            lr_scheduler=None)
    print("START TRAINING...")
    trainer.training()

    visualize_3D_no_overlap_new(args, full_volume, affine, model, 10, args.dim)
def main():
    """Training entry point pinned to GPU 1 with explicit manual seeding."""
    args = get_arguments()
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"

    # FOR REPRODUCIBILITY OF RESULTS
    seed = 1777777
    torch.manual_seed(seed)
    if args.cuda:
        torch.cuda.manual_seed(seed)
    np.random.seed(seed)
    cudnn.deterministic = True
    # FOR FASTER GPU TRAINING WHEN INPUT SIZE DOESN'T VARY
    # cudnn.benchmark = True

    utils.make_dirs(args.save)
    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=args.classes)

    if args.cuda:
        model = model.cuda()
        print("Model transferred in GPU.....")

    trainer = Trainer(args, model, criterion, optimizer,
                      train_data_loader=training_generator,
                      valid_data_loader=val_generator,
                      lr_scheduler=None)
    print("START TRAINING...")
    trainer.training()
def main():
    """Minimal training driver: data, model, Dice loss, Trainer."""
    args = get_arguments()
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    generators = medical_loaders.generate_datasets(args, path='.././datasets')
    training_generator, val_generator, full_volume, affine = generators

    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=args.classes)
    if args.cuda:
        model = model.cuda()

    trainer = train.Trainer(args, model, criterion, optimizer,
                            train_data_loader=training_generator,
                            valid_data_loader=val_generator)
    trainer.training()
def main():
    """Training driver with Dice loss (class weighting intentionally disabled)."""
    args = get_arguments()
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    # ,skip_index_after=2,weight=torch.tensor([0.00001,1,1,1]).cuda())
    criterion = DiceLoss(classes=args.classes)

    if args.cuda:
        model = model.cuda()
        print("Model transferred in GPU.....")

    trainer = train.Trainer(args, model, criterion, optimizer,
                            train_data_loader=training_generator,
                            valid_data_loader=val_generator)
    print("START TRAINING...")
    trainer.training()
def main():
    """Distributed (apex/DDP) training entry point for 2-class segmentation.

    Expects one process per GPU (e.g. launched via torch.distributed.launch),
    with args.local_rank identifying this process's device.
    """
    args = get_arguments()
    if args.distributed:
        # Pin this process to its assigned GPU and join the NCCL process
        # group; rendezvous info comes from env vars (init_method='env://').
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        args.world_size = torch.distributed.get_world_size()
    assert torch.backends.cudnn.enabled, "Amp requires cudnn backend to be enabled."
    torch.backends.cudnn.benchmark = True

    # NOTE(review): `seed` is not defined in this function — presumably a
    # module-level constant; confirm against the file header.
    utils.reproducibility(args, seed)
    # utils.make_dirs(args.save)
    if not os.path.exists(args.save):
        os.makedirs(args.save)

    training_generator, val_generator, full_volume, affine, dataset = medical_loaders.generate_datasets(
        args, path='/data/hejy/MedicalZooPytorch_2cls/datasets')
    model, optimizer = medzoo.create_model(args)
    if args.sync_bn:
        # Convert BatchNorm layers to synchronized BN across the process group.
        model = apex.parallel.convert_syncbn_model(model)

    # Dice over 2 channels; skip_index_after limits scoring to args.classes.
    criterion = DiceLoss(classes=2, skip_index_after=args.classes,
                         weight=torch.tensor([1, 1]).cuda(),
                         sigmoid_normalization=True)
    # criterion = WeightedCrossEntropyLoss()

    if args.cuda:
        model = model.cuda()
    if args.distributed:
        # apex DistributedDataParallel; delay_allreduce defers gradient
        # all-reduce until the end of backward for compatibility.
        model = DDP(model, delay_allreduce=True)
    # model.restore_checkpoint(args.pretrained)

    trainer = train.Trainer(args, model, criterion, optimizer,
                            train_data_loader=training_generator,
                            valid_data_loader=val_generator,
                            lr_scheduler=None)
    trainer.training()
def main():
    """Train on ./datasets with a plain Dice loss."""
    args = get_arguments()
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='./datasets')
    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=args.classes)
    # print("training_generator shape:", training_generator.dim())
    # print("val_generator shape:", val_generator.dim())

    if args.cuda:
        model = model.cuda()

    print("start training...")
    # torch.save(training_generator, "training_generator.tch")
    # torch.save(val_generator, "val_generator.tch")
    trainer = train.Trainer(args, model, criterion, optimizer,
                            train_data_loader=training_generator,
                            valid_data_loader=val_generator)
    trainer.training()
def main():
    """Epoch loop writing per-split stats files, TensorBoard logs and checkpoints."""
    args = get_arguments()
    utils.make_dirs(args.save)
    train_f, val_f = utils.create_stats_files(args.save)

    name_model = args.model + "_" + args.dataset_name + "_" + utils.datestr()
    writer = SummaryWriter(log_dir='../runs/' + name_model, comment=name_model)
    best_prec1 = 100.

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=args.classes)

    if args.cuda:
        torch.cuda.manual_seed(seed)
        model = model.cuda()
        print("Model transferred in GPU.....")

    print("START TRAINING...")
    for epoch in range(1, args.nEpochs + 1):
        train_stats = train.train_dice(args, epoch, model, training_generator,
                                       optimizer, criterion, train_f, writer)
        val_stats = train.test_dice(args, epoch, model, val_generator,
                                    criterion, val_f, writer)
        utils.write_train_val_score(writer, epoch, train_stats, val_stats)
        model.save_checkpoint(args.save, epoch, val_stats[0], optimizer=optimizer)
        # if epoch % 5 == 0:
        #     utils.visualize_no_overlap(args, full_volume, affine, model, epoch, DIM, writer)
        # utils.save_model(model, args, val_stats[0], epoch, best_prec1)

    train_f.close()
    val_f.close()
def __init__(self, k1=0.1, k2=0.1, classes=2):
    """Combined loss with two weighted terms plus a Dice component.

    Args:
        k1: weight of the first loss term.
        k2: weight of the second loss term.
        classes: number of classes for the Dice loss component.
    """
    super(CombinedLoss, self).__init__()
    # BUG FIX: the original read `self.k1 = k2` (flagged by the author's own
    # "#need or not" comment), silently discarding the k1 argument.
    self.k1 = k1
    self.k2 = k2
    self.dice_loss = DiceLoss(classes=classes)