Example 1
def __init__(self, args):
    # Shared DataLoader settings.
    kwargs = {'num_workers': 4, 'pin_memory': True}
    self.device = 1
    self.num_class = 2
    self.num_domains = 2
    # Source, target and test loaders plus the number of classes.
    self.source_loader, self.target_loader, self.test_loader, self.nclass = make_data_loader(
        args, **kwargs)
    self.tbar = tqdm(self.test_loader, desc='\r')
    self.best_IoU = {'disc': 0.77, 'cup': 0.65, 'mode': 'maxmin'}
    self.attempt = 9.5
    self.madan_trainer = madan_trainer(args, self.num_class,
                                       self.num_domains)
    self.madan_trainer(args)
Example 2
def main():
    args = parse_args()
    config_path = args.config_file_path

    config = get_config(config_path, new_keys_allowed=True)

    config.defrost()
    config.experiment_dir = os.path.join(config.log_dir, config.experiment_name)
    config.tb_dir = os.path.join(config.experiment_dir, 'tb')
    config.model.best_checkpoint_path = os.path.join(config.experiment_dir, 'best_checkpoint.pt')
    config.model.last_checkpoint_path = os.path.join(config.experiment_dir, 'last_checkpoint.pt')
    config.config_save_path = os.path.join(config.experiment_dir, 'segmentation_config.yaml')
    config.freeze()

    init_experiment(config)
    set_random_seed(config.seed)

    train_dataset = make_dataset(config.train.dataset)
    train_loader = make_data_loader(config.train.loader, train_dataset)

    val_dataset = make_dataset(config.val.dataset)
    val_loader = make_data_loader(config.val.loader, val_dataset)

    device = torch.device(config.device)
    model = make_model(config.model).to(device)

    optimizer = make_optimizer(config.optim, model.parameters())
    scheduler = None

    loss_f = make_loss(config.loss)

    early_stopping = EarlyStopping(
        **config.stopper.params
    )

    train_writer = SummaryWriter(log_dir=os.path.join(config.tb_dir, 'train'))
    val_writer = SummaryWriter(log_dir=os.path.join(config.tb_dir, 'val'))

    for epoch in range(1, config.epochs + 1):
        print(f'Epoch {epoch}')
        train_metrics = train(model, optimizer, train_loader, loss_f, device)
        write_metrics(epoch, train_metrics, train_writer)
        print_metrics('Train', train_metrics)

        val_metrics = val(model, val_loader, loss_f, device)
        write_metrics(epoch, val_metrics, val_writer)
        print_metrics('Val', val_metrics)

        early_stopping(val_metrics['loss'])
        if config.model.save and early_stopping.counter == 0:
            torch.save(model.state_dict(), config.model.best_checkpoint_path)
            print('Saved best model checkpoint to disk.')
        if early_stopping.early_stop:
            print(f'Early stopping after {epoch} epochs.')
            break

        if scheduler:
            scheduler.step()

    train_writer.close()
    val_writer.close()

    if config.model.save:
        torch.save(model.state_dict(), config.model.last_checkpoint_path)
        print('Saved last model checkpoint to disk.')
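
The EarlyStopping implementation is not shown in these examples. A minimal, patience-based sketch that is consistent with how main() uses it above (called with the validation loss, resetting counter to 0 on improvement, and raising early_stop after too many stale epochs) could look like the following; patience and min_delta are assumed parameter names:

class EarlyStopping:
    # Hypothetical sketch, not the actual implementation used by these examples.
    def __init__(self, patience=10, min_delta=0.0):
        self.patience = patience      # epochs to wait after the last improvement
        self.min_delta = min_delta    # minimum decrease that counts as improvement
        self.best_loss = float('inf')
        self.counter = 0              # epochs since the last improvement
        self.early_stop = False

    def __call__(self, val_loss):
        if val_loss < self.best_loss - self.min_delta:
            self.best_loss = val_loss
            self.counter = 0          # counter == 0 is how main() detects a new best
        else:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True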
Example 3
def __init__(self, args):
    # Shared DataLoader settings.
    kwargs = {'num_workers': 4, 'pin_memory': True}
    self.early_stop = False
    self.device = 1
    self.num_epochs_last_save = 0
    self.num_class = 2
    self.num_domains = 2
    # One loader triple per domain: two sources (indices 0 and 1) and one target (index 2).
    self.srca_train_loader, self.srca_val_loader, self.srca_test_loader, self.nclass = make_data_loader(
        0, args.source1_dataset, args, **kwargs)
    self.srcb_train_loader, self.srcb_val_loader, self.srcb_test_loader, self.nclass = make_data_loader(
        1, args.source2_dataset, args, **kwargs)
    self.tgt_train_loader, self.tgt_val_loader, self.tgt_test_loader, self.nclass = make_data_loader(
        2, args.target_dataset, args, **kwargs)
    self.tbar = tqdm(self.tgt_val_loader, desc='\r')
    self.best_metrics = {'disc': 0.77, 'cup': 0.65, 'delta_cdr': 1.0}
    self.hyper_dict = args.hyparams_dict
    meta_update_lr = args.lr
    meta_update_step = args.meta_update_step
    beta = args.beta
    self.mwdan_trainer = multisource_metatrainer(args, self.num_class,
                                                 meta_update_lr,
                                                 meta_update_step, beta,
                                                 'default')
Example 4
def prepare_dataloader(self):
    kwargs = {'num_workers': self.args.workers, 'pin_memory': True}
    self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(
        self.args, **kwargs)
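
Across these examples make_data_loader is called in two forms: the single-source form above, make_data_loader(args, **kwargs), and the indexed multi-source form of Examples 3 and 5, make_data_loader(domain_idx, dataset_name, args, **kwargs). In every case it returns a 4-tuple of three loaders plus the number of classes. A hypothetical stub for the single-source contract, assuming a build_datasets helper and an args.batch_size attribute (neither appears in these fragments):

from torch.utils.data import DataLoader

def make_data_loader(args, **kwargs):
    # build_datasets is a hypothetical helper standing in for the real dataset factory.
    train_set, val_set, test_set, nclass = build_datasets(args)
    train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
    val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
    test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)
    return train_loader, val_loader, test_loader, nclass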
Example 5
def __init__(self, args):
    # Shared DataLoader settings.
    kwargs = {'num_workers': 4, 'pin_memory': True}
    self.device = 1
    self.num_class = 2
    self.num_domains = 2
    # One loader triple per domain: two sources (indices 0 and 1) and one target (index 2).
    self.srca_train_loader, self.srca_val_loader, self.srca_test_loader, self.nclass = make_data_loader(
        0, args.source1_dataset, args, **kwargs)
    self.srcb_train_loader, self.srcb_val_loader, self.srcb_test_loader, self.nclass = make_data_loader(
        1, args.source2_dataset, args, **kwargs)
    self.tgt_train_loader, self.tgt_val_loader, self.tgt_test_loader, self.nclass = make_data_loader(
        2, args.target_dataset, args, **kwargs)
    self.tbar = tqdm(self.tgt_val_loader, desc='\r')
    self.best_metrics = {'disc': 0.77, 'cup': 0.65, 'delta_cdr': 1.0}
    self.hyper_dict = args.hyparams_dict
    self.mwdan_trainer = mwdan_trainer(args, self.num_class,
                                       self.num_domains)
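
Examples 3 and 5 build separate train/val/test loaders for two source domains and one target domain, but the fragments do not show how the trainers consume them. One common pattern (an assumption here, not taken from these examples) is to iterate the three train loaders in lockstep, ending each epoch when the shortest loader runs out; this sketch assumes each loader yields (image, label) batches, and step_fn is a hypothetical per-batch callback:

def multi_source_epoch(srca_loader, srcb_loader, tgt_loader, step_fn):
    # zip stops at the shortest loader, so each step sees one batch per domain;
    # the target label is discarded, as it would be unavailable in unsupervised adaptation.
    for (xa, ya), (xb, yb), (xt, _) in zip(srca_loader, srcb_loader, tgt_loader):
        step_fn(xa, ya, xb, yb, xt)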