    def __setup_model_hparams(self):

        # 1. define the loss (BCELoss expects sigmoid probabilities, not raw logits)
        self.loss = nn.BCELoss()

        # 2. define model metric
        self.metric = Dice_score(self.hparams['model']['n_classes'])

        # 3. define optimizer
        self.optimizer = getattr(torch.optim, self.hparams['optimizer_name'])(
            params=self.model.parameters(),
            **self.hparams['optimizer_hparams'])

        # 4. define scheduler
        self.scheduler = getattr(
            torch.optim.lr_scheduler, self.hparams['scheduler_name'])(
                optimizer=self.optimizer, **self.hparams['scheduler_hparams'])

        # 5. define early stopping
        self.early_stopping = EarlyStopping(
            checkpoint_path=f"{self.hparams['checkpoint_path']}/checkpoint_{self.start_training}.pt",
            patience=self.hparams['patience'],
            delta=self.hparams['min_delta'],
            is_maximize=False,
        )

        # 6. set gradient clipping
        self.apply_clipping = self.hparams['clipping']  # whether to clip gradients during training

        # 7. define gradient scaler for mixed-precision (AMP) training
        self.scaler = torch.cuda.amp.GradScaler()

        return True
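
For context, below is a minimal sketch of the hparams dictionary this setup method expects, inferred from the keys it reads; every concrete value (the Adam optimizer, ReduceLROnPlateau scheduler, patience values, paths, and so on) is an illustrative assumption, not the original project's configuration.

hparams = {
    'model': {'n_classes': 2},                            # used by Dice_score
    'optimizer_name': 'Adam',                             # resolved from torch.optim by name
    'optimizer_hparams': {'lr': 1e-3},                    # passed to the optimizer constructor
    'scheduler_name': 'ReduceLROnPlateau',                # resolved from torch.optim.lr_scheduler
    'scheduler_hparams': {'mode': 'min', 'patience': 3},  # passed to the scheduler constructor
    'checkpoint_path': './checkpoints',                   # base directory for the EarlyStopping checkpoint file
    'patience': 10,                                       # EarlyStopping patience
    'min_delta': 0.0,                                     # minimum improvement passed as delta to EarlyStopping
    'clipping': True,                                     # stored as self.apply_clipping
}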
Example #2
    def __init__(self, hparams, gpu, model, Dataset_train, eval):

        # store the configuration and training dependencies

        self.hparams = hparams
        self.gpu = gpu
        self.Dataset_train = Dataset_train
        self.eval = eval  # note: the parameter name shadows Python's built-in eval()
        print('\n')
        print('Selected Learning rate:', self.hparams['optimizer_hparams']['lr'])
        print('\n')

        self.exclusions = []

        self.splits, self.splits_test = self.load_split_table()
        self.metric = Dice_score(self.hparams['model']['n_classes'])

        self.model = model