def run(self):
    """ Test every selected (fold, epoch) checkpoint twice: once on the
    held-out test set and once on the intra-training test set, saving the
    results through `Base.testing`.
    """
    epochs_tested = self.get_epochs_to_test()
    for fold in self.get_folds_to_test():
        for epoch in epochs_tested[fold]:
            pretrained_path = self.args.pretrained_path or os.path.join(
                self.args.checkpoint_dir,
                get_chk_name(self.args.exp_name, fold, epoch))
            # "" selects the external test set, "Intra_" the intra set.
            for prefix in ["", "Intra_"]:
                if self.args.outfile_name is None:
                    outfile = "Test_" + prefix + self.args.exp_name
                else:
                    outfile = prefix + self.args.outfile_name
                exp_name = "{}_fold{}_epoch{}".format(outfile, fold, epoch)
                model = Base(model=self.net,
                             loss=self.loss,
                             metrics=self.args.metrics,
                             pretrained=pretrained_path,
                             use_cuda=self.args.cuda)
                loader = self.manager.get_dataloader(
                    test=(prefix != "Intra_"),
                    test_intra=(prefix == "Intra_"),
                    fold_index=fold)
                model.testing(
                    loader,
                    with_visuals=False,
                    with_logit=self.args.with_logit,
                    predict=self.args.predict,
                    saving_dir=self.args.checkpoint_dir,
                    exp_name=exp_name,
                    standard_optim=getattr(self.net, 'std_optim', True),
                    **self.kwargs_test)
def run(self, nb_rep=10):
    """ Evaluate an ensemble of `nb_rep` independently trained networks.

    For each selected (fold, epoch), the predictions of the `nb_rep`
    checkpoints (named '<exp_name>_ensemble_<i>') are stacked sample-first
    and dumped to '<outfile>_fold<fold>_epoch<epoch>.pkl' in the
    checkpoint directory.

    Parameters
    ----------
    nb_rep: int, default 10
        number of ensembled models; must be >= 1.

    Raises
    ------
    ValueError
        if a global <pretrained_path> is set (incompatible with per-member
        checkpoints) or if nb_rep < 1.
    """
    if self.args.pretrained_path is not None:
        raise ValueError(
            'Unset <pretrained_path> to use the EnsemblingTester')
    # Fail fast: with nb_rep < 1 the original code crashed with a
    # NameError on 'exp_name' after the empty loop.
    if nb_rep < 1:
        raise ValueError('nb_rep must be >= 1')
    epochs_tested = self.get_epochs_to_test()
    folds_to_test = self.get_folds_to_test()
    for fold in folds_to_test:
        for epoch in epochs_tested[fold]:
            # The output name only depends on (fold, epoch): compute it
            # once instead of on every repetition.
            outfile = self.args.outfile_name or (
                "EnsembleTest_" + self.args.exp_name)
            exp_name = outfile + "_fold{}_epoch{}.pkl".format(fold, epoch)
            Y, Y_true = [], []
            for i in range(nb_rep):
                pretrained_path = os.path.join(
                    self.args.checkpoint_dir,
                    get_chk_name(
                        self.args.exp_name + '_ensemble_%i' % (i + 1),
                        fold, epoch))
                model = Base(model=self.net,
                             loss=self.loss,
                             metrics=self.args.metrics,
                             pretrained=pretrained_path,
                             use_cuda=self.args.cuda)
                y, y_true, _, _, _ = model.test(
                    self.manager.get_dataloader(test=True).test)
                Y.append(y)
                Y_true.append(y_true)
            with open(os.path.join(self.args.checkpoint_dir, exp_name),
                      'wb') as f:
                # swapaxes(0, 1): store as (n_samples, nb_rep, ...) so
                # each sample carries its ensemble of predictions.
                pickle.dump(
                    {
                        "y": np.array(Y).swapaxes(0, 1),
                        "y_true": np.array(Y_true).swapaxes(0, 1)
                    }, f)
def run(self, MC=10):
    """ Monte-Carlo test: run `MC` stochastic forward passes per
    checkpoint and dump the stacked predictions to a pickle file.

    Parameters
    ----------
    MC: int, default 10
        number of Monte-Carlo forward passes.
    """
    epochs_tested = self.get_epochs_to_test()
    folds_to_test = self.get_folds_to_test()
    if self.args.cv:
        self.logger.warning(
            "CROSS-VALIDATION USED DURING TESTING, EVENTUAL TESTING SET IS OMIT"
        )
    for fold in folds_to_test:
        for epoch in epochs_tested[fold]:
            chk_name = get_chk_name(self.args.exp_name, fold, epoch)
            pretrained_path = (self.args.pretrained_path or
                               os.path.join(self.args.checkpoint_dir,
                                            chk_name))
            outfile = self.args.outfile_name or (
                "MCTest_" + self.args.exp_name)
            exp_name = outfile + "_fold{}_epoch{}.pkl".format(fold, epoch)
            model = Base(model=self.net,
                         loss=self.loss,
                         metrics=self.args.metrics,
                         pretrained=pretrained_path,
                         use_cuda=self.args.cuda)
            # Under CV, evaluate on the fold's validation split instead
            # of the (possibly unused) external test set.
            if self.args.cv:
                loader = self.manager.get_dataloader(
                    validation=True, fold_index=fold).validation
            else:
                loader = self.manager.get_dataloader(test=True).test
            y, y_true = model.MC_test(loader, MC=MC)
            with open(os.path.join(self.args.checkpoint_dir, exp_name),
                      'wb') as f:
                pickle.dump({"y": y, "y_true": y_true}, f)
def run(self):
    """ Cross-validation test: evaluate each (fold, epoch) checkpoint on
    the fold's validation split and dump predictions, loss and metrics to
    a pickle file in the checkpoint directory.
    """
    epochs_tested = self.get_epochs_to_test()
    for fold in self.get_folds_to_test():
        for epoch in epochs_tested[fold]:
            pretrained_path = self.args.pretrained_path or os.path.join(
                self.args.checkpoint_dir,
                get_chk_name(self.args.exp_name, fold, epoch))
            outfile = self.args.outfile_name or (
                "Test_CV_" + self.args.exp_name)
            exp_name = "{}_fold{}_epoch{}".format(outfile, fold, epoch)
            model = Base(model=self.net,
                         loss=self.loss,
                         metrics=self.args.metrics,
                         pretrained=pretrained_path,
                         use_cuda=self.args.cuda)
            loader = self.manager.get_dataloader(validation=True,
                                                 fold_index=fold)
            res = model.test(
                loader.validation,
                with_visuals=False,
                with_logit=self.args.with_logit,
                predict=self.args.predict,
                standard_optim=getattr(self.net, 'std_optim', True))
            out_path = os.path.join(self.args.checkpoint_dir,
                                    exp_name + '.pkl')
            # res[2] is deliberately skipped (presumably the visuals
            # slot — confirm against Base.test's return signature).
            with open(out_path, 'wb') as f:
                pickle.dump(
                    {
                        'y_pred': res[0],
                        'y_true': res[1],
                        'loss': res[3],
                        'metrics': res[4]
                    }, f)
def __init__(self, net_params=None, pretrained=None, resume=False,
             optimizer_name="Adam", learning_rate=1e-3,
             loss_name="NLLLoss", metrics=None, use_cuda=False, **kwargs):
    """ Class initialization.

    Builds the class-level network (if any) from `net_params`, then
    delegates optimizer/loss/metrics setup to the `Base` constructor.

    Parameters
    ----------
    net_params: NetParameters, default None
        all the parameters that will be used during the network creation.
    pretrained: path, default None
        path to the pretrained model or weights.
    resume: bool, default False
        if set to true, the code will restore the weights of the model
        but also restore the optimizer's state, as well as the
        hyperparameters used, and the scheduler.
    optimizer_name: str, default 'Adam'
        the name of the optimizer: see 'torch.optim' for a description of
        available optimizers.
    learning_rate: float, default 1e-3
        the optimizer learning rate.
    loss_name: str, default 'NLLLoss'
        the name of the loss: see 'torch.nn' for a description of
        available losses.
    metrics: list of str
        a list of extra metrics that will be computed.
    use_cuda: bool, default False
        whether to use GPU or CPU.
    kwargs: dict
        specify directly a custom 'optimizer' or 'loss'. Can also be used
        to set specific optimizer parameters.
    """
    if self.__net__ is not None:
        logger.debug("Creating network '{0}'...".format(self.__net__))
        logger.debug(" family: {0}".format(self.__family__))
        logger.debug(" params: {0}".format(net_params))
        if net_params is None or not isinstance(net_params, NetParameters):
            raise ValueError("Please specify network parameters.")
        self.model = self.__net__(**net_params.net_kwargs)
    Base.__init__(self,
                  optimizer_name=optimizer_name,
                  learning_rate=learning_rate,
                  loss_name=loss_name,
                  metrics=metrics,
                  use_cuda=use_cuda,
                  pretrained=pretrained,
                  resume=resume,
                  **kwargs)
def run(self):
    """ Robustness study: re-evaluate the last-epoch checkpoint of each
    fold while injecting Gaussian noise of increasing standard deviation
    into the input pipeline, then dump all collected (prediction,
    ground truth) pairs per noise level to a single pickle file.
    """
    # Only the final training epoch of each fold is tested.
    epochs_tested = [[self.args.nb_epochs - 1]
                     for _ in range(self.manager.number_of_folds)]
    folds_to_test = self.get_folds_to_test()
    std_noise = [0, 0.05, 0.1, 0.15, 0.20]
    nb_repetitions = 5  # nb of repetitions per Gaussian Noise
    results = {std: [] for std in std_noise}
    for sigma in std_noise:
        # Rebuild the data manager so the input transforms inject noise
        # with the current standard deviation.
        self.manager = BaseTrainer.build_data_manager(
            self.args,
            input_transforms=[
                Crop((1, 121, 128, 121)),
                Padding([1, 128, 128, 128], mode='constant'),
                Normalize(),
                GaussianNoise(sigma)
            ])
        for _ in range(nb_repetitions):
            for fold in folds_to_test:
                for epoch in epochs_tested[fold]:
                    pretrained_path = self.args.pretrained_path or \
                        os.path.join(
                            self.args.checkpoint_dir,
                            get_chk_name(self.args.exp_name, fold, epoch))
                    outfile = self.args.outfile_name or (
                        "Test_" + self.args.exp_name)
                    exp_name = "{}_fold{}_epoch{}".format(
                        outfile, fold, epoch)
                    model = Base(model=self.net,
                                 loss=self.loss,
                                 metrics=self.args.metrics,
                                 pretrained=pretrained_path,
                                 use_cuda=self.args.cuda)
                    # Only predictions and targets are kept; inputs,
                    # loss and metrics are discarded.
                    y, _, y_true, _, _ = model.testing(
                        self.manager.get_dataloader(test=True),
                        with_visuals=False,
                        with_logit=self.args.with_logit,
                        predict=self.args.predict,
                        saving_dir=None,
                        exp_name=exp_name,
                        standard_optim=getattr(self.net, 'std_optim',
                                               True))
                    results[sigma].append([y, y_true])
    out_path = os.path.join(
        self.args.checkpoint_dir,
        'Robustness_' + self.args.exp_name + '.pkl')
    with open(out_path, 'wb') as f:
        pickle.dump(results, f)
num_workers=5) # Train only linear layer at the end of encoder nets[mod] = MultiModal_MNIST_SVHN([mod], latent_dim=32, mode="linear_classifier") # Freeze all the parameters except the last layer freeze_until(nets[mod], "%s_encoder.head" % mod) if training: for pth in models_pth: optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, nets[mod].parameters()), lr=1e-3, weight_decay=1e-5) models[mod][pth] = Base(model=nets[mod], pretrained=pth, metrics=["balanced_accuracy"], use_cuda=True, loss=nn.CrossEntropyLoss(), optimizer=optimizer) models[mod][pth].training(managers[mod], nb_epochs, checkpointdir=os.path.dirname(pth), exp_name="Linear_%s_Pretrained" % mod.upper(), nb_epochs_per_saving=nb_epochs) _, _, _, _, metrics = models[mod][pth].testing( managers[mod].get_dataloader(test=True)) print("{} with {} --> {}".format(mod, pth, metrics), flush=True) ## 2) GradCAM on Pre-trained networks for mod in modalities: if mod == "mnist":