Example #1
    def _init_optimizer(self):
        self.optimizer = Vadam(
            self.model.parameters(),
            lr=self.optim_params['learning_rate'],
            betas=self.optim_params['betas'],
            prior_prec=self.model_params['prior_prec'],
            prec_init=self.optim_params['prec_init'],
            num_samples=self.train_params['train_mc_samples'],
            train_set_size=self.data.get_current_train_size())
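Vadam follows PyTorch's closure-based optimizer interface: step() re-evaluates the loss once for each of the num_samples Monte Carlo weight draws. A minimal sketch of the matching training step, assuming the surrounding experiment class also defines self.model and self.objective (the _train_step name and its arguments are illustrative, not part of the example):

    def _train_step(self, x, y):
        # Closure that step() re-evaluates for every MC weight sample.
        def closure():
            self.optimizer.zero_grad()
            loss = self.objective(self.model(x), y)
            loss.backward()
            return loss
        # Assumed: step() returns the loss averaged over the MC samples.
        return self.optimizer.step(closure)

Example #3 below uses the same pattern as a _closure method.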
Example #2
class ExperimentVadamMLPReg(Experiment):

    def __init__(self, data_set, model_params, train_params, optim_params,
                 evals_per_epoch=1, normalize_x=False, normalize_y=False,
                 results_folder="./results", data_folder=DEFAULT_DATA_FOLDER,
                 use_cuda=torch.cuda.is_available()):
        super().__init__(data_set, model_params, train_params, optim_params,
                         evals_per_epoch, normalize_x, normalize_y,
                         results_folder, data_folder, use_cuda)

        # Define name for experiment class
        experiment_name = "vadam_mlp_reg"

        # Define folder name for results
        self.folder_name = folder_name(experiment_name, data_set, model_params, train_params, optim_params, results_folder)

        # Initialize model
        self.model = MLP(input_size = self.data.num_features,
                         hidden_sizes = model_params['hidden_sizes'],
                         output_size = self.data.num_classes,
                         act_func = model_params['act_func'])
        if use_cuda:
            self.model = self.model.cuda()

        # Define prediction function
        def prediction(x):
            mu = self.model(x)
            return mu
        self.prediction = prediction

        # Define objective
        def objective(mu, y):
            return metrics.avneg_loglik_gaussian(mu, y, tau = self.model_params['noise_prec'])
        self.objective = objective

        # Initialize optimizer
        self.optimizer = Vadam(self.model.parameters(),
                               lr = optim_params['learning_rate'],
                               betas = optim_params['betas'],
                               prior_prec = model_params['prior_prec'],
                               prec_init = optim_params['prec_init'],
                               num_samples = train_params['train_mc_samples'],
                               train_set_size = self.data.get_train_size())

        # Initialize metric history
        self.metric_history = dict(elbo_neg_ave=[],
                                   train_pred_logloss=[], train_pred_rmse=[],
                                   test_pred_logloss=[], test_pred_rmse=[])

        # Initialize final metric
        self.final_metric = dict(elbo_neg_ave=[],
                                 train_pred_logloss=[], train_pred_rmse=[],
                                 test_pred_logloss=[], test_pred_rmse=[])

    def _evaluate_model(self, metric_dict, x_train, y_train, x_test, y_test):
        
        # Unnormalize noise precision
        if self.normalize_y:
            tau = self.model_params['noise_prec'] / (self.y_std**2)
        else:
            tau = self.model_params['noise_prec']
        
        # Normalize train x
        if self.normalize_x:
            x_train = (x_train - self.x_means) / self.x_stds
        
        # Get train predictions
        mu_list = self.optimizer.get_mc_predictions(
            self.model.forward,
            inputs=x_train,
            mc_samples=self.train_params['eval_mc_samples'],
            ret_numpy=False)
        
        # Unnormalize train predictions
        if self.normalize_y:
            mu_list = [self.y_mean + self.y_std * mu for mu in mu_list]

        # Store train metrics
        metric_dict['train_pred_logloss'].append(
            metrics.predictive_avneg_loglik_gaussian(
                mu_list, y_train, tau=tau).detach().cpu().item())
        metric_dict['train_pred_rmse'].append(
            metrics.predictive_rmse(mu_list, y_train).detach().cpu().item())
        metric_dict['elbo_neg_ave'].append(
            metrics.avneg_elbo_gaussian(
                mu_list, y_train, tau=tau,
                train_set_size=self.data.get_train_size(),
                kl=self.optimizer.kl_divergence()).detach().cpu().item())

        print("mean x_train: ")
        print(x_train.mean())

        print("mean y_train: ")
        print(y_train.mean())

        # Normalize test x
        if self.normalize_x:
            x_test = (x_test - self.x_means) / self.x_stds

        # Get test predictions
        mu_list = self.optimizer.get_mc_predictions(
            self.model.forward,
            inputs=x_test,
            mc_samples=self.train_params['eval_mc_samples'],
            ret_numpy=False)
        
        # Unnormalize test predictions
        if self.normalize_y:
            mu_list = [self.y_mean + self.y_std * mu for mu in mu_list]

        print("mean x_test: ")
        print(x_test.mean())
        #print(mu_list.shape)
        print("mean y_test: ")
        print(y_test.mean())
        metric_dict['test_pred_logloss'].append(
            metrics.predictive_avneg_loglik_gaussian(
                mu_list, y_test, tau=tau).detach().cpu().item())
        metric_dict['test_pred_rmse'].append(
            metrics.predictive_rmse(mu_list, y_test).detach().cpu().item())

    def _print_progress(self, epoch):

        # Print progress
        print('Epoch [{}/{}], Test RMSE: {:.4f}, Train Logloss: {:.4f}, Test Logloss: {:.4f}'.format(
                epoch + 1,
                self.train_params['num_epochs'],
                self.metric_history['test_pred_rmse'][-1],
                self.metric_history['train_pred_logloss'][-1],
                self.metric_history['test_pred_logloss'][-1]))
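For reference, the predictive metrics above average the Gaussian likelihood over the MC samples before taking the log. A self-contained sketch of that computation, assuming tau is a scalar precision (an illustration of the formula, not the vadam library's code):

import math
import torch

def predictive_avneg_loglik_gaussian_sketch(mu_list, y, tau):
    # log p(y) ~= log( (1/S) * sum_s N(y | mu_s, 1/tau) ), averaged over points
    mus = torch.stack(mu_list)                    # shape (S, N, ...)
    log_liks = -0.5 * tau * (y - mus) ** 2 + 0.5 * math.log(tau / (2 * math.pi))
    log_pred = torch.logsumexp(log_liks, dim=0) - math.log(len(mu_list))
    return -log_pred.mean()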
Example #3
class ExperimentVadamMLPClass(Experiment):
    def __init__(self,
                 data_set,
                 model_params,
                 train_params,
                 optim_params,
                 evals_per_epoch=1,
                 normalize_x=False,
                 results_folder="./results",
                 data_folder=DEFAULT_DATA_FOLDER,
                 use_cuda=torch.cuda.is_available()):
        super().__init__(data_set, model_params, train_params, optim_params,
                         evals_per_epoch, normalize_x, results_folder,
                         data_folder, use_cuda)

        # Define name for experiment class
        experiment_name = "vadam_mlp_class"

        # Define folder name for results
        self.folder_name = folder_name(experiment_name, data_set, model_params,
                                       train_params, optim_params,
                                       results_folder)

        # Initialize model
        self.model = MLP(input_size=self.data.num_features,
                         hidden_sizes=model_params['hidden_sizes'],
                         output_size=self.data.num_classes,
                         act_func=model_params['act_func'])
        if use_cuda:
            self.model = self.model.cuda()

        # Define prediction function
        def prediction(x):
            logits = self.model(x)
            return logits

        self.prediction = prediction

        # Define objective
        self.objective = metrics.avneg_loglik_categorical

        # Initialize optimizer
        self.optimizer = Vadam(self.model.parameters(),
                               lr=optim_params['learning_rate'],
                               betas=optim_params['betas'],
                               prior_prec=model_params['prior_prec'],
                               prec_init=optim_params['prec_init'],
                               num_samples=train_params['train_mc_samples'],
                               train_set_size=self.data.get_train_size())

        # Initialize metric history
        self.metric_history = dict(elbo_neg_ave=[],
                                   train_pred_logloss=[],
                                   train_pred_accuracy=[],
                                   test_pred_logloss=[],
                                   test_pred_accuracy=[])

        # Initialize final metric
        self.final_metric = dict(elbo_neg_ave=[],
                                 train_pred_logloss=[],
                                 train_pred_accuracy=[],
                                 test_pred_logloss=[],
                                 test_pred_accuracy=[])

    def _evaluate_model(self, metric_dict, x_train, y_train, x_test, y_test):

        # Normalize train x
        if self.normalize_x:
            x_train = (x_train - self.x_means) / self.x_stds

        # Get train predictions
        logits_list = self.optimizer.get_mc_predictions(
            self.model.forward,
            inputs=x_train,
            mc_samples=self.train_params['eval_mc_samples'],
            ret_numpy=False)

        # Store train metrics
        metric_dict['train_pred_logloss'].append(
            metrics.predictive_avneg_loglik_categorical(
                logits_list, y_train).detach().cpu().item())
        metric_dict['train_pred_accuracy'].append(
            metrics.softmax_predictive_accuracy(logits_list,
                                                y_train).detach().cpu().item())
        metric_dict['elbo_neg_ave'].append(
            metrics.avneg_elbo_categorical(
                logits_list,
                y_train,
                train_set_size=self.data.get_train_size(),
                kl=self.optimizer.kl_divergence()).detach().cpu().item())

        # Normalize test x
        if self.normalize_x:
            x_test = (x_test - self.x_means) / self.x_stds

        # Get test predictions
        logits_list = self.optimizer.get_mc_predictions(
            self.model.forward,
            inputs=x_test,
            mc_samples=self.train_params['eval_mc_samples'],
            ret_numpy=False)

        # Store test metrics
        metric_dict['test_pred_logloss'].append(
            metrics.predictive_avneg_loglik_categorical(
                logits_list, y_test).detach().cpu().item())
        metric_dict['test_pred_accuracy'].append(
            metrics.softmax_predictive_accuracy(logits_list,
                                                y_test).detach().cpu().item())

    def _print_progress(self, epoch):

        # Print progress
        print(
            'Epoch [{}/{}], Neg. Ave. ELBO: {:.4f}, Logloss: {:.4f}, Test Logloss: {:.4f}'
            .format(epoch + 1, self.train_params['num_epochs'],
                    self.metric_history['elbo_neg_ave'][-1],
                    self.metric_history['train_pred_logloss'][-1],
                    self.metric_history['test_pred_logloss'][-1]))

    def _closure(self, x, y):
        self.optimizer.zero_grad()
        logits = self.prediction(x)
        loss = self.objective(logits, y)
        loss.backward()
        return loss
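During training, _closure is handed to Vadam.step, which calls it once per MC weight sample. A hedged loop sketch (the _train_epoch helper and train_loader are assumptions, not part of the example):

    def _train_epoch(self, train_loader):
        # Hypothetical helper: one pass over the data using _closure above.
        for x, y in train_loader:
            loss = self.optimizer.step(lambda: self._closure(x, y))

The lambda is safe here despite capturing the loop variables, because step() consumes it before the next iteration.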
Example #4
        logits = model(x_test)
        metric_history['test_logloss'].append(
            avneg_loglik_bernoulli(logits, y_test).detach().cpu().item())
        metric_history['test_accuracy'].append(
            sigmoid_accuracy(logits, y_test).detach().cpu().item())

elif optimizer == "vadam":

    # Use the Vadam optimizer for VI

    from vadam.optimizers import Vadam
    from vadam.metrics import predictive_avneg_loglik_bernoulli, sigmoid_predictive_accuracy

    optimizer = Vadam(model.parameters(),
                      lr=learning_rate,
                      betas=betas,
                      prior_prec=prior_prec,
                      prec_init=prec_init,
                      train_set_size=data.get_train_size())

    # Evaluate using the predictive distribution

    metric_history = dict(train_logloss=[],
                          train_accuracy=[],
                          test_logloss=[],
                          test_accuracy=[])

    def evaluate_model(x_train, y_train, x_test, y_test):

        # Store train metrics
        logits_list = optimizer.get_mc_predictions(model.forward,
                                                   inputs=x_train,