"""Bayes-by-Backprop MLP experiments: a Gaussian-likelihood regression variant
and a categorical-likelihood classification variant of a shared Experiment
training harness."""

import torch
from torch.optim import Adam

# NOTE: the module paths below are assumptions about this repo's layout;
# adjust them to wherever BNN, metrics, folder_name, DEFAULT_DATA_FOLDER,
# and the Experiment base class actually live.
from models import BNN
import metrics
from datasets import DEFAULT_DATA_FOLDER
from experiments import Experiment, folder_name
class ExperimentBBBMLPReg(Experiment):

    def __init__(self, data_set, model_params, train_params, optim_params,
                 evals_per_epoch=1, normalize_x=False, normalize_y=False,
                 results_folder="./results", data_folder=DEFAULT_DATA_FOLDER,
                 use_cuda=torch.cuda.is_available()):
        super().__init__(data_set, model_params, train_params, optim_params,
                         evals_per_epoch, normalize_x, normalize_y,
                         results_folder, data_folder, use_cuda)

        # Define name for experiment class
        experiment_name = "bbb_mlp_reg"

        # Define folder name for results
        self.folder_name = folder_name(experiment_name, data_set, model_params,
                                       train_params, optim_params, results_folder)

        # Initialize model
        self.model = BNN(input_size=self.data.num_features,
                         hidden_sizes=model_params['hidden_sizes'],
                         output_size=self.data.num_classes,
                         act_func=model_params['act_func'],
                         prior_prec=model_params['prior_prec'],
                         prec_init=optim_params['prec_init'])
        if use_cuda:
            self.model = self.model.cuda()

        # Define prediction function: a list of MC samples of predicted means
        def prediction(x):
            mu_list = [self.model(x) for _ in range(self.train_params['train_mc_samples'])]
            return mu_list
        self.prediction = prediction

        # Define objective: average negative ELBO with Gaussian likelihood
        def objective(mu_list, y):
            return metrics.avneg_elbo_gaussian(mu_list, y,
                                               tau=self.model_params['noise_prec'],
                                               train_set_size=self.data.get_train_size(),
                                               kl=self.model.kl_divergence())
        self.objective = objective

        # Initialize optimizer
        self.optimizer = Adam(self.model.parameters(),
                              lr=optim_params['learning_rate'],
                              betas=optim_params['betas'],
                              eps=1e-8)

        # Initialize metric history
        self.metric_history = dict(elbo_neg_ave=[],
                                   train_pred_logloss=[], train_pred_rmse=[],
                                   test_pred_logloss=[], test_pred_rmse=[])

        # Initialize final metric
        self.final_metric = dict(elbo_neg_ave=[],
                                 train_pred_logloss=[], train_pred_rmse=[],
                                 test_pred_logloss=[], test_pred_rmse=[])

    def _evaluate_model(self, metric_dict, x_train, y_train, x_test, y_test):

        # Unnormalize noise precision
        if self.normalize_y:
            tau = self.model_params['noise_prec'] / (self.y_std ** 2)
        else:
            tau = self.model_params['noise_prec']

        # Normalize train x
        if self.normalize_x:
            x_train = (x_train - self.x_means) / self.x_stds

        # Get train predictions
        mu_list = [self.model(x_train) for _ in range(self.train_params['eval_mc_samples'])]

        # Unnormalize train predictions
        if self.normalize_y:
            mu_list = [self.y_mean + self.y_std * mu for mu in mu_list]

        # Store train metrics
        metric_dict['train_pred_logloss'].append(metrics.predictive_avneg_loglik_gaussian(mu_list, y_train, tau=tau).detach().cpu().item())
        metric_dict['train_pred_rmse'].append(metrics.predictive_rmse(mu_list, y_train).detach().cpu().item())
        metric_dict['elbo_neg_ave'].append(metrics.avneg_elbo_gaussian(mu_list, y_train, tau=tau, train_set_size=self.data.get_train_size(), kl=self.model.kl_divergence()).detach().cpu().item())

        # Normalize test x
        if self.normalize_x:
            x_test = (x_test - self.x_means) / self.x_stds

        # Get test predictions
        mu_list = [self.model(x_test) for _ in range(self.train_params['eval_mc_samples'])]

        # Unnormalize test predictions
        if self.normalize_y:
            mu_list = [self.y_mean + self.y_std * mu for mu in mu_list]

        # Store test metrics
        metric_dict['test_pred_logloss'].append(metrics.predictive_avneg_loglik_gaussian(mu_list, y_test, tau=tau).detach().cpu().item())
        metric_dict['test_pred_rmse'].append(metrics.predictive_rmse(mu_list, y_test).detach().cpu().item())

    def _print_progress(self, epoch):

        # Print progress
        print('Epoch [{}/{}], Test RMSE: {:.4f}, Train Logloss: {:.4f}, Test Logloss: {:.4f}'.format(
            epoch + 1,
            self.train_params['num_epochs'],
            self.metric_history['test_pred_rmse'][-1],
            self.metric_history['train_pred_logloss'][-1],
            self.metric_history['test_pred_logloss'][-1]))
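    # NOTE: a minimal _closure sketch, assuming the base Experiment class
    # expects each subclass to supply one (the classification class below
    # defines its own and no regression closure appears in this file). It
    # mirrors the classification closure, feeding MC-sampled predicted means
    # to the Gaussian ELBO objective defined in __init__.
    def _closure(self, x, y):
        self.optimizer.zero_grad()
        mu_list = self.prediction(x)
        loss = self.objective(mu_list, y)
        loss.backward()
        return loss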
class ExperimentBBBMLPClass(Experiment):

    def __init__(self, data_set, model_params, train_params, optim_params,
                 evals_per_epoch=1, normalize_x=False,
                 results_folder="./results", data_folder=DEFAULT_DATA_FOLDER,
                 use_cuda=torch.cuda.is_available()):
        # Pass keyword arguments here: this class has no normalize_y, so
        # positional arguments would shift out of place in the base signature.
        super().__init__(data_set, model_params, train_params, optim_params,
                         evals_per_epoch=evals_per_epoch,
                         normalize_x=normalize_x,
                         results_folder=results_folder,
                         data_folder=data_folder,
                         use_cuda=use_cuda)

        # Define name for experiment class
        experiment_name = "bbb_mlp_class"

        # Define folder name for results
        self.folder_name = folder_name(experiment_name, data_set, model_params,
                                       train_params, optim_params, results_folder)

        # Initialize model
        self.model = BNN(input_size=self.data.num_features,
                         hidden_sizes=model_params['hidden_sizes'],
                         output_size=self.data.num_classes,
                         act_func=model_params['act_func'],
                         prior_prec=model_params['prior_prec'],
                         prec_init=optim_params['prec_init'])
        if use_cuda:
            self.model = self.model.cuda()

        # Define prediction function: a list of MC samples of class logits
        def prediction(x):
            logits_list = [self.model(x) for _ in range(self.train_params['train_mc_samples'])]
            return logits_list
        self.prediction = prediction

        # Define objective: average negative ELBO with categorical likelihood
        def objective(logits_list, y):
            return metrics.avneg_elbo_categorical(logits_list, y,
                                                  train_set_size=self.data.get_train_size(),
                                                  kl=self.model.kl_divergence())
        self.objective = objective

        # Initialize optimizer
        self.optimizer = Adam(self.model.parameters(),
                              lr=optim_params['learning_rate'],
                              betas=optim_params['betas'],
                              eps=1e-8)

        # Initialize metric history
        self.metric_history = dict(elbo_neg_ave=[],
                                   train_pred_logloss=[], train_pred_accuracy=[],
                                   test_pred_logloss=[], test_pred_accuracy=[])

        # Initialize final metric
        self.final_metric = dict(elbo_neg_ave=[],
                                 train_pred_logloss=[], train_pred_accuracy=[],
                                 test_pred_logloss=[], test_pred_accuracy=[])

    def _evaluate_model(self, metric_dict, x_train, y_train, x_test, y_test):

        # Normalize train x
        if self.normalize_x:
            x_train = (x_train - self.x_means) / self.x_stds

        # Get train predictions
        logits_list = [self.model(x_train) for _ in range(self.train_params['eval_mc_samples'])]

        # Store train metrics
        metric_dict['train_pred_logloss'].append(metrics.predictive_avneg_loglik_categorical(logits_list, y_train).detach().cpu().item())
        metric_dict['train_pred_accuracy'].append(metrics.softmax_predictive_accuracy(logits_list, y_train).detach().cpu().item())
        metric_dict['elbo_neg_ave'].append(metrics.avneg_elbo_categorical(logits_list, y_train, train_set_size=self.data.get_train_size(), kl=self.model.kl_divergence()).detach().cpu().item())

        # Normalize test x
        if self.normalize_x:
            x_test = (x_test - self.x_means) / self.x_stds

        # Get test predictions
        logits_list = [self.model(x_test) for _ in range(self.train_params['eval_mc_samples'])]

        # Store test metrics
        metric_dict['test_pred_logloss'].append(metrics.predictive_avneg_loglik_categorical(logits_list, y_test).detach().cpu().item())
        metric_dict['test_pred_accuracy'].append(metrics.softmax_predictive_accuracy(logits_list, y_test).detach().cpu().item())

    def _print_progress(self, epoch):

        # Print progress
        print('Epoch [{}/{}], Neg. Ave. ELBO: {:.4f}, Train Logloss: {:.4f}, Test Logloss: {:.4f}'.format(
            epoch + 1,
            self.train_params['num_epochs'],
            self.metric_history['elbo_neg_ave'][-1],
            self.metric_history['train_pred_logloss'][-1],
            self.metric_history['test_pred_logloss'][-1]))

    def _closure(self, x, y):
        self.optimizer.zero_grad()
        logits_list = self.prediction(x)
        loss = self.objective(logits_list, y)
        loss.backward()
        return loss
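# A minimal usage sketch. Only the hyperparameter keys read by this file are
# shown; the data_set name and concrete values are illustrative assumptions,
# train_params may need extra keys consumed by the base class (e.g. a batch
# size), and the run() call assumes the base Experiment exposes a training
# entry point, which is not shown in this file.
if __name__ == "__main__":
    model_params = {'hidden_sizes': [50],
                    'act_func': 'relu',
                    'prior_prec': 1.0,
                    'noise_prec': 1.0}
    train_params = {'num_epochs': 40,
                    'train_mc_samples': 10,
                    'eval_mc_samples': 100}
    optim_params = {'learning_rate': 1e-3,
                    'betas': (0.9, 0.999),
                    'prec_init': 10.0}

    experiment = ExperimentBBBMLPReg(data_set='boston',
                                     model_params=model_params,
                                     train_params=train_params,
                                     optim_params=optim_params,
                                     normalize_x=True,
                                     normalize_y=True)
    # experiment.run()  # hypothetical: depends on the base Experiment API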