def objective(mu_list, y):
    """Average negative ELBO (Gaussian likelihood) for MC means *mu_list* and targets *y*.

    Closure over ``self``: reads the configured noise precision, the current
    train-set size, and the model's KL divergence.
    """
    # NOTE(review): here the KL comes from self.model, while _evaluate_model
    # uses self.optimizer.kl_divergence() — confirm both are intended.
    noise_precision = self.model_params['noise_prec']
    n_train = self.data.get_current_train_size()
    kl_term = self.model.kl_divergence()
    return metrics.avneg_elbo_gaussian(
        mu_list, y,
        tau=noise_precision,
        train_set_size=n_train,
        kl=kl_term)
def _predict_unnormalized(self, x):
    """Return MC predictive means for *x*, mapped back to the original y-scale.

    Normalizes the inputs when ``self.normalize_x`` is set, draws
    ``eval_mc_samples`` Monte-Carlo predictions from the model via the
    optimizer, and un-standardizes each predicted mean when
    ``self.normalize_y`` is set.
    """
    # Normalize x
    if self.normalize_x:
        x = (x - self.x_means) / self.x_stds
    # Get predictions
    mu_list = self.optimizer.get_mc_predictions(
        self.model.forward,
        inputs=x,
        mc_samples=self.train_params['eval_mc_samples'],
        ret_numpy=False)
    # Unnormalize predictions
    if self.normalize_y:
        mu_list = [self.y_mean + self.y_std * mu for mu in mu_list]
    return mu_list

def _evaluate_model(self, metric_dict, x_train, y_train, x_test, y_test):
    """Append current train/test metrics to *metric_dict*.

    Appends one scalar to each of the lists stored under the keys
    ``'train_pred_logloss'``, ``'train_pred_rmse'``, ``'elbo_neg_ave'``,
    ``'test_pred_logloss'`` and ``'test_pred_rmse'``.

    The duplicated normalize -> predict -> unnormalize pipeline for the two
    splits is factored into :meth:`_predict_unnormalized`.
    """
    # Unnormalize noise precision when targets were standardized.
    if self.normalize_y:
        tau = self.model_params['noise_prec'] / (self.y_std**2)
    else:
        tau = self.model_params['noise_prec']

    # --- Train split ---
    mu_list = self._predict_unnormalized(x_train)
    metric_dict['train_pred_logloss'].append(
        metrics.predictive_avneg_loglik_gaussian(
            mu_list, y_train, tau=tau).detach().cpu().item())
    metric_dict['train_pred_rmse'].append(
        metrics.predictive_rmse(mu_list, y_train).detach().cpu().item())
    # ELBO is recorded on the train split only (its KL term is scaled by
    # the current train-set size).
    metric_dict['elbo_neg_ave'].append(
        metrics.avneg_elbo_gaussian(
            mu_list, y_train, tau=tau,
            train_set_size=self.data.get_current_train_size(),
            kl=self.optimizer.kl_divergence()).detach().cpu().item())

    # --- Test split ---
    mu_list = self._predict_unnormalized(x_test)
    metric_dict['test_pred_logloss'].append(
        metrics.predictive_avneg_loglik_gaussian(
            mu_list, y_test, tau=tau).detach().cpu().item())
    metric_dict['test_pred_rmse'].append(
        metrics.predictive_rmse(mu_list, y_test).detach().cpu().item())