def _get_mu_sigma_single(self, autoencoder, x):
    """Run `autoencoder` on `x` once and convert its outputs to numpy.

    Returns:
        (y_mu, y_sigma, log_noise) — or, when cluster decoding is
        enabled, (y_mu, y_sigma, log_noise, y_cluster).
        `y_mu` is flattened; `y_sigma` is a tuple of arrays in the
        full-covariance case, otherwise a flattened array.
    """
    ae_output = autoencoder(x)
    # First output head is always the reconstruction mean.
    y_mu = ae_output[0]
    if self.decoder_sigma_enabled:
        y_sigma = ae_output[self.decoder_sig_index]
    else:
        # No sigma head: fall back to unit variance.
        y_sigma = torch.ones_like(y_mu)
    if self.decoder_cluster_enabled:
        y_cluster = ae_output[
            self.decoder_cluster_index].detach().cpu().numpy()

    #convert to numpy
    y_mu = y_mu.detach().cpu().numpy()
    if self.decoder_full_cov_enabled:
        #convert chol tril and log noise to numpy
        y_sigma = tuple(
            [y_sigma_i.detach().cpu().numpy() for y_sigma_i in y_sigma])
    else:
        # NOTE(review): flatten is applied only in this diagonal-sigma
        # branch; the full-covariance tuple above is left unflattened —
        # confirm that is the intended asymmetry.
        y_sigma = y_sigma.detach().cpu().numpy()
        y_sigma = flatten_np(y_sigma)
    # Homoscedastic noise term lives on the autoencoder itself.
    log_noise = autoencoder.log_noise.detach().cpu().numpy()

    if self.decoder_cluster_enabled:
        return flatten_np(y_mu), y_sigma, log_noise, y_cluster
    else:
        return flatten_np(y_mu), y_sigma, log_noise
def calc_output_single(self,
                       autoencoder,
                       x,
                       select_keys=None):
    """
    Computes the output of autoencoder per sample.

    Given the selected keys, we specify the output not only to be the
    reconstructed signal, but options of squared error (`se`), Gaussian
    negative loglikelihood (`nll_homo`), etc. This function is used later
    for every sampled autoencoder from the posterior weights.

    Args:
        autoencoder: one autoencoder sampled from the posterior weights.
        x: input batch; must support `.detach().cpu().numpy()`
            (presumably a torch tensor — TODO confirm against callers).
        select_keys: which per-sample outputs to compute. `None` (the
            default) selects every supported key.

    Returns:
        Whatever `self._calc_output_single` produces for `select_keys`.
    """
    # `None` sentinel instead of a mutable list default: a list default is
    # created once and shared across every call (mutable-default pitfall).
    if select_keys is None:
        select_keys = [
            "y_mu", "y_sigma", "se", "bce", "cbce", "nll_homo", "nll_sigma"
        ]

    #per sample
    if self.decoder_cluster_enabled:
        # NOTE(review): y_cluster is unpacked but never used below —
        # confirm whether it should be forwarded to _calc_output_single.
        y_mu, y_sigma, log_noise, y_cluster = self._get_mu_sigma_single(
            autoencoder, x)
    else:
        y_mu, y_sigma, log_noise = self._get_mu_sigma_single(
            autoencoder, x)

    #clamp it to min max
    if self.output_clamp:
        y_mu = np.clip(y_mu,
                       a_min=self.output_clamp[0],
                       a_max=self.output_clamp[1])

    #flatten x into numpy array
    x = flatten_np(x.detach().cpu().numpy())

    return self._calc_output_single(x,
                                    y_mu=y_mu,
                                    y_sigma=y_sigma,
                                    log_noise=log_noise,
                                    select_keys=select_keys)
def inverse_transform(self, x_test):
    """Map scaled data back to the original range, preserving shape.

    Inputs with more than two dimensions are flattened for the scaler
    and reshaped back afterwards.
    """
    # Guard clause: the scaler handles 1-D/2-D input directly.
    if len(x_test.shape) <= 2:
        return self.scaler.inverse_transform(x_test)
    flat = flatten_np(x_test)
    return self.scaler.inverse_transform(flat).reshape(x_test.shape)
def transform(self, x_test):
    """Scale `x_test` with the fitted scaler, preserving its shape.

    Inputs with more than two dimensions are flattened for the scaler and
    reshaped back. When `self.clip` is set, the result is clipped to [0, 1].
    """
    if len(x_test.shape) > 2:
        scaled = self.scaler.transform(flatten_np(x_test))
        result = scaled.reshape(x_test.shape)
    else:
        result = self.scaler.transform(x_test)
    return np.clip(result, 0, 1) if self.clip else result
def fit_transform(self, x_train, y_train=None):
    """Fit the scaler on `x_train` and return the scaled data.

    Inputs with more than two dimensions are flattened for fitting and
    reshaped back. When `self.clip` is set, the result is clipped to
    [0, 1]. `y_train` is accepted for API compatibility and ignored.
    """
    if len(x_train.shape) > 2:
        fitted = self.scaler.fit_transform(flatten_np(x_train))
        result = fitted.reshape(x_train.shape)
    else:
        result = self.scaler.fit_transform(x_train)
    return np.clip(result, 0, 1) if self.clip else result
def fit(self, x_train, y_train=None):
    """Fit the underlying scaler on `x_train`.

    Inputs with more than two dimensions are flattened before fitting.
    `y_train` is accepted for API compatibility and ignored.
    """
    data = flatten_np(x_train) if len(x_train.shape) > 2 else x_train
    self.scaler.fit(data)