def gen_noise(self, z_new):
    # Normalization bounds for the measurement, padded by 3 standard
    # deviations so generated values may exceed the observed range.
    cmin_z = np.min(z_new) - 3 * np.std(z_new)
    cmax_z = np.max(z_new) + 3 * np.std(z_new)

    # Draw a random noise-scale sample and normalize it the same way.
    z_sigma = gen_sample()
    cmin_z_sigma = np.min(z_sigma) - 3 * np.std(z_sigma)
    cmax_z_sigma = np.max(z_sigma) + 3 * np.std(z_sigma)
    z_sigma_normalized = normalize(z_sigma, cmin_z_sigma, cmax_z_sigma)
    z_sigma_normalized = preprocess_data(z_sigma_normalized)

    # Generate one noise realization, map it back to the measurement
    # scale, and smooth it.
    noise_hat = self.NoisePredictor.generator(
        np.expand_dims(z_sigma_normalized, 3), training=False)[0].numpy()
    noise_hat = np.reshape(noise_hat, (-1))
    noise_hat = unnormalize(noise_hat, cmin_z, cmax_z)
    noise_hat = smooth(noise_hat)
    return noise_hat
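# --- Hypothetical sketches of the helper functions used throughout this
# section. The repository's actual normalize/unnormalize/smooth may differ
# (the [-1, 1] scaling range and the smoothing window are assumptions);
# these are shown only to make the section self-contained.
import numpy as np

def normalize(v, cmin, cmax):
    # Min-max scale v from [cmin, cmax] to [-1, 1].
    return 2.0 * (v - cmin) / (cmax - cmin) - 1.0

def unnormalize(v, cmin, cmax):
    # Inverse of normalize: map [-1, 1] back to [cmin, cmax].
    return (v + 1.0) * (cmax - cmin) / 2.0 + cmin

def smooth(v, window=5):
    # Simple moving-average smoother (window length is an assumption).
    kernel = np.ones(window) / window
    return np.convolve(v, kernel, mode="same")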
def predict_var(self, x_old, z_new, Ns=30):
    # Bounds for the raw measurement and for its mean-subtracted
    # (unbiased) version; the latter rescales the generated noise so it
    # stays zero-mean.
    cmin_z = np.min(z_new) - 3 * np.std(z_new)
    cmax_z = np.max(z_new) + 3 * np.std(z_new)
    cmin_z_unb = np.min(z_new - np.mean(z_new)) - 3 * np.std(z_new)
    cmax_z_unb = np.max(z_new - np.mean(z_new)) + 3 * np.std(z_new)

    # State bounds: derived from the measurement bounds through the
    # observation operator H when it is known, otherwise from x_old.
    if self.H is None:
        cmin_x = np.min(x_old) - 10 * np.std(x_old)
        cmax_x = np.max(x_old) + 10 * np.std(x_old)
    else:
        cmin_x = cmin_z / self.H
        cmax_x = cmax_z / self.H

    # Forecast step: propagate the previous state with the Predictor.
    x_old_normalized = normalize(x_old, cmin_x, cmax_x)
    x_old_normalized = preprocess_data(x_old_normalized)
    x_new_hat = self.Predictor.generator(
        np.expand_dims(x_old_normalized, 3), training=False)[0].numpy()
    x_new_hat = np.reshape(x_new_hat, (-1))

    # Monte Carlo loop: perturb the measurement Ns times with generated
    # noise, run the update step on each realization, and take the
    # ensemble variance as the uncertainty estimate.
    x_stack = []
    for ii in range(Ns):
        z_sigma = gen_sample(p_len=self.p_len, v_size=len(x_new_hat))
        cmin_z_sigma = np.min(z_sigma) - 3 * np.std(z_sigma)
        cmax_z_sigma = np.max(z_sigma) + 3 * np.std(z_sigma)
        z_sigma_normalized = normalize(z_sigma, cmin_z_sigma, cmax_z_sigma)
        z_sigma_normalized = preprocess_data(z_sigma_normalized)

        noise_hat = self.NoisePredictor.generator(
            np.expand_dims(z_sigma_normalized, 3),
            training=False)[0].numpy()
        noise_hat = np.reshape(noise_hat, (-1))
        noise_hat = unnormalize(noise_hat, cmin_z_unb, cmax_z_unb)
        noise_hat = smooth(noise_hat)

        z_new_noisy = z_new + noise_hat
        cmin_z_n = np.min(z_new_noisy) - 3 * np.std(z_new_noisy)
        cmax_z_n = np.max(z_new_noisy) + 3 * np.std(z_new_noisy)
        z_new_noisy_normalized = normalize(z_new_noisy, cmin_z_n, cmax_z_n)

        # Update step: fuse forecast and perturbed measurement.
        input_data_update = preprocess_Bayesian_data(
            x_new_hat, z_new_noisy_normalized)
        x_new_update = self.Updator.generator(
            np.expand_dims(input_data_update, 0),
            training=False)[0].numpy()
        x_new_update = np.reshape(x_new_update, (-1))
        x_new_update = unnormalize(x_new_update, cmin_x, cmax_x)
        x_stack.append(x_new_update)

    x_stack = np.array(x_stack)
    var = np.var(x_stack, axis=0)
    return var, x_stack
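# Minimal usage sketch (assumed names): `model` stands for an instance of
# the class these methods belong to, and x_old/z_new are 1-D NumPy arrays
# of matching length. The +/-2-sigma band assumes roughly Gaussian
# ensemble spread.
#
#   var, x_stack = model.predict_var(x_old, z_new, Ns=30)
#   x_mean = np.mean(x_stack, axis=0)        # ensemble mean state estimate
#   band_lo = x_mean - 2.0 * np.sqrt(var)    # approximate 95% band
#   band_hi = x_mean + 2.0 * np.sqrt(var)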
def train_noise_predictor(self, z_new):
    # Input: a random noise-scale sample; target: the mean-subtracted
    # measurement, i.e. the empirical noise the generator should mimic.
    z_sigma = gen_sample(p_len=self.p_len, v_size=len(z_new))
    cmin_z_sigma = np.min(z_sigma) - 3 * np.std(z_sigma)
    cmax_z_sigma = np.max(z_sigma) + 3 * np.std(z_sigma)
    z_sigma_normalized = normalize(z_sigma, cmin_z_sigma, cmax_z_sigma)

    z_output = z_new - np.mean(z_new)
    cmin_z = np.min(z_output) - 3 * np.std(z_output)
    cmax_z = np.max(z_output) + 3 * np.std(z_output)
    z_output_normalized = normalize(z_output, cmin_z, cmax_z)

    z_sigma_normalized = preprocess_data(z_sigma_normalized)
    z_output_normalized = preprocess_data(z_output_normalized)

    # Run one training step of the noise predictor on this input/target
    # pair.
    self.NoisePredictor.train_step(np.expand_dims(z_sigma_normalized, 3),
                                   np.expand_dims(z_output_normalized, 3),
                                   L=50, loss_type="l2")
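# Hypothetical sketch of gen_sample, which supplies the random noise-scale
# input to the noise generator. The piecewise-constant form, the uniform
# amplitude range, and the default arguments below are assumptions, not
# the repository's actual implementation.
def gen_sample(p_len=10, v_size=256):
    # Piecewise-constant random scale (segments of length p_len)
    # modulating unit Gaussian noise: a heteroscedastic sample of
    # length v_size.
    n_pieces = int(np.ceil(v_size / p_len))
    scales = np.repeat(np.random.uniform(0.0, 1.0, n_pieces), p_len)[:v_size]
    return scales * np.random.randn(v_size)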
def predict_var(self, x_old, z_new):
    # Bounds of the mean-subtracted measurement, used to rescale the
    # generated noise to a zero-mean range.
    cmin_z = np.min(z_new - np.mean(z_new)) - 3 * np.std(z_new)
    cmax_z = np.max(z_new - np.mean(z_new)) + 3 * np.std(z_new)

    # State bounds via the observation operator H when available.
    if self.H is None:
        cmin_x = np.min(x_old) - 10 * np.std(x_old)
        cmax_x = np.max(x_old) + 10 * np.std(x_old)
    else:
        cmin_x = cmin_z / self.H
        cmax_x = cmax_z / self.H

    # Forecast step (Keras predict API variant).
    x_old_normalized = normalize(x_old, cmin_x, cmax_x)
    x_old_normalized = np.reshape(
        x_old_normalized, (1, self.input_shape[0], self.input_shape[1]))
    x_new_hat = self.Predictor.generator.predict(x_old_normalized)[0]
    x_new_hat = np.reshape(
        x_new_hat, (1, self.output_shape[0], self.output_shape[1]))

    # Monte Carlo loop over 30 perturbed-measurement realizations.
    x_stack = []
    for ii in range(30):
        z_sigma = gen_sample(self.output_shape[0])
        cmin_z_sigma = np.min(z_sigma) - 3 * np.std(z_sigma)
        cmax_z_sigma = np.max(z_sigma) + 3 * np.std(z_sigma)
        z_sigma_normalized = normalize(z_sigma, cmin_z_sigma, cmax_z_sigma)
        z_sigma_normalized = np.reshape(
            z_sigma_normalized,
            (1, self.output_shape[0], self.output_shape[1]))

        noise_hat = self.NoisePredictor.generator.predict(
            z_sigma_normalized)[0]
        noise_hat = np.reshape(noise_hat, (-1))
        noise_hat = unnormalize(noise_hat, cmin_z, cmax_z)
        noise_hat = smooth(noise_hat)

        z_new_noisy = z_new + noise_hat
        cmin_z_n = np.min(z_new_noisy) - 3 * np.std(z_new_noisy)
        cmax_z_n = np.max(z_new_noisy) + 3 * np.std(z_new_noisy)
        z_new_noisy_normalized = normalize(z_new_noisy, cmin_z_n, cmax_z_n)
        z_new_noisy_normalized = np.reshape(
            z_new_noisy_normalized,
            (1, self.output_shape[0], self.output_shape[1]))

        # Update step: stack forecast and perturbed measurement along the
        # last axis for the update network.
        input_data_update = np.concatenate(
            (x_new_hat, z_new_noisy_normalized), axis=2)
        x_new_update = self.Updator.generator.predict(input_data_update)[0]
        x_new_update = np.reshape(x_new_update, (-1))
        x_new_update = unnormalize(x_new_update, cmin_x, cmax_x)
        x_stack.append(x_new_update)

    x_stack = np.array(x_stack)
    var = np.var(x_stack, axis=0)
    return var
def predict_var(self, x_old, z_new):
    # Deterministic mean prediction, then perturb it with generated noise.
    x_new_hat = self.predict_mean(x_old, z_new)

    x_stack = []
    for ii in range(50):
        z_sigma = gen_sample()
        z_sigma = preprocess_data(z_sigma)
        z_sigma = normalize_v(z_sigma)
        z_sigma = np.expand_dims(z_sigma, 3)

        # Condition the noise generator on both the previous state and
        # the noise-scale sample (stacked channel-wise).
        input_data_update = np.concatenate((x_old, z_sigma), axis=3)
        noise_hat = self.NoisePredictor.generator(
            input_data_update, training=False)[0].numpy()

        x_new_hat_noisy = x_new_hat + noise_hat
        x_stack.append(x_new_hat_noisy)

    x_stack = np.array(x_stack)
    var = np.var(x_stack, axis=0)
    return var
def gen_noise(self, z_new, Ns=30):
    # Bounds of the mean-subtracted measurement; generated noise is
    # rescaled to this zero-mean range before being added to z_new.
    cmin_z = np.min(z_new - np.mean(z_new)) - 3 * np.std(z_new)
    cmax_z = np.max(z_new - np.mean(z_new)) + 3 * np.std(z_new)

    z_new_noisy_stack = []
    for ii in range(Ns):
        z_sigma = gen_sample(p_len=self.p_len, v_size=len(z_new))
        cmin_z_sigma = np.min(z_sigma) - 3 * np.std(z_sigma)
        cmax_z_sigma = np.max(z_sigma) + 3 * np.std(z_sigma)
        z_sigma_normalized = normalize(z_sigma, cmin_z_sigma, cmax_z_sigma)
        z_sigma_normalized = preprocess_data(z_sigma_normalized)

        noise_hat = self.NoisePredictor.generator(
            np.expand_dims(z_sigma_normalized, 3),
            training=False)[0].numpy()
        noise_hat = np.reshape(noise_hat, (-1))
        noise_hat = unnormalize(noise_hat, cmin_z, cmax_z)
        noise_hat = smooth(noise_hat)

        z_new_noisy = z_new + noise_hat
        z_new_noisy_stack.append(z_new_noisy)

    return z_new_noisy_stack
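# Example use of the perturbed-measurement stack (assumed names, matching
# the usage sketch above): the per-point spread of the learned noise can
# be inspected directly from the ensemble.
#
#   z_stack = np.array(model.gen_noise(z_new, Ns=30))
#   noise_std = np.std(z_stack - z_new, axis=0)   # learned noise spread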