    def train_predictor(self, x_old, x_new):
        """Train the state predictor on consecutive state pairs."""
        # Normalization range padded by 10 standard deviations beyond the
        # observed extrema of the old state.
        cmin_x = np.min(x_old) - 10 * np.std(x_old)
        cmax_x = np.max(x_old) + 10 * np.std(x_old)
        x_old = normalize(x_old, cmin_x, cmax_x)
        x_new = normalize(x_new, cmin_x, cmax_x)
        x_new = preprocess_data(x_new)
        x_old = preprocess_data(x_old)
        # Add a trailing channel dimension before feeding the network.
        self.Predictor.train_step(np.expand_dims(x_old, 3),
                                  np.expand_dims(x_new, 3),
                                  L=50)

    def train_noise_predictor(self, z_new):
        """Train the noise predictor on sub-sampled vs. full measurements."""
        # Normalization range padded by 3 standard deviations beyond the
        # observed extrema of the measurements.
        cmin_z = np.min(z_new) - 3 * np.std(z_new)
        cmax_z = np.max(z_new) + 3 * np.std(z_new)
        # Draw a sample of size self.p_len from the new measurements.
        z_sample = sample(z_new, self.p_len)
        z_sample_normalized = normalize(z_sample, cmin_z, cmax_z)
        z_new_normalized = normalize(z_new, cmin_z, cmax_z)
        z_new_normalized = preprocess_data(z_new_normalized)
        z_sample_normalized = preprocess_data(z_sample_normalized)
        self.NoisePredictor.train_step(np.expand_dims(z_sample_normalized, 3),
                                       np.expand_dims(z_new_normalized, 3),
                                       L=50,
                                       loss_type="l2")

    def train_updator(self, x_old, x_new, z_new):
        """Train the update network on (forecast, measurements) -> new state."""
        cmin_z = np.min(z_new) - 3 * np.std(z_new)
        cmax_z = np.max(z_new) + 3 * np.std(z_new)
        if self.H is None:
            # No observation operator given: take the state range from the state.
            cmin_x = np.min(x_old) - 10 * np.std(x_old)
            cmax_x = np.max(x_old) + 10 * np.std(x_old)
        else:
            # Derive the state normalization range from the measurement range via H.
            cmin_x = cmin_z / self.H
            cmax_x = cmax_z / self.H
        z_new = normalize(z_new, cmin_z, cmax_z)
        x_old = normalize(x_old, cmin_x, cmax_x)
        x_new = normalize(x_new, cmin_x, cmax_x)
        x_new = preprocess_data(x_new)
        x_old = preprocess_data(x_old)
        # Forecast the new state with the (frozen) predictor generator.
        x_new_hat = self.Predictor.generator(np.expand_dims(x_old, 3),
                                             training=False)[0].numpy()
        x_new_hat = np.reshape(x_new_hat, (-1))
        # Combine the forecast and the normalized measurements into one input.
        input_data_update = preprocess_Bayesian_data(x_new_hat, z_new)
        self.Updator.train_step(np.expand_dims(input_data_update, 0),
                                np.expand_dims(x_new, 3),
                                L=50,
                                loss_type=self.loss_type)

    def predict_var(self, x_old, z_new, Ns=30):
        """Monte Carlo variance of the updated state over Ns samples."""
        cmin_z = np.min(z_new) - 3 * np.std(z_new)
        cmax_z = np.max(z_new) + 3 * np.std(z_new)
        if self.H is None:
            cmin_x = np.min(x_old) - 10 * np.std(x_old)
            cmax_x = np.max(x_old) + 10 * np.std(x_old)
        else:
            cmin_x = cmin_z / self.H
            cmax_x = cmax_z / self.H
        x_old_normalized = normalize(x_old, cmin_x, cmax_x)
        x_old_normalized = preprocess_data(x_old_normalized)
        # One forecast of the new state, reused for every Monte Carlo sample.
        x_new_hat = self.Predictor.generator(
            np.expand_dims(x_old_normalized, 3), training=False)[0].numpy()
        x_new_hat = np.reshape(x_new_hat, (-1))
        x_stack = []
        for ii in range(Ns):
            # Re-sample the measurements for each realization.
            z_sample = sample(z_new, self.p_len)
            z_sample_normalized = normalize(z_sample, cmin_z, cmax_z)
            z_sample_normalized = preprocess_data(z_sample_normalized)

            # Noise realization from the noise predictor (not used further here).
            noise_hat = self.NoisePredictor.generator(
                np.expand_dims(z_sample_normalized, 3),
                training=False)[0].numpy()
            noise_hat = np.reshape(noise_hat, (-1))

            # Updated state for this measurement sample, in physical units.
            input_data_update = preprocess_Bayesian_data(x_new_hat,
                                                         z_sample_normalized)
            x_new_update = self.Updator.generator(
                np.expand_dims(input_data_update, 0),
                training=False)[0].numpy()
            x_new_update = np.reshape(x_new_update, (-1))
            x_new_update = unnormalize(x_new_update, cmin_x, cmax_x)
            x_stack.append(x_new_update)
        x_stack = np.array(x_stack)
        # Element-wise variance across the Ns realizations.
        var = np.var(x_stack, axis=0)
        return var, x_stack

    def prop(self, x_old, z_new):
        """Propagate the old state one step with the trained predictor."""
        cmin_z = np.min(z_new) - 3 * np.std(z_new)
        cmax_z = np.max(z_new) + 3 * np.std(z_new)
        if self.H is None:
            cmin_x = np.min(x_old) - 10 * np.std(x_old)
            cmax_x = np.max(x_old) + 10 * np.std(x_old)
        else:
            cmin_x = cmin_z / self.H
            cmax_x = cmax_z / self.H
        x_old = normalize(x_old, cmin_x, cmax_x)
        x_old = preprocess_data(x_old)
        x_new_hat = self.Predictor.generator(np.expand_dims(x_old, 3),
                                             training=False)[0].numpy()
        x_new_hat = np.reshape(x_new_hat, (-1))
        # Return the forecast in physical (unnormalized) units.
        x_new_hat = unnormalize(x_new_hat, cmin_x, cmax_x)
        return x_new_hat

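    # Example assimilation cycle (sketch only; `model`, `x_prev`, `x_curr` and
    # `z_curr` are hypothetical names, assuming an instance of this class and
    # NumPy arrays shaped as the helpers above expect):
    #
    #     model.train_predictor(x_prev, x_curr)        # learn state propagation
    #     model.train_noise_predictor(z_curr)          # learn measurement noise
    #     model.train_updator(x_prev, x_curr, z_curr)  # learn the Bayesian update
    #
    #     x_forecast = model.prop(x_prev, z_curr)             # forecast step
    #     var, x_samples = model.predict_var(x_prev, z_curr)  # update ensemble
    #     x_analysis = x_samples.mean(axis=0)                 # e.g. posterior mean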