def __init__(self, H=None, loss_type="l1", sh=128): self.Predictor = DeepFilter(input_shape=(sh, sh, 1), output_shape=(sh, sh, 1), n0_filter_zise=4, lr=1e-4, max_filters=256, n0_filters=32) self.NoisePredictor = DeepFilter(input_shape=(sh, sh, 1), output_shape=(sh, sh, 1), lr=1e-4, max_filters=64, n0_filter_zise=2, n0_filters=32) self.Updator = DeepFilter(input_shape=(sh, sh, 2), output_shape=(sh, sh, 1), n0_filter_zise=4, lr=2e-5, max_filters=512, n0_filters=64) self.H = H self.loss_type = loss_type self.p_len = 64
class DeepNoisyBayesianFilterLinearPredictor():
    # Bayesian filter with a fixed (linear) transition prior: the prediction
    # step just propagates the last sample, and only the noise model and the
    # update network are learned.
    def __init__(self, H=None):
        self.NoisePredictor = DeepFilter(input_shape=(128, 128, 1), output_shape=(128, 128, 1),
                                         lr=2e-5, max_filters=64, n0_filter_zise=2)
        self.Updator = DeepFilter(input_shape=(128, 128, 2), output_shape=(128, 128, 1),
                                  n0_filter_zise=4, lr=1e-5, max_filters=256)
        self.H = H

    def predict_linear(self, x_old):
        # Constant prior: repeat the most recent sample across the whole signal.
        x_new_hat = np.ones_like(x_old)
        x_new_hat = x_new_hat * x_old[-1]
        return x_new_hat

    def train_noise_predictor(self, z_new):
        # Learn to map the smoothed local variance of the measurement to a
        # zero-mean noise realization. Ranges are padded by 3 standard
        # deviations before normalization.
        z_sigma = smooth_var(z_new)
        cmin_z_sigma = np.min(z_sigma) - 3 * np.std(z_sigma)
        cmax_z_sigma = np.max(z_sigma) + 3 * np.std(z_sigma)
        z_sigma_normalized = normalize(z_sigma, cmin_z_sigma, cmax_z_sigma)
        z_output = z_new - np.mean(z_new)
        cmin_z = np.min(z_output) - 3 * np.std(z_output)
        cmax_z = np.max(z_output) + 3 * np.std(z_output)
        z_output_normalized = normalize(z_output, cmin_z, cmax_z)
        z_sigma_normalized = preprocess_data(z_sigma_normalized)
        z_output_normalized = preprocess_data(z_output_normalized)
        self.NoisePredictor.train_step(np.expand_dims(z_sigma_normalized, 3),
                                       np.expand_dims(z_output_normalized, 3), L=30)

    def train_updator(self, x_old, x_new, z_new):
        # Train the update network to fuse the linear prediction with the
        # normalized measurement. The state range is derived from the
        # measurement range via the observation gain H when it is known.
        cmin_z = np.min(z_new) - 3 * np.std(z_new)
        cmax_z = np.max(z_new) + 3 * np.std(z_new)
        if self.H is None:
            cmin_x = np.min(x_old) - 10 * np.std(x_old)
            cmax_x = np.max(x_old) + 10 * np.std(x_old)
        else:
            cmin_x = cmin_z / self.H
            cmax_x = cmax_z / self.H
        z_new = normalize(z_new, cmin_z, cmax_z)
        x_old = normalize(x_old, cmin_x, cmax_x)
        x_new = normalize(x_new, cmin_x, cmax_x)
        x_new = preprocess_data(x_new)
        x_old = preprocess_data(x_old)
        x_new_hat = self.predict_linear(np.reshape(x_old, (-1)))
        input_data_update = preprocess_Bayesian_data(x_new_hat, z_new)
        self.Updator.train_step(np.expand_dims(input_data_update, 0),
                                np.expand_dims(x_new, 3), L=100)

    def predict_var(self, x_old, z_new):
        # Monte-Carlo posterior variance: run the update 30 times, each time
        # with a fresh sampled noise realization added to the measurement.
        cmin_z = np.min(z_new - np.mean(z_new)) - 3 * np.std(z_new)
        cmax_z = np.max(z_new - np.mean(z_new)) + 3 * np.std(z_new)
        if self.H is None:
            cmin_x = np.min(x_old) - 10 * np.std(x_old)
            cmax_x = np.max(x_old) + 10 * np.std(x_old)
        else:
            cmin_x = cmin_z / self.H
            cmax_x = cmax_z / self.H
        x_old_normalized = normalize(x_old, cmin_x, cmax_x)
        x_old_normalized = preprocess_data(x_old_normalized)
        x_new_hat = self.predict_linear(np.reshape(x_old_normalized, (-1)))
        x_stack = []
        for ii in range(30):
            z_sigma = gen_sample()
            # z_sigma = smooth_var(z_sigma)
            cmin_z_sigma = np.min(z_sigma) - 3 * np.std(z_sigma)
            cmax_z_sigma = np.max(z_sigma) + 3 * np.std(z_sigma)
            z_sigma_normalized = normalize(z_sigma, cmin_z_sigma, cmax_z_sigma)
            z_sigma_normalized = preprocess_data(z_sigma_normalized)
            noise_hat = self.NoisePredictor.generator(
                np.expand_dims(z_sigma_normalized, 3), training=False)[0].numpy()
            noise_hat = np.reshape(noise_hat, (-1))
            noise_hat = unnormalize(noise_hat, cmin_z, cmax_z)
            noise_hat = smooth(noise_hat)
            z_new_noisy = z_new + noise_hat
            cmin_z_n = np.min(z_new_noisy) - 3 * np.std(z_new_noisy)
            cmax_z_n = np.max(z_new_noisy) + 3 * np.std(z_new_noisy)
            z_new_noisy_normalized = normalize(z_new_noisy, cmin_z_n, cmax_z_n)
            input_data_update = preprocess_Bayesian_data(x_new_hat, z_new_noisy_normalized)
            x_new_update = self.Updator.generator(
                np.expand_dims(input_data_update, 0), training=False)[0].numpy()
            x_new_update = np.reshape(x_new_update, (-1))
            x_new_update = unnormalize(x_new_update, cmin_x, cmax_x)
            x_stack.append(x_new_update)
        x_stack = np.array(x_stack)
        var = np.var(x_stack, axis=0)
        return var

    def predict_mean(self, x_old, z_new):
        # Posterior mean: one pass through the update network on the clean
        # measurement.
        cmin_z = np.min(z_new) - 3 * np.std(z_new)
        cmax_z = np.max(z_new) + 3 * np.std(z_new)
        if self.H is None:
            cmin_x = np.min(x_old) - 10 * np.std(x_old)
            cmax_x = np.max(x_old) + 10 * np.std(x_old)
        else:
            cmin_x = cmin_z / self.H
            cmax_x = cmax_z / self.H
        z_new = normalize(z_new, cmin_z, cmax_z)
        x_old = normalize(x_old, cmin_x, cmax_x)
        x_old = preprocess_data(x_old)
        x_new_hat = self.predict_linear(np.reshape(x_old, (-1)))
        input_data_update = preprocess_Bayesian_data(x_new_hat, z_new)
        x_new_update = self.Updator.generator(
            np.expand_dims(input_data_update, 0), training=False)[0].numpy()
        x_new_update = np.reshape(x_new_update, (-1))
        x_new_update = unnormalize(x_new_update, cmin_x, cmax_x)
        return x_new_update

    def gen_noise(self, z_new):
        # Draw a single noise realization from the learned noise model, scaled
        # to the measurement's range.
        cmin_z = np.min(z_new) - 3 * np.std(z_new)
        cmax_z = np.max(z_new) + 3 * np.std(z_new)
        z_sigma = gen_sample()
        cmin_z_sigma = np.min(z_sigma) - 3 * np.std(z_sigma)
        cmax_z_sigma = np.max(z_sigma) + 3 * np.std(z_sigma)
        z_sigma_normalized = normalize(z_sigma, cmin_z_sigma, cmax_z_sigma)
        z_sigma_normalized = preprocess_data(z_sigma_normalized)
        noise_hat = self.NoisePredictor.generator(
            np.expand_dims(z_sigma_normalized, 3), training=False)[0].numpy()
        noise_hat = np.reshape(noise_hat, (-1))
        noise_hat = unnormalize(noise_hat, cmin_z, cmax_z)
        noise_hat = smooth(noise_hat)
        return noise_hat
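
# Hypothetical usage sketch (not in the original source): one train/filter
# cycle for the linear-prediction variant. The synthetic sine signals and the
# H=1.0 observation gain are assumptions; real data would arrive as flattened
# 128x128 signals to match the DeepFilter configuration above.
def _demo_linear_filter():
    rng = np.random.default_rng(0)
    n = 128 * 128
    x_old = np.sin(np.linspace(0, 8 * np.pi, n))         # previous state signal
    x_new = np.sin(np.linspace(0, 8 * np.pi, n) + 0.1)   # next state signal
    z_new = x_new + 0.1 * rng.standard_normal(n)         # noisy measurement

    filt = DeepNoisyBayesianFilterLinearPredictor(H=1.0)
    filt.train_noise_predictor(z_new)         # fit the measurement-noise generator
    filt.train_updator(x_old, x_new, z_new)   # fit the Bayesian update network
    mean = filt.predict_mean(x_old, z_new)    # posterior mean of the new state
    var = filt.predict_var(x_old, z_new)      # Monte-Carlo posterior variance
    return mean, var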
class TCNBayesianFilter():
    # Bayesian filter whose transition model is a temporal convolutional
    # network over the flattened sh*sh signal (sh**2 timesteps).
    def __init__(self, H=None, loss_type="l1", sh=128):
        self.timesteps = sh ** 2
        self.Predictor = TCNPredictor(input_shape=self.timesteps,
                                      timesteps=self.timesteps, lr=1e-4)
        self.NoisePredictor = DeepFilter(input_shape=(sh, sh, 1), output_shape=(sh, sh, 1),
                                         lr=1e-4, max_filters=64, n0_filter_zise=2,
                                         n0_filters=32)
        self.Updator = DeepFilter(input_shape=(sh, sh, 2), output_shape=(sh, sh, 1),
                                  n0_filter_zise=4, lr=1e-4, max_filters=256, n0_filters=32)
        self.H = H
        self.loss_type = loss_type

    def train_predictor(self, x_old, x_new):
        # Train the TCN transition model on normalized (previous, next) pairs.
        cmin_x = np.min(x_old) - 10 * np.std(x_old)
        cmax_x = np.max(x_old) + 10 * np.std(x_old)
        x_old = normalize(x_old, cmin_x, cmax_x)
        x_new = normalize(x_new, cmin_x, cmax_x)
        x_new = np.reshape(x_new, (1, self.timesteps, 1))
        x_old = np.reshape(x_old, (1, self.timesteps, 1))
        self.Predictor.train_step(x_old, x_new, self.loss_type)

    def train_noise_predictor(self, z_new):
        # Learn to map the smoothed local variance of the measurement to a
        # zero-mean noise realization.
        z_sigma = smooth_var(z_new)
        cmin_z_sigma = np.min(z_sigma) - 3 * np.std(z_sigma)
        cmax_z_sigma = np.max(z_sigma) + 3 * np.std(z_sigma)
        z_sigma_normalized = normalize(z_sigma, cmin_z_sigma, cmax_z_sigma)
        z_output = z_new - np.mean(z_new)
        cmin_z = np.min(z_output) - 3 * np.std(z_output)
        cmax_z = np.max(z_output) + 3 * np.std(z_output)
        z_output_normalized = normalize(z_output, cmin_z, cmax_z)
        z_sigma_normalized = preprocess_data(z_sigma_normalized)
        z_output_normalized = preprocess_data(z_output_normalized)
        self.NoisePredictor.train_step(np.expand_dims(z_sigma_normalized, 3),
                                       np.expand_dims(z_output_normalized, 3), L=30)

    def train_updator(self, x_old, x_new, z_new):
        # Train the update network to fuse the TCN prediction with the
        # normalized measurement.
        cmin_z = np.min(z_new) - 3 * np.std(z_new)
        cmax_z = np.max(z_new) + 3 * np.std(z_new)
        if self.H is None:
            cmin_x = np.min(x_old) - 10 * np.std(x_old)
            cmax_x = np.max(x_old) + 10 * np.std(x_old)
        else:
            cmin_x = cmin_z / self.H
            cmax_x = cmax_z / self.H
        z_new = normalize(z_new, cmin_z, cmax_z)
        x_new = normalize(x_new, cmin_x, cmax_x)
        x_new = preprocess_data(x_new)
        x_old = normalize(x_old, cmin_x, cmax_x)
        x_new_hat = self.Predictor.predict(x_old)
        input_data_update = preprocess_Bayesian_data(x_new_hat, z_new)
        self.Updator.train_step(np.expand_dims(input_data_update, 0),
                                np.expand_dims(x_new, 3), L=100,
                                loss_type=self.loss_type)

    def predict_var(self, x_old, z_new):
        # Monte-Carlo posterior variance: run the update 30 times, each time
        # with a fresh sampled noise realization added to the measurement.
        cmin_z = np.min(z_new - np.mean(z_new)) - 3 * np.std(z_new)
        cmax_z = np.max(z_new - np.mean(z_new)) + 3 * np.std(z_new)
        if self.H is None:
            cmin_x = np.min(x_old) - 10 * np.std(x_old)
            cmax_x = np.max(x_old) + 10 * np.std(x_old)
        else:
            cmin_x = cmin_z / self.H
            cmax_x = cmax_z / self.H
        x_old = normalize(x_old, cmin_x, cmax_x)
        x_new_hat = self.Predictor.predict(x_old)
        x_stack = []
        for ii in range(30):
            z_sigma = gen_sample(v_size=len(x_new_hat))
            # z_sigma = smooth_var(z_sigma)
            cmin_z_sigma = np.min(z_sigma) - 3 * np.std(z_sigma)
            cmax_z_sigma = np.max(z_sigma) + 3 * np.std(z_sigma)
            z_sigma_normalized = normalize(z_sigma, cmin_z_sigma, cmax_z_sigma)
            z_sigma_normalized = preprocess_data(z_sigma_normalized)
            noise_hat = self.NoisePredictor.generator(
                np.expand_dims(z_sigma_normalized, 3), training=False)[0].numpy()
            noise_hat = np.reshape(noise_hat, (-1))
            noise_hat = unnormalize(noise_hat, cmin_z, cmax_z)
            noise_hat = smooth(noise_hat)
            z_new_noisy = z_new + noise_hat
            cmin_z_n = np.min(z_new_noisy) - 3 * np.std(z_new_noisy)
            cmax_z_n = np.max(z_new_noisy) + 3 * np.std(z_new_noisy)
            z_new_noisy_normalized = normalize(z_new_noisy, cmin_z_n, cmax_z_n)
            input_data_update = preprocess_Bayesian_data(x_new_hat, z_new_noisy_normalized)
            x_new_update = self.Updator.generator(
                np.expand_dims(input_data_update, 0), training=False)[0].numpy()
            x_new_update = np.reshape(x_new_update, (-1))
            x_new_update = unnormalize(x_new_update, cmin_x, cmax_x)
            x_stack.append(x_new_update)
        x_stack = np.array(x_stack)
        var = np.var(x_stack, axis=0)
        return var

    def predict_mean(self, x_old, z_new):
        # Posterior mean: one pass through the update network on the clean
        # measurement.
        cmin_z = np.min(z_new) - 3 * np.std(z_new)
        cmax_z = np.max(z_new) + 3 * np.std(z_new)
        if self.H is None:
            cmin_x = np.min(x_old) - 10 * np.std(x_old)
            cmax_x = np.max(x_old) + 10 * np.std(x_old)
        else:
            cmin_x = cmin_z / self.H
            cmax_x = cmax_z / self.H
        z_new = normalize(z_new, cmin_z, cmax_z)
        x_old = normalize(x_old, cmin_x, cmax_x)
        x_new_hat = self.Predictor.predict(x_old)
        input_data_update = preprocess_Bayesian_data(x_new_hat, z_new)
        x_new_update = self.Updator.generator(
            np.expand_dims(input_data_update, 0), training=False)[0].numpy()
        x_new_update = np.reshape(x_new_update, (-1))
        x_new_update = unnormalize(x_new_update, cmin_x, cmax_x)
        return x_new_update

    def prop(self, x_old, z_new):
        # Prior propagation only: run the TCN prediction without the update step.
        cmin_z = np.min(z_new) - 3 * np.std(z_new)
        cmax_z = np.max(z_new) + 3 * np.std(z_new)
        if self.H is None:
            cmin_x = np.min(x_old) - 10 * np.std(x_old)
            cmax_x = np.max(x_old) + 10 * np.std(x_old)
        else:
            cmin_x = cmin_z / self.H
            cmax_x = cmax_z / self.H
        x_old = normalize(x_old, cmin_x, cmax_x)
        x_new_hat = self.Predictor.predict(x_old)
        x_new_hat = np.reshape(x_new_hat, (-1))
        x_new_hat = unnormalize(x_new_hat, cmin_x, cmax_x)
        return x_new_hat

    def gen_noise(self, z_new):
        # Draw a single noise realization from the learned noise model, scaled
        # to the measurement's range.
        cmin_z = np.min(z_new) - 3 * np.std(z_new)
        cmax_z = np.max(z_new) + 3 * np.std(z_new)
        z_sigma = gen_sample()
        cmin_z_sigma = np.min(z_sigma) - 3 * np.std(z_sigma)
        cmax_z_sigma = np.max(z_sigma) + 3 * np.std(z_sigma)
        z_sigma_normalized = normalize(z_sigma, cmin_z_sigma, cmax_z_sigma)
        z_sigma_normalized = preprocess_data(z_sigma_normalized)
        noise_hat = self.NoisePredictor.generator(
            np.expand_dims(z_sigma_normalized, 3), training=False)[0].numpy()
        noise_hat = np.reshape(noise_hat, (-1))
        noise_hat = unnormalize(noise_hat, cmin_z, cmax_z)
        noise_hat = smooth(noise_hat)
        return noise_hat
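
# Hypothetical usage sketch (not in the original source): the TCN variant
# treats the flattened sh*sh signal as a sequence of sh**2 timesteps. The
# synthetic cosine data and H=1.0 gain are assumptions.
def _demo_tcn_filter(sh=128):
    rng = np.random.default_rng(1)
    n = sh ** 2
    x_old = np.cos(np.linspace(0, 4 * np.pi, n))          # previous state signal
    x_new = np.cos(np.linspace(0, 4 * np.pi, n) + 0.05)   # next state signal
    z_new = x_new + 0.1 * rng.standard_normal(n)          # noisy measurement

    filt = TCNBayesianFilter(H=1.0, loss_type="l1", sh=sh)
    filt.train_predictor(x_old, x_new)        # fit the TCN transition model
    filt.train_noise_predictor(z_new)         # fit the measurement-noise generator
    filt.train_updator(x_old, x_new, z_new)   # fit the Bayesian update network
    x_prior = filt.prop(x_old, z_new)         # prediction step only
    mean = filt.predict_mean(x_old, z_new)    # full predict-update estimate
    return x_prior, mean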
class DeepNoisyBayesianFilter():
    # Image-domain Bayesian filter: a learned transition model over a stack of
    # `hist` past frames, a learned measurement likelihood, a learned update
    # (fusion) network, and a learned noise model for variance estimation.
    def __init__(self, hist=4, image_shape=(128, 128)):
        self.Predictor = DeepFilter(input_shape=(image_shape[0], image_shape[1], hist),
                                    output_shape=(image_shape[0], image_shape[1], 1),
                                    lr=1e-5, max_filters=256)
        self.NoisePredictor = DeepFilter(input_shape=(image_shape[0], image_shape[1], 4),
                                         output_shape=(image_shape[0], image_shape[1], 1),
                                         lr=1e-5, max_filters=128)
        self.Updator = DeepFilter(input_shape=(image_shape[0], image_shape[1], 2),
                                  output_shape=(image_shape[0], image_shape[1], 1),
                                  lr=1e-5, max_filters=1024)
        self.Likelihood = DeepFilter(input_shape=(image_shape[0], image_shape[1], 1),
                                     output_shape=(image_shape[0], image_shape[1], 1),
                                     lr=2e-6, max_filters=1024)
        self.hist = hist

    def train_predictor(self, x_old, x_new):
        # x_old: (hist, H, W) stack of past frames -> reshaped to (1, H, W, hist).
        x_new = np.expand_dims(x_new, 0)
        x_new = np.expand_dims(x_new, 3)
        x_old = np.expand_dims(x_old, 3)
        x_old = np.transpose(x_old, axes=(3, 1, 2, 0))
        self.Predictor.train_step(x_old, x_new, L=30)

    def train_likelihood(self, z_new, x_new):
        # Learn the measurement-to-state mapping.
        x_new = np.expand_dims(x_new, 0)
        x_new = np.expand_dims(x_new, 3)
        z_new = np.expand_dims(z_new, 0)
        z_new = np.expand_dims(z_new, 3)
        self.Likelihood.train_step(z_new, x_new, L=20)

    def train_noise_predictor(self, x_old, x_new):
        # Learn the prediction-error model from the history stack and its
        # per-pixel variance across frames.
        x_new = np.expand_dims(x_new, 0)
        x_new = np.expand_dims(x_new, 3)
        x_old = np.expand_dims(x_old, 3)
        x_old = np.transpose(x_old, axes=(3, 1, 2, 0))
        x_new_pred = self.Predictor.generator(x_old, training=False).numpy()
        x_new_pred = normalize_v(x_new_pred)
        err = x_new - x_new_pred
        err = normalize_v(err)
        var = np.var(x_old, axis=3)
        var = np.expand_dims(var, 3)
        var = normalize_v(var)
        input_data_update = np.concatenate((x_old, var), axis=3)
        self.NoisePredictor.train_step(input_data_update, err, L=30)

    def train_updator(self, x_old, x_new, z_new):
        # Train the fusion network on (likelihood estimate, prediction) pairs.
        x_old = np.expand_dims(x_old, 3)
        x_old = np.transpose(x_old, axes=(3, 1, 2, 0))
        x_new_pred = self.Predictor.generator(x_old, training=False).numpy()
        z_new = np.expand_dims(z_new, 0)
        z_new = np.expand_dims(z_new, 3)
        x_new_hat = self.Likelihood.generator(z_new, training=False).numpy()
        x_new_hat = normalize_v(x_new_hat)
        input_data_update = np.concatenate((x_new_hat, x_new_pred), axis=3)
        x_new = np.expand_dims(x_new, 0)
        x_new = np.expand_dims(x_new, 3)
        self.Updator.train_step(input_data_update, x_new, L=20)

    def predict_var(self, x_old, z_new):
        # Monte-Carlo variance over 50 sampled prediction-error realizations.
        x_new_hat = self.predict_mean(x_old, z_new)
        # Reshape the (hist, H, W) history stack to (1, H, W, hist), matching
        # the input layout used in train_noise_predictor. (The original code
        # concatenated the raw x_old here, which is only 3-D at this point.)
        x_old = np.expand_dims(x_old, 3)
        x_old = np.transpose(x_old, axes=(3, 1, 2, 0))
        x_stack = []
        for ii in range(50):
            z_sigma = gen_sample()
            z_sigma = preprocess_data(z_sigma)
            z_sigma = normalize_v(z_sigma)
            z_sigma = np.expand_dims(z_sigma, 3)
            input_data_update = np.concatenate((x_old, z_sigma), axis=3)
            noise_hat = self.NoisePredictor.generator(
                input_data_update, training=False)[0].numpy()
            x_new_hat_noisy = x_new_hat + noise_hat
            x_stack.append(x_new_hat_noisy)
        x_stack = np.array(x_stack)
        var = np.var(x_stack, axis=0)
        return var

    def propogate(self, x_old):
        # Prediction step: propagate the history stack through the transition model.
        x_old = np.expand_dims(x_old, 3)
        x_old = np.transpose(x_old, axes=(3, 1, 2, 0))
        x_new_hat = self.Predictor.generator(x_old, training=False).numpy()
        return x_new_hat

    def estimate(self, z_new):
        # Likelihood step: estimate the state directly from the measurement.
        z_new = np.expand_dims(z_new, 0)
        z_new = np.expand_dims(z_new, 3)
        x_new_hat = self.Likelihood.generator(z_new, training=False).numpy()
        x_new_hat = normalize_v(x_new_hat)
        return x_new_hat

    def predict_mean(self, x_old, z_new):
        # Update step: fuse the propagated prediction with the likelihood estimate.
        x_new_pred = self.propogate(x_old)
        x_new_hat = self.estimate(z_new)
        z_new = np.expand_dims(z_new, 0)
        z_new = np.expand_dims(z_new, 3)
        x_new_mult = np.multiply(x_new_pred, z_new)
        # input_data_update = np.concatenate((z_new, x_new_pred), axis=3)
        input_data_update = np.concatenate((x_new_hat, x_new_pred), axis=3)
        # input_data_update = x_new_mult
        x_new_update = self.Updator.generator(input_data_update,
                                              training=False)[0].numpy()
        return x_new_update

    def save_weights(self, path):
        Predictor_path = path + '/predictor'
        # InvPredictor_path = path + '/invpredictor'
        Updator_path = path + '/updator'
        Likelihood_path = path + '/likelihood'
        self.Predictor.generator.save_weights(Predictor_path)
        # self.InvPredictor.generator.save_weights(InvPredictor_path)
        self.Updator.generator.save_weights(Updator_path)
        self.Likelihood.generator.save_weights(Likelihood_path)

    def load_weights(self, path):
        Predictor_path = path + '/predictor'
        # InvPredictor_path = path + '/invpredictor'
        Updator_path = path + '/updator'
        Likelihood_path = path + '/likelihood'
        self.Predictor.generator.load_weights(Predictor_path)
        # self.InvPredictor.generator.load_weights(InvPredictor_path)
        self.Updator.generator.load_weights(Updator_path)
        self.Likelihood.generator.load_weights(Likelihood_path)
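
# Hypothetical usage sketch (not in the original source): the image-domain
# filter consumes a stack of `hist` past frames plus one measurement frame.
# The random frames stand in for real data, and the checkpoint path is
# illustrative.
def _demo_image_filter(hist=4, shape=(128, 128)):
    rng = np.random.default_rng(2)
    x_old = rng.standard_normal((hist, *shape))        # history stack of past frames
    x_new = rng.standard_normal(shape)                 # ground-truth next frame
    z_new = x_new + 0.1 * rng.standard_normal(shape)   # measured next frame

    filt = DeepNoisyBayesianFilter(hist=hist, image_shape=shape)
    filt.train_predictor(x_old, x_new)        # transition model over the history stack
    filt.train_likelihood(z_new, x_new)       # measurement -> state likelihood model
    filt.train_updator(x_old, x_new, z_new)   # fuse prediction and likelihood
    mean = filt.predict_mean(x_old, z_new)    # fused posterior-mean frame
    filt.save_weights('./checkpoints/deep_noisy_bf')   # illustrative path
    return mean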