def predict(self, cond_x, residuals, predict_residuals=True):
    """Predict the mean response and, optionally, the residual.

    Args:
        cond_x: conditioning inputs (unnormalized).
        residuals: prior residual values passed to the residual predictor.
        predict_residuals: when True, return the residual prediction too.

    Returns:
        (u_mean, u_res) when predict_residuals is True, otherwise u_mean.
    """
    scaled_x = normalize_data(cond_x, scaling_values=self.x_scaling_values)[0]
    mean_pred = self.mean_model.predict(scaled_x).ravel()
    res_pred = self.res_predict([scaled_x, residuals, 1])[0].ravel()
    if not predict_residuals:
        return mean_pred
    return mean_pred, res_pred
def fit(self, cond_x, u):
    """Normalize the inputs, fit the model, and persist the scaling values.

    Args:
        cond_x: conditioning inputs (unnormalized).
        u: target values.
    """
    scaled_x, self.x_scaling_values = normalize_data(
        cond_x, scaling_values=self.x_scaling_values)
    self.model.fit(scaled_x,
                   u,
                   batch_size=self.config["batch_size"],
                   epochs=self.config["num_epochs"],
                   verbose=self.config["verbose"])
    # Save scaling values so later predictions can reuse the same normalization.
    self.x_scaling_values.to_csv(self.x_scaling_file, index_label="Channel")
def predict(self, cond_x, residuals=None, predict_residuals=False):
    """Predict with the sampling function at phase 0 (and phase 1 for residuals).

    Args:
        cond_x: conditioning inputs (unnormalized).
        residuals: unused; kept for interface compatibility.
        predict_residuals: when True, also return the phase-1 minus phase-0
            difference as the residual.

    Returns:
        (u_mean, u_res) when predict_residuals is True, otherwise u_mean.
    """
    scaled_x = normalize_data(cond_x, scaling_values=self.x_scaling_values)[0]
    deterministic = self.sample_predict([scaled_x, 0])[0].ravel()
    if not predict_residuals:
        return deterministic
    sampled = self.sample_predict([scaled_x, 1])[0].ravel()
    return deterministic, sampled - deterministic
def predict(self, cond_x, random_x, train_mode=1):
    """Run the prediction function and unnormalize the result.

    Args:
        cond_x: conditioning inputs (unnormalized), expanded to 3D internally.
        random_x: random input vectors passed through to the model.
        train_mode: phase flag forwarded to the backend function (default 1).

    Returns:
        1-D array of predictions; multi-column outputs are summed across
        columns, single-column outputs are flattened.
    """
    scaled_x = normalize_data(np.expand_dims(cond_x, axis=2),
                              scaling_values=self.x_scaling_values)[0]
    raw_out = self.pred_func([scaled_x[:, :, 0], random_x, train_mode])[0]
    preds = unnormalize_data(raw_out, self.y_scaling_values)[:, :, 0]
    if preds.shape[1] > 1:
        return preds.sum(axis=1)
    return preds.ravel()
def predict(self, cond_x, residuals=None, predict_residuals=True):
    """Predict the mean and (optionally) a sampled residual.

    Fix: the original rebuilt ``K.function`` on every call, which keeps
    adding ops to the TensorFlow graph — repeated predictions grow memory
    and slow down. The backend function is now built once and cached on
    the instance. The stochastic (phase-1) pass is also skipped when the
    residual is not requested.

    Args:
        cond_x: conditioning inputs (unnormalized).
        residuals: unused; kept for interface compatibility with siblings.
        predict_residuals: when True, also return the sampled residual
            (phase-1 output minus phase-0 output).

    Returns:
        (u_mean, u_res) when predict_residuals is True, otherwise u_mean.
    """
    norm_x = normalize_data(cond_x, scaling_values=self.x_scaling_values)[0]
    # Lazily build and cache the sampling function on first use.
    sample_predict = getattr(self, "_cached_sample_predict", None)
    if sample_predict is None:
        sample_predict = K.function(
            [self.model.input, K.learning_phase()], [self.model.output])
        self._cached_sample_predict = sample_predict
    u_mean = sample_predict([norm_x, 0])[0].ravel()
    if not predict_residuals:
        return u_mean
    u_total = sample_predict([norm_x, 1])[0].ravel()
    return u_mean, u_total - u_mean
def fit(self, cond_x, u):
    """Fit the model, then store residual lag-1 correlation and sd in config.

    Args:
        cond_x: conditioning inputs (unnormalized).
        u: target values.
    """
    scaled_x, self.x_scaling_values = normalize_data(
        cond_x, scaling_values=self.x_scaling_values)
    self.model.fit(scaled_x,
                   u,
                   batch_size=self.config["batch_size"],
                   epochs=self.config["num_epochs"],
                   verbose=self.config["verbose"])
    fitted_mean = self.model.predict(scaled_x).ravel()
    res = u.ravel() - fitted_mean
    # Lag-1 autocorrelation and spread of the training residuals.
    lag1_corr = np.corrcoef(res[1:], res[:-1])[0, 1]
    self.config["corr"] = float(lag1_corr)
    self.config["res_sd"] = float(np.std(res))
def fit(self, cond_x, u):
    """Fit the mean model on the first split and the residual model on
    lag-1 residuals from the remainder.

    Args:
        cond_x: conditioning inputs (unnormalized).
        u: target values.
    """
    split = int(cond_x.shape[0] * self.config["val_split"])
    scaled_x, self.x_scaling_values = normalize_data(
        cond_x, scaling_values=self.x_scaling_values)
    self.x_scaling_values.to_csv(self.x_scaling_file, index_label="Channel")
    batch = self.config["batch_size"]
    epochs = self.config["num_epochs"]
    verbose = self.config["verbose"]
    self.mean_model.fit(scaled_x[:split], u[:split],
                        batch_size=batch, epochs=epochs, verbose=verbose)
    held_out_mean = self.mean_model.predict(scaled_x[split:]).ravel()
    res = u[split:] - held_out_mean
    # Each residual is predicted from the same-step inputs and the
    # previous step's residual.
    self.res_model.fit([scaled_x[split:-1], res[:-1].reshape(-1, 1)],
                       res[1:],
                       batch_size=batch, epochs=epochs, verbose=verbose)
def predict_batch(self, cond_x, random_x, batch_size=8, stochastic=0):
    """Run predictions in fixed-size batches and unnormalize the output.

    Args:
        cond_x: conditioning inputs (unnormalized).
        random_x: random input vectors, indexed in step with cond_x.
        batch_size: number of samples per backend-function call.
        stochastic: phase flag forwarded to the backend function.

    Returns:
        1-D array of predictions; multi-column outputs are summed across
        columns, single-column outputs are flattened.
    """
    scaled_x = normalize_data(np.expand_dims(cond_x, axis=2),
                              scaling_values=self.x_scaling_values)[0]
    n_samples = scaled_x.shape[0]
    starts = np.arange(0, n_samples, batch_size, dtype=np.int32)
    starts = np.append(starts, n_samples)
    # NOTE(review): `.shape[1].value` is TF1-style; assumes a static
    # output dimension — confirm against the model-building code.
    out = np.zeros((n_samples, self.model.output.shape[1].value))
    print("Start batches", cond_x.shape[0])
    for i, start in enumerate(starts[:-1]):
        stop = starts[i + 1]
        raw = self.pred_func(
            [scaled_x[start:stop, :, 0], random_x[start:stop], stochastic])[0]
        out[start:stop] = unnormalize_data(raw, self.y_scaling_values)[:, :, 0]
    print("End batches", cond_x.shape[0])
    if out.shape[1] > 1:
        return out.sum(axis=1)
    return out.ravel()
def train_lorenz_gan(config, combined_data, combined_time_series):
    """
    Train a GAN on Lorenz data and fit a residual random updater.

    Args:
        config: dict of experiment settings; the "gan" section supplies
            structure, hyperparameters, paths, column names, and the GAN
            index used in output filenames.
        combined_data: DataFrame of training samples containing the
            conditioning-input and output columns named in config["gan"].
        combined_time_series: DataFrame of time-ordered samples used to
            compute generator residuals for the random updater.

    Returns:
        None. Side effects: writes X/Y scaling-value CSVs, trains the GAN
        (train_gan handles its own saving), and writes the fitted random
        updater pickle.
    """
    # Limit TensorFlow to the configured number of intra-op threads.
    if "num_procs" in config.keys():
        num_procs = config["num_procs"]
    else:
        num_procs = 1
    sess = tf.Session(
        config=tf.ConfigProto(intra_op_parallelism_threads=num_procs,
                              inter_op_parallelism_threads=1))
    K.set_session(sess)
    x_cols = config["gan"]["cond_inputs"]
    y_cols = config["gan"]["output_cols"]
    X_series = combined_data[x_cols].values
    Y_series = combined_data[y_cols].values
    X_norm, X_scaling_values = normalize_data(X_series)
    # "mean" output mode collapses the output columns to their row mean
    # before normalizing; otherwise all output columns are kept.
    if config["gan"]["output"].lower() == "mean":
        Y_norm, Y_scaling_values = normalize_data(
            np.expand_dims(Y_series.mean(axis=1), axis=-1))
    else:
        Y_norm, Y_scaling_values = normalize_data(Y_series)
    # Persist scaling values so later predictions can be unnormalized.
    X_scaling_values.to_csv(join(
        config["gan"]["gan_path"],
        "gan_X_scaling_values_{0:04d}.csv".format(config["gan"]["gan_index"])),
        index_label="Channel")
    Y_scaling_values.to_csv(join(
        config["gan"]["gan_path"],
        "gan_Y_scaling_values_{0:04d}.csv".format(config["gan"]["gan_index"])),
        index_label="Channel")
    # Samples that do not fill a complete batch are trimmed off below.
    trim = X_norm.shape[0] % config["gan"]["batch_size"]
    # Build the generator/discriminator pair for the requested structure;
    # the random-vector length depends on the generator architecture.
    if config["gan"]["structure"] == "dense":
        gen_model = generator_dense(**config["gan"]["generator"])
        disc_model = discriminator_dense(**config["gan"]["discriminator"])
        rand_vec_length = config["gan"]["generator"]["num_random_inputs"]
    elif config["gan"]["structure"] == "specified_random":
        gen_model = generator_dense_stoch(**config["gan"]["generator"])
        disc_model = discriminator_dense(**config["gan"]["discriminator"])
        rand_vec_length = config["gan"]["generator"]["num_random_inputs"] + \
            2 * config["gan"]["generator"]["num_hidden_neurons"] + \
            config["gan"]["generator"]["num_cond_inputs"]
    elif config["gan"]["structure"] == "auto_stoch":
        gen_model = generator_dense_auto_stoch(**config["gan"]["generator"])
        disc_model = discriminator_dense(**config["gan"]["discriminator"])
        rand_vec_length = config["gan"]["generator"]["num_random_inputs"] + \
            2 * config["gan"]["generator"]["num_hidden_neurons"] + \
            config["gan"]["generator"]["num_cond_inputs"]
    elif config["gan"]["structure"] == "concrete":
        gen_model = generator_conv_concrete(**config["gan"]["generator"])
        disc_model = discriminator_conv_concrete(
            **config["gan"]["discriminator"])
        rand_vec_length = config["gan"]["generator"]["num_random_inputs"]
    else:
        # Any unrecognized structure falls back to the convolutional pair.
        gen_model = generator_conv(**config["gan"]["generator"])
        disc_model = discriminator_conv(**config["gan"]["discriminator"])
        rand_vec_length = config["gan"]["generator"]["num_random_inputs"]
    optimizer = Adam(lr=config["gan"]["learning_rate"], beta_1=0.5, beta_2=0.9)
    loss = config["gan"]["loss"]
    gen_disc = initialize_gan(gen_model, disc_model, loss, optimizer,
                              config["gan"]["metrics"])
    if trim > 0:
        Y_norm = Y_norm[:-trim]
        X_norm = X_norm[:-trim]
    train_gan(np.expand_dims(Y_norm, -1), X_norm, gen_model, disc_model,
              gen_disc, config["gan"]["batch_size"], rand_vec_length,
              config["gan"]["gan_path"], config["gan"]["gan_index"],
              config["gan"]["num_epochs"], config["gan"]["metrics"])
    # Run the trained generator over the time series with a zero random
    # vector and phase flag 0; the resulting residuals against the true
    # outputs are used to fit the random updater.
    gen_pred_func = predict_stochastic(gen_model)
    x_ts_norm, _ = normalize_data(combined_time_series[x_cols].values,
                                  scaling_values=X_scaling_values)
    gen_ts_pred_norm = gen_pred_func(
        [x_ts_norm, np.zeros((x_ts_norm.shape[0], rand_vec_length)), 0])[0]
    print(gen_ts_pred_norm.shape)
    gen_ts_preds = unnormalize_data(gen_ts_pred_norm,
                                    scaling_values=Y_scaling_values)
    gen_ts_residuals = combined_time_series[y_cols].values.ravel(
    ) - gen_ts_preds.ravel()
    train_random_updater(
        gen_ts_residuals,
        config["random_updater"]["out_file"].replace(
            ".pkl", "_{0:04d}.pkl".format(config["gan"]["gan_index"])))
def predict_mean(self, cond_x):
    """Return the flattened phase-0 prediction for the given inputs.

    Args:
        cond_x: conditioning inputs (unnormalized).

    Returns:
        1-D array of mean predictions.
    """
    scaled_x = normalize_data(cond_x, scaling_values=self.x_scaling_values)[0]
    return self.sample_predict([scaled_x, 0])[0].ravel()