def simulate(plot=True):
    """Train a plain autoencoder on [0, 1]-normalised curve data and return
    the reconstruction error on the first test set.

    :param plot: when True, render the latent space and several
                 curve-comparison plots.
    :return: whatever ``reconstruction_error`` yields for the raw first test
             set vs. the rescaled decoder output.
    """
    plotting = Plotting()
    preprocess = PreprocessData()
    preprocess.enable_normalisation_scaler = True
    preprocess.feature_range = [0, 1]

    # 1. fetch raw and normalised datasets plus bookkeeping metadata
    (sets_training, sets_test, sets_training_scaled, sets_test_scaled,
     training_dataset_names, test_dataset_names, maturities) = preprocess.get_data()

    print("sets_training_scaled.shape", sets_training_scaled[0].shape)

    ae_params = {
        'input_dim': sets_training_scaled[0].shape[1],  # 56
        'latent_dim': 2,
        'hidden_layers': (56, 40, 28, 12, 4,),
        'leaky_relu': 0.1,
        'loss': 'mse',
        'last_activation': 'linear',
        'batch_size': 20,
        'epochs': 100,
        'steps_per_epoch': 500
    }
    # Hash the (sorted) hyperparameters so the persisted model file name is
    # stable for identical configurations.
    ae_params_hash = hashlib.md5(
        json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()

    autoencoder = Autoencoder(ae_params)
    autoencoder.train(sets_training_scaled, sets_test_scaled)
    autoencoder.save_model("ae_" + ae_params_hash)

    # 2: project every scaled dataset into the latent space
    sets_encoded_training = [autoencoder.encode(scaled) for scaled in sets_training_scaled]
    sets_encoded_test = [autoencoder.encode(scaled) for scaled in sets_test_scaled]

    # 6: reconstruct the first test set from its latent representation
    decoded_test = autoencoder.decode(sets_encoded_test[0])

    # 7: undo the min-max scaling (first test dataset only)
    simulated = preprocess.rescale_data(decoded_test,
                                        dataset_name=test_dataset_names[0])

    error = reconstruction_error(np.array(sets_test[0]), simulated)

    if plot:
        plotting.plot_2d(sets_encoded_test[0],
                         "test_feature_normalised_encoded_autoencoder_on_",
                         save=True)
        plotting.plot_some_curves("normalised_compare_ae_before_rescale",
                                  sets_test_scaled[0], decoded_test,
                                  [25, 50, 75, 815], maturities)
        plotting.plot_some_curves("normalised_compare_ae", sets_test[0],
                                  simulated, [25, 50, 75, 815], maturities)
        plotting.plot_some_curves("normalised_compare_ae", sets_test[0],
                                  sets_test_scaled[0],
                                  [25, 50, 75, 815, 100, 600, 720, 740],
                                  maturities, plot_separate=True)

    return error
def simulate():
    """Train a variational autoencoder on [-1, 1]-normalised curve data,
    reconstruct the first test set, and plot latent space and curves.

    Side effects: trains and saves a model ("vae_" + params hash), prints the
    reconstruction error, and writes several plots. Returns ``None``.
    """
    plotting = Plotting()
    preprocess_normalisation = PreprocessData()
    preprocess_normalisation.enable_normalisation_scaler = True
    preprocess_normalisation.feature_range = [-1, 1]
    # preprocess_normalisation.enable_ignore_price = True

    # 1. get data and apply normalisation
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess_normalisation.get_data(
    )

    all_training_scaled = np.vstack(sets_training_scaled)

    ae_params = {
        'input_dim': sets_training_scaled[0].shape[1],  # 56
        'latent_dim': 3,
        'hidden_layers': (56, 40, 28, 12, 4,),
        'leaky_relu': 0.1,
        'last_activation': 'linear',  # sigmoid or linear
        # FIX: was 'mean_square_error', which is not a valid Keras loss
        # identifier (valid: 'mean_squared_error' / 'mse'); this also matches
        # the VAE configuration used elsewhere in this project. Note the
        # params hash (and hence the saved-model name) changes as a result.
        'loss': 'mean_squared_error',  # binary_crossentropy or mean_squared_error
        'epsilon_std': 1.0,
        'batch_size': 20,
        'epochs': 100,
        'steps_per_epoch': 500
    }
    # Hash of the sorted hyperparameters names the persisted model file.
    ae_params_hash = hashlib.md5(
        json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()

    # 2. train/load variational autoencoder
    vae = VariationalAutoencoder(ae_params)
    vae.train(all_training_scaled, sets_test_scaled)
    vae.save_model("vae_" + ae_params_hash)
    # vae.load_model("vae_" + ae_params_hash)

    # 3: encode data using autoencoder
    sets_encoded_training = []
    for set_training_scaled in sets_training_scaled:
        sets_encoded_training.append(vae.encode(set_training_scaled))

    sets_encoded_test = []
    for set_test_scaled in sets_test_scaled:
        sets_encoded_test.append(vae.encode(set_test_scaled))

    # 4: decode using vae
    decoded_data = vae.decode(sets_encoded_test[0])

    # 7: undo minimax, for now only the first simulation
    simulated = preprocess_normalisation.rescale_data(
        decoded_data, dataset_name=test_dataset_names[0])

    # reconstruction error of the rescaled output vs. the raw test data
    reconstruction_error(np.array(sets_test[0]), simulated)

    # plot latent space
    plotting.plot_2d(sets_encoded_test[0],
                     "test_feature_normalised_encoded_vae_on_",
                     save=True)
    plotting.plot_space(maturities, vae, "variational_grid",
                        latent_dim=sets_encoded_test[0].shape[1])

    # plot scaled results
    plotting.plot_some_curves("test_feature_normalised_compare_vae_scaled",
                              sets_test_scaled[0], decoded_data,
                              [25, 50, 75, 815], maturities)
    plotting.plot_some_curves("test_feature_normalised_compare_vae",
                              sets_test[0], simulated, [25, 50, 75, 815],
                              maturities)
def simulate(plot=True):
    """Train a windowed autoencoder on normalised curves chunked into
    fixed-size windows, then score the smoothed reconstruction with SMAPE.

    :param plot: when True, plot the smoothed reconstruction against the raw
                 first test set.
    """
    plotting = Plotting()
    preprocess = PreprocessData()
    preprocess.enable_normalisation_scaler = True
    preprocess.feature_range = [0, 1]
    window_size = 20

    # 1. fetch datasets, chunked into windows, with normalisation applied
    (sets_training, sets_test, sets_training_scaled, sets_test_scaled,
     training_dataset_names, test_dataset_names, maturities) = preprocess.get_data(
        chunks_of=window_size)

    print("sets_training_scaled.shape", sets_training_scaled[0].shape)

    ae_params = {
        'input_dim': (window_size, sets_training_scaled[0].shape[1],),  # 10 x 56
        'latent_dim': (2, 56,),
        'hidden_layers': (12 * 56, 4 * 56,),
        'leaky_relu': 0.1,
        'loss': 'mse',
        'last_activation': 'linear',
        'batch_size': 20,
        'epochs': 100,
        'steps_per_epoch': 500,
    }
    # Deterministic fingerprint of the configuration for the model file name.
    ae_params_hash = hashlib.md5(
        json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()

    autoencoder = AutoencoderWindows(ae_params)
    print("sets_training_scaled", sets_training_scaled[0].shape)
    autoencoder.train(sets_training_scaled, sets_test_scaled)
    autoencoder.save_model("ae_" + ae_params_hash)

    # 2: encode every scaled dataset into the latent space
    sets_encoded_training = [autoencoder.encode(scaled) for scaled in sets_training_scaled]
    sets_encoded_test = [autoencoder.encode(scaled) for scaled in sets_test_scaled]

    print("sets_encoded_training", len(sets_encoded_training),
          sets_encoded_training[0].shape)
    print("sets_encoded_test", sets_encoded_test[0].shape)

    # 6: reconstruct the first test set
    decoded_test = autoencoder.decode(sets_encoded_test[0])
    print("decoded_test", decoded_test.shape)

    # 7: rescale with curve smoothing enabled (first test dataset only)
    preprocess.enable_curve_smoothing = True
    simulated_smooth = preprocess.rescale_data(
        decoded_test, dataset_name=test_dataset_names[0])

    # per-curve SMAPE of the smoothed reconstruction vs. the raw test data
    smape_result_smooth = smape(simulated_smooth, np.array(sets_test[0]),
                                over_curves=True)
    print(np.mean(smape_result_smooth), np.var(smape_result_smooth))

    if plot:
        plotting.plot_some_curves("normalised_compare_ae", sets_test[0],
                                  simulated_smooth, [25, 50, 75, 815],
                                  maturities)
def simulate(latent_dim=2,
             plot=False,
             preprocess_type=None,
             model_type=None,
             force_training=True):
    """Train (or load) one of several autoencoder variants, reconstruct the
    first test set, and return the per-curve SMAPE of the smoothed output.

    Parameters
    ----------
    latent_dim : int
        Latent-space size for the AAE / VAE / AE / PCA branches (the
        AE_WINDOWS branch hard-codes its own latent shape).
    plot : bool
        When True (and not AE_WINDOWS), plot latent space and curves.
    preprocess_type : PreprocessType enum or None
        NOTE(review): every branch except AE_WINDOWS reads
        ``preprocess_type.value``, so passing None with those model types
        raises AttributeError — confirm callers always supply it.
    model_type : AEModel enum
        Selects AAE / VAE / AE / PCA; any other value falls through to the
        windowed autoencoder branch.
    force_training : bool
        True -> always retrain; False -> load a saved model if available.

    Returns
    -------
    Per-curve SMAPE of the smoothed reconstruction vs. the raw test set.
    """
    plotting = Plotting()
    preprocess = PreprocessData(preprocess_type)

    # Only the windowed model consumes data chunked into windows.
    window_size = None
    if model_type is AEModel.AE_WINDOWS:
        window_size = 10

    # 1. get data and apply normalisation
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data(
        chunks_of=window_size)
    all_training_scaled = np.vstack(sets_training_scaled)

    # Each branch builds its own hyperparameter dict; the md5 of the sorted
    # dict is used as a stable identifier for the persisted model file.
    if model_type is AEModel.AAE:
        ae_params = {
            'preprocess_type': preprocess_type.value,  # only to make preprocess_type part of the hash
            'input_dim': sets_training_scaled[0].shape[1],  # 56
            'latent_dim': latent_dim,
            'hidden_layers': (56, 40, 28, 12, 4,),
            'hidden_layers_discriminator': (2, 2,),
            'leaky_relu': 0.1,
            'last_activation': 'linear',
            'last_activation_discriminator': 'sigmoid',
            'loss_generator': 'mean_squared_error',
            'loss_discriminator': 'binary_crossentropy',
            'batch_size': 20,
            'epochs': 20000
        }
        ae_params_hash = hashlib.md5(
            json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        # 2. train/load adversarial autoencoder
        autoencoder = AdversarialAutoencoder(ae_params, plot=False)
    elif model_type is AEModel.VAE:
        ae_params = {
            'preprocess_type': preprocess_type.value,  # only to make preprocess_type part of the hash
            'input_dim': sets_training_scaled[0].shape[1],  # 56
            'latent_dim': latent_dim,
            'hidden_layers': (56, 40, 28, 12, 4,),
            'leaky_relu': 0.1,
            'last_activation': 'linear',  # sigmoid or linear
            'loss': 'mean_squared_error',  # binary_crossentropy or mean_square_error
            'epsilon_std': 1.0,
            'batch_size': 20,
            'epochs': 100,
            'steps_per_epoch': 500
        }
        ae_params_hash = hashlib.md5(
            json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        # 2. train/load variational autoencoder
        autoencoder = VariationalAutoencoder(ae_params, plot=False)
    elif model_type is AEModel.AE:
        ae_params = {
            'preprocess_type': preprocess_type.value,  # only to make preprocess_type part of the hash
            'input_dim': sets_training_scaled[0].shape[1],  # 56
            'latent_dim': latent_dim,
            'hidden_layers': (56, 40, 28, 12, 4,),
            'leaky_relu': 0.1,
            'loss': 'mse',
            'last_activation': 'linear',
            'batch_size': 20,
            'epochs': 100,
            'steps_per_epoch': 500
        }
        ae_params_hash = hashlib.md5(
            json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = Autoencoder(ae_params, plot=False)
    elif model_type is AEModel.PCA:
        ae_params = {
            'preprocess_type': preprocess_type.value,  # only to make preprocess_type part of the hash
            'latent_dim': latent_dim
        }
        ae_params_hash = hashlib.md5(
            json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = PCAModel(ae_params, plot=False)
    else:  # model_type is AEModel.AE_WINDOWS:
        # NOTE(review): unlike the other branches this dict omits
        # 'preprocess_type', so different preprocess types share one hash here.
        ae_params = {
            'input_dim': (window_size, sets_training_scaled[0].shape[1],),  # 10 x 56
            'latent_dim': (2, 56,),
            'hidden_layers': (12 * 56, 4 * 56,),
            'leaky_relu': 0.1,
            'loss': 'mse',
            'last_activation': 'linear',
            'batch_size': 20,
            'epochs': 10,
            'steps_per_epoch': 500,
        }
        ae_params_hash = hashlib.md5(
            json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = AutoencoderWindows(ae_params, plot=False)

    # NOTE(review): all branches train on the vstacked training data —
    # confirm this is the intended input for the windowed model too.
    if force_training:
        autoencoder.train(all_training_scaled, sets_test_scaled,
                          "ae_" + ae_params_hash)
    else:
        autoencoder.load_else_train(all_training_scaled, sets_test_scaled,
                                    "ae_" + ae_params_hash)

    # 2: encode data using autoencoder (lists of datasets in, lists out)
    sets_encoded_training = autoencoder.encode(sets_training_scaled)
    sets_encoded_test = autoencoder.encode(sets_test_scaled)

    # 6: decode using autoencoder
    decoded_test = autoencoder.decode(sets_encoded_test[0])

    # 7: undo scaling, once without and once with curve smoothing
    simulated = preprocess.rescale_data(decoded_test,
                                        dataset_name=test_dataset_names[0])
    preprocess.enable_curve_smoothing = True
    simulated_smooth = preprocess.rescale_data(
        decoded_test, dataset_name=test_dataset_names[0])

    # per-curve SMAPE against the raw (unscaled) test data
    smape_result = smape(simulated, np.array(sets_test[0]), over_curves=True)
    smape_result_smooth = smape(simulated_smooth, np.array(sets_test[0]),
                                over_curves=True)

    print(np.mean(smape_result_smooth))

    if plot and model_type is not AEModel.AE_WINDOWS:
        plotting.plot_2d(sets_encoded_test[0],
                         preprocess_type.name + "_" + model_type.name + "_latent_space",
                         sets_test_scaled[0].index.values,
                         save=True)
        plotting.plot_some_curves(
            preprocess_type.name + "_" + model_type.name + "_in_vs_out",
            sets_test[0], simulated, [25, 50, 75, 815], maturities)
        preprocess.enable_curve_smoothing = False
        # Latent-grid plots exist only for models exposing a generator/decoder.
        if model_type is AEModel.VAE:
            plotting.plot_grid_2dim(maturities, autoencoder.generator_model,
                                    preprocess_type.name + "_" + model_type.name + "_latent_grid",
                                    preprocess, test_dataset_names[0], n=6)
        elif model_type is AEModel.AAE:
            plotting.plot_grid_2dim(maturities, autoencoder.decoder,
                                    preprocess_type.name + "_" + model_type.name + "_latent_grid",
                                    preprocess, test_dataset_names[0], n=6)

    return smape_result_smooth
class Analysis():
    """Collection of exploratory plots over the project's curve datasets.

    Each method loads data through ``PreprocessData`` with a particular
    scaling configuration and renders comparison plots via ``Plotting``.
    """

    def __init__(self):
        # Shared helpers; the instance-level preprocessor uses log returns.
        self.preprocess_data = PreprocessData()
        self.plotting = Plotting()
        self.config = Config()

        # self.preprocess_data.enable_min_max_scaler = True
        self.preprocess_data.enable_log_returns = True

        self.sets_training, self.sets_test, self.sets_training_scaled, self.sets_test_scaled, \
        self.training_dataset_names, self.test_dataset_names, self.maturities = self.preprocess_data.get_data()

        # NOTE(review): variable name suggests the first test set is WTI NYMEX
        # — confirm against the dataset ordering in PreprocessData.
        wti_nymex = self.sets_test[0]
        time = wti_nymex.axes[0].tolist()  # NOTE(review): unused local

        # Short end = first tenor column of the first test set.
        self.wti_nymex_short_end = wti_nymex.iloc[:, 0]
        self.data_scaled = self.sets_test_scaled[0][0]

    def normalisation_over_tenors(self):
        # Plot raw vs. tenor-normalised curves for the first test set.
        preprocess = PreprocessData(PreprocessType.NORMALISATION_OVER_TENORS)
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data(
        )

        print("sets_test[0].shape", sets_test[0].shape, sets_test_scaled[0].shape)

        self.plotting.plot_some_curves(
            "normalisation_over_tenors", sets_test[0], sets_test_scaled[0],
            [25, 50, 75, 815], maturities,
            plot_separate=True)  # old: [25, 50, 75, 100, 600, 720, 740, 815]

    def standardisation_over_tenors(self):
        # Plot raw vs. tenor-standardised curves for the first test set.
        preprocess = PreprocessData(PreprocessType.STANDARDISATION_OVER_TENORS)
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data(
        )

        self.plotting.plot_some_curves(
            "standardisation_over_tenors", sets_test[0], sets_test_scaled[0],
            [25, 50, 75, 815], maturities,
            plot_separate=True)  # old: [25, 50, 75, 100, 600, 720, 740, 815]

    def logreturns_over_tenors(self):
        # Plot raw curves vs. their log returns, in 2D and 3D.
        preprocess = PreprocessData(PreprocessType.LOG_RETURNS_OVER_TENORS)
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data(
        )

        self.plotting.plot_some_curves(
            "logreturns_over_curves", sets_test[0], sets_test_scaled[0],
            [25, 50, 75, 815], maturities,
            plot_separate=True)  # old: [25, 50, 75, 100, 600, 720, 740, 815]

        self.plotting.plot_3d("logreturns_over_curves_3d", sets_test_scaled[0], )

    def normalisation_over_curves(self):
        # Plot raw vs. per-curve normalised data (price level ignored).
        preprocess = PreprocessData()
        preprocess.enable_normalisation_scaler = True
        preprocess.enable_ignore_price = True
        preprocess.feature_range = [0, 1]
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data(
        )

        self.plotting.plot_some_curves(
            "normalisation_over_curves", sets_test[0], sets_test_scaled[0],
            [25, 50, 75, 815], maturities,
            plot_separate=True)  # old: [25, 50, 75, 100, 600, 720, 740, 815]

    def standardisation_over_curves(self):
        # Placeholder: not implemented yet.
        print("todo standardisation_over_curves")

    def logreturns_over_curves(self):
        # Placeholder: not implemented yet.
        print("todo logreturns_over_curves")

    def all_log_returns(self):
        # Render (without saving) the log-return time series of every
        # training dataset.
        preprocess_data = PreprocessData()
        plotting = Plotting()
        preprocess_data.enable_log_returns = True
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess_data.get_data(
        )
        for i, set_training_scaled in enumerate(sets_training_scaled):
            print("set_training_scaled.shape", set_training_scaled.shape, i)
            plotting.plot_2d(set_training_scaled,
                             "/time_series/" + training_dataset_names[i],
                             timeseries=True, save=False, title=True)

    def all_normalised_data(self):
        # Save time-series plots of every normalised training and test set.
        preprocess_data = PreprocessData()
        preprocess_data.enable_normalisation_scaler = True
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess_data.get_data(
        )
        for i, set_training_scaled in enumerate(sets_training_scaled):
            self.plotting.plot_2d(set_training_scaled,
                                  "/time_series/" + training_dataset_names[i],
                                  timeseries=True, save=True, title=True)
        for i, set_test_scaled in enumerate(sets_test_scaled):
            self.plotting.plot_2d(set_test_scaled,
                                  "/time_series/" + test_dataset_names[i],
                                  timeseries=True, save=True, title=True)

    def all_data(self, show_title=False):
        """Print date ranges / value extremes for every dataset (unextended)
        and save plots for the test sets.

        :param show_title: forwarded to the plotting helpers as ``title`` /
                           ``show_title``.
        """
        preprocess_data = PreprocessData(extend_data=False)
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess_data.get_data(
        )

        print("maturities", maturities)

        for i, set_training in enumerate(sets_training):
            # NOTE(review): prints self.training_dataset_names (from the
            # instance's preprocessor) while iterating this method's local
            # sets_training — confirm the orderings match.
            print(self.training_dataset_names[i])
            print(set_training.index[0], set_training.index[-1],
                  round(np.min(set_training.min()), 2),
                  round(np.max(set_training.max()), 2))
            # self.plotting.plot_2d(set_training, "/time_series/" + training_dataset_names[i], timeseries=True,
            #                       save=True, title=show_title)
            # self.plotting.plot_3d("/time_series/" + training_dataset_names[i] + "_3d", set_training, show_title=show_title)
            cov_log_returns = cov_log_returns_over_tenors(set_training)
            # self.plotting.plot_3d_cov("/time_series/" + training_dataset_names[i] + "_cov", cov_log_returns, maturities=maturities, show_title=show_title)
            print("\n")

        for i, set_test in enumerate(sets_test):
            print(self.test_dataset_names[i])
            print(set_test.index[0], set_test.index[-1],
                  round(np.min(set_test.min()), 2),
                  round(np.max(set_test.max()), 2))
            self.plotting.plot_2d(set_test,
                                  "/time_series/" + test_dataset_names[i],
                                  timeseries=True, save=True, title=show_title)
            self.plotting.plot_3d("/time_series/" + test_dataset_names[i] + "_3d",
                                  set_test, show_title=show_title)
            cov_log_returns = cov_log_returns_over_tenors(set_test)
            # self.plotting.plot_3d_cov("/time_series/" + test_dataset_names[i] + "_cov", cov_log_returns, maturities=maturities, show_title=show_title)
            print("\n")
def simulate():
    """Load a previously trained autoencoder, reconstruct the first test set,
    and report reconstruction errors before and after curve smoothing.

    Side effects only (plots and printed errors); returns ``None``.
    """
    plotting = Plotting()
    preprocess_normalisation = PreprocessData()
    preprocess_normalisation.enable_normalisation_scaler = True
    preprocess_normalisation.feature_range = [0, 1]

    # 1. get data and apply normalisation
    (sets_training, sets_test, sets_training_scaled, sets_test_scaled,
     training_dataset_names, test_dataset_names, maturities) = preprocess_normalisation.get_data()

    # Stacked views kept around for ad-hoc diagnostics.
    all_stacked = np.vstack((np.vstack(sets_training), np.vstack(sets_test)))
    all_stacked_scaled = np.vstack(
        (np.vstack(sets_training_scaled), np.vstack(sets_test_scaled)))
    all_training_scaled = np.vstack(sets_training_scaled)

    ae_params = {
        'input_dim': sets_training_scaled[0].shape[1],  # 56
        'latent_dim': 2,
        'hidden_layers': (56, 40, 28, 12, 4, 2),
        'leaky_relu': 0.1,
        'loss': 'mse',
        'last_activation': 'linear',
        'batch_size': 20,
        'epochs': 100,
        'steps_per_epoch': 500
    }
    # The params hash must match the one used when the model was saved.
    ae_params_hash = hashlib.md5(
        json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()

    autoencoder = Autoencoder(ae_params)
    autoencoder.load_model("ae_" + ae_params_hash)

    # 2: encode every scaled dataset with the loaded autoencoder
    sets_encoded_training = [autoencoder.encode(scaled) for scaled in sets_training_scaled]
    sets_encoded_test = [autoencoder.encode(scaled) for scaled in sets_test_scaled]

    plotting.plot_2d(sets_encoded_test[0],
                     "test_feature_normalised_encoded_autoencoder_on_",
                     save=True)

    # 6: decode the first test set
    decoded_test = autoencoder.decode(sets_encoded_test[0])

    # 7: undo minimax, for now only the first simulation
    simulated = preprocess_normalisation.rescale_data(
        decoded_test, dataset_name=test_dataset_names[0])

    plotting.plot_some_curves(
        "test_feature_normalised_compare_autoencoder_before_rescale",
        sets_test_scaled[0], decoded_test, [25, 50, 75, 815],
        maturities)  # old: [25, 50, 75, 100, 600, 720, 740, 815]
    plotting.plot_some_curves(
        "test_feature_normalised_compare_autoencoder", sets_test[0], simulated,
        [25, 50, 75, 815], maturities)  # old: [25, 50, 75, 100, 600, 720, 740, 815]

    print("reconstruction error BEFORE smoothing:")
    reconstruction_error(np.array(sets_test[0]), simulated)

    # Rescale again with smoothing on; the plot below intentionally reuses the
    # same file name, replacing the unsmoothed version.
    preprocess_normalisation.enable_curve_smoothing = True
    simulated = preprocess_normalisation.rescale_data(
        decoded_test, dataset_name=test_dataset_names[0])
    plotting.plot_some_curves(
        "test_feature_normalised_compare_autoencoder", sets_test[0], simulated,
        [25, 50, 75, 815], maturities)  # old: [25, 50, 75, 100, 600, 720, 740, 815]

    print("reconstruction error AFTER smoothing:")
    reconstruction_error(np.array(sets_test[0]), simulated)