def all_log_returns(self):
    preprocess_data = PreprocessData()
    plotting = Plotting()
    preprocess_data.enable_log_returns = True
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
        training_dataset_names, test_dataset_names, maturities = preprocess_data.get_data()

    for i, set_training_scaled in enumerate(sets_training_scaled):
        print("set_training_scaled.shape", set_training_scaled.shape, i)
        plotting.plot_2d(set_training_scaled,
                         "/time_series/" + training_dataset_names[i],
                         timeseries=True, save=False, title=True)
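# Hedged sketch, not from the original codebase: with enable_log_returns set,
# PreprocessData presumably maps each price series P_t to its log returns
# log(P_t / P_{t-1}). A minimal stand-alone equivalent for a pandas DataFrame
# of positive prices (the function name is an assumption):
def log_returns_sketch(prices):
    """Log returns of each column; the first row is dropped by the shift."""
    import numpy as np
    return np.log(prices / prices.shift(1)).dropna()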
def simulate(self):
    plotting = Plotting()
    old_rates = self.model.rates
    plotting.plot_3d("AMModel_input_data", old_rates)
    plotting.plot_2d(old_rates[-1, :], "AMModel_input_data_first")

    tenors = self.model.tenors
    obs_time = self.model.obs_time
    print("tenors", tenors)
    print("obs_time", obs_time)
    print("old_rates", old_rates)

    self.model.make_data()
    rates = self.model.rates
    print("new rates", rates)
    plotting.plot_3d("AMModel_test", rates)  # maturities=tenors, time=obs_time
    print("made data")
def simulate(plot=True):
    plotting = Plotting()
    preprocess = PreprocessData()
    preprocess.enable_normalisation_scaler = True
    preprocess.feature_range = [0, 1]

    # 1: get data and apply normalisation
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
        training_dataset_names, test_dataset_names, maturities = preprocess.get_data()
    print("sets_training_scaled.shape", sets_training_scaled[0].shape)
    # plotting.plot_2d(sets_training_scaled[0][:, 0], "sets_training_scaled[0][:, 0]", save=False)
    # plotting.plot_2d(sets_test_scaled[0][:, 0], "test_feature_normalised_short_end", save=True)

    ae_params = {
        'input_dim': sets_training_scaled[0].shape[1],  # 56
        'latent_dim': 2,
        'hidden_layers': (56, 40, 28, 12, 4,),
        'leaky_relu': 0.1,
        'loss': 'mse',
        'last_activation': 'linear',
        'batch_size': 20,
        'epochs': 100,
        'steps_per_epoch': 500,
    }
    ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()

    autoencoder = Autoencoder(ae_params)
    autoencoder.train(sets_training_scaled, sets_test_scaled)
    autoencoder.save_model("ae_" + ae_params_hash)
    # autoencoder.load_model("ae_" + ae_params_hash)

    # 2: encode data using the autoencoder
    sets_encoded_training = []
    for set_training_scaled in sets_training_scaled:
        sets_encoded_training.append(autoencoder.encode(set_training_scaled))
    sets_encoded_test = []
    for set_test_scaled in sets_test_scaled:
        sets_encoded_test.append(autoencoder.encode(set_test_scaled))

    # 3: decode using the autoencoder
    decoded_test = autoencoder.decode(sets_encoded_test[0])

    # 4: undo normalisation, for now only for the first test set
    # decoded_generated_segments_first_sim = decoded_generated_segments[0]
    simulated = preprocess.rescale_data(decoded_test, dataset_name=test_dataset_names[0])

    # reconstruction error
    # reconstruction_error(sets_test_scaled[0], decoded_test)
    error = reconstruction_error(np.array(sets_test[0]), simulated)

    if plot:
        plotting.plot_2d(sets_encoded_test[0], "test_feature_normalised_encoded_autoencoder_on_", save=True)
        plotting.plot_some_curves("normalised_compare_ae_before_rescale",
                                  sets_test_scaled[0], decoded_test,
                                  [25, 50, 75, 815], maturities)
        plotting.plot_some_curves("normalised_compare_ae",
                                  sets_test[0], simulated,
                                  [25, 50, 75, 815], maturities)
        plotting.plot_some_curves("normalised_compare_ae",
                                  sets_test[0], sets_test_scaled[0],
                                  [25, 50, 75, 815, 100, 600, 720, 740],
                                  maturities, plot_separate=True)

    return error
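# Hedged sketch, not from the original codebase: reconstruction_error() is
# called above but not defined in this section. A plausible minimal version is
# the mean squared error between real and reconstructed curves (the metric the
# authors actually use may differ; the function name is an assumption):
def reconstruction_error_sketch(real, reconstructed):
    """Mean squared error over all days and tenors; assumes equal shapes."""
    import numpy as np
    mse = np.mean((np.asarray(real) - np.asarray(reconstructed)) ** 2)
    print("reconstruction error (MSE):", mse)
    return mse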
def simulate():
    plotting = Plotting()
    preprocess_normalisation = PreprocessData()
    preprocess_normalisation.enable_normalisation_scaler = True
    preprocess_normalisation.feature_range = [-1, 1]
    # preprocess_normalisation.enable_ignore_price = True

    # 1: get data and apply normalisation
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
        training_dataset_names, test_dataset_names, maturities = preprocess_normalisation.get_data()
    all_training_scaled = np.vstack(sets_training_scaled)

    ae_params = {
        'input_dim': sets_training_scaled[0].shape[1],  # 56
        'latent_dim': 3,
        'hidden_layers': (56, 40, 28, 12, 4,),
        'leaky_relu': 0.1,
        'last_activation': 'linear',  # sigmoid or linear
        'loss': 'mean_squared_error',  # binary_crossentropy or mean_squared_error
        'epsilon_std': 1.0,
        'batch_size': 20,
        'epochs': 100,
        'steps_per_epoch': 500,
    }
    ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()

    # 2: train/load the variational autoencoder
    vae = VariationalAutoencoder(ae_params)
    vae.train(all_training_scaled, sets_test_scaled)
    vae.save_model("vae_" + ae_params_hash)
    # vae.load_model("vae_" + ae_params_hash)

    # 3: encode data using the VAE
    sets_encoded_training = []
    for set_training_scaled in sets_training_scaled:
        sets_encoded_training.append(vae.encode(set_training_scaled))
    sets_encoded_test = []
    for set_test_scaled in sets_test_scaled:
        sets_encoded_test.append(vae.encode(set_test_scaled))

    # 4: decode using the VAE
    decoded_data = vae.decode(sets_encoded_test[0])

    # 5: undo normalisation, for now only for the first test set
    simulated = preprocess_normalisation.rescale_data(decoded_data, dataset_name=test_dataset_names[0])

    # reconstruction error
    # reconstruction_error(sets_test_scaled[0], decoded_data)
    reconstruction_error(np.array(sets_test[0]), simulated)

    # plot latent space
    plotting.plot_2d(sets_encoded_test[0], "test_feature_normalised_encoded_vae_on_", save=True)
    plotting.plot_space(maturities, vae, "variational_grid", latent_dim=sets_encoded_test[0].shape[1])

    # plot scaled results
    plotting.plot_some_curves("test_feature_normalised_compare_vae_scaled",
                              sets_test_scaled[0], decoded_data,
                              [25, 50, 75, 815], maturities)
    plotting.plot_some_curves("test_feature_normalised_compare_vae",
                              sets_test[0], simulated,
                              [25, 50, 75, 815], maturities)
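# Hedged sketch, not from the original codebase: the 'epsilon_std' parameter
# above suggests the standard VAE reparameterisation trick, where the encoder
# outputs a mean and log-variance and the latent sample is drawn as
# z = mu + sigma * eps with eps ~ N(0, epsilon_std^2). A minimal Keras-style
# sampling function (all names are assumptions):
def sampling_sketch(args, epsilon_std=1.0):
    """Draw z = mu + sigma * eps; `args` is the (z_mean, z_log_var) pair."""
    from tensorflow.keras import backend as K
    z_mean, z_log_var = args
    eps = K.random_normal(shape=K.shape(z_mean), mean=0.0, stddev=epsilon_std)
    return z_mean + K.exp(0.5 * z_log_var) * eps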
def simulate(latent_dim=2, plot=False, preprocess_type=None, model_type=None, force_training=True):
    plotting = Plotting()
    preprocess = PreprocessData(preprocess_type)

    window_size = None
    if model_type is AEModel.AE_WINDOWS:
        window_size = 10

    # 1: get data and apply normalisation
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
        training_dataset_names, test_dataset_names, maturities = preprocess.get_data(chunks_of=window_size)
    all_training_scaled = np.vstack(sets_training_scaled)

    if model_type is AEModel.AAE:
        ae_params = {
            'preprocess_type': preprocess_type.value,  # only to make preprocess_type part of the hash
            'input_dim': sets_training_scaled[0].shape[1],  # 56
            'latent_dim': latent_dim,
            'hidden_layers': (56, 40, 28, 12, 4,),
            'hidden_layers_discriminator': (2, 2,),
            'leaky_relu': 0.1,
            'last_activation': 'linear',
            'last_activation_discriminator': 'sigmoid',
            'loss_generator': 'mean_squared_error',
            'loss_discriminator': 'binary_crossentropy',
            'batch_size': 20,
            'epochs': 20000,
        }
        ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = AdversarialAutoencoder(ae_params, plot=False)
    elif model_type is AEModel.VAE:
        ae_params = {
            'preprocess_type': preprocess_type.value,  # only to make preprocess_type part of the hash
            'input_dim': sets_training_scaled[0].shape[1],  # 56
            'latent_dim': latent_dim,
            'hidden_layers': (56, 40, 28, 12, 4,),
            'leaky_relu': 0.1,
            'last_activation': 'linear',  # sigmoid or linear
            'loss': 'mean_squared_error',  # binary_crossentropy or mean_squared_error
            'epsilon_std': 1.0,
            'batch_size': 20,
            'epochs': 100,
            'steps_per_epoch': 500,
        }
        ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = VariationalAutoencoder(ae_params, plot=False)
    elif model_type is AEModel.AE:
        ae_params = {
            'preprocess_type': preprocess_type.value,  # only to make preprocess_type part of the hash
            'input_dim': sets_training_scaled[0].shape[1],  # 56
            'latent_dim': latent_dim,
            'hidden_layers': (56, 40, 28, 12, 4,),
            'leaky_relu': 0.1,
            'loss': 'mse',
            'last_activation': 'linear',
            'batch_size': 20,
            'epochs': 100,
            'steps_per_epoch': 500,
        }
        ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = Autoencoder(ae_params, plot=False)
    elif model_type is AEModel.PCA:
        ae_params = {
            'preprocess_type': preprocess_type.value,  # only to make preprocess_type part of the hash
            'latent_dim': latent_dim,
        }
        ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = PCAModel(ae_params, plot=False)
    else:  # model_type is AEModel.AE_WINDOWS
        ae_params = {
            'input_dim': (window_size, sets_training_scaled[0].shape[1],),  # 10 x 56
            'latent_dim': (2, 56,),
            'hidden_layers': (12 * 56, 4 * 56,),
            'leaky_relu': 0.1,
            'loss': 'mse',
            'last_activation': 'linear',
            'batch_size': 20,
            'epochs': 10,
            'steps_per_epoch': 500,
        }
        ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = AutoencoderWindows(ae_params, plot=False)

    # 2: train/load the autoencoder
    if force_training:
        autoencoder.train(all_training_scaled, sets_test_scaled, "ae_" + ae_params_hash)
    else:
        autoencoder.load_else_train(all_training_scaled, sets_test_scaled, "ae_" + ae_params_hash)

    # 3: encode data using the autoencoder
    sets_encoded_training = autoencoder.encode(sets_training_scaled)
    sets_encoded_test = autoencoder.encode(sets_test_scaled)

    # 4: decode using the autoencoder
    decoded_test = autoencoder.decode(sets_encoded_test[0])

    # 5: undo scaling, for now only for the first test set
    simulated = preprocess.rescale_data(decoded_test, dataset_name=test_dataset_names[0])
    preprocess.enable_curve_smoothing = True
    simulated_smooth = preprocess.rescale_data(decoded_test, dataset_name=test_dataset_names[0])

    # reconstruction error
    # error = reconstruction_error(np.array(sets_test[0]), simulated)
    # error_smooth = reconstruction_error(np.array(sets_test[0]), simulated_smooth)
    smape_result = smape(simulated, np.array(sets_test[0]), over_curves=True)
    smape_result_smooth = smape(simulated_smooth, np.array(sets_test[0]), over_curves=True)
    print(np.mean(smape_result_smooth))

    if plot and model_type is not AEModel.AE_WINDOWS:
        plotting.plot_2d(sets_encoded_test[0],
                         preprocess_type.name + "_" + model_type.name + "_latent_space",
                         sets_test_scaled[0].index.values, save=True)
        plotting.plot_some_curves(preprocess_type.name + "_" + model_type.name + "_in_vs_out",
                                  sets_test[0], simulated, [25, 50, 75, 815], maturities)
        # plotting.plot_some_curves("normalised_compare_ae", sets_test[0], sets_test_scaled[0],
        #                           [25, 50, 75, 815, 100, 600, 720, 740], maturities, plot_separate=True)

        preprocess.enable_curve_smoothing = False
        if model_type is AEModel.VAE:
            plotting.plot_grid_2dim(maturities, autoencoder.generator_model,
                                    preprocess_type.name + "_" + model_type.name + "_latent_grid",
                                    preprocess, test_dataset_names[0], n=6)
        elif model_type is AEModel.AAE:
            plotting.plot_grid_2dim(maturities, autoencoder.decoder,
                                    preprocess_type.name + "_" + model_type.name + "_latent_grid",
                                    preprocess, test_dataset_names[0], n=6)

    return smape_result_smooth
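# Hedged sketch, not from the original codebase: smape() above computes the
# symmetric mean absolute percentage error. With over_curves=True it
# presumably returns one score per curve (row) rather than a single scalar.
# A minimal version consistent with that usage (names are assumptions; it
# also assumes the denominator never vanishes):
def smape_sketch(simulated, real, over_curves=False):
    """SMAPE in percent: 100 * mean(2|F - A| / (|A| + |F|))."""
    import numpy as np
    simulated, real = np.asarray(simulated), np.asarray(real)
    term = 2.0 * np.abs(simulated - real) / (np.abs(real) + np.abs(simulated))
    return 100.0 * np.mean(term, axis=1) if over_curves else 100.0 * np.mean(term)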
class Analysis:
    def __init__(self):
        self.preprocess_data = PreprocessData()
        self.plotting = Plotting()
        self.config = Config()

        # self.preprocess_data.enable_min_max_scaler = True
        self.preprocess_data.enable_log_returns = True
        self.sets_training, self.sets_test, self.sets_training_scaled, self.sets_test_scaled, \
            self.training_dataset_names, self.test_dataset_names, self.maturities = self.preprocess_data.get_data()

        wti_nymex = self.sets_test[0]
        time = wti_nymex.axes[0].tolist()
        self.wti_nymex_short_end = wti_nymex.iloc[:, 0]
        self.data_scaled = self.sets_test_scaled[0][0]

    def normalisation_over_tenors(self):
        preprocess = PreprocessData(PreprocessType.NORMALISATION_OVER_TENORS)
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
            training_dataset_names, test_dataset_names, maturities = preprocess.get_data()
        print("sets_test[0].shape", sets_test[0].shape, sets_test_scaled[0].shape)

        self.plotting.plot_some_curves("normalisation_over_tenors",
                                       sets_test[0], sets_test_scaled[0],
                                       [25, 50, 75, 815], maturities,
                                       plot_separate=True)  # old: [25, 50, 75, 100, 600, 720, 740, 815]

    def standardisation_over_tenors(self):
        preprocess = PreprocessData(PreprocessType.STANDARDISATION_OVER_TENORS)
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
            training_dataset_names, test_dataset_names, maturities = preprocess.get_data()

        self.plotting.plot_some_curves("standardisation_over_tenors",
                                       sets_test[0], sets_test_scaled[0],
                                       [25, 50, 75, 815], maturities,
                                       plot_separate=True)  # old: [25, 50, 75, 100, 600, 720, 740, 815]

    def logreturns_over_tenors(self):
        preprocess = PreprocessData(PreprocessType.LOG_RETURNS_OVER_TENORS)
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
            training_dataset_names, test_dataset_names, maturities = preprocess.get_data()

        self.plotting.plot_some_curves("logreturns_over_curves",
                                       sets_test[0], sets_test_scaled[0],
                                       [25, 50, 75, 815], maturities,
                                       plot_separate=True)  # old: [25, 50, 75, 100, 600, 720, 740, 815]
        self.plotting.plot_3d("logreturns_over_curves_3d", sets_test_scaled[0])

    def normalisation_over_curves(self):
        preprocess = PreprocessData()
        preprocess.enable_normalisation_scaler = True
        preprocess.enable_ignore_price = True
        preprocess.feature_range = [0, 1]
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
            training_dataset_names, test_dataset_names, maturities = preprocess.get_data()

        self.plotting.plot_some_curves("normalisation_over_curves",
                                       sets_test[0], sets_test_scaled[0],
                                       [25, 50, 75, 815], maturities,
                                       plot_separate=True)  # old: [25, 50, 75, 100, 600, 720, 740, 815]

    def standardisation_over_curves(self):
        print("todo standardisation_over_curves")

    def logreturns_over_curves(self):
        print("todo logreturns_over_curves")

    def all_log_returns(self):
        preprocess_data = PreprocessData()
        plotting = Plotting()
        preprocess_data.enable_log_returns = True
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
            training_dataset_names, test_dataset_names, maturities = preprocess_data.get_data()

        for i, set_training_scaled in enumerate(sets_training_scaled):
            print("set_training_scaled.shape", set_training_scaled.shape, i)
            plotting.plot_2d(set_training_scaled,
                             "/time_series/" + training_dataset_names[i],
                             timeseries=True, save=False, title=True)

    def all_normalised_data(self):
        preprocess_data = PreprocessData()
        preprocess_data.enable_normalisation_scaler = True
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
            training_dataset_names, test_dataset_names, maturities = preprocess_data.get_data()

        for i, set_training_scaled in enumerate(sets_training_scaled):
            self.plotting.plot_2d(set_training_scaled,
                                  "/time_series/" + training_dataset_names[i],
                                  timeseries=True, save=True, title=True)
        for i, set_test_scaled in enumerate(sets_test_scaled):
            self.plotting.plot_2d(set_test_scaled,
                                  "/time_series/" + test_dataset_names[i],
                                  timeseries=True, save=True, title=True)

    def all_data(self, show_title=False):
        preprocess_data = PreprocessData(extend_data=False)
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
            training_dataset_names, test_dataset_names, maturities = preprocess_data.get_data()
        print("maturities", maturities)

        for i, set_training in enumerate(sets_training):
            print(self.training_dataset_names[i])
            print(set_training.index[0], set_training.index[-1],
                  round(np.min(set_training.min()), 2), round(np.max(set_training.max()), 2))
            # self.plotting.plot_2d(set_training, "/time_series/" + training_dataset_names[i],
            #                       timeseries=True, save=True, title=show_title)
            # self.plotting.plot_3d("/time_series/" + training_dataset_names[i] + "_3d", set_training,
            #                       show_title=show_title)
            cov_log_returns = cov_log_returns_over_tenors(set_training)
            # self.plotting.plot_3d_cov("/time_series/" + training_dataset_names[i] + "_cov", cov_log_returns,
            #                           maturities=maturities, show_title=show_title)
            print("\n")

        for i, set_test in enumerate(sets_test):
            print(self.test_dataset_names[i])
            print(set_test.index[0], set_test.index[-1],
                  round(np.min(set_test.min()), 2), round(np.max(set_test.max()), 2))
            self.plotting.plot_2d(set_test, "/time_series/" + test_dataset_names[i],
                                  timeseries=True, save=True, title=show_title)
            self.plotting.plot_3d("/time_series/" + test_dataset_names[i] + "_3d", set_test,
                                  show_title=show_title)
            cov_log_returns = cov_log_returns_over_tenors(set_test)
            # self.plotting.plot_3d_cov("/time_series/" + test_dataset_names[i] + "_cov", cov_log_returns,
            #                           maturities=maturities, show_title=show_title)
            print("\n")
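# Hedged sketch, not from the original codebase: cov_log_returns_over_tenors()
# is called above but not defined in this section. It presumably computes the
# covariance matrix of daily log returns across tenors (one column per tenor).
# A minimal equivalent, assuming a pandas DataFrame of positive prices:
def cov_log_returns_over_tenors_sketch(prices):
    """Tenor-by-tenor covariance of log returns."""
    import numpy as np
    log_ret = np.log(prices / prices.shift(1)).dropna()
    return np.cov(log_ret, rowvar=False)  # shape: (num_tenors, num_tenors)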
def simulate():
    plotting = Plotting()
    preprocess_normalisation = PreprocessData()
    preprocess_normalisation.enable_normalisation_scaler = True
    preprocess_normalisation.feature_range = [0, 1]
    # preprocess_normalisation.enable_scaler = True

    # 1: get data and apply normalisation
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
        training_dataset_names, test_dataset_names, maturities = preprocess_normalisation.get_data()
    # plotting.plot_2d(sets_training_scaled[0][:, 0], "sets_training_scaled[0][:, 0]", save=False)
    # plotting.plot_2d(sets_test_scaled[0][:, 0], "test_feature_normalised_short_end", save=True)

    all_stacked = np.vstack((np.vstack(sets_training), np.vstack(sets_test)))
    all_stacked_scaled = np.vstack((np.vstack(sets_training_scaled), np.vstack(sets_test_scaled)))
    all_training_scaled = np.vstack(sets_training_scaled)
    # print("all_stacked_scaled.shape", all_stacked_scaled.shape)
    # plotting.plot_2d(all_stacked[:, 0], "training and test data", save=False)
    # plotting.plot_2d(all_stacked_scaled[:, 0], "training and test data scaled", save=False)

    ae_params = {
        'input_dim': sets_training_scaled[0].shape[1],  # 56
        'latent_dim': 2,
        'hidden_layers': (56, 40, 28, 12, 4, 2),
        'leaky_relu': 0.1,
        'loss': 'mse',
        'last_activation': 'linear',
        'batch_size': 20,
        'epochs': 100,
        'steps_per_epoch': 500,
    }
    ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()

    autoencoder = Autoencoder(ae_params)
    # autoencoder.train(all_stacked_scaled, sets_test_scaled)
    # autoencoder.train(sets_test_scaled[0], sets_test_scaled)
    # autoencoder.train(all_training_scaled, sets_test_scaled)
    # autoencoder.save_model("ae_" + ae_params_hash)
    autoencoder.load_model("ae_" + ae_params_hash)

    # 2: encode data using the autoencoder
    sets_encoded_training = []
    for set_training_scaled in sets_training_scaled:
        sets_encoded_training.append(autoencoder.encode(set_training_scaled))
    sets_encoded_test = []
    for set_test_scaled in sets_test_scaled:
        sets_encoded_test.append(autoencoder.encode(set_test_scaled))

    plotting.plot_2d(sets_encoded_test[0], "test_feature_normalised_encoded_autoencoder_on_", save=True)

    # 3: decode using the autoencoder
    decoded_test = autoencoder.decode(sets_encoded_test[0])

    # 4: undo normalisation, for now only for the first test set
    simulated = preprocess_normalisation.rescale_data(decoded_test, dataset_name=test_dataset_names[0])

    plotting.plot_some_curves("test_feature_normalised_compare_autoencoder_before_rescale",
                              sets_test_scaled[0], decoded_test,
                              [25, 50, 75, 815], maturities)  # old: [25, 50, 75, 100, 600, 720, 740, 815]
    plotting.plot_some_curves("test_feature_normalised_compare_autoencoder",
                              sets_test[0], simulated,
                              [25, 50, 75, 815], maturities)  # old: [25, 50, 75, 100, 600, 720, 740, 815]

    # curve_smooth = []
    # for curve in simulated:
    #     print("curve.shape", curve.shape)
    #     curve_smooth.append(savgol_filter(curve, 23, 5))  # window size 23, polynomial order 5
    # curve_smooth = np.array(curve_smooth)

    print("reconstruction error BEFORE smoothing:")
    reconstruction_error(np.array(sets_test[0]), simulated)

    preprocess_normalisation.enable_curve_smoothing = True
    simulated = preprocess_normalisation.rescale_data(decoded_test, dataset_name=test_dataset_names[0])
    plotting.plot_some_curves("test_feature_normalised_compare_autoencoder",
                              sets_test[0], simulated,
                              [25, 50, 75, 815], maturities)  # old: [25, 50, 75, 100, 600, 720, 740, 815]
    # plotting.plot_some_curves("test_feature_normalised_compare_normalisation", sets_test[0], sets_test_scaled[0],
    #                           [25, 50, 75, 815, 100, 600, 720, 740], maturities, plot_separate=True)

    # reconstruction error
    # reconstruction_error(sets_test_scaled[0], decoded_test)
    print("reconstruction error AFTER smoothing:")
    reconstruction_error(np.array(sets_test[0]), simulated)
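# Hedged sketch, not from the original codebase: with enable_curve_smoothing
# set, rescale_data() presumably applies a Savitzky-Golay filter to each
# reconstructed curve, matching the commented-out code above (window 23,
# polynomial order 5). A minimal stand-alone version using scipy:
def smooth_curves_sketch(curves):
    """Savitzky-Golay smoothing applied row-wise to an array of curves."""
    import numpy as np
    from scipy.signal import savgol_filter
    return np.array([savgol_filter(curve, 23, 5) for curve in curves])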
def simulate():
    plotting = Plotting()
    preprocess_minmax = PreprocessData()
    preprocess_logreturns = PreprocessData()
    preprocess_minmax.enable_min_max_scaler = True
    preprocess_logreturns.enable_log_returns = True

    # 1: get data and apply min-max scaling
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
        training_dataset_names, test_dataset_names, maturities = preprocess_minmax.get_data()
    print("sets_training_scaled.shape", sets_training_scaled[0].shape)

    autoencoder = DeepAutoencoder(input_shape=(sets_training_scaled[0].shape[1],), latent_dim=2)
    # autoencoder.train(np.vstack(sets_training_scaled), sets_test_scaled, epochs=100, batch_size=5)
    # autoencoder.save_model("deep_general_minimax")
    autoencoder.load_model("deep_general_minimax")

    # 2: encode data using the autoencoder
    sets_encoded_training = []
    for set_training_scaled in sets_training_scaled:
        sets_encoded_training.append(autoencoder.encode(set_training_scaled))
    sets_encoded_test = []
    for set_test_scaled in sets_test_scaled:
        sets_encoded_test.append(autoencoder.encode(set_test_scaled))

    plotting.plot_2d(sets_encoded_test[0], "encoded test data with deep autoencoder", save=False)

    # 3: log returns of encoded data
    sets_encoded_log_training = []
    for index, set_encoded_training in enumerate(sets_encoded_training):
        sets_encoded_log_training.append(preprocess_logreturns.scale_data(set_encoded_training))
    sets_encoded_log_test = []
    for index, set_encoded_test in enumerate(sets_encoded_test):
        sets_encoded_log_test.append(preprocess_logreturns.scale_data(set_encoded_test))

    plotting.plot_2d(sets_encoded_log_test[0],
                     "encoded test data with deep autoencoder, then log returns",
                     save=False)

    num_tenors = sets_encoded_log_training[0].shape[1]
    gan = GAN(num_c=6 * 7, num_z=6 * 7, num_o=6 * 7, num_tenors=num_tenors)  # try training on larger input and output
    # gan.train(sets_encoded_log_training, epochs=20000, batch_size=100, sample_interval=200)
    # gan.save_model("general_ae")
    gan.load_model("general_ae")

    print("sets_encoded_log_test[0].shape", sets_encoded_log_test[0].shape)
    test_arr = np.full([1, 6 * 7 + 6 * 7, num_tenors], 10)
    validity = gan.discriminator.predict(test_arr)  # np.array(sets_encoded_log_test[0])
    print(validity)

    rolled_encoded_log_test = rolling_windows(sets_encoded_log_test[0], 6 * 7 + 6 * 7)
    validity = gan.discriminator.predict(rolled_encoded_log_test)  # np.array(sets_encoded_log_test[0])
    print(validity)
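# Hedged sketch, not from the original codebase: rolling_windows() above
# presumably slices a (time, tenors) array into overlapping windows of the
# given length, producing a (num_windows, window_len, tenors) batch for the
# discriminator. A minimal equivalent:
def rolling_windows_sketch(data, window_len):
    """Stack all length-`window_len` sliding windows along a new first axis."""
    import numpy as np
    data = np.asarray(data)
    return np.stack([data[i:i + window_len] for i in range(len(data) - window_len + 1)])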
def simulate():
    plotting = Plotting()
    preprocess_normalisation = PreprocessData()
    preprocess_logreturns = PreprocessData()
    preprocess_normalisation.enable_normalisation_scaler = True
    preprocess_logreturns.enable_log_returns = True

    # 1: get data and apply pre-processing
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
        training_dataset_names, test_dataset_names, maturities = preprocess_normalisation.get_data()

    ae_params = {
        'preprocess_type': PreprocessType.NORMALISATION_OVER_TENORS.value,
        'input_dim': (10, sets_training_scaled[0].shape[1],),  # 10 x 56
        'latent_dim': 2 * 56,
        'hidden_layers': (12 * 56, 4 * 56,),
        'leaky_relu': 0.1,
        'loss': 'mse',
        'last_activation': 'linear',
        'batch_size': 5,
        'epochs': 5,
        'steps_per_epoch': 500,
    }
    ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()

    autoencoder = Autoencoder(ae_params)
    # autoencoder.train(np.vstack(sets_training_scaled), sets_test_scaled)
    # autoencoder.save_model("ae_" + ae_params_hash)
    autoencoder.load_else_train(sets_training_scaled, sets_test_scaled, "ae_" + ae_params_hash)

    # 2: encode data using the autoencoder
    sets_encoded_training = autoencoder.encode(sets_training_scaled)
    sets_encoded_test = autoencoder.encode(sets_test_scaled)
    print("sets_encoded_test", sets_encoded_test[0].shape)
    plotting.plot_2d(sets_encoded_test[0], "encoded test data with deep autoencoder", save=False)

    # 3: log returns of encoded data
    sets_encoded_log_training = preprocess_logreturns.scale_data(sets_encoded_training)
    sets_encoded_log_test = preprocess_logreturns.scale_data(sets_encoded_test)
    plotting.plot_2d(sets_encoded_log_test[0],
                     "encoded test data with deep autoencoder, then log returns",
                     save=False)

    num_c = 6 * 7
    num_o = 6 * 7
    gan_params = {
        'ae_params_hash': ae_params_hash,
        'num_tenors': sets_encoded_log_training[0].shape[1],
        'num_c': num_c,
        'num_z': 6 * 7,
        'num_o': num_o,
        'gen_model_type': 'standard',  # conv
        'dis_model_type': 'standard',  # conv
        'gen_layers': (4 * (6 * 7 * 2),),  # 4 * num_o * num_tenors
        'dis_layers': (4 * (6 * 7),),  # 4 * num_o
        'gen_last_activation': 'tanh',
        'dis_last_activation': 'sigmoid',
        'loss': 'binary_crossentropy',
        'batch_size': 128,
        'epochs': 20000,
    }
    gan_params_hash = hashlib.md5(json.dumps(gan_params, sort_keys=True).encode('utf-8')).hexdigest()

    gan = GAN(gan_params)  # try training on larger input and output
    # gan.train(sets_encoded_log_training, sample_interval=200)
    # gan.save_model("gan_" + gan_params_hash)
    gan.load_model("gan_" + gan_params_hash)

    # COV TEST, TEMPORARY
    # for name, set in zip(training_dataset_names, sets_training):
    #     print("name:", name)
    #     set_cov_log_returns_over_features = cov_log_returns_over_features(set)
    #     plotting.plot_3d_cov("covariance_time_series_" + name, set_cov_log_returns_over_features, show_title=False)
    #     plotting.plot_3d("time_series_" + name, set, maturities)
    # END COV TEST

    # 4: simulate on encoded log returns, conditioned on the test dataset
    num_simulations = 10
    num_repeats = 0
    generated, _ = gan.generate(condition=sets_encoded_log_test[-1], condition_on_end=False,
                                num_simulations=num_simulations, repeat=num_repeats)

    # insert the last real futures curve in order to do rescaling
    print("sets_encoded_log_test[-1][num_c] shape", sets_encoded_log_test[-1].iloc[num_c].shape)
    print("generated_segments.shape", generated.shape)
    generated = np.insert(generated, 0, sets_encoded_log_test[-1].iloc[num_c], axis=0)

    # 5: undo log returns
    # todo: this start_value is actually one off! Error still persists... autoencoder causing the difference?
    encoded_generated = preprocess_logreturns.rescale_data(generated,
                                                           start_value=sets_encoded_test[-1][num_c])
    encoded_generated = encoded_generated[:, 1:]  # remove the first curve again

    # 6: decode using the autoencoder
    decoded_generated_segments = autoencoder.decode(encoded_generated)

    # 7: undo normalisation, for now only for the first simulation
    simulated = preprocess_normalisation.rescale_data(decoded_generated_segments,
                                                      dataset_name=test_dataset_names[-1])
    preprocess_normalisation.enable_curve_smoothing = True
    simulated_smooth = preprocess_normalisation.rescale_data(decoded_generated_segments,
                                                             dataset_name=test_dataset_names[-1])

    real = np.array(sets_test[-1])[num_c:num_c + num_o]
    print("simulated, real", simulated.shape, real.shape)

    smape_result = smape(simulated, real)
    smape_result_smooth = smape(simulated_smooth, real)
    print("smape_result and smooth", smape_result, smape_result_smooth)
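# Hedged sketch, not from the original codebase: rescale_data() with a
# start_value presumably inverts the log-return transform by compounding from
# a known starting curve, i.e. P_t = P_0 * exp(cumulative sum of log returns).
# One plausible minimal inversion for a (time, tenors) array of log returns:
def undo_log_returns_sketch(log_ret, start_value):
    """Rebuild a price path from log returns and an initial curve."""
    import numpy as np
    return np.asarray(start_value) * np.exp(np.cumsum(np.asarray(log_ret), axis=0))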
gain.train(sets_encoded_log_training, test, test_mask)
test_prediction = gain.predict(test, test_mask)
print("test.head(10)", test.head(10))
print("test_prediction.head(10)", test_prediction.head(10))

standardised_test_prediction = preprocess2.rescale_data(test_prediction,
                                                        test_dataset_names[0],
                                                        start_value=sets_test_scaled[0][0],
                                                        index=sets_test_scaled[0].index.values)
rescaled_test_prediction = preprocess.rescale_data(standardised_test_prediction, test_dataset_names[0])
# print("isinstance(rescaled_test_prediction, pd.DataFrame)", isinstance(rescaled_test_prediction, pd.DataFrame))

plotting.plot_2d(sets_test[0], "sets_test[0]", title=True)
plotting.plot_2d(standardised_test_prediction, "standardised_test_prediction", title=True)
plotting.plot_2d(rescaled_test_prediction, "rescaled_test_prediction", title=True)
# plotting.plot_2d(rescaled_test_with_mask, "rescaled_test_with_mask", title=True)
# plotting.plot_2d(test, "gain_test_prediction", curve2=test_prediction, title=True)
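# Hedged sketch, not from the original codebase: GAIN-style imputation takes
# the observed data together with a binary mask (1 = observed, 0 = missing),
# which is presumably what test_mask above holds. A minimal construction from
# a DataFrame where NaNs mark the missing entries:
def make_gain_mask_sketch(data):
    """Binary mask for GAIN: 1 where a value is observed, 0 where missing."""
    import numpy as np
    return (~np.isnan(np.asarray(data, dtype=float))).astype(float)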
def simulate():
    plotting = Plotting()
    preprocess_type = PreprocessType.STANDARDISATION_OVER_TENORS
    preprocess = PreprocessData(preprocess_type)

    # 1: get data and apply standardisation
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
        training_dataset_names, test_dataset_names, maturities = preprocess.get_data()
    all_training_scaled = np.vstack(sets_training_scaled)

    ae_params = {
        'preprocess_type': preprocess_type.value,  # only to make preprocess_type part of the hash
        'input_dim': sets_training_scaled[0].shape[1],  # 56
        'latent_dim': 2,
        'hidden_layers': (56, 40, 28, 12, 4,),
        'leaky_relu': 0.1,
        'loss': 'mse',
        'last_activation': 'linear',
        'batch_size': 20,
        'epochs': 100,
        'steps_per_epoch': 500,
    }
    ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()

    autoencoder = Autoencoder(ae_params)
    autoencoder.load_else_train(all_training_scaled, sets_test_scaled, "ae_" + ae_params_hash)

    # 2: encode, decode and rescale the test data, then evaluate with SMAPE
    encoded = autoencoder.encode(sets_test_scaled[0])
    decoded = autoencoder.decode(encoded)
    rescaled = preprocess.rescale_data(decoded, dataset_name=test_dataset_names[0])
    smape_result = smape(rescaled, np.array(sets_test[0]), over_curves=True)

    print("smape_result test set", np.mean(smape_result), np.std(smape_result),
          np.min(smape_result), np.max(smape_result))
    plotting.plot_2d(sets_test[0], "evaluation of test curves", timeseries=True,
                     evaluation=smape_result, title=False)
    # for i in np.arange(len(test_eval)):
    #     if test_eval[i] > 4:
    #         plotting.plot_2d(sets_test_scaled[0][i], "Possible unrealistic curve" + str(i), save=False, title=True)

    # 3: see how well the autoencoder can map unrealistic curves
    # todo: generate random curves, THEN apply min-max feature scaling, THEN evaluate
    unrealistic_curves = []
    curve_shape = 56
    half = curve_shape // 2
    for level in (5, 10, 20, 50, 70, 100, 150, 200, 250, 300):
        unrealistic_curves.append(np.full(curve_shape, level))
    for low, high in ((50, 150), (100, 150), (100, 200)):
        unrealistic_curves.append(np.hstack((np.full(half, low), np.full(half, high))))
    for low, high in ((0, 10), (10, 70), (0, 100), (100, 200), (200, 300),
                      (0, 200), (0, 250), (0, 300)):
        unrealistic_curves.append(np.random.uniform(low, high, curve_shape))
    for start, stop in ((0, 100), (50, 150), (100, 200), (150, 250), (200, 300), (0, 200), (0, 300),
                        (100, 0), (150, 50), (200, 100), (250, 150), (300, 200), (200, 0), (300, 0)):
        unrealistic_curves.append(np.linspace(start, stop, num=curve_shape))
    unrealistic_curves = np.array(unrealistic_curves)
    print("unrealistic_curves.shape", unrealistic_curves.shape)

    unrealistic_curves_scaled = preprocess.scale_data(unrealistic_curves,
                                                      dataset_name=training_dataset_names[0],
                                                      should_fit=True)
    encoded = autoencoder.encode(unrealistic_curves_scaled)
    decoded = autoencoder.decode(encoded)
    rescaled = preprocess.rescale_data(decoded, dataset_name=training_dataset_names[0])
    smape_result = smape(rescaled, unrealistic_curves, over_curves=True)

    round_to_n = lambda x, n: round(x, -int(np.floor(np.log10(x))) + (n - 1))
    print("smape results", smape_result)
    for a_smape_result in smape_result:
        print(round_to_n(a_smape_result, 2))

    plotting.plot_2d(smape_result, "loss of unrealistic curves from autoencoder SMAPE",
                     save=False, title=True)
    # plotting.plot_2d(unrealistic_eval_mse, "loss of unrealistic curves from autoencoder MSE", save=False, title=True)
    plotting.plot_unrealisticness(unrealistic_curves,
                                  "loss of unrealistic curves from autoencoder",
                                  timeseries=True, evaluation=smape_result, title=False,
                                  eval_label="SMAPE")
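# Hedged sketch, not from the original codebase: with
# PreprocessType.STANDARDISATION_OVER_TENORS, scale_data(..., should_fit=True)
# presumably fits a per-tenor z-score transform (subtract each column's mean,
# divide by its standard deviation), the transform that rescale_data() later
# inverts. A minimal equivalent with scikit-learn:
def standardise_over_tenors_sketch(curves):
    """Fit and apply per-column standardisation; returns (scaled, scaler)."""
    from sklearn.preprocessing import StandardScaler
    scaler = StandardScaler()
    return scaler.fit_transform(curves), scaler  # invert via scaler.inverse_transform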