def main():
    plotting = Plotting()
    preprocess_normalisation = PreprocessData()
    preprocess_normalisation.enable_normalisation_scaler = True
    # preprocess_normalisation.enable_standardisation_scaler = True

    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess_normalisation.get_data()

    # sklearn model (check that it is doing the same (it is))
    # pca_model_sklearn = PCA(n_components=2)
    # pca_model_sklearn.fit(sets_test_scaled[0])
    # test_data_scaled_encoded = pca_model_sklearn.transform(sets_test_scaled[0])
    # test_data_scaled_decoded = pca_model_sklearn.inverse_transform(test_data_scaled_encoded)

    # our own model
    def pca_on_normalised():
        params = {'latent_dim': 2}
        pca_model = PCAModel(params)
        pca_model.train(np.vstack(sets_training_scaled))
        test_data_scaled_encoded = pca_model.encode(sets_test_scaled[0])
        test_data_scaled_decoded = pca_model.decode(test_data_scaled_encoded)

        print("sets_test_scaled[0].shape", sets_test_scaled[0].shape)
        print("test_data_scaled_encoded.shape", test_data_scaled_encoded.shape)
        print("test_data_scaled_decoded.shape", test_data_scaled_decoded.shape)

        # plot results
        plotting.plot_2d(test_data_scaled_encoded, "wti_nymex_encoded_pca")
        simulated = preprocess_normalisation.rescale_data(test_data_scaled_decoded, dataset_name=test_dataset_names[0])
        plotting.plot_some_curves("wti_nymex_normalised_compare_pca", sets_test[0], simulated, [25, 50, 75, 815], maturities)
        # plotting.plot_some_curves("test_feature_normalised_compare_normalisation", sets_test[0], sets_test_scaled[0],
        #                           [25, 50, 75, 815, 100, 600, 720, 740], maturities, plot_separate=True)

        # print("reconstruction_error", reconstruction_error(sets_test_scaled[0], test_data_scaled_decoded))
        # print("reconstruction_error", reconstruction_error(np.array(sets_test[0]), simulated))
        print("smape", smape(np.array(sets_test[0]), simulated))
        # print("smape", np.mean(smape(np.array(sets_test[0]), simulated, over_curves=True)))

    def pca_on_unnormalised():
        pca_model = PCAModel({'latent_dim': 2})  # same params-dict API as pca_on_normalised
        pca_model.train(np.vstack(sets_training))
        test_data_encoded = pca_model.encode(np.array(sets_test[0]))
        test_data_decoded = pca_model.decode(test_data_encoded)

        # plot results
        plotting.plot_2d(test_data_encoded.T, "wti_nymex_pca")
        # simulated = preprocess_normalisation.rescale_data(test_data_decoded, dataset_name=test_dataset_names[0])
        plotting.plot_some_curves("wti_nymex_compare_pca", sets_test[0], test_data_decoded, [25, 50, 75, 815], maturities)

    # pca_on_unnormalised()
    pca_on_normalised()
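# Note: `smape` is imported from the project's evaluation helpers and is not defined in this
# section. For reference only, a minimal sketch of the standard symmetric mean absolute
# percentage error; the helper's exact signature and `over_curves` behaviour are assumptions:
def smape_sketch(y_true, y_pred, over_curves=False, eps=1e-8):
    """SMAPE: mean of |y_pred - y_true| / ((|y_true| + |y_pred|) / 2)."""
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    ratio = np.abs(y_pred - y_true) / ((np.abs(y_true) + np.abs(y_pred)) / 2.0 + eps)
    if over_curves:
        return ratio.mean(axis=1)  # one score per curve (row)
    return ratio.mean()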
def all_normalised_data(self):
    preprocess_data = PreprocessData()
    preprocess_data.enable_normalisation_scaler = True

    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess_data.get_data()

    for i, set_training_scaled in enumerate(sets_training_scaled):
        self.plotting.plot_2d(set_training_scaled, "/time_series/" + training_dataset_names[i],
                              timeseries=True, save=True, title=True)

    for i, set_test_scaled in enumerate(sets_test_scaled):
        self.plotting.plot_2d(set_test_scaled, "/time_series/" + test_dataset_names[i],
                              timeseries=True, save=True, title=True)
def simulate(plot=True):
    plotting = Plotting()
    preprocess = PreprocessData()
    preprocess.enable_normalisation_scaler = True
    preprocess.feature_range = [0, 1]
    window_size = 20

    # 1. get data and apply normalisation
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data(chunks_of=window_size)

    print("sets_training_scaled.shape", sets_training_scaled[0].shape)
    # plotting.plot_2d(sets_training_scaled[0][:, 0], "sets_training_scaled[0][:, 0]", save=False)
    # plotting.plot_2d(sets_test_scaled[0][:, 0], "test_feature_normalised_short_end", save=True)

    ae_params = {
        'input_dim': (window_size, sets_training_scaled[0].shape[1],),  # window_size x 56
        'latent_dim': (2, 56,),
        'hidden_layers': (12 * 56, 4 * 56,),
        'leaky_relu': 0.1,
        'loss': 'mse',
        'last_activation': 'linear',
        'batch_size': 20,
        'epochs': 100,
        'steps_per_epoch': 500,
    }
    ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()

    autoencoder = AutoencoderWindows(ae_params)
    print("sets_training_scaled", sets_training_scaled[0].shape)
    autoencoder.train(sets_training_scaled, sets_test_scaled)
    autoencoder.save_model("ae_" + ae_params_hash)
    # autoencoder.load_model("ae_" + ae_params_hash)

    # 2: encode data using autoencoder
    sets_encoded_training = []
    for set_training_scaled in sets_training_scaled:
        sets_encoded_training.append(autoencoder.encode(set_training_scaled))

    sets_encoded_test = []
    for set_test_scaled in sets_test_scaled:
        sets_encoded_test.append(autoencoder.encode(set_test_scaled))

    print("sets_encoded_training", len(sets_encoded_training), sets_encoded_training[0].shape)
    print("sets_encoded_test", sets_encoded_test[0].shape)

    # 6: decode using autoencoder
    decoded_test = autoencoder.decode(sets_encoded_test[0])
    print("decoded_test", decoded_test.shape)

    # 7: undo minimax, for now only the first simulation
    # decoded_generated_segments_first_sim = decoded_generated_segments[0]
    preprocess.enable_curve_smoothing = True
    simulated_smooth = preprocess.rescale_data(decoded_test, dataset_name=test_dataset_names[0])

    # reconstruction error
    # reconstruction_error(sets_test_scaled[0], decoded_test)
    # error = reconstruction_error(np.array(sets_test[0]), simulated_smooth)
    # print("error:", error)

    smape_result_smooth = smape(simulated_smooth, np.array(sets_test[0]), over_curves=True)
    print(np.mean(smape_result_smooth), np.var(smape_result_smooth))

    if plot:
        # plotting.plot_2d(sets_encoded_test[0], "test_feature_normalised_encoded_autoencoder_on_", save=True)
        # plotting.plot_some_curves("normalised_compare_ae_before_rescale", sets_test_scaled[0], decoded_test,
        #                           [25, 50, 75, 815], maturities)
        plotting.plot_some_curves("normalised_compare_ae", sets_test[0], simulated_smooth, [25, 50, 75, 815], maturities)
def simulate(plot=True):
    plotting = Plotting()
    preprocess = PreprocessData()
    preprocess.enable_normalisation_scaler = True
    preprocess.feature_range = [0, 1]

    # 1. get data and apply normalisation
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data()

    print("sets_training_scaled.shape", sets_training_scaled[0].shape)
    # plotting.plot_2d(sets_training_scaled[0][:, 0], "sets_training_scaled[0][:, 0]", save=False)
    # plotting.plot_2d(sets_test_scaled[0][:, 0], "test_feature_normalised_short_end", save=True)

    ae_params = {
        'input_dim': sets_training_scaled[0].shape[1],  # 56
        'latent_dim': 2,
        'hidden_layers': (56, 40, 28, 12, 4,),
        'leaky_relu': 0.1,
        'loss': 'mse',
        'last_activation': 'linear',
        'batch_size': 20,
        'epochs': 100,
        'steps_per_epoch': 500,
    }
    ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()

    autoencoder = Autoencoder(ae_params)
    autoencoder.train(sets_training_scaled, sets_test_scaled)
    autoencoder.save_model("ae_" + ae_params_hash)
    # autoencoder.load_model("ae_" + ae_params_hash)

    # 2: encode data using autoencoder
    sets_encoded_training = []
    for set_training_scaled in sets_training_scaled:
        sets_encoded_training.append(autoencoder.encode(set_training_scaled))

    sets_encoded_test = []
    for set_test_scaled in sets_test_scaled:
        sets_encoded_test.append(autoencoder.encode(set_test_scaled))

    # 6: decode using autoencoder
    decoded_test = autoencoder.decode(sets_encoded_test[0])

    # 7: undo minimax, for now only the first simulation
    # decoded_generated_segments_first_sim = decoded_generated_segments[0]
    simulated = preprocess.rescale_data(decoded_test, dataset_name=test_dataset_names[0])

    # reconstruction error
    # reconstruction_error(sets_test_scaled[0], decoded_test)
    error = reconstruction_error(np.array(sets_test[0]), simulated)

    if plot:
        plotting.plot_2d(sets_encoded_test[0], "test_feature_normalised_encoded_autoencoder_on_", save=True)
        plotting.plot_some_curves("normalised_compare_ae_before_rescale", sets_test_scaled[0], decoded_test,
                                  [25, 50, 75, 815], maturities)
        plotting.plot_some_curves("normalised_compare_ae", sets_test[0], simulated, [25, 50, 75, 815], maturities)
        plotting.plot_some_curves("normalised_compare_ae", sets_test[0], sets_test_scaled[0],
                                  [25, 50, 75, 815, 100, 600, 720, 740], maturities, plot_separate=True)

    return error
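# Note: `reconstruction_error` also comes from the project's evaluation helpers and is not
# defined here. A minimal sketch of one plausible definition (root-mean-squared error between
# the original curves and their reconstruction); the repo's actual implementation may differ:
def reconstruction_error_sketch(original, reconstructed):
    """RMSE between original curves and their autoencoder reconstruction."""
    diff = np.asarray(original) - np.asarray(reconstructed)
    return np.sqrt(np.mean(np.square(diff)))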
def normalisation_over_curves(self):
    preprocess = PreprocessData()
    preprocess.enable_normalisation_scaler = True
    preprocess.enable_ignore_price = True
    preprocess.feature_range = [0, 1]

    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data()

    self.plotting.plot_some_curves("normalisation_over_curves", sets_test[0], sets_test_scaled[0],
                                   [25, 50, 75, 815], maturities, plot_separate=True)
    # old: [25, 50, 75, 100, 600, 720, 740, 815]
def all_log_returns(self):
    preprocess_data = PreprocessData()
    plotting = Plotting()
    preprocess_data.enable_log_returns = True

    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess_data.get_data()

    for i, set_training_scaled in enumerate(sets_training_scaled):
        print("set_training_scaled.shape", set_training_scaled.shape, i)
        plotting.plot_2d(set_training_scaled, "/time_series/" + training_dataset_names[i],
                         timeseries=True, save=False, title=True)
def test_two_preprocessing_methods(self):
    preprocess = PreprocessData(PreprocessType.STANDARDISATION_OVER_TENORS, short_end=True)
    preprocess2 = PreprocessData(PreprocessType.LOG_RETURNS_OVER_TENORS, short_end=True)

    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data()

    sets_encoded_log_test = preprocess2.scale_data(sets_test_scaled, test_dataset_names, should_fit=True)

    # in this case the start_value is required, otherwise it will take the start_value of the original data instead
    standardised_test_prediction = preprocess2.rescale_data(sets_encoded_log_test[0], test_dataset_names[0],
                                                            start_value=sets_test_scaled[0][0],
                                                            index=sets_test_scaled[0].index.values)
    rescaled_test_prediction = preprocess.rescale_data(standardised_test_prediction, test_dataset_names[0])

    # plotting.plot_2d(sets_test[0], "gain_test_prediction_rescaled", curve2=rescaled_test_prediction, title=True)

    np.testing.assert_allclose(rescaled_test_prediction, sets_test[0])
def standardisation_over_tenors(self):
    preprocess = PreprocessData(PreprocessType.STANDARDISATION_OVER_TENORS)

    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data()

    self.plotting.plot_some_curves("standardisation_over_tenors", sets_test[0], sets_test_scaled[0],
                                   [25, 50, 75, 815], maturities, plot_separate=True)
    # old: [25, 50, 75, 100, 600, 720, 740, 815]
def simulate(latent_dim=2, preprocess_type1=None, preprocess_type2=None, ae_model=None, gan_model=None, force_training=True, plot=False):
    preprocess1 = PreprocessData(preprocess_type1, short_end=True)
    preprocess2 = PreprocessData(preprocess_type2, short_end=True)

    # 1. get data and apply scaling
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess1.get_data()
    print("sets_test_scaled, sets_training_scaled:", sets_test_scaled[0].shape, sets_training_scaled[0].shape)

    # 2: log returns of encoded data
    sets_encoded_log_training = preprocess2.scale_data(sets_training_scaled, training_dataset_names, should_fit=True)
    sets_encoded_log_test = preprocess2.scale_data(sets_test_scaled, test_dataset_names, should_fit=True)

    num_c = 6 * 7
    num_o = 6 * 7

    if gan_model is GANModel.WGAN:
        gan_params = {
            'short_end_encoding': preprocess_type1.name + "_" + preprocess_type2.name,
            'num_tenors': sets_encoded_log_training[0].shape[1],
            'num_c': 6 * 7, 'num_z': 6 * 7, 'num_o': 6 * 7,
            'gen_model_type': 'standard',  # conv
            'dis_model_type': 'standard',  # conv
            'gen_layers': (4 * (6 * 7 * 2),),  # 4 * num_o * num_tenors
            'dis_layers': (4 * (6 * 7),),  # 4 * num_o
            'gen_last_activation': 'tanh',
            'dis_last_activation': 'sigmoid',
            'loss': 'binary_crossentropy',
            'batch_size': 32,
            'epochs': 10000,
            'sample_interval': 1000,
        }
        gan_params_hash = hashlib.md5(json.dumps(gan_params, sort_keys=True).encode('utf-8')).hexdigest()
        gan = CWGANGP(gan_params, plot=False)
    else:
        if gan_model is GANModel.GAN_CONV:
            model_type = 'conv'
        else:  # if gan_model is GANModel.GAN:
            model_type = 'standard'
        print("num tenors:", sets_encoded_log_training[0].shape[1])

        gan_params = {
            'short_end_encoding': preprocess_type1.name + "_" + preprocess_type2.name,
            'num_tenors': sets_encoded_log_training[0].shape[1],
            'num_c': num_c, 'num_z': 6 * 7, 'num_o': num_o,
            'gen_model_type': model_type,  # conv
            'dis_model_type': model_type,  # conv
            'gen_layers': (4 * (6 * 7 * 2),),  # 4 * num_o * num_tenors
            'dis_layers': (4 * (6 * 7),),  # 4 * num_o
            'gen_last_activation': 'tanh',
            'dis_last_activation': 'sigmoid',
            'loss': 'binary_crossentropy',
            'batch_size': 128,
            'epochs': 20000,
        }
        gan_params_hash = hashlib.md5(json.dumps(gan_params, sort_keys=True).encode('utf-8')).hexdigest()
        gan = GAN(gan_params, plot=False)

    # try training on larger input and output
    if force_training:
        gan.train(sets_encoded_log_training, "gan_" + gan_params_hash)
    else:
        gan.load_else_train(sets_encoded_log_training, "gan_" + gan_params_hash)

    # 4: simulate on encoded log returns, conditioned on test dataset
    num_simulations = 100
    num_repeats = 0
    print("sets_encoded_log_test[-1]", sets_encoded_log_test[-1].shape)
    generated, _ = gan.generate(condition=sets_encoded_log_test[-1], condition_on_end=False,
                                num_simulations=num_simulations, repeat=num_repeats)

    # insert the last real futures curve in order to do rescaling
    if preprocess_type2 is PreprocessType.LOG_RETURNS_OVER_TENORS:
        generated = np.insert(generated, 0, sets_encoded_log_test[-1].iloc[num_c], axis=1)

    print("sets_test_scaled[-1]", sets_test_scaled[-1].shape)
    print("sets_test_scaled[-1][num_c]", sets_test_scaled[-1].iloc[num_c])

    # 5: undo scaling
    encoded_generated = preprocess2.rescale_data(generated, start_value=sets_test_scaled[-1].iloc[num_c],
                                                 dataset_name=test_dataset_names[-1])
    if preprocess_type2 is PreprocessType.LOG_RETURNS_OVER_TENORS:
        encoded_generated = encoded_generated[:, 1:]  # remove first curve again

    # 7: undo scaling, this can be log-returns
    simulated = preprocess1.rescale_data(encoded_generated, start_value=sets_test[-1].iloc[num_c],
                                         dataset_name=test_dataset_names[-1])

    if preprocess_type2 is PreprocessType.LOG_RETURNS_OVER_TENORS:
        real = np.array(sets_test[-1])[num_c:num_c + num_o + 1]  # `+1` because the log-returns also does +1
    else:
        real = np.array(sets_test[-1])[num_c:num_c + num_o + 1]

    sim = simulated.reshape(100, 43)  # i.e. (num_simulations, num_o + 1)

    print("sets_test[-1].iloc[num_c], sim[0][0]", sets_test[-1].iloc[num_c], sim[0][0], sim[1][0], sim[2][0])
    print("real, simulated", real.shape, sim.shape)

    smape_result = smape(sim, real, over_curves=True)

    if plot:
        condition_and_real = sets_test[-1].iloc[0:num_c + num_o + 1]
        plotting = Plotting()
        plotting.plot_training_sample("simulated_simple", sim, condition_and_real, num_c, after_real_data=True)
        # print("smape test:", smape(simulated[0], real), smape_result)

    return smape_result
def simulate(latent_dim=2, preprocess_type1=None, preprocess_type2=None, ae_model=None, plot=False):
    preprocess1 = PreprocessData(preprocess_type1)
    preprocess2 = PreprocessData(preprocess_type2)

    # 1. get data and apply scaling
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess1.get_data()

    if ae_model is AEModel.AAE:
        ae_params = {
            'preprocess_type': preprocess_type1.value,  # only to make preprocess_type part of the hash
            'input_dim': sets_training_scaled[0].shape[1],  # 56
            'latent_dim': latent_dim,
            'hidden_layers': (56, 40, 28, 12, 4,),
            'hidden_layers_discriminator': (2, 2,),
            'leaky_relu': 0.1,
            'last_activation': 'linear',
            'last_activation_discriminator': 'sigmoid',
            'loss_generator': 'mean_squared_error',
            'loss_discriminator': 'binary_crossentropy',
            'batch_size': 20,
            'epochs': 20000,
        }
        ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = AdversarialAutoencoder(ae_params, plot=False)
    elif ae_model is AEModel.VAE:
        ae_params = {
            'preprocess_type': preprocess_type1.value,  # only to make preprocess_type part of the hash
            'input_dim': sets_training_scaled[0].shape[1],  # 56
            'latent_dim': latent_dim,
            'hidden_layers': (56, 40, 28, 12, 4,),
            'leaky_relu': 0.1,
            'last_activation': 'linear',  # sigmoid or linear
            'loss': 'mean_squared_error',  # binary_crossentropy or mean_squared_error
            'epsilon_std': 1.0,
            'batch_size': 20,
            'epochs': 100,
            'steps_per_epoch': 500,
        }
        ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = VariationalAutoencoder(ae_params, plot=False)
    elif ae_model is AEModel.AE:
        ae_params = {
            'preprocess_type': preprocess_type1.value,  # only to make preprocess_type part of the hash
            'input_dim': sets_training_scaled[0].shape[1],  # 56
            'latent_dim': latent_dim,
            'hidden_layers': (56, 40, 28, 12, 4,),
            'leaky_relu': 0.1,
            'loss': 'mse',
            'last_activation': 'linear',
            'batch_size': 20,
            'epochs': 100,
            'steps_per_epoch': 500,
        }
        ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = Autoencoder(ae_params, plot=False)
    else:  # elif ae_model is AEModel.PCA:
        ae_params = {
            'preprocess_type': preprocess_type1.value,  # only to make preprocess_type part of the hash
            'latent_dim': latent_dim,
        }
        ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = PCAModel(ae_params, plot=False)

    # 2. train/load autoencoder
    autoencoder.load_else_train(np.vstack(sets_training_scaled), sets_test_scaled, "ae_" + ae_params_hash)

    # 2: encode data using autoencoder
    sets_encoded_training = autoencoder.encode(sets_training_scaled)
    sets_encoded_test = autoencoder.encode(sets_test_scaled)

    # 3: log returns of encoded data
    sets_encoded_log_training = preprocess2.scale_data(sets_encoded_training, training_dataset_names, should_fit=True)
    sets_encoded_log_test = preprocess2.scale_data(sets_encoded_test, test_dataset_names, should_fit=True)

    print("=" * 20)
    print(ae_model.name)
    print("\n")
    for set_encoded_log_training, training_dataset_name in zip(sets_encoded_log_training, training_dataset_names):
        print(training_dataset_name)
        print("min:", np.min(set_encoded_log_training.min()), "max:", np.max(set_encoded_log_training.max()))
    print("\n")
    for set_encoded_log_test, test_dataset_name in zip(sets_encoded_log_test, test_dataset_names):
        print(test_dataset_name)
        print("min:", np.min(set_encoded_log_test.min()), "max:", np.max(set_encoded_log_test.max()))
    print("\n")
    print("=" * 20)
def simulate():
    plotting = Plotting()
    preprocess_normalisation = PreprocessData()
    preprocess_logreturns = PreprocessData()
    preprocess_normalisation.enable_normalisation_scaler = True
    preprocess_logreturns.enable_log_returns = True

    # 1. get data and apply pre-processing
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess_normalisation.get_data()

    ae_params = {
        'preprocess_type': PreprocessType.NORMALISATION_OVER_TENORS.value,
        'input_dim': (10, sets_training_scaled[0].shape[1],),  # 56
        'latent_dim': 2 * 56,
        'hidden_layers': (12 * 56, 4 * 56,),
        'leaky_relu': 0.1,
        'loss': 'mse',
        'last_activation': 'linear',
        'batch_size': 5,
        'epochs': 5,
        'steps_per_epoch': 500,
    }
    ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()

    autoencoder = Autoencoder(ae_params)
    # autoencoder.train(np.vstack(sets_training_scaled), sets_test_scaled)
    # autoencoder.save_model("ae_" + ae_params_hash)
    autoencoder.load_else_train(sets_training_scaled, sets_test_scaled, "ae_" + ae_params_hash)

    # 2: encode data using autoencoder
    sets_encoded_training = autoencoder.encode(sets_training_scaled)
    sets_encoded_test = autoencoder.encode(sets_test_scaled)
    print("sets_encoded_test", sets_encoded_test[0].shape)
    plotting.plot_2d(sets_encoded_test[0], "encoded test data with deep autoencoder", save=False)

    # 3: log returns of encoded data
    sets_encoded_log_training = preprocess_logreturns.scale_data(sets_encoded_training)
    sets_encoded_log_test = preprocess_logreturns.scale_data(sets_encoded_test)
    plotting.plot_2d(sets_encoded_log_test[0], "encoded test data with deep autoencoder, then log returns", save=False)

    num_c = 6 * 7
    num_o = 6 * 7
    gan_params = {
        'ae_params_hash': ae_params_hash,
        'num_tenors': sets_encoded_log_training[0].shape[1],
        'num_c': num_c, 'num_z': 6 * 7, 'num_o': num_o,
        'gen_model_type': 'standard',  # conv
        'dis_model_type': 'standard',  # conv
        'gen_layers': (4 * (6 * 7 * 2),),  # 4 * num_o * num_tenors
        'dis_layers': (4 * (6 * 7),),  # 4 * num_o
        'gen_last_activation': 'tanh',
        'dis_last_activation': 'sigmoid',
        'loss': 'binary_crossentropy',
        'batch_size': 128,
        'epochs': 20000,
    }
    gan_params_hash = hashlib.md5(json.dumps(gan_params, sort_keys=True).encode('utf-8')).hexdigest()

    gan = GAN(gan_params)  # try training on larger input and output
    # gan.train(sets_encoded_log_training, sample_interval=200)
    # gan.save_model("gan_" + gan_params_hash)
    gan.load_model("gan_" + gan_params_hash)

    # COV TEST, TEMPORARY
    # for name, set in zip(training_dataset_names, sets_training):
    #     print("name:", name)
    #     set_cov_log_returns_over_features = cov_log_returns_over_features(set)
    #     plotting.plot_3d_cov("covariance_time_series_" + name, set_cov_log_returns_over_features, show_title=False)
    #     plotting.plot_3d("time_series_" + name, set, maturities)
    # END COV TEST

    # 4: simulate on encoded log returns, conditioned on test dataset
    num_simulations = 10
    num_repeats = 0
    generated, _ = gan.generate(condition=sets_encoded_log_test[-1], condition_on_end=False,
                                num_simulations=num_simulations, repeat=num_repeats)

    # insert the last real futures curve in order to do rescaling
    print("sets_encoded_log_test[-1][num_c] shape", sets_encoded_log_test[-1].iloc[num_c].shape)
    print("generated_segments.shape", generated.shape)
    generated = np.insert(generated, 0, sets_encoded_log_test[-1].iloc[num_c], axis=0)

    # 5: undo log-returns
    # todo: this start_value is actually one off! Error still persists... autoencoder causing the difference?
    encoded_generated = preprocess_logreturns.rescale_data(generated, start_value=sets_encoded_test[-1][num_c])
    encoded_generated = encoded_generated[:, 1:]  # remove first curve again

    # 6: decode using autoencoder
    decoded_generated_segments = autoencoder.decode(encoded_generated)

    # 7: undo minimax, for now only the first simulation
    simulated = preprocess_normalisation.rescale_data(decoded_generated_segments, dataset_name=test_dataset_names[-1])

    preprocess_normalisation.enable_curve_smoothing = True
    simulated_smooth = preprocess_normalisation.rescale_data(decoded_generated_segments, dataset_name=test_dataset_names[-1])

    real = np.array(sets_test[-1])[num_c:num_c + num_o]

    print("simulated, real", simulated.shape, real.shape)
    smape_result = smape(simulated, real)
    smape_result_smooth = smape(simulated_smooth, real)
    print("smape_result and smooth", smape_result, smape_result_smooth)
from helpers.preprocess_data import PreprocessData
from helpers.evaluate import *
from helpers.plotting import Plotting
from imputance.gain_model import gain
import numpy as np
import matplotlib.pyplot as plt

if __name__ == '__main__':
    plotting = Plotting()
    preprocess = PreprocessData(PreprocessType.STANDARDISATION_OVER_TENORS, short_end=True)
    preprocess2 = PreprocessData(PreprocessType.LOG_RETURNS_OVER_TENORS, short_end=True)

    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data()

    sets_encoded_log_training = preprocess2.scale_data(sets_training_scaled, training_dataset_names, should_fit=True)
    sets_encoded_log_test = preprocess2.scale_data(sets_test_scaled, test_dataset_names, should_fit=True)

    train = sets_encoded_log_training[0].copy()
    test = sets_encoded_log_test[0].copy()

    # print("train.shape[1]", train.shape[1])
    # print("sets_test_scaled[0]", sets_test_scaled[0].shape)
    # print("sets_encoded_log_test[0]", sets_encoded_log_test[0].shape)

    params = {
def simulate():
    plotting = Plotting()
    preprocess_type = PreprocessType.STANDARDISATION_OVER_TENORS
    preprocess = PreprocessData(preprocess_type)

    # 1. get data and apply minimax
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data()
    all_training_scaled = np.vstack(sets_training_scaled)

    ae_params = {
        'preprocess_type': preprocess_type.value,  # only to make preprocess_type part of the hash
        'input_dim': sets_training_scaled[0].shape[1],  # 56
        'latent_dim': 2,
        'hidden_layers': (56, 40, 28, 12, 4,),
        'leaky_relu': 0.1,
        'loss': 'mse',
        'last_activation': 'linear',
        'batch_size': 20,
        'epochs': 100,
        'steps_per_epoch': 500,
    }
    ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()

    autoencoder = Autoencoder(ae_params)
    autoencoder.load_else_train(all_training_scaled, sets_test_scaled, "ae_" + ae_params_hash)

    # 2: encode data using autoencoder
    encoded = autoencoder.encode(sets_test_scaled[0])
    decoded = autoencoder.decode(encoded)

    rescaled = preprocess.rescale_data(decoded, dataset_name=test_dataset_names[0])
    smape_result = smape(rescaled, np.array(sets_test[0]), over_curves=True)

    print("smape_result test set", np.mean(smape_result), np.std(smape_result), np.min(smape_result), np.max(smape_result))

    plotting.plot_2d(sets_test[0], "evaluation of test curves", timeseries=True, evaluation=smape_result, title=False)

    # for i in np.arange(len(test_eval)):
    #     if test_eval[i] > 4:
    #         plotting.plot_2d(sets_test_scaled[0][i], "Possible unrealistic curve" + str(i), save=False, title=True)

    # 3: lets see how well the autoencoder can map a zero vector
    # todo: generate random curves, THEN apply min-max feature scaling, THEN evaluate
    unrealistic_curves = []
    curve_shape = 56
    unrealistic_curves.append(np.full(curve_shape, 5))
    unrealistic_curves.append(np.full(curve_shape, 10))
    unrealistic_curves.append(np.full(curve_shape, 20))
    unrealistic_curves.append(np.full(curve_shape, 50))
    unrealistic_curves.append(np.full(curve_shape, 70))
    unrealistic_curves.append(np.full(curve_shape, 100))
    unrealistic_curves.append(np.full(curve_shape, 150))
    unrealistic_curves.append(np.full(curve_shape, 200))
    unrealistic_curves.append(np.full(curve_shape, 250))
    unrealistic_curves.append(np.full(curve_shape, 300))
    unrealistic_curves.append(np.hstack((np.full(int(curve_shape / 2), 50), np.full(int(curve_shape / 2), 150))))
    unrealistic_curves.append(np.hstack((np.full(int(curve_shape / 2), 100), np.full(int(curve_shape / 2), 150))))
    unrealistic_curves.append(np.hstack((np.full(int(curve_shape / 2), 100), np.full(int(curve_shape / 2), 200))))
    unrealistic_curves.append(np.random.uniform(0, 10, curve_shape))
    unrealistic_curves.append(np.random.uniform(10, 70, curve_shape))
    unrealistic_curves.append(np.random.uniform(0, 100, curve_shape))
    unrealistic_curves.append(np.random.uniform(100, 200, curve_shape))
    unrealistic_curves.append(np.random.uniform(200, 300, curve_shape))
    unrealistic_curves.append(np.random.uniform(0, 200, curve_shape))
    unrealistic_curves.append(np.random.uniform(0, 250, curve_shape))
    unrealistic_curves.append(np.random.uniform(0, 300, curve_shape))
    unrealistic_curves.append(np.linspace(0, 100, num=curve_shape))
    unrealistic_curves.append(np.linspace(50, 150, num=curve_shape))
    unrealistic_curves.append(np.linspace(100, 200, num=curve_shape))
    unrealistic_curves.append(np.linspace(150, 250, num=curve_shape))
    unrealistic_curves.append(np.linspace(200, 300, num=curve_shape))
    unrealistic_curves.append(np.linspace(0, 200, num=curve_shape))
    unrealistic_curves.append(np.linspace(0, 300, num=curve_shape))
    unrealistic_curves.append(np.linspace(100, 0, num=curve_shape))
    unrealistic_curves.append(np.linspace(150, 50, num=curve_shape))
    unrealistic_curves.append(np.linspace(200, 100, num=curve_shape))
    unrealistic_curves.append(np.linspace(250, 150, num=curve_shape))
    unrealistic_curves.append(np.linspace(300, 200, num=curve_shape))
    unrealistic_curves.append(np.linspace(200, 0, num=curve_shape))
    unrealistic_curves.append(np.linspace(300, 0, num=curve_shape))
    unrealistic_curves = np.array(unrealistic_curves)
    print("unrealistic_curves.shape", unrealistic_curves.shape)

    unrealistic_curves_scaled = preprocess.scale_data(unrealistic_curves, dataset_name=training_dataset_names[0], should_fit=True)
    encoded = autoencoder.encode(unrealistic_curves_scaled)
    decoded = autoencoder.decode(encoded)
    rescaled = preprocess.rescale_data(decoded, dataset_name=training_dataset_names[0])

    smape_result = smape(rescaled, unrealistic_curves, over_curves=True)
    round_to_n = lambda x, n: round(x, -int(np.floor(np.log10(x))) + (n - 1))

    print("smape results", smape_result)
    for a_smape_result in smape_result:
        print(round_to_n(a_smape_result, 2))

    plotting.plot_2d(smape_result, "loss of unrealistic curves from autoencoder SMAPE", save=False, title=True)
    # plotting.plot_2d(unrealistic_eval_mse, "loss of unrealistic curves from autoencoder MSE", save=False, title=True)
    plotting.plot_unrealisticness(unrealistic_curves, "loss of unrealistic curves from autoencoder",
                                  timeseries=True, evaluation=smape_result, title=False, eval_label="SMAPE")
def all_data(self, show_title=False):
    preprocess_data = PreprocessData(extend_data=False)

    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess_data.get_data()

    print("maturities", maturities)

    for i, set_training in enumerate(sets_training):
        print(training_dataset_names[i])
        print(set_training.index[0], set_training.index[-1],
              round(np.min(set_training.min()), 2), round(np.max(set_training.max()), 2))
        # self.plotting.plot_2d(set_training, "/time_series/" + training_dataset_names[i], timeseries=True,
        #                       save=True, title=show_title)
        # self.plotting.plot_3d("/time_series/" + training_dataset_names[i] + "_3d", set_training, show_title=show_title)
        cov_log_returns = cov_log_returns_over_tenors(set_training)
        # self.plotting.plot_3d_cov("/time_series/" + training_dataset_names[i] + "_cov", cov_log_returns, maturities=maturities, show_title=show_title)
        print("\n")

    for i, set_test in enumerate(sets_test):
        print(test_dataset_names[i])
        print(set_test.index[0], set_test.index[-1],
              round(np.min(set_test.min()), 2), round(np.max(set_test.max()), 2))
        self.plotting.plot_2d(set_test, "/time_series/" + test_dataset_names[i], timeseries=True, save=True, title=show_title)
        self.plotting.plot_3d("/time_series/" + test_dataset_names[i] + "_3d", set_test, show_title=show_title)
        cov_log_returns = cov_log_returns_over_tenors(set_test)
        # self.plotting.plot_3d_cov("/time_series/" + test_dataset_names[i] + "_cov", cov_log_returns, maturities=maturities, show_title=show_title)
        print("\n")
def helper(self, preprocess_type):
    preprocess = PreprocessData()

    if preprocess_type is None or preprocess_type is PreprocessType.NORMALISATION_OVER_TENORS:
        preprocess.enable_normalisation_scaler = True
        preprocess.feature_range = [0, 1]
    elif preprocess_type is PreprocessType.NORMALISATION_OVER_CURVES:
        preprocess.enable_normalisation_scaler = True
        preprocess.feature_range = [0, 1]
        preprocess.enable_ignore_price = True
    elif preprocess_type is PreprocessType.STANDARDISATION_OVER_TENORS:
        preprocess.enable_standardisation_scaler = True
    elif preprocess_type is PreprocessType.LOG_RETURNS_OVER_TENORS:
        preprocess.enable_log_returns = True

    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data()

    rescaled_first_test_set = preprocess.rescale_data(sets_test_scaled[0], test_dataset_names[0])

    # check that assert_allclose is working:
    # rand = np.random.random_sample(sets_test[0].shape)
    # np.testing.assert_allclose(rescaled_first_test_set, rand)

    np.testing.assert_allclose(rescaled_first_test_set, sets_test[0])
def normalisation_over_tenors(self):
    preprocess = PreprocessData(PreprocessType.NORMALISATION_OVER_TENORS)

    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data()

    print("sets_test[0].shape", sets_test[0].shape, sets_test_scaled[0].shape)

    self.plotting.plot_some_curves("normalisation_over_tenors", sets_test[0], sets_test_scaled[0],
                                   [25, 50, 75, 815], maturities, plot_separate=True)
    # old: [25, 50, 75, 100, 600, 720, 740, 815]
def simulate():
    plotting = Plotting()
    preprocess_minmax = PreprocessData()
    preprocess_logreturns = PreprocessData()
    preprocess_minmax.enable_min_max_scaler = True
    preprocess_logreturns.enable_log_returns = True

    # 1. get data and apply minimax
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess_minmax.get_data()

    print("sets_training_scaled.shape", sets_training_scaled[0].shape)

    autoencoder = DeepAutoencoder(input_shape=(sets_training_scaled[0].shape[1],), latent_dim=2)
    # autoencoder.train(np.vstack(sets_training_scaled), sets_test_scaled, epochs=100, batch_size=5)
    # autoencoder.save_model("deep_general_minimax")
    autoencoder.load_model("deep_general_minimax")

    # 2: encode data using autoencoder
    sets_encoded_training = []
    for set_training_scaled in sets_training_scaled:
        sets_encoded_training.append(autoencoder.encode(set_training_scaled))

    sets_encoded_test = []
    for set_test_scaled in sets_test_scaled:
        sets_encoded_test.append(autoencoder.encode(set_test_scaled))

    plotting.plot_2d(sets_encoded_test[0], "encoded test data with deep autoencoder", save=False)

    # 3: log returns of encoded data
    sets_encoded_log_training = []
    for index, set_encoded_training in enumerate(sets_encoded_training):
        sets_encoded_log_training.append(preprocess_logreturns.scale_data(set_encoded_training))

    sets_encoded_log_test = []
    for index, set_encoded_test in enumerate(sets_encoded_test):
        sets_encoded_log_test.append(preprocess_logreturns.scale_data(set_encoded_test))

    plotting.plot_2d(sets_encoded_log_test[0], "encoded test data with deep autoencoder, then log returns", save=False)

    num_tenors = sets_encoded_log_training[0].shape[1]
    gan = GAN(num_c=6 * 7, num_z=6 * 7, num_o=6 * 7, num_tenors=num_tenors)  # try training on larger input and output
    # gan.train(sets_encoded_log_training, epochs=20000, batch_size=100, sample_interval=200)
    # gan.save_model("general_ae")
    gan.load_model("general_ae")

    print("sets_encoded_log_test[0].shape", sets_encoded_log_test[0].shape)

    test_arr = np.full([1, 6 * 7 + 6 * 7, num_tenors], 10)
    validity = gan.discriminator.predict(test_arr)  # np.array(sets_encoded_log_test[0])
    print(validity)

    rolled_encoded_log_test = rolling_windows(sets_encoded_log_test[0], 6 * 7 + 6 * 7)
    validity = gan.discriminator.predict(rolled_encoded_log_test)  # np.array(sets_encoded_log_test[0])
    print(validity)
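# Note: `rolling_windows` is a project helper used above to feed overlapping slices of the
# encoded series to the discriminator. A minimal sketch under the assumption that it returns
# an array of shape (num_windows, window_len, num_tenors); the repo's helper may differ:
def rolling_windows_sketch(data, window_len):
    data = np.asarray(data)
    num_windows = data.shape[0] - window_len + 1
    return np.stack([data[i:i + window_len] for i in range(num_windows)])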
def logreturns_over_tenors(self):
    preprocess = PreprocessData(PreprocessType.LOG_RETURNS_OVER_TENORS)

    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data()

    self.plotting.plot_some_curves("logreturns_over_curves", sets_test[0], sets_test_scaled[0],
                                   [25, 50, 75, 815], maturities, plot_separate=True)
    # old: [25, 50, 75, 100, 600, 720, 740, 815]

    self.plotting.plot_3d("logreturns_over_curves_3d", sets_test_scaled[0])
def simulate():
    plotting = Plotting()
    preprocess_normalisation = PreprocessData()
    preprocess_normalisation.enable_normalisation_scaler = True
    preprocess_normalisation.feature_range = [0, 1]
    # preprocess_normalisation.enable_scaler = True

    # 1. get data and apply normalisation
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess_normalisation.get_data()

    # plotting.plot_2d(sets_training_scaled[0][:, 0], "sets_training_scaled[0][:, 0]", save=False)
    # plotting.plot_2d(sets_test_scaled[0][:, 0], "test_feature_normalised_short_end", save=True)

    all_stacked = np.vstack((np.vstack(sets_training), np.vstack(sets_test)))
    all_stacked_scaled = np.vstack((np.vstack(sets_training_scaled), np.vstack(sets_test_scaled)))
    all_training_scaled = np.vstack(sets_training_scaled)

    # print("all_stacked_scaled.shape", all_stacked_scaled.shape)
    # plotting.plot_2d(all_stacked[:, 0], "training and test data", save=False)
    # plotting.plot_2d(all_stacked_scaled[:, 0], "training and test data scaled", save=False)

    ae_params = {
        'input_dim': sets_training_scaled[0].shape[1],  # 56
        'latent_dim': 2,
        'hidden_layers': (56, 40, 28, 12, 4, 2),
        'leaky_relu': 0.1,
        'loss': 'mse',
        'last_activation': 'linear',
        'batch_size': 20,
        'epochs': 100,
        'steps_per_epoch': 500,
    }
    ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()

    autoencoder = Autoencoder(ae_params)
    # autoencoder.train(all_stacked_scaled, sets_test_scaled)
    # autoencoder.train(sets_test_scaled[0], sets_test_scaled)
    # autoencoder.train(all_training_scaled, sets_test_scaled)
    # autoencoder.save_model("ae_" + ae_params_hash)
    autoencoder.load_model("ae_" + ae_params_hash)

    # 2: encode data using autoencoder
    sets_encoded_training = []
    for set_training_scaled in sets_training_scaled:
        sets_encoded_training.append(autoencoder.encode(set_training_scaled))

    sets_encoded_test = []
    for set_test_scaled in sets_test_scaled:
        sets_encoded_test.append(autoencoder.encode(set_test_scaled))

    plotting.plot_2d(sets_encoded_test[0], "test_feature_normalised_encoded_autoencoder_on_", save=True)

    # 6: decode using autoencoder
    decoded_test = autoencoder.decode(sets_encoded_test[0])

    # 7: undo minimax, for now only the first simulation
    simulated = preprocess_normalisation.rescale_data(decoded_test, dataset_name=test_dataset_names[0])

    plotting.plot_some_curves("test_feature_normalised_compare_autoencoder_before_rescale",
                              sets_test_scaled[0], decoded_test, [25, 50, 75, 815], maturities)
    # old: [25, 50, 75, 100, 600, 720, 740, 815]

    plotting.plot_some_curves("test_feature_normalised_compare_autoencoder",
                              sets_test[0], simulated, [25, 50, 75, 815], maturities)
    # old: [25, 50, 75, 100, 600, 720, 740, 815]

    # curve_smooth = []
    # for curve in simulated:
    #     print("curve.shape", curve.shape)
    #     curve_smooth.append(savgol_filter(curve, 23, 5))  # window size 23, polynomial order 5
    # curve_smooth = np.array(curve_smooth)

    print("reconstruction error BEFORE smoothing:")
    reconstruction_error(np.array(sets_test[0]), simulated)

    preprocess_normalisation.enable_curve_smoothing = True
    simulated = preprocess_normalisation.rescale_data(decoded_test, dataset_name=test_dataset_names[0])

    plotting.plot_some_curves("test_feature_normalised_compare_autoencoder",
                              sets_test[0], simulated, [25, 50, 75, 815], maturities)
    # old: [25, 50, 75, 100, 600, 720, 740, 815]

    # plotting.plot_some_curves("test_feature_normalised_compare_normalisation", sets_test[0], sets_test_scaled[0],
    #                           [25, 50, 75, 815, 100, 600, 720, 740], maturities, plot_separate=True)

    # reconstruction error
    # reconstruction_error(sets_test_scaled[0], decoded_test)
    print("reconstruction error AFTER smoothing:")
    reconstruction_error(np.array(sets_test[0]), simulated)
class Analysis():
    def __init__(self):
        self.preprocess_data = PreprocessData()
        self.plotting = Plotting()
        self.config = Config()
        # self.preprocess_data.enable_min_max_scaler = True
        self.preprocess_data.enable_log_returns = True
        self.sets_training, self.sets_test, self.sets_training_scaled, self.sets_test_scaled, \
            self.training_dataset_names, self.test_dataset_names, self.maturities = self.preprocess_data.get_data()

        wti_nymex = self.sets_test[0]
        time = wti_nymex.axes[0].tolist()

        self.wti_nymex_short_end = wti_nymex.iloc[:, 0]
        self.data_scaled = self.sets_test_scaled[0][0]

    def normalisation_over_tenors(self):
        preprocess = PreprocessData(PreprocessType.NORMALISATION_OVER_TENORS)
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data()

        print("sets_test[0].shape", sets_test[0].shape, sets_test_scaled[0].shape)

        self.plotting.plot_some_curves("normalisation_over_tenors", sets_test[0], sets_test_scaled[0],
                                       [25, 50, 75, 815], maturities, plot_separate=True)
        # old: [25, 50, 75, 100, 600, 720, 740, 815]

    def standardisation_over_tenors(self):
        preprocess = PreprocessData(PreprocessType.STANDARDISATION_OVER_TENORS)
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data()

        self.plotting.plot_some_curves("standardisation_over_tenors", sets_test[0], sets_test_scaled[0],
                                       [25, 50, 75, 815], maturities, plot_separate=True)
        # old: [25, 50, 75, 100, 600, 720, 740, 815]

    def logreturns_over_tenors(self):
        preprocess = PreprocessData(PreprocessType.LOG_RETURNS_OVER_TENORS)
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data()

        self.plotting.plot_some_curves("logreturns_over_curves", sets_test[0], sets_test_scaled[0],
                                       [25, 50, 75, 815], maturities, plot_separate=True)
        # old: [25, 50, 75, 100, 600, 720, 740, 815]

        self.plotting.plot_3d("logreturns_over_curves_3d", sets_test_scaled[0])

    def normalisation_over_curves(self):
        preprocess = PreprocessData()
        preprocess.enable_normalisation_scaler = True
        preprocess.enable_ignore_price = True
        preprocess.feature_range = [0, 1]
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data()

        self.plotting.plot_some_curves("normalisation_over_curves", sets_test[0], sets_test_scaled[0],
                                       [25, 50, 75, 815], maturities, plot_separate=True)
        # old: [25, 50, 75, 100, 600, 720, 740, 815]

    def standardisation_over_curves(self):
        print("todo standardisation_over_curves")

    def logreturns_over_curves(self):
        print("todo logreturns_over_curves")

    def all_log_returns(self):
        preprocess_data = PreprocessData()
        plotting = Plotting()
        preprocess_data.enable_log_returns = True
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess_data.get_data()

        for i, set_training_scaled in enumerate(sets_training_scaled):
            print("set_training_scaled.shape", set_training_scaled.shape, i)
            plotting.plot_2d(set_training_scaled, "/time_series/" + training_dataset_names[i],
                             timeseries=True, save=False, title=True)

    def all_normalised_data(self):
        preprocess_data = PreprocessData()
        preprocess_data.enable_normalisation_scaler = True
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess_data.get_data()

        for i, set_training_scaled in enumerate(sets_training_scaled):
            self.plotting.plot_2d(set_training_scaled, "/time_series/" + training_dataset_names[i],
                                  timeseries=True, save=True, title=True)

        for i, set_test_scaled in enumerate(sets_test_scaled):
            self.plotting.plot_2d(set_test_scaled, "/time_series/" + test_dataset_names[i],
                                  timeseries=True, save=True, title=True)

    def all_data(self, show_title=False):
        preprocess_data = PreprocessData(extend_data=False)
        sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess_data.get_data()

        print("maturities", maturities)

        for i, set_training in enumerate(sets_training):
            print(training_dataset_names[i])
            print(set_training.index[0], set_training.index[-1],
                  round(np.min(set_training.min()), 2), round(np.max(set_training.max()), 2))
            # self.plotting.plot_2d(set_training, "/time_series/" + training_dataset_names[i], timeseries=True,
            #                       save=True, title=show_title)
            # self.plotting.plot_3d("/time_series/" + training_dataset_names[i] + "_3d", set_training, show_title=show_title)
            cov_log_returns = cov_log_returns_over_tenors(set_training)
            # self.plotting.plot_3d_cov("/time_series/" + training_dataset_names[i] + "_cov", cov_log_returns, maturities=maturities, show_title=show_title)
            print("\n")

        for i, set_test in enumerate(sets_test):
            print(test_dataset_names[i])
            print(set_test.index[0], set_test.index[-1],
                  round(np.min(set_test.min()), 2), round(np.max(set_test.max()), 2))
            self.plotting.plot_2d(set_test, "/time_series/" + test_dataset_names[i], timeseries=True, save=True, title=show_title)
            self.plotting.plot_3d("/time_series/" + test_dataset_names[i] + "_3d", set_test, show_title=show_title)
            cov_log_returns = cov_log_returns_over_tenors(set_test)
            # self.plotting.plot_3d_cov("/time_series/" + test_dataset_names[i] + "_cov", cov_log_returns, maturities=maturities, show_title=show_title)
            print("\n")
class Classical():
    def __init__(self):
        self.preprocess_data = PreprocessData()
        self.plotting = Plotting()
        self.config = Config()
        # self.preprocess_data.enable_min_max_scaler = True
        self.preprocess_data.enable_log_returns = True
        self.sets_training, self.sets_test, self.sets_training_scaled, self.sets_test_scaled, \
            self.training_dataset_names, self.test_dataset_names, self.maturities = self.preprocess_data.get_data()

        self.wti_nymex = self.sets_test[0]
        time = self.wti_nymex.axes[0].tolist()
        self.wti_nymex_short_end = self.wti_nymex.iloc[:, 0]
        self.data_scaled = self.sets_test_scaled[0][0]

        self.train_len = 128
        self.test_len = 42
        self.data_train = self.wti_nymex[:self.train_len]
        self.data_test = self.wti_nymex[self.train_len:self.train_len + self.test_len]
        self.data_train_and_test = self.wti_nymex[:self.train_len + self.test_len]

        print("self.data_train.shape", self.data_train.shape)
        print("self.data_test.shape", self.data_test.shape)

        # ACF and PACF plots
        # result = seasonal_decompose(wti_nymex_short_end, model='multiplicative')
        # fig2 = result.plot()
        # plot_mpl(fig2, image_filename="seasonal.html")

    def VAR(self):
        print("=" * 30 + "\nVAR\n" + "=" * 30 + "\n")

        # fit model
        model = VAR(self.data_train)
        model_fit = model.fit()

        # make prediction
        yhat = model_fit.forecast(model_fit.y, steps=42)
        prediction = pd.DataFrame(yhat, index=self.data_test.index.values, columns=self.data_train.columns.values)

        curves = self.data_train_and_test

        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.grid(True)
        NUM_COLORS = curves.shape[1]
        cm = plt.get_cmap('coolwarm')
        ax.set_prop_cycle(color=[cm(1. * i / NUM_COLORS) for i in range(NUM_COLORS)[::-1]])
        ax.plot(curves)
        custom_lines = [Line2D([0], [0], color=cm(1.), lw=4),
                        Line2D([0], [0], color=cm(0.), lw=4)]
        ax.legend(custom_lines, ['Short End', 'Long End'])
        plt.xticks(rotation=20)
        # plt.plot()
        plt.plot(prediction, color='red')
        plt.savefig(self.config.get_filepath_img("var"), dpi=300, bbox_inches='tight')
        plt.savefig(self.config.get_filepath_pgf("var"), dpi=300, transparent=True)  # , bbox_inches='tight'
        # plt.title("VAR")
        plt.show()

    def VMA(self):
        self.VARMA(order=(0, 1), name="VMA")

    def VARMA(self, order=(1, 1), name="VARMA"):
        print("=" * 30 + "\n" + name + "\n" + "=" * 30 + "\n")

        # fit model
        model = VARMAX(self.data_train, order=order)
        model_fit = model.fit(disp=False)

        # make prediction
        yhat = model_fit.forecast(steps=42)
        prediction = pd.DataFrame(yhat, index=self.data_test.index.values, columns=self.data_train.columns.values)

        plt.plot(self.data_train_and_test)
        plt.plot(prediction, color='red')
        plt.title(name)
        plt.show()

    def AR(self):
        print("=" * 30 + "\nAR\n" + "=" * 30 + "\n")

        # fit model
        model = AR(self.data_train.iloc[:, 0])
        model_fit = model.fit()
        yhat = model_fit.predict(len(self.data_train), len(self.data_train) + 42)
        # print(yhat)

        print(self.wti_nymex_short_end)

        # model_output = model_fit.fittedvalues
        plt.plot(self.wti_nymex_short_end[:84])
        plt.plot(yhat, color='red')
        # plt.title('AR RSS: %.4f' % np.nansum((model_output - self.data_scaled) ** 2))
        plt.show()

        # print(model_output.shape)
        print(self.data_scaled.shape)

        # df = pd.concat({'original': self.data_scaled, '0': model_output}, axis=1, sort=True)
        # df.fillna(0, inplace=True)
        # df.drop(['original'], axis=1, inplace=True)
        # df.rename({'0': 0}, axis='columns', inplace=True)
        # # print("df", df.head(30))
        #
        # rescaled = self.preprocess_data.rescale_data(df[0], self.test_dataset_names[0])
        #
        # plt.plot(rescaled)
        # plt.plot(self.sets_test[0][0])
        # plt.show()

    def MA(self):
        print("=" * 30 + "\nMA\n" + "=" * 30 + "\n")

        model = ARMA(self.data_scaled, order=(0, 1))
        model_fit = model.fit()
        # model_fit.summary()
        model_output = model_fit.fittedvalues

        plt.plot(self.data_scaled)
        plt.plot(model_output, color='red')
        plt.title('MA RSS: %.4f' % np.nansum((model_output - self.data_scaled) ** 2))
        plt.show()

        df = pd.concat({'original': self.data_scaled, '0': model_output}, axis=1, sort=True)
        df.fillna(0, inplace=True)
        df.drop(['original'], axis=1, inplace=True)
        df.rename({'0': 0}, axis='columns', inplace=True)

        rescaled = self.preprocess_data.rescale_data(df[0], self.test_dataset_names[0])

        plt.plot(rescaled)
        plt.plot(self.sets_test[0][0])
        plt.show()

    def ARMA(self):
        print("=" * 30 + "\nARMA\n" + "=" * 30 + "\n")

        model = ARMA(self.data_scaled, order=(2, 1))
        model_fit = model.fit()
        # model_fit.summary()
        model_output = model_fit.fittedvalues

        plt.plot(self.data_scaled)
        plt.plot(model_output, color='red')
        plt.title('ARMA RSS: %.4f' % np.nansum((model_output - self.data_scaled) ** 2))
        plt.show()

        df = pd.concat({'original': self.data_scaled, '0': model_output}, axis=1, sort=True)
        df.fillna(0, inplace=True)
        df.drop(['original'], axis=1, inplace=True)
        df.rename({'0': 0}, axis='columns', inplace=True)

        rescaled = self.preprocess_data.rescale_data(df[0], self.test_dataset_names[0])

        plt.plot(rescaled)
        plt.plot(self.sets_test[0][0])
        plt.show()

    def ARIMA(self):
        print("=" * 30 + "\nARIMA\n" + "=" * 30 + "\n")

        model = ARIMA(self.data_scaled, order=(1, 1, 1))
        model_fit = model.fit()
        # model_fit.summary()
        model_output = model_fit.fittedvalues

        plt.plot(self.data_scaled)
        plt.plot(model_output, color='red')
        plt.title('ARIMA RSS: %.4f' % np.nansum((model_output - self.data_scaled) ** 2))
        plt.show()

        df = pd.concat({'original': self.data_scaled, '0': model_output}, axis=1, sort=True)
        df.fillna(0, inplace=True)
        df.drop(['original'], axis=1, inplace=True)
        df.rename({'0': 0}, axis='columns', inplace=True)

        rescaled = self.preprocess_data.rescale_data(df[0], self.test_dataset_names[0])

        plt.plot(rescaled)
        plt.plot(self.sets_test[0][0])
        plt.show()

    def acf(self):
        sm.graphics.tsa.plot_acf(self.wti_nymex_short_end, lags=30)
        plt.show()

    def test_stationarity(self):
        timeseries = self.wti_nymex_short_end

        # Determining rolling statistics
        rolmean = timeseries.rolling(12).mean()
        rolstd = timeseries.rolling(12).std()  # pd.rolling_std(timeseries, window=12)

        # Plot rolling statistics:
        orig = plt.plot(timeseries, color='blue', label='Original')
        mean = plt.plot(rolmean, color='red', label='Rolling Mean')
        std = plt.plot(rolstd, color='black', label='Rolling Std')
        plt.legend(loc='best')
        plt.title('Rolling Mean & Standard Deviation')
        plt.show(block=False)

        # Perform Dickey-Fuller test:
        print('Results of Dickey-Fuller Test:')
        dftest = adfuller(timeseries, autolag='AIC')
        dfoutput = pd.Series(dftest[0:4],
                             index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])
        for key, value in dftest[4].items():
            dfoutput['Critical Value (%s)' % key] = value
        print(dfoutput)
def simulate(latent_dim=2, plot=False, preprocess_type=None, model_type=None, force_training=True):
    plotting = Plotting()
    preprocess = PreprocessData(preprocess_type)

    window_size = None
    if model_type is AEModel.AE_WINDOWS:
        window_size = 10

    # 1. get data and apply normalisation
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, training_dataset_names, test_dataset_names, maturities = preprocess.get_data(chunks_of=window_size)
    all_training_scaled = np.vstack(sets_training_scaled)

    if model_type is AEModel.AAE:
        ae_params = {
            'preprocess_type': preprocess_type.value,  # only to make preprocess_type part of the hash
            'input_dim': sets_training_scaled[0].shape[1],  # 56
            'latent_dim': latent_dim,
            'hidden_layers': (56, 40, 28, 12, 4,),
            'hidden_layers_discriminator': (2, 2,),
            'leaky_relu': 0.1,
            'last_activation': 'linear',
            'last_activation_discriminator': 'sigmoid',
            'loss_generator': 'mean_squared_error',
            'loss_discriminator': 'binary_crossentropy',
            'batch_size': 20,
            'epochs': 20000,
        }
        ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        # 2. train/load adversarial autoencoder
        autoencoder = AdversarialAutoencoder(ae_params, plot=False)
    elif model_type is AEModel.VAE:
        ae_params = {
            'preprocess_type': preprocess_type.value,  # only to make preprocess_type part of the hash
            'input_dim': sets_training_scaled[0].shape[1],  # 56
            'latent_dim': latent_dim,
            'hidden_layers': (56, 40, 28, 12, 4,),
            'leaky_relu': 0.1,
            'last_activation': 'linear',  # sigmoid or linear
            'loss': 'mean_squared_error',  # binary_crossentropy or mean_squared_error
            'epsilon_std': 1.0,
            'batch_size': 20,
            'epochs': 100,
            'steps_per_epoch': 500,
        }
        ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        # 2. train/load variational autoencoder
        autoencoder = VariationalAutoencoder(ae_params, plot=False)
    elif model_type is AEModel.AE:
        ae_params = {
            'preprocess_type': preprocess_type.value,  # only to make preprocess_type part of the hash
            'input_dim': sets_training_scaled[0].shape[1],  # 56
            'latent_dim': latent_dim,
            'hidden_layers': (56, 40, 28, 12, 4,),
            'leaky_relu': 0.1,
            'loss': 'mse',
            'last_activation': 'linear',
            'batch_size': 20,
            'epochs': 100,
            'steps_per_epoch': 500,
        }
        ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = Autoencoder(ae_params, plot=False)
    elif model_type is AEModel.PCA:
        ae_params = {
            'preprocess_type': preprocess_type.value,  # only to make preprocess_type part of the hash
            'latent_dim': latent_dim,
        }
        ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = PCAModel(ae_params, plot=False)
    else:  # model_type is AEModel.AE_WINDOWS:
        ae_params = {
            'input_dim': (window_size, sets_training_scaled[0].shape[1],),  # 10 x 56
            'latent_dim': (2, 56,),
            'hidden_layers': (12 * 56, 4 * 56,),
            'leaky_relu': 0.1,
            'loss': 'mse',
            'last_activation': 'linear',
            'batch_size': 20,
            'epochs': 10,
            'steps_per_epoch': 500,
        }
        ae_params_hash = hashlib.md5(json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = AutoencoderWindows(ae_params, plot=False)

    if force_training:
        autoencoder.train(all_training_scaled, sets_test_scaled, "ae_" + ae_params_hash)
    else:
        autoencoder.load_else_train(all_training_scaled, sets_test_scaled, "ae_" + ae_params_hash)

    # 2: encode data using autoencoder
    sets_encoded_training = autoencoder.encode(sets_training_scaled)
    sets_encoded_test = autoencoder.encode(sets_test_scaled)

    # 6: decode using autoencoder
    decoded_test = autoencoder.decode(sets_encoded_test[0])

    # 7: undo scaling
    # decoded_generated_segments_first_sim = decoded_generated_segments[0]
    simulated = preprocess.rescale_data(decoded_test, dataset_name=test_dataset_names[0])

    preprocess.enable_curve_smoothing = True
    simulated_smooth = preprocess.rescale_data(decoded_test, dataset_name=test_dataset_names[0])

    # reconstruction error
    # error = reconstruction_error(np.array(sets_test[0]), simulated)
    # error_smooth = reconstruction_error(np.array(sets_test[0]), simulated_smooth)
    smape_result = smape(simulated, np.array(sets_test[0]), over_curves=True)
    smape_result_smooth = smape(simulated_smooth, np.array(sets_test[0]), over_curves=True)
    print(np.mean(smape_result_smooth))

    if plot and model_type is not AEModel.AE_WINDOWS:
        plotting.plot_2d(sets_encoded_test[0], preprocess_type.name + "_" + model_type.name + "_latent_space",
                         sets_test_scaled[0].index.values, save=True)
        plotting.plot_some_curves(preprocess_type.name + "_" + model_type.name + "_in_vs_out",
                                  sets_test[0], simulated, [25, 50, 75, 815], maturities)
        # plotting.plot_some_curves("normalised_compare_ae", sets_test[0], sets_test_scaled[0],
        #                           [25, 50, 75, 815, 100, 600, 720, 740], maturities, plot_separate=True)

        preprocess.enable_curve_smoothing = False
        if model_type is AEModel.VAE:
            plotting.plot_grid_2dim(maturities, autoencoder.generator_model,
                                    preprocess_type.name + "_" + model_type.name + "_latent_grid",
                                    preprocess, test_dataset_names[0], n=6)
        elif model_type is AEModel.AAE:
            plotting.plot_grid_2dim(maturities, autoencoder.decoder,
                                    preprocess_type.name + "_" + model_type.name + "_latent_grid",
                                    preprocess, test_dataset_names[0], n=6)

    return smape_result_smooth
def simulate(latent_dim=2, preprocess_type1=None, preprocess_type2=None,
             ae_model=None, gan_model=None, force_training=True, plot=False):
    preprocess1 = PreprocessData(preprocess_type1)
    preprocess2 = PreprocessData(preprocess_type2)

    # 1. get data and apply scaling
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
        training_dataset_names, test_dataset_names, maturities = preprocess1.get_data()

    if ae_model is AEModel.AAE:
        ae_params = {
            'preprocess_type': preprocess_type1.value,  # only to make preprocess_type part of the hash
            'input_dim': sets_training_scaled[0].shape[1],  # 56
            'latent_dim': latent_dim,
            'hidden_layers': (56, 40, 28, 12, 4),
            'hidden_layers_discriminator': (2, 2),
            'leaky_relu': 0.1,
            'last_activation': 'linear',
            'last_activation_discriminator': 'sigmoid',
            'loss_generator': 'mean_squared_error',
            'loss_discriminator': 'binary_crossentropy',
            'batch_size': 20,
            'epochs': 20000
        }
        ae_params_hash = hashlib.md5(
            json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = AdversarialAutoencoder(ae_params, plot=False)
    elif ae_model is AEModel.VAE:
        ae_params = {
            'preprocess_type': preprocess_type1.value,  # only to make preprocess_type part of the hash
            'input_dim': sets_training_scaled[0].shape[1],  # 56
            'latent_dim': latent_dim,
            'hidden_layers': (56, 40, 28, 12, 4),
            'leaky_relu': 0.1,
            'last_activation': 'linear',  # sigmoid or linear
            'loss': 'mean_squared_error',  # binary_crossentropy or mean_squared_error
            'epsilon_std': 1.0,
            'batch_size': 20,
            'epochs': 100,
            'steps_per_epoch': 500
        }
        ae_params_hash = hashlib.md5(
            json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = VariationalAutoencoder(ae_params, plot=False)
    elif ae_model is AEModel.AE:
        ae_params = {
            'preprocess_type': preprocess_type1.value,  # only to make preprocess_type part of the hash
            'input_dim': sets_training_scaled[0].shape[1],  # 56
            'latent_dim': latent_dim,
            'hidden_layers': (56, 40, 28, 12, 4),
            'leaky_relu': 0.1,
            'loss': 'mse',
            'last_activation': 'linear',
            'batch_size': 20,
            'epochs': 100,
            'steps_per_epoch': 500
        }
        ae_params_hash = hashlib.md5(
            json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = Autoencoder(ae_params, plot=False)
    else:  # elif ae_model is AEModel.PCA:
        ae_params = {
            'preprocess_type': preprocess_type1.value,  # only to make preprocess_type part of the hash
            'latent_dim': latent_dim
        }
        ae_params_hash = hashlib.md5(
            json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()
        autoencoder = PCAModel(ae_params, plot=False)

    # 2. train/load autoencoder
    autoencoder.load_else_train(np.vstack(sets_training_scaled), sets_test_scaled, "ae_" + ae_params_hash)

    # 2: encode data using autoencoder
    sets_encoded_training = autoencoder.encode(sets_training_scaled)
    sets_encoded_test = autoencoder.encode(sets_test_scaled)

    # 3: log returns of encoded data
    sets_encoded_log_training = preprocess2.scale_data(sets_encoded_training, training_dataset_names, should_fit=True)
    sets_encoded_log_test = preprocess2.scale_data(sets_encoded_test, test_dataset_names, should_fit=True)

    num_z = 6 * 7
    num_c = 6 * 7
    num_o = 6 * 7
    if gan_model is GANModel.WGAN:
        gan_params = {
            'ae_params_hash': ae_params_hash,
            'num_tenors': sets_encoded_log_training[0].shape[1],
            'num_c': num_c,
            'num_z': num_z,
            'num_o': num_o,
            'gen_model_type': 'standard',  # conv
            'dis_model_type': 'standard',  # conv
            'gen_layers': (4 * (6 * 7 * 2),),  # 4 * num_o * num_tenors
            'dis_layers': (4 * (6 * 7),),  # 4 * num_o
            'gen_last_activation': 'tanh',
            'dis_last_activation': 'sigmoid',
            'loss': 'binary_crossentropy',
            'batch_size': 32,
            'epochs': 10000,
            'sample_interval': 1000
        }
        gan_params_hash = hashlib.md5(
            json.dumps(gan_params, sort_keys=True).encode('utf-8')).hexdigest()
        gan = CWGANGP(gan_params, plot=False)
    else:
        if gan_model is GANModel.GAN_CONV:
            model_type = 'conv'
        else:  # if gan_model is GANModel.GAN:
            model_type = 'standard'
        gan_params = {
            'ae_params_hash': ae_params_hash,
            'num_tenors': sets_encoded_log_training[0].shape[1],
            'num_c': num_c,
            'num_z': num_z,
            'num_o': num_o,
            'gen_model_type': model_type,  # conv
            'dis_model_type': model_type,  # conv
            'gen_layers': (4 * (6 * 7 * 2),),  # 4 * num_o * num_tenors
            'dis_layers': (4 * (6 * 7),),  # 4 * num_o
            'gen_last_activation': 'tanh',
            'dis_last_activation': 'sigmoid',
            'loss': 'binary_crossentropy',
            'batch_size': 128,
            'epochs': 20000
        }
        gan_params_hash = hashlib.md5(
            json.dumps(gan_params, sort_keys=True).encode('utf-8')).hexdigest()
        gan = GAN(gan_params, plot=False)

    # try training on larger input and output
    if force_training:
        gan.train(sets_encoded_log_training, "gan_" + gan_params_hash)
    else:
        gan.load_else_train(sets_encoded_log_training, "gan_" + gan_params_hash)

    # 4: simulate on encoded log returns, conditioned on test dataset
    num_simulations = 100
    num_repeats = 1
    generated, _ = gan.generate(condition=sets_encoded_log_test[-1],
                                condition_on_end=False,
                                num_simulations=num_simulations,
                                repeat=num_repeats)

    # insert the last real futures curve in order to do rescaling
    if preprocess_type2 is PreprocessType.LOG_RETURNS_OVER_TENORS:
        generated = np.insert(generated, 0, sets_encoded_log_test[-1].iloc[num_c], axis=1)

    # 5: undo scaling
    encoded_generated = preprocess2.rescale_data(generated,
                                                 start_value=sets_encoded_test[-1][num_c],
                                                 dataset_name=test_dataset_names[-1])
    if preprocess_type2 is PreprocessType.LOG_RETURNS_OVER_TENORS:
        encoded_generated = encoded_generated[:, 1:]  # remove first curve again

    # 6: decode using autoencoder
    decoded_generated_segments = autoencoder.decode(encoded_generated)

    # 7: undo scaling, this can be log-returns
    simulated = preprocess1.rescale_data(decoded_generated_segments,
                                         start_value=sets_test[-1].iloc[num_c],
                                         dataset_name=test_dataset_names[-1])

    preprocess1.enable_curve_smoothing = True
    simulated_smooth = preprocess1.rescale_data(decoded_generated_segments,
                                                start_value=sets_test[-1].iloc[num_c],
                                                dataset_name=test_dataset_names[-1])

    if preprocess_type2 is PreprocessType.LOG_RETURNS_OVER_TENORS:
        real = sets_test[-1].iloc[num_c:num_c + num_o * num_repeats + 1]  # `+1` because the log-returns also does +1
    else:
        real = sets_test[-1].iloc[num_c:num_c + num_o * num_repeats + 1]

    print("simulated, real", simulated.shape, real.shape)

    smape_result = smape(simulated, real)
    smape_result_smooth = smape(simulated_smooth, real)
    print("smape_result_smooth mean and std:", np.mean(smape_result_smooth), np.std(smape_result_smooth))

    if plot:
        plotting = Plotting()
        plotting.plot_3d("real", real, show_title=False)

        cov_log_returns = cov_log_returns_over_tenors(real)
        plotting.plot_3d_cov("gan_real_cov", cov_log_returns, show_title=False)

        for i in np.arange(1, 11):
            # name = '_' + preprocess_type1.name + '_' + preprocess_type2.name + '_' + str(latent_dim) + '_' + ae_model.name + '_' + gan_model.name
            plotting.plot_3d("gan_simulated_" + str(i), simulated_smooth[i],
                             maturities=maturities, time=real.index.values, show_title=False)

            smape_result = smape(simulated_smooth[i], real)
            print("simulated_smooth[i], real", simulated_smooth[i].shape, real.shape)
            print("simulate rates", i)
            print("smape:", smape_result)
            print("=============\n")

            cov_log_returns = cov_log_returns_over_tenors(simulated_smooth[i])
            plotting.plot_3d_cov("gan_simulated_" + str(i) + "_cov", cov_log_returns,
                                 maturities=maturities, show_title=False)

    return smape_result_smooth
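# The plotting branch above calls cov_log_returns_over_tenors, which is not
# shown here. Below is a minimal sketch under the assumption that it computes
# the covariance matrix of day-on-day log-returns taken per tenor column; the
# _sketch suffix marks it as illustrative rather than the repository's code.
import numpy as np

def cov_log_returns_over_tenors_sketch(curves):
    curves = np.asarray(curves, dtype=float)
    log_returns = np.diff(np.log(curves), axis=0)  # one log-return series per tenor
    return np.cov(log_returns, rowvar=False)       # (num_tenors x num_tenors) covariance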
def simulate():
    plotting = Plotting()
    preprocess_normalisation = PreprocessData()
    preprocess_normalisation.enable_normalisation_scaler = True
    preprocess_normalisation.feature_range = [-1, 1]
    # preprocess_normalisation.enable_ignore_price = True

    # 1. get data and apply normalisation
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
        training_dataset_names, test_dataset_names, maturities = preprocess_normalisation.get_data()

    all_training_scaled = np.vstack(sets_training_scaled)

    ae_params = {
        'input_dim': sets_training_scaled[0].shape[1],  # 56
        'latent_dim': 3,
        'hidden_layers': (56, 40, 28, 12, 4),
        'leaky_relu': 0.1,
        'last_activation': 'linear',  # sigmoid or linear
        'loss': 'mean_squared_error',  # binary_crossentropy or mean_squared_error
        'epsilon_std': 1.0,
        'batch_size': 20,
        'epochs': 100,
        'steps_per_epoch': 500
    }
    ae_params_hash = hashlib.md5(
        json.dumps(ae_params, sort_keys=True).encode('utf-8')).hexdigest()

    # 2. train/load variational autoencoder
    vae = VariationalAutoencoder(ae_params)
    vae.train(all_training_scaled, sets_test_scaled)
    vae.save_model("vae_" + ae_params_hash)
    # vae.load_model("vae_" + ae_params_hash)

    # 3: encode data using autoencoder
    sets_encoded_training = []
    for set_training_scaled in sets_training_scaled:
        sets_encoded_training.append(vae.encode(set_training_scaled))
    sets_encoded_test = []
    for set_test_scaled in sets_test_scaled:
        sets_encoded_test.append(vae.encode(set_test_scaled))

    # 4: decode using vae
    decoded_data = vae.decode(sets_encoded_test[0])

    # 7: undo min-max scaling, for now only the first simulation
    simulated = preprocess_normalisation.rescale_data(decoded_data, dataset_name=test_dataset_names[0])

    # reconstruction error
    # reconstruction_error(sets_test_scaled[0], decoded_data)
    reconstruction_error(np.array(sets_test[0]), simulated)

    # plot latent space
    plotting.plot_2d(sets_encoded_test[0], "test_feature_normalised_encoded_vae_on_", save=True)
    plotting.plot_space(maturities, vae, "variational_grid", latent_dim=sets_encoded_test[0].shape[1])

    # plot scaled results
    plotting.plot_some_curves("test_feature_normalised_compare_vae_scaled",
                              sets_test_scaled[0], decoded_data, [25, 50, 75, 815], maturities)
    plotting.plot_some_curves("test_feature_normalised_compare_vae",
                              sets_test[0], simulated, [25, 50, 75, 815], maturities)
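# Several of the pipelines above cache trained models under names such as
# "vae_" + ae_params_hash, where the hash is the MD5 digest of the JSON-
# serialised hyperparameters, so retraining with different parameters never
# silently reuses stale weights. Below is a minimal sketch of that convention,
# assuming only the train/save_model/load_model calls used above; the
# weights_dir path and ".h5" extension are illustrative assumptions.
import hashlib
import json
import os

def params_hash(params):
    return hashlib.md5(json.dumps(params, sort_keys=True).encode('utf-8')).hexdigest()

def load_else_train_sketch(model, train_data, test_data, name, weights_dir="weights"):
    if os.path.exists(os.path.join(weights_dir, name + ".h5")):
        model.load_model(name)              # parameters unchanged: reuse cached weights
    else:
        model.train(train_data, test_data)  # otherwise train from scratch...
        model.save_model(name)              # ...and cache under the parameter-hash name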
def main():
    preprocess1 = PreprocessData(PreprocessType.STANDARDISATION_OVER_TENORS, short_end=True)
    preprocess2 = PreprocessData(PreprocessType.LOG_RETURNS_OVER_TENORS, short_end=True)

    # 1. get data and apply scaling
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
        training_dataset_names, test_dataset_names, maturities = preprocess1.get_data()
    print("sets_test_scaled, sets_training_scaled:", sets_test_scaled[0].shape, sets_training_scaled[0].shape)

    # 2: log returns of encoded data
    sets_encoded_log_training = preprocess2.scale_data(sets_training_scaled, training_dataset_names, should_fit=True)
    sets_encoded_log_test = preprocess2.scale_data(sets_test_scaled, test_dataset_names, should_fit=True)

    layers = [35, 35]  # number of hidden neurons in each layer of the encoder and decoder
    learning_rate = 0.01
    decay = 0  # learning rate decay
    num_input_features = 1  # dimensionality of the input at each time step; here a 1D signal
    num_output_features = 1  # dimensionality of the output at each time step; here a 1D signal
    # There is no reason for the input sequence to have the same dimension as the output sequence.
    loss = "mse"  # other loss functions are possible, see the Keras documentation

    # Regularisation isn't really needed for this application
    lambda_regulariser = 0.000001  # will not be used if regulariser is None
    regulariser = None  # possible regulariser: keras.regularizers.l2(lambda_regulariser)

    batch_size = 512
    steps_per_epoch = 200  # batch_size * steps_per_epoch = total number of training examples
    epochs = 10

    input_sequence_length = 42  # length of the sequence fed to the encoder
    target_sequence_length = 42  # length of the sequence predicted by the decoder
    num_steps_to_predict = 42  # length to use when testing the model

    model = Model(layers, learning_rate, decay, num_input_features, num_output_features,
                  loss, lambda_regulariser, regulariser, batch_size, steps_per_epoch, epochs,
                  input_sequence_length, target_sequence_length, num_steps_to_predict)
    model.build()
    # model.load()
    model.train(sets_encoded_log_training)
    # model.predict_sequences_simple(np.vstack(sets_training_first_last_tenors))
    model.predict_sequences(sets_encoded_log_training)
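# The seq2seq configuration above pairs an input_sequence_length of 42 with a
# target_sequence_length of 42. Below is a minimal sketch of how one series of
# encoded log-returns could be sliced into such encoder/decoder windows; the
# Model class may well do this internally, so the helper is an assumption for
# illustration, not the repository's implementation.
import numpy as np

def make_seq2seq_windows_sketch(series, input_len=42, target_len=42):
    series = np.asarray(series, dtype=float)
    encoder_inputs, decoder_targets = [], []
    for start in range(len(series) - input_len - target_len + 1):
        encoder_inputs.append(series[start:start + input_len])
        decoder_targets.append(series[start + input_len:start + input_len + target_len])
    return np.array(encoder_inputs), np.array(decoder_targets)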
def simulate():
    plotting = Plotting()
    preprocess_logreturns = PreprocessData()
    preprocess_logreturns.enable_log_returns = True

    # 1. get data and apply log-returns scaling
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
        training_dataset_names, test_dataset_names, maturities = preprocess_logreturns.get_data()

    sets_training_first_last_tenors = []
    for set_training_scaled in sets_training_scaled:
        sets_training_first_last_tenors.append(set_training_scaled[:, [0, -1]])
    # sets_training_first_last_tenors = np.array(sets_training_first_last_tenors)

    sets_test_first_last_tenors = []
    for set_test_scaled in sets_test_scaled:
        sets_test_first_last_tenors.append(set_test_scaled[:, [0, -1]])
    # sets_test_first_last_tenors = np.array(sets_test_first_last_tenors)

    gan_params = {
        'num_tenors': sets_training_first_last_tenors[0].shape[1],
        'num_c': 6 * 7,
        'num_z': 6 * 7,
        'num_o': 6 * 7,
        'gen_model_type': 'standard',  # conv
        'dis_model_type': 'standard',  # conv
        'gen_layers': (4 * (6 * 7 * 2),),  # 4 * num_o * num_tenors
        'dis_layers': (4 * (6 * 7),),  # 4 * num_o
        'gen_last_activation': 'tanh',
        'dis_last_activation': 'sigmoid',
        'loss': 'binary_crossentropy',
        'batch_size': 128,
        'epochs': 20000
    }
    gan_params_hash = hashlib.md5(json.dumps(gan_params, sort_keys=True).encode('utf-8')).hexdigest()

    gan = GAN(gan_params)
    # gan.train(np.vstack(sets_training_first_last_tenors))
    # gan.save_model("gan_test_" + gan_params_hash)
    gan.load_model("gan_test_" + gan_params_hash)

    # 4: simulate on encoded log returns, conditioned on test dataset
    num_simulations = 10
    num_repeats = 20
    generated_segments, real_segment = gan.generate(data=sets_test_first_last_tenors[-1],
                                                    num_simulations=num_simulations,
                                                    remove_condition=False)
    last_generated_segment = generated_segments
    for _ in np.arange(num_repeats - 1):
        generated_temp, real_temp = gan.generate(condition=last_generated_segment, remove_condition=True)
        last_generated_segment = generated_temp
        generated_segments = np.append(generated_segments, generated_temp, axis=1)

    # 5: undo log-returns
    generated_segments = preprocess_logreturns.rescale_data(generated_segments,
                                                            start_value=sets_test_first_last_tenors[-1][-1])

    # plotting.plot_3d_many(file_name, data, save=False)
    plotting.plot_3d_training("3d recursively generated with GAN, test", generated_segments,
                              sets_test[-1], show=True, after_real_data=True)
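# Step 5 above rescales the generated log-returns back to price levels via
# preprocess_logreturns.rescale_data(..., start_value=...). Below is a minimal
# sketch of that inversion, assuming rescale_data accumulates the log-returns
# and re-anchors them at the supplied start curve; the helper name, the
# assumed 2D shape, and the exact anchoring are illustrative assumptions.
import numpy as np

def undo_log_returns_sketch(log_returns, start_value):
    # log_returns: (num_steps, num_tenors); start_value: last observed curve, (num_tenors,)
    return np.asarray(start_value, dtype=float) * np.exp(np.cumsum(log_returns, axis=0))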
def __init__(self):
    print("Andersen Markov Model")
    self.plotting = Plotting()

    preprocess_logreturns = PreprocessData()
    preprocess_logreturns.enable_log_returns = True

    # 1. get data and apply log-returns scaling
    sets_training, sets_test, sets_training_scaled, sets_test_scaled, \
        training_dataset_names, test_dataset_names, maturities = preprocess_logreturns.get_data()

    # tenors: rate tenors in year fractions (from 0.083 to 5 over 60 steps)
    # rates: corresponding zero rates matrix
    # obs_time: observation dates in year fractions (starting at the first date)
    # 988 steps from -3.835... to 0 on the WTI NYMEX data

    num_c = 6 * 7  # add '* 20' to see if a larger training set helps
    num_o = 6 * 7

    train_set = sets_test[-1].iloc[:num_c]
    test_set = sets_test[-1].iloc[num_c:num_c + num_o + 1]
    num_of_test_curves = len(test_set)

    self.test_set = test_set
    tenors = maturities
    self.tenors = tenors[:, np.newaxis]
    self.rates = np.array(train_set)

    index = pd.Series(train_set.index)
    end_num = toYearFraction(sets_test[-1].index[-1])
    dates_as_decimal = np.array(index.apply(lambda x: toYearFraction(x, end_num)))
    self.dates_as_decimal = dates_as_decimal[:, np.newaxis]

    print("test_set.shape", np.array(test_set).shape)

    smape_results = []
    for i in np.arange(100):
        simulated_rates = self.simulate(num_of_test_curves)

        smape_result = smape(simulated_rates, test_set)
        smape_results.append(smape_result)

        print("simulate rates", i)
        print("simulated, real", np.array(simulated_rates).shape, np.array(test_set).shape)
        print("smape:", smape_result)
        print("=============\n")

        # self.plotting.plot_3d("real", test_set, show_title=False)
        # self.plotting.plot_3d("AMM_simulated_" + str(i), simulated_rates, show_title=False)
        #
        # cov_log_returns = cov_log_returns_over_features(simulated_rates)
        # self.plotting.plot_3d_cov("AMM_simulated_" + str(i) + "_cov", cov_log_returns, show_title=False)

    smape_results = np.array(smape_results)
    # print("smape_results:", smape_results)
    print("smape mean and std:", np.mean(smape_results), np.std(smape_results))
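# toYearFraction above converts pandas timestamps to decimal years and, when
# an end value is passed, shifts them so the final observation maps to 0
# (matching the "988 steps from -3.835... to 0" note). Below is a minimal
# sketch of such a conversion; the exact day-count convention used by the
# repository's toYearFraction is an assumption here.
from datetime import datetime

def to_year_fraction_sketch(date, end=0.0):
    year_start = datetime(date.year, 1, 1)
    next_year_start = datetime(date.year + 1, 1, 1)
    elapsed = (date - year_start).total_seconds()
    year_length = (next_year_start - year_start).total_seconds()
    return date.year + elapsed / year_length - end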