import numpy as np

# Project-local helper modules. The module names below are assumptions inferred
# from the aliases used in this file (nn, inst, du); adjust them to the actual
# file names of the neural-network, instrument and data-utility code.
import neural_network as nn
import instruments as inst
import data_utils as du


def g2_vola_heat_map():
    # Load a trained G2++ feed-forward model and compute the volatility-error
    # heat map over the out-of-sample dates 264-308.
    file_name = du.data_dir + 'swo_gbp_g2pp_nn_adj_err_s0.5_0-264_mse_lr_1.0e-04_ex6_lay9_d20_bn_res_1_rlr_5.0e-01_rlrmin_5.0e-06_rlrpat_10_estop_41.p'
    model = nn.read_model(file_name)
    model_dict = inst.g2
    swo = inst.get_swaptiongen(model_dict)
    errs = swo.history_heatmap(model, dates=swo._dates[264:308])
    return errs
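# Minimal plotting sketch for the heat map returned above. Assumptions: ``errs``
# is a 2-D array of volatility errors indexed by option expiry and swap tenor,
# and matplotlib is available. The project's own du.plot_data helpers are not
# used here, so this is an illustration rather than the repository's plotting path.
def plot_vola_heatmap_sketch(errs):
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(figsize=(10, 6))
    im = ax.imshow(errs, aspect='auto', cmap='viridis')
    fig.colorbar(im, ax=ax, label='volatility error')
    ax.set_xlabel('swap tenor index')
    ax.set_ylabel('option expiry index')
    ax.set_title('G2++ NN calibration: volatility error heat map')
    plt.show()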
def plot_history(file_name):
    file_name = "../data_corr_mid_2014/" + file_name
    model = nn.read_model(file_name)
    data = np.array(model.history['history']['val_loss'])
    data.shape = (data.shape[0], 1)
    du.plot_data(None, data)
    return data
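# Sketch of a train-vs-validation loss plot. Assumptions: the pickled model
# exposes a Keras-style ``model.history['history']`` dict with 'loss' and
# 'val_loss' lists (as used elsewhere in this file) and matplotlib is
# available; the file name passed in is whatever plot_history() would accept.
def plot_train_val_history_sketch(file_name):
    import matplotlib.pyplot as plt
    model = nn.read_model(file_name)
    hist = model.history['history']
    fig, ax = plt.subplots(figsize=(10, 6))
    ax.plot(hist['loss'], label='training loss')
    ax.plot(hist['val_loss'], label='validation loss')
    ax.set_xlabel('epoch')
    ax.set_ylabel('loss')
    ax.set_yscale('log')
    ax.legend()
    plt.show()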
def run(x, total=499000., compare=False, epochs=500, prefix='SWO GBP ',
        postfix='', dropout_first=None, dropout_middle=None, dropout_last=None,
        dropout=0.2, earlyStopPatience=125, reduceLRPatience=40,
        reduceLRFactor=0.5, reduceLRMinLR=0.000009, save=True, layers=4,
        lr=0.001, exponent=6, load=False, file_name=None, model_dict=inst.g2,
        residual_cells=1, train_file=None, do_transform=True,
        loss='mean_squared_error'):
    assert residual_cells >= 0
    # Encode the hyper-parameters in the postfix so saved models and result
    # files can be identified later, e.g. '_mse_lr_1.0e-04_ex6_lay9_d20_...'.
    lossid = "".join(map(lambda w: w[0], loss.split('_')))
    postfix += "_" + lossid + '_lr_%.1e_ex%s_lay%s_d%s' % (lr, exponent, layers,
                                                           int(dropout * 100))
    if residual_cells > 0:
        postfix = postfix + '_bn_res_%s' % residual_cells
    else:
        postfix = postfix + '_simple'
    postfix = postfix + '_rlr_%.1e_rlrmin_%.1e_rlrpat_%s_estop_%s' % (
        reduceLRFactor, reduceLRMinLR, reduceLRPatience, earlyStopPatience)
    print('run ' + str(x) + ' ' + postfix)
    # x may be given either as a fraction of the data set (< 1.0) or as an
    # absolute number of samples out of `total`.
    if x < 1.0:
        nn.total_size = x
    else:
        nn.total_size = x / total
    nn.valid_size = nn.total_size * 0.2
    nn.test_size = 0.0
    if load:
        assert file_name is not None
        model = nn.read_model(file_name)
    else:
        model = nn.hullwhite_fnn(exponent=exponent, layers=layers, lr=lr,
                                 prefix=prefix, postfix=postfix,
                                 dropout=dropout, dropout_first=dropout_first,
                                 dropout_middle=dropout_middle,
                                 dropout_last=dropout_last,
                                 earlyStopPatience=earlyStopPatience,
                                 reduceLRPatience=reduceLRPatience,
                                 reduceLRFactor=reduceLRFactor,
                                 reduceLRMinLR=reduceLRMinLR,
                                 model_dict=model_dict,
                                 residual_cells=residual_cells,
                                 train_file=train_file,
                                 do_transform=do_transform,
                                 activation="elu")
    model.train(epochs)
    if save:
        nn.write_model(model)
    if compare:
        # Compare the trained network against the calibration history and
        # store the results for later plotting.
        swo = inst.get_swaptiongen(model_dict)
        (dates, values) = swo.compare_history(model)
        file_name = du.data_dir + 'test' + postfix + '_'
        np.save(file_name + 'values', values)
        np.save(file_name + 'dates', dates)
        np.save(file_name + 'val_hist', model.history['history']['val_loss'])
        np.save(file_name + 'train_hist', model.history['history']['loss'])
    return 0
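# Hypothetical invocation of run() that mirrors the hyper-parameters encoded in
# the model-file postfix used above ('..._mse_lr_1.0e-04_ex6_lay9_d20_bn_res_1_
# rlr_5.0e-01_rlrmin_5.0e-06_rlrpat_10_estop_41'); the training fraction of 0.5
# and the epoch count are assumptions, not a record of the original training run.
#
#   run(0.5, epochs=500, lr=1.0e-4, exponent=6, layers=9, dropout=0.2,
#       residual_cells=1, reduceLRFactor=0.5, reduceLRMinLR=5.0e-6,
#       reduceLRPatience=10, earlyStopPatience=41, compare=True)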
def run(x, total=239575., compare=False, epochs=500, prefix='SWO GBP ',
        postfix='', dropout_first=0.2, dropout_middle=0.2, dropout_last=0.2,
        save=True, layers=4, lr=0.001, exponent=6, load=False, file_name=None):
    # Hull-White (ELU) variant of the training driver above.
    print('run ' + str(x) + ' ' + postfix)
    nn.total_size = x / total
    nn.valid_size = nn.total_size * 0.2
    nn.test_size = 0.0
    if load:
        assert file_name is not None
        model = nn.read_model(file_name)
    else:
        model = nn.hullwhite_elu(exponent=exponent, layers=layers, lr=lr,
                                 prefix=prefix, postfix=postfix,
                                 dropout_first=dropout_first,
                                 dropout_middle=dropout_middle,
                                 dropout_last=dropout_last)
    model.train(epochs)
    if save:
        nn.write_model(model)
    if compare:
        swo = inst.getSwaptionGen(inst.hullwhite_analytic)
        (dates, values) = swo.compare_history(model)
        file_name = (du.data_dir + 'test' + postfix + '_fnn_l' + str(layers)
                     + '_e' + str(exponent) + '_epoch' + str(epochs) + '_')
        np.save(file_name + 'values', values)
        np.save(file_name + 'dates', dates)
        np.save(file_name + 'val_hist', model.history['history']['val_loss'])
        np.save(file_name + 'train_hist', model.history['history']['loss'])
    return 0
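# Worked sizing example (hypothetical call): run(59894.) with the default total
# of 239575 sets nn.total_size to 59894 / 239575 ≈ 0.25, i.e. roughly a quarter
# of the Hull-White data set, of which 20% (nn.valid_size ≈ 0.05 of the full
# set, about 11979 samples) is held out for validation.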
def g2_objective_graph():
    mark_read_1 = 'adj_err_s'
    mark_read_2 = '_mse_lr_1.0e-04_ex6_lay9_d20_bn_res_1_rlr_5.0e-01_rlrmin_5.0e-06_rlrpat_10_estop_41'
    mark_write = 'history_adj_err_4m'
    data_labels = ('Simulated Annealing', 'Neural Network')
    labels = ('0.5_0-264', '0.5_44-308', '0.5_88-352', '0.5_132-396',
              '0.5_176-440', '0.5_220-484', '0.5_264-528', '0.5_308-572',
              '0.5_352-616', '0.99_396-660', '0.99_440-704', '0.99_484-748',
              '0.99_528-792', '0.99_572-836', '0.99_616-880')
    # Reduced label set actually used (overrides the full list above).
    labels = ('0.5_0-264', '0.5_88-352', '0.5_176-440', '0.5_264-528',
              '0.5_352-616', '0.99_440-704', '0.99_528-792', '0.99_616-880')
    model_dict = inst.g2
    swo = inst.get_swaptiongen(model_dict)
    max_rank = len(labels) - 1
    prev = 0
    npv = None
    vola = None
    for rank, label in enumerate(labels):
        dates, values, _, _ = get_fnn(mark_read_1 + label + mark_read_2)
        if npv is None:
            npv = np.empty((dates.shape[0], len(data_labels)))
            npv.fill(np.nan)
            vola = np.empty((dates.shape[0], len(data_labels)))
            vola.fill(np.nan)
        out_of_sample = int(label.split('_')[1].split('-')[1])
        file_name = (du.data_dir + 'swo_gbp_g2pp_nn_' + mark_read_1 + label
                     + mark_read_2 + '.p')
        model = nn.read_model(file_name)
        if rank < max_rank:
            max_date = int(labels[rank + 1].split('_')[1].split('-')[1])
        else:
            max_date = -1
        # Objective prior
        npv[prev:max_date, 1], vola[prev:max_date, 1] = \
            swo.objective_values(model, prev, max_date)
        temp = values[prev:max_date, 4]   # History
        temp3 = values[prev:max_date, 3]  # Default starting point
        # Keep the smaller of the two NPV errors per date.
        filt = temp3 < temp
        temp[filt] = temp3[filt]
        npv[prev:max_date, 0] = temp
        temp_v = values[prev:max_date, 1]   # History
        temp3_v = values[prev:max_date, 0]  # Default starting point
        # Keep the smaller of the two volatility errors per date.
        filt = temp3_v < temp_v
        temp_v[filt] = temp3_v[filt]
        vola[prev:max_date, 0] = temp_v
        prev = max_date
    vola *= 100
    # colors = ('#66c2a5', '#fc8d62', '#8da0cb')
    colors = ('#fc8d62', '#8da0cb')
    du.plot_data(dates, npv, figsize=(21, 12), labels=data_labels,
                 save=du.data_dir + mark_write + '_npv_error_fnn.eps',
                 colors=colors, legend_fontsize=22, legend_color='black',
                 xlabel_fontsize=22, xlabel_color='black',
                 ylabel_fontsize=22, ylabel_color='black',
                 xtick_fontsize=18, xtick_color='black',
                 yticks_format='{:.2f}', ytick_fontsize=18,
                 ytick_color='black', title='NPV Mean Square Error',
                 title_fontsize=26, out_of_sample=out_of_sample)
    du.plot_data(dates, vola, figsize=(21, 12), labels=data_labels,
                 save=du.data_dir + mark_write + '_vola_error_fnn.eps',
                 colors=colors, legend_fontsize=22, legend_color='black',
                 xlabel_fontsize=22, xlabel_color='black',
                 ylabel_fontsize=22, ylabel_color='black',
                 xtick_fontsize=18, xtick_color='black',
                 yticks_format='{:.2f} %', ytick_fontsize=18,
                 ytick_color='black', title='Average Volatility Error',
                 title_fontsize=26, out_of_sample=out_of_sample)
    temp = vola[:, 1] - vola[:, 0]
    temp = temp.reshape((temp.shape[0], 1))
    du.plot_data(dates, temp, figsize=(21, 12), labels=None,
                 save=du.data_dir + mark_write + '_vola_diff_error_fnn.eps',
                 colors=colors, legend_fontsize=22, legend_color='black',
                 xlabel_fontsize=22, xlabel_color='black',
                 ylabel_fontsize=22, ylabel_color='black',
                 xtick_fontsize=18, xtick_color='black',
                 yticks_format='{:.2f} %', ytick_fontsize=18,
                 ytick_color='black',
                 title='Difference in Average Volatility Error',
                 title_fontsize=26, out_of_sample=out_of_sample)
    return (npv, vola)
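# Summary sketch for the arrays returned by g2_objective_graph(). Assumption:
# column 0 holds the simulated-annealing errors and column 1 the neural-network
# errors, matching the order of ``data_labels`` above; np.nanmean skips dates
# that were never filled in.
def summarize_objective_errors(npv, vola):
    for col, name in enumerate(('Simulated Annealing', 'Neural Network')):
        print('%s: mean NPV MSE %.4e, mean vola error %.4f %%'
              % (name, np.nanmean(npv[:, col]), np.nanmean(vola[:, col])))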