def plot_signifant_region(ax1, max_mu, min_mu, max_std, min_std, max_abs):
    ## Shade the non-significant region of the (mu, sigma) plane
    mu_grid = np.array([-max_abs * 10, 0, max_abs * 10])
    y_grid = np.abs(mu_grid) / 2
    gl.fill_between(mu_grid, 10 * np.ones(mu_grid.size), y_grid,
                    alpha=0.2, color="r", ax=ax1,
                    legend=["95% non-significant"])
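# A minimal sketch (not part of the original code; the helper name
# `weight_is_significant` is hypothetical) of the rule the shaded region
# encodes: a Gaussian weight N(mu, sigma^2) is "non-significant" at the ~95%
# level when its interval mu +- 2*sigma contains zero, i.e. when
# sigma >= |mu| / 2, which is exactly the boundary y_grid = |mu| / 2 above.
import numpy as np

def weight_is_significant(mu, sigma):
    """Return True where the 95% interval mu +- 2*sigma excludes zero."""
    return np.abs(mu) > 2 * sigma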
def create_Bayesian_analysis_charts(model,
                                    X_data_tr, Y_data_tr,
                                    X_data_val, Y_data_val,
                                    tr_loss, val_loss, KL_loss,
                                    final_loss_tr, final_loss_val,
                                    xgrid_real_func, ygrid_real_func,
                                    folder_images, epoch_i=None):
    # Configuration of the plots
    alpha_points = 0.2
    color_points_train = "dark navy blue"
    color_points_val = "amber"
    color_train_loss = "cobalt blue"
    color_val_loss = "blood"
    color_truth = "k"
    color_mean = "b"
    color_most_likey = "y"

    ############################# Data computation #######################
    if (type(X_data_tr) == type([])):
        pass
    else:
        if (X_data_tr.shape[1] == 1):  # Regression Example
            x_grid, all_y_grid, most_likely_ygrid = compute_regression_1D_data(
                model, X_data_tr, X_data_val, Nsamples=100)
        elif (X_data_tr.shape[1] == 2):  # Classification Example
            xx, yy, all_y_grid, most_likely_ygrid = compute_classification_2D_data(
                model, X_data_tr, X_data_val, Nsamples=100)
        else:  # RNN
            x_grid, all_y_grid, most_likely_ygrid = compute_RNN_1D_data(
                model, X_data_tr, X_data_val, Nsamples=100)

    ################################ Divide in plots ##############################
    gl.init_figure()
    ax1 = gl.subplot2grid((6, 3), (0, 0), rowspan=3, colspan=1)
    ax2 = gl.subplot2grid((6, 3), (3, 0), rowspan=3, colspan=1,
                          sharex=ax1, sharey=ax1)
    ax3 = gl.subplot2grid((6, 3), (0, 1), rowspan=2, colspan=1)
    ax4 = gl.subplot2grid((6, 3), (2, 1), rowspan=2, colspan=1, sharex=ax3)
    ax5 = gl.subplot2grid((6, 3), (4, 1), rowspan=2, colspan=1, sharex=ax3)
    ax6 = gl.subplot2grid((6, 3), (0, 2), rowspan=3, colspan=1)
    ax7 = gl.subplot2grid((6, 3), (3, 2), rowspan=3, colspan=1, sharex=ax6)

    if (type(X_data_tr) == type([])):
        Xtrain = [torch.tensor(X_data_tr[i], device=model.cf_a.device,
                               dtype=model.cf_a.dtype)
                  for i in range(len(X_data_tr))]
        Ytrain = torch.tensor(Y_data_tr, device=model.cf_a.device,
                              dtype=torch.int64)
        Xval = [torch.tensor(X_data_val[i], device=model.cf_a.device,
                             dtype=model.cf_a.dtype)
                for i in range(len(X_data_val))]
        Yval = torch.tensor(Y_data_val, device=model.cf_a.device,
                            dtype=torch.int64)

        confusion = model.get_confusion_matrix(Xtrain, Ytrain)
        plot_confusion_matrix(confusion, model.languages, ax1)
        confusion = model.get_confusion_matrix(Xval, Yval)
        plot_confusion_matrix(confusion, model.languages, ax2)
    else:
        if (X_data_tr.shape[1] == 1):  # Regression Example
            plot_data_regression_1d_2axes(
                X_data_tr, Y_data_tr, xgrid_real_func, ygrid_real_func,
                X_data_val, Y_data_val,
                x_grid, all_y_grid, most_likely_ygrid,
                alpha_points, color_points_train, color_points_val,
                color_most_likey, color_mean, color_truth, ax1, ax2)
        elif (X_data_tr.shape[1] == 2):  # Classification Example
            plot_data_classification_2d_2axes(
                X_data_tr, Y_data_tr, xgrid_real_func, ygrid_real_func,
                X_data_val, Y_data_val,
                xx, yy, all_y_grid, most_likely_ygrid,
                alpha_points, color_points_train, color_points_val,
                color_most_likey, color_mean, color_truth, ax1, ax2)
        else:  # RNN example
            plot_data_RNN_1d_2axes(
                X_data_tr, Y_data_tr, xgrid_real_func, ygrid_real_func,
                X_data_val, Y_data_val,
                x_grid, all_y_grid, most_likely_ygrid,
                alpha_points, color_points_train, color_points_val,
                color_most_likey, color_mean, color_truth, ax1, ax2)

    # gl.fill_between(x_grid, [mean_samples_grid + 2*std_samples_grid,
    #                          mean_samples_grid - 2*std_samples_grid],
    #                 ax=ax2, alpha=0.10, color="b", legend=["Mean realizations"])
    ## ax2: The uncertainty of the prediction !!
    # gl.plot(x_grid, std_samples_grid, ax=ax2,
    #         labels=["Std (%i)" % (Nsamples), "X", "f(X)"],
    #         legend=["std predictions"], fill=1, alpha=0.3)

    ############## ax3 ax4 ax5: Loss Evolution !! ######################
    ## ax3: Evolution of the data loss
    gl.plot([], tr_loss, ax=ax3, lw=3, labels=["Losses", "", "Data loss"],
            legend=["train"], color=color_train_loss)
    gl.plot([], val_loss, ax=ax3, lw=3, legend=["validation"],
            color=color_val_loss, AxesStyle="Normal - No xaxis")
    ## ax4: The evolution of the KL loss
    gl.plot([], KL_loss, ax=ax4, lw=3, labels=["", "", "KL loss"],
            legend=["Bayesian Weights"], AxesStyle="Normal - No xaxis",
            color="k")
    ## ax5: Evolution of the total loss
    gl.plot([], final_loss_tr, ax=ax5, lw=3,
            labels=["", "epoch", "Total Loss (Bayes)"],
            legend=["train"], color=color_train_loss)
    gl.plot([], final_loss_val, ax=ax5, lw=3, legend=["validation"],
            color=color_val_loss)

    ############## ax6 ax7: Variational Weights !! ######################
    create_plot_variational_weights(model, ax6, ax7)
    ## Shade in chart 7 the non-significant region: |mu| = 2*sigma -> sigma = |mu|/2
    mu_grid = np.linspace(-3, 3, 100)
    y_grid = np.abs(mu_grid) / 2
    gl.fill_between(mu_grid, 10 * np.ones(mu_grid.size), y_grid,
                    alpha=0.2, color="r", ax=ax7,
                    legend=["95% non-significant"])

    gl.set_zoom(ax=ax6, ylim=[-0.1, 10])
    gl.set_zoom(ax=ax7, xlim=[-2.5, 2.5],
                ylim=[-0.05,
                      np.exp(model.cf_a.input_layer_prior["log_sigma2"]) * (1 + 0.15)])
    # gl.set_zoom(ax=ax7, xlim=[-2.5, 2.5], ylim=[-0.1, 2])

    # Set final properties and save figure
    gl.set_fontSizes(ax=[ax1, ax2, ax3, ax4, ax5, ax6, ax7],
                     title=20, xlabel=20, ylabel=20,
                     legend=10, xticks=12, yticks=12)
    gl.subplots_adjust(left=.09, bottom=.10, right=.90, top=.95,
                       wspace=.30, hspace=0.10)

    if (type(epoch_i) == type(None)):
        gl.savefig(folder_images + "../" + 'Final_values_regression_1D_' +
                   str(model.cf_a.eta_KL) + '.png',
                   dpi=100, sizeInches=[20, 10])
    else:
        gl.savefig(folder_images + '%i.png' % epoch_i,
                   dpi=100, sizeInches=[20, 10],
                   close=True, bbox_inches="tight")
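# A minimal sketch (hypothetical names, not the repo's API) of what a helper
# like compute_regression_1D_data is expected to return: Nsamples realizations
# of the stochastic (weight-sampling) predictive function on a 1D grid, plus a
# point estimate. Here the point estimate is the sample mean; the original may
# instead evaluate the model at the posterior-mean weights.
import numpy as np

def sample_predictions_1d(predict, x_min, x_max, Nsamples=100, Npoints=200):
    """predict(x_grid) is assumed to return one stochastic realization."""
    x_grid = np.linspace(x_min, x_max, Npoints)
    all_y_grid = np.stack([predict(x_grid) for _ in range(Nsamples)], axis=0)
    most_likely_ygrid = all_y_grid.mean(axis=0)  # point estimate over samples
    return x_grid, all_y_grid, most_likely_ygrid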
"Number of clusters (K)", "Average LL of a sample" ], lw=3, color="k") gl.plot(Klusters, mean_tr_ll + 2 * std_tr_ll, color="k", nf=0, lw=1, ls="--", legend=["Mean Train LL +- 2std"]) gl.plot(Klusters, mean_tr_ll - 2 * std_tr_ll, color="k", nf=0, lw=1, ls="--") gl.fill_between(Klusters, mean_tr_ll - 2 * std_tr_ll, mean_tr_ll + 2 * std_tr_ll, c="k", alpha=0.5) for i in range(len(logl_tr_CVs)): for k_i in range(len(Klusters)): gl.scatter(np.ones((len(logl_tr_CVs[i][k_i]), 1)) * Klusters[k_i], logl_tr_CVs[i][k_i], color="k", alpha=0.2, lw=1) gl.plot(Klusters, mean_val_ll, nf=0, color="r", legend=["Mean Validation LL (EM)"],
ax0 = gl.subplot2grid((1, 4), (0, 0), rowspan=1, colspan=3)
for i in range(Nrealizations):
    f_prime = np.random.randn(N, 1)
    error = L.dot(f_prime)
    gl.plot(tgrid, error, lw=3, color="b", ls="-", alpha=0.5,
            legend=legend, labels=labels)
    # gl.scatter(tgrid, f_prime, lw=1, alpha=0.3, color="b")
    if (flag == 1):
        flag = 0
        legend = []

# Variance of each prediction
v = np.diagonal(K)
gl.fill_between(tgrid, -2 * np.sqrt(v), 2 * np.sqrt(v), lw=3, alpha=0.5,
                color="yellow", legend=["95% confidence interval"])
# Draw the upper and lower edges of the +-2*std band
gl.plot(tgrid, 2 * np.sqrt(v), lw=1, alpha=0.5, color="yellow")
gl.plot(tgrid, -2 * np.sqrt(v), lw=1, alpha=0.5, color="yellow")

## Plot the covariance matrix
ax1 = gl.subplot2grid((1, 4), (0, 3), rowspan=1, colspan=1)
cmap = cm.get_cmap('jet', 30)
cax = ax1.imshow(K[0:Nshow, 0:Nshow], interpolation="nearest", cmap=cmap)
# ax1.grid(True)
plt.title('Covariance matrix of Noise')
# labels = [str(x) for x in range(Nshow)]
# ax1.set_xticklabels(labels, fontsize=20)
# ax1.set_yticklabels(labels, fontsize=20)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
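# A minimal sketch (not the repo's code; `rbf_kernel` and the jitter value are
# assumptions) of where `L` above typically comes from: to draw realizations
# from N(0, K) you Cholesky-factor the covariance, K = L L^T, and map standard
# normal draws f' ~ N(0, I) to L f' ~ N(0, K), which is the `L.dot(f_prime)`
# pattern in the loop.
import numpy as np

def rbf_kernel(t, length_scale=1.0, variance=1.0):
    d = t[:, None] - t[None, :]
    return variance * np.exp(-0.5 * (d / length_scale) ** 2)

tgrid_demo = np.linspace(0, 5, 50)
K_demo = rbf_kernel(tgrid_demo)
# Small jitter on the diagonal keeps the factorization numerically stable
L_demo = np.linalg.cholesky(K_demo + 1e-8 * np.eye(len(tgrid_demo)))
sample = L_demo.dot(np.random.randn(len(tgrid_demo), 1))  # one draw from N(0, K)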
labels=["Averages", "Time", "Value"], legend=["Price", "SMA", "EMA"]) ########################################################################### # Bollinger Bands, Pivot points Resistences and Supports and ATR BB = timeData.BBANDS(seriesNames=["Close"], n=10) ATR = timeData.ATR(n=20) PPSR = timeData.PPSR() gl.set_subplots(3, 1) gl.plot(dates, [price, BB[:, 0], BB[:, 1]], nf=1, labels=["Averages", "Time", "Value"], legend=["Price", "Bollinger Bands"]) gl.fill_between(x=dates, y1=BB[:, 0], y2=BB[:, 1], alpha=0.5) gl.plot(dates, price, nf=1, labels=["Averages", "Time", "Value"], legend=["Price"]) gl.plot(dates, PPSR, nf=0, legend=["Supports and Resistances"]) gl.plot(dates, price, nf=1, labels=["Averages", "Time", "Value"], legend=["Price"]) gl.plot(dates, ATR,
gl.plot(dates, [price, SMA, EMA], nf=1,
        labels=["Averages", "Time", "Value"],
        legend=["Price", "SMA", "EMA"])

###########################################################################
# Bollinger Bands, Pivot Points (Resistances and Supports) and ATR
BB = timeData.BBANDS(seriesNames=["Close"], n=10)
ATR = timeData.ATR(n=20)
PPSR = timeData.PPSR()

gl.set_subplots(3, 1)
gl.plot(dates, [price, BB[:, 0], BB[:, 1]], nf=1,
        labels=["Averages", "Time", "Value"],
        legend=["Price", "Bollinger Bands"])
gl.fill_between(x=dates, y1=BB[:, 0], y2=BB[:, 1], alpha=0.5)

gl.plot(dates, price, nf=1,
        labels=["Averages", "Time", "Value"], legend=["Price"])
gl.plot(dates, PPSR, nf=0, legend=["Supports and Resistances"])

gl.plot(dates, price, nf=1,
        labels=["Averages", "Time", "Value"], legend=["Price"])
gl.plot(dates, ATR, nf=0, na=1,
        labels=["Averages", "Time", "Value"], legend=["ATR"], fill=1)

pandas_lib1 = 0
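# A minimal sketch (assuming `close` is a pandas Series; this is the textbook
# definition, not necessarily what timeData.BBANDS implements internally) of
# the Bollinger Bands plotted above: an n-period moving average bracketed by
# +-k rolling standard deviations.
import pandas as pd

def bollinger_bands(close, n=10, k=2.0):
    sma = close.rolling(n).mean()
    sd = close.rolling(n).std()
    return sma + k * sd, sma - k * sd  # (upper band, lower band)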
        X_data_tr, Y_data_tr, xgrid_real_func, ygrid_real_func,
        X_data_val, Y_data_val,
        x_grid, all_y_grid, most_likely_ygrid,
        alpha_points, color_points_train, color_points_val,
        color_most_likey, color_mean, color_truth, None, ax1)

pf.create_plot_variational_weights(model, ax1, ax2, plot_pdf=False)
all_axes.append(ax1)
all_axes.append(ax2)

## Shade the 95% non-significant region: sigma >= |mu| / 2
mu_grid = np.linspace(-3, 3, 100)
y_grid = np.abs(mu_grid) / 2
gl.fill_between(mu_grid, 10 * np.ones(mu_grid.size), y_grid,
                alpha=0.2, color="r", ax=ax2,
                legend=["95% non-significant"])
gl.set_zoom(ax=ax2, xlim=[-2.5, 2.5],
            ylim=[-0.05, model.linear1.prior.sigma1 * (1 + 0.30)])

eta_KL = eta_values[i]
ax1.set_title(r"Model estimations for $\zeta = " + str(eta_KL) + r"$")
ax2.set_title(r"Variational Weights for $\zeta = " + str(eta_KL) + r"$")
# gl.set_zoom(ax=ax7, xlim=[-2.5, 2.5], ylim=[-0.1, 2])

# Set final properties and save figure
gl.set_fontSizes(ax=all_axes,
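# A minimal sketch (hypothetical names; the repo's training loop may differ)
# of the role eta_KL (the $\zeta$ in the titles above) plays: it scales the KL
# regularizer of the variational weights against the data term in the
# ELBO-style loss, so each pair of subplots shows the fit/shrinkage trade-off
# for one value of $\zeta$.
def total_loss(data_loss, kl_loss, eta_KL):
    # Larger eta_KL -> stronger shrinkage of the weights toward the prior
    return data_loss + eta_KL * kl_loss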
ls="-", alpha=0.5, legend=legend, labels=labels) # gl.scatter(tgrid,f_prime, lw = 1, alpha = 0.3, color = "b") if (flag == 1): flag = 0 legend = [] #Variance of each prediction v = np.diagonal(K) gl.fill_between(tgrid, -2 * np.sqrt(v), 2 * np.sqrt(v), lw=3, alpha=0.5, color="yellow", legend=["95% confidence interval"]) gl.plot(tgrid, 2 * np.sqrt(v), lw=1, alpha=0.5, color="yellow", legend=["95% confidence interval"]) gl.plot(tgrid, 2 * np.sqrt(v), lw=1, alpha=0.5, color="yellow") ## Plot the covariance matrix ax1 = gl.subplot2grid((1, 4), (0, 3), rowspan=1, colspan=1) cmap = cm.get_cmap('jet', 30) cax = ax1.imshow(K[0:Nshow, 0:Nshow], interpolation="nearest", cmap=cmap)
################################################################################################################
gl.init_figure()
title = "Validation of Number of clusters for a %i-CV. " % Nfolds
if (clusters_relation == "MarkovChain1"):
    title += "HMM"
else:
    title += "EM"

ax1 = gl.plot(Klusters, mean_tr_ll, legend=["Mean Train LL"],
              labels=[title, "Number of clusters (K)",
                      "Average LL of a sample"],
              lw=3, color="k")
gl.plot(Klusters, mean_tr_ll + 2 * std_tr_ll, color="k", nf=0, lw=1, ls="--",
        legend=["Mean Train LL +- 2std"])
gl.plot(Klusters, mean_tr_ll - 2 * std_tr_ll, color="k", nf=0, lw=1, ls="--")
gl.fill_between(Klusters, mean_tr_ll - 2 * std_tr_ll,
                mean_tr_ll + 2 * std_tr_ll, c="k", alpha=0.5)
# Scatter the per-fold train log-likelihoods
for i in range(len(logl_tr_CVs)):
    for k_i in range(len(Klusters)):
        gl.scatter(np.ones((len(logl_tr_CVs[i][k_i]), 1)) * Klusters[k_i],
                   logl_tr_CVs[i][k_i], color="k", alpha=0.2, lw=1)

gl.plot(Klusters, mean_val_ll, nf=0, color="r",
        legend=["Mean Validation LL"], lw=3)
gl.plot(Klusters, mean_val_ll + 2 * std_val_ll, color="r", nf=0, lw=1,
        ls="--", legend=["Mean Validation LL +- 2std"])
gl.plot(Klusters, mean_val_ll - 2 * std_val_ll, color="r", nf=0, lw=1, ls="--")
gl.fill_between(Klusters, mean_val_ll - 2 * std_val_ll,
                mean_val_ll + 2 * std_val_ll, c="r", alpha=0.1)
# Scatter the per-fold validation log-likelihoods
for i in range(len(logl_val_CVs)):
    for k_i in range(len(Klusters)):
        gl.scatter(np.ones((len(logl_val_CVs[i][k_i]), 1)) * Klusters[k_i],
                   logl_val_CVs[i][k_i], color="r", alpha=0.5, lw=1)

gl.set_fontSizes(ax=ax1, title=20, xlabel=20, ylabel=20,
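# A minimal sketch (hypothetical helper, not the repo's code) of how the
# mean_*_ll / std_*_ll curves above can be aggregated: logl_CVs[i][k_i] holds
# the per-sample log-likelihoods of fold i for the k_i-th cluster count, and
# each fold contributes its average LL per sample.
import numpy as np

def aggregate_cv_loglik(logl_CVs, Klusters):
    per_fold = np.array([[np.mean(logl_CVs[i][k_i])
                          for k_i in range(len(Klusters))]
                         for i in range(len(logl_CVs))])  # shape (Nfolds, Nk)
    return per_fold.mean(axis=0), per_fold.std(axis=0)  # mean, std over folds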
if (plotting_some == 1):
    Ndiv = 6
    HPV = 2
    ### PLOT THE ORIGINAL PRICE AND MOVING AVERAGES
    timeData = Cartera.get_timeDataObj(period=-1,
                                       symbol_names=[symbols[indx]])[0]
    gl.init_figure()

    ########################## AX0 ########################
    ### Plot the initial Price and Volume
    gl.subplot2grid((Ndiv, 4), (0, 0), rowspan=HPV, colspan=4)
    # gl.plot_indicator(timeData, Ndiv=Ndiv, HPV=HPV)
    gl.plot(dates, prices[:, indx], legend=["Price"], nf=0,
            labels=["Xing Average Strategy", "Price", "Time"])
    Volume = timeData.get_timeSeries(seriesNames=["Volume"])
    gl.plot(dates, Volume, nf=0, na=1, lw=0, alpha=0)
    gl.fill_between(dates, Volume)

    axes = gl.get_axes()
    axP = axes[0]  # Price axes
    axV = axes[1]  # Volume axes
    axV.set_ylim(0, 3 * max(Volume))

    gl.plot(dates, EMAfast[:, indx], ax=axP,
            legend=["EMA = %i" % (n_fast)], nf=0)
    gl.plot(dates, EMAslow[:, indx], ax=axP,
            legend=["EMA = %i" % (n_slow)], nf=0)

    ########################## AX1 ########################
    pos = 0
    gl.subplot2grid((Ndiv, 4), (HPV + pos, 0), rowspan=1, colspan=4,
                    sharex=axP)
    gl.plot(dates, Y_data, nf=0, legend=["Y data filtered"])

    ########################## AX2 ########################
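# A minimal sketch (hypothetical, not the repo's strategy code) of the
# crossing-average ("Xing") signal the chart above is meant to inspect: stay
# long while the fast EMA is above the slow one, and flag the bars where the
# sign flips as trade triggers.
import numpy as np

def crossover_signal(ema_fast, ema_slow):
    position = np.sign(ema_fast - ema_slow)  # +1 long regime, -1 short regime
    triggers = np.diff(position, prepend=position[0]) != 0  # True at crossings
    return position, triggers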