def create_image_training_epoch(X_data_tr, Y_data_tr, X_data_val, Y_data_val,
                                tr_loss, val_loss, x_grid, y_grid, cf_a,
                                video_fotograms_folder, epoch_i):
    """
    Creates the image of the training and validation accuracy
    """
    gl.init_figure()
    ax1 = gl.subplot2grid((2, 1), (0, 0), rowspan=1, colspan=1)
    ax2 = gl.subplot2grid((2, 1), (1, 0), rowspan=1, colspan=1)

    plt.title("Training")
    ## First plot with the data and predictions !!!
    ax1 = gl.scatter(X_data_tr, Y_data_tr, ax=ax1, lw=3, legend=["tr points"],
                     labels=["Analysis of training", "X", "Y"])
    gl.scatter(X_data_val, Y_data_val, lw=3, legend=["val points"])

    gl.plot(x_grid, y_grid, legend=["Prediction function"])
    gl.set_zoom(xlimPad=[0.2, 0.2], ylimPad=[0.2, 0.2],
                X=X_data_tr, Y=Y_data_tr)

    ## Second plot with the evolution of parameters !!!
    ax2 = gl.plot([], tr_loss, ax=ax2, lw=3,
                  labels=["RMSE. lr: %.3f" % cf_a.lr, "epoch", "RMSE"],
                  legend=["train"])
    gl.plot([], val_loss, lw=3, legend=["validation"], loc=3)

    gl.set_fontSizes(ax=[ax1, ax2], title=20, xlabel=20, ylabel=20,
                     legend=20, xticks=12, yticks=12)

    # Set final properties and save figure
    gl.subplots_adjust(left=.09, bottom=.10, right=.90, top=.95,
                       wspace=.30, hspace=0.30)
    gl.savefig(video_fotograms_folder + '%i.png' % epoch_i,
               dpi=100, sizeInches=[14, 10], close=True, bbox_inches=None)
def plot_learnt_function(X_data_tr, Y_data_tr, X_data_val, Y_data_val,
                         x_grid, y_grid, cf_a, folder_images):
    gl.init_figure()
    ax1 = gl.scatter(X_data_tr, Y_data_tr, lw=3, legend=["tr points"],
                     labels=["Data", "X", "Y"], alpha=0.2)
    ax2 = gl.scatter(X_data_val, Y_data_val, lw=3, legend=["val points"],
                     alpha=0.2)
    gl.set_fontSizes(ax=[ax1, ax2], title=20, xlabel=20, ylabel=20,
                     legend=20, xticks=12, yticks=12)
    gl.plot(x_grid, y_grid, legend=["training line"])
    gl.savefig(folder_images + 'Training_Example_Data.png',
               dpi=100, sizeInches=[14, 4])
def update_data(information):
    time, data = information.time, information.data

    ## Read data to update !!
    information.serial.flush()
    data.append(float(
        information.serial.readline().decode("utf-8").split("\n")[0]))
    time.append(update_data.index)
    update_data.index += 1

    window = 100
    start = max([update_data.index - window, 0])
    print(start, data[-1])

    # option 2, remove all lines and collections
    for artist in plt.gca().lines + plt.gca().collections:
        artist.remove()

    gl.plot(np.array(time)[start:update_data.index],
            np.array(data)[start:update_data.index],
            labels=["Sensors values", "time (s)", "Temperature"],
            color="k", ax=data_axes)
    gl.set_zoom(xlimPad=[0.2, 0.2], ylimPad=[0.1, 0.1])
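# Hedged usage sketch for update_data. Assumptions (not defined in this
# snippet): pyserial provides the serial connection, `data_axes` is the
# matplotlib axes gl draws into, and the port name/baud rate are illustrative.
# The key detail it demonstrates is that update_data keeps its sample counter
# as a function attribute, which must be initialized before the first call.
if __name__ == "__main__":
    import serial
    from types import SimpleNamespace

    information = SimpleNamespace(time=[], data=[],
                                  serial=serial.Serial("/dev/ttyUSB0", 9600))
    data_axes = plt.gca()     # axes that update_data draws into
    update_data.index = 0     # required: the counter is a function attribute
    for _ in range(1000):
        update_data(information)
        plt.pause(0.05)       # let the GUI event loop redraw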
def plot_means(Nclasses, X_train, y_train, colors=["r", "k"], normalize=True):
    print(Nclasses)
    ## Get the time average profile of every label.
    # For every label, we average across time to get the time profile.
    # We kind of should assume that the trials are somewhat time-aligned
    # X_data_ave = dp.get_timeSeries_average_by_label(X_All_labels, channel_sel = channel_sel)
    X_data_ave = dp.get_average_from_train(Nclasses, X_train, y_train,
                                           normalize=normalize)

    # Evolution of the means of each class in time representation
    gl.plot([0], [0])
    for i in range(1):
        max_val = 0
        if (i >= 1):
            max_val += np.max(np.abs(X_data_ave[i - 1])) + \
                       np.max(np.abs(X_data_ave[i]))
        gl.plot([], X_data_ave[i] + max_val, color=colors[i], nf=0,
                labels=["Mean value of the 70 Channels", "Time Index",
                        "Channels"])

    # Evolution of the means of each class in spherical representation
    gl.scatter_3D(0, 0, 0, nf=1, na=0)
    for i in range(Nclasses):
        gl.scatter_3D(X_data_ave[i][:, 0], X_data_ave[i][:, 1],
                      X_data_ave[i][:, 2], color=colors[i], nf=0, na=0,
                      labels=["Mean Time Evolution of the different classes",
                              "D1", "D2", "D3"])
def plot_data_regression_1d_2axes(X_data_tr, Y_data_tr, xgrid_real_func,
                                  ygrid_real_func, X_data_val, Y_data_val,
                                  x_grid, all_y_grid, most_likely_ygrid,
                                  alpha_points, color_points_train,
                                  color_points_val, color_most_likey,
                                  color_mean, color_truth, ax1, ax2):
    """
    This function plots the outputs of the Regression model for the 1D example
    """
    ## Compute mean and std of regression
    std_samples_grid = np.std(all_y_grid, axis=1)
    mean_samples_grid = np.mean(all_y_grid, axis=1)

    ############## ax1: Data + Most likely + Real + Mean !! ########################
    if (ax1 is not None):
        gl.scatter(X_data_tr, Y_data_tr, ax=ax1, lw=3,  # legend = ["tr points"],
                   labels=["Data and predictions", "", "Y"],
                   alpha=alpha_points, color=color_points_train)
        gl.scatter(X_data_val, Y_data_val, ax=ax1, lw=3,  # legend = ["val points"],
                   alpha=alpha_points, color=color_points_val)

        gl.plot(xgrid_real_func, ygrid_real_func, ax=ax1, alpha=0.90,
                color=color_truth, legend=["Truth"])
        gl.plot(x_grid, most_likely_ygrid, ax=ax1, alpha=0.90,
                color=color_most_likey, legend=["Most likely"])
        gl.plot(x_grid, mean_samples_grid, ax=ax1, alpha=0.90,
                color=color_mean, legend=["Posterior mean"],
                AxesStyle="Normal - No xaxis")

    ############## ax2: Data + Realizations of the function !! ######################
    if (ax2 is not None):
        gl.scatter(X_data_tr, Y_data_tr, ax=ax2, lw=3,  # legend = ["tr points"],
                   labels=["", "X", "Y"],
                   alpha=alpha_points, color=color_points_train)
        gl.scatter(X_data_val, Y_data_val, ax=ax2, lw=3,  # legend = ["val points"],
                   alpha=alpha_points, color=color_points_val)

        gl.plot(x_grid, all_y_grid, ax=ax2, alpha=0.15, color="k")
        gl.plot(x_grid, mean_samples_grid, ax=ax2, alpha=0.90, color="b",
                legend=["Mean realization"])

        gl.set_zoom(xlimPad=[0.2, 0.2], ylimPad=[0.2, 0.2], ax=ax2,
                    X=X_data_tr, Y=Y_data_tr)
def IFE_b(self, year_start=1996, year_finish=2016, window=10):
    ## Question b of the asked exercise
    all_returns = []
    all_covMatrices = []

    all_dates = []  # To store the dates of the estimation
    for year_test in range(year_start, year_finish - window + 1):  # +1 !!
        # Set the dates
        self.pf.set_interval(dt.datetime(year_test, 1, 1),
                             dt.datetime(year_test + window, 1, 1))
        ret = self.yearly_Return(self.get_MeanReturns())
        covMat = self.yearly_covMatrix(self.get_covMatrix())

        all_covMatrices.append(covMat)
        all_returns.append(ret)

        # Get the dates from any of the symbols of the portfolio
        dates = self.get_dates()
        all_dates.append(dates[-1])

    ## Plotting the returns
    all_returns = np.array(all_returns)
    # gl.plot(all_dates, all_returns[:, 0],
    #         labels = ["Returns", "Time", "Return"],
    #         legend = [self.pf.symbols.keys()[0]])
    # gl.plot(all_dates, all_returns[:, 1],
    #         legend = [self.pf.symbols.keys()[1]], nf = 0, na = 0)

    ## 1) Plot the returns of all of them together for the eleven windows
    gl.plot(all_dates, all_returns,
            labels=["Average Return in 10 years", "Time (years)",
                    "Annual return of Assets"],
            legend=self.symbol_names)
    gl.savefig(folder_images + 'returnsAveAll.png',
               dpi=150, sizeInches=[2 * 8, 1.5 * 6])

    ## 2) Plot the covariance matrix for 9 years
    gl.set_subplots(2, 3)
    for i in range(6):
        gl.bar_3D(self.symbol_names, self.symbol_names, all_covMatrices[i],
                  labels=[str(year_start + window + i), "", ""],
                  fontsize=30, fontsizeA=19)
    gl.savefig(folder_images + 'covsAveAll.png',
               dpi=80, sizeInches=[4 * 8, 3 * 6])
def plot_timeSeriesCumReturn(self, nf=1):
    dates = self.dates
    timeSeries = self.get_timeSeriesCumReturn()
    gl.plot(dates, timeSeries, nf=nf,
            labels=[self.symbol + "(" + str(self.period) + ")",
                    "Time (" + str(self.period) + ")", "CumReturn Prices"],
            legend=self.seriesNames)
def plot_timeSeries(self, nf=1, na=0):
    dates = self.dates
    timeSeries = self.get_timeSeries()
    gl.plot(dates, timeSeries, nf=nf,
            labels=[self.symbol + "(" + str(self.period) + ")",
                    "Time (" + str(self.period) + ")", "Prices"],
            legend=self.seriesNames, na=na)
def plots_weights_layer(mu_W, sigma_W, sigma_b, mu_b, ax1, ax2, legend_layer,
                        plot_pdf=1):
    """
    Plot the given weights of the layer
    """
    # For each of the weights we plot them !!
    color = gl.get_color(None)

    if (plot_pdf):
        for i in range(sigma_W.size):
            x_grid, y_val = bMA.gaussian1D_points(mean=mu_W[i],
                                                  std=sigma_W[i], std_K=3)
            gl.plot(x_grid, y_val, ax=ax1, fill=1, alpha=0.15, color=color,
                    labels=["Bayesian weights", "", "p(w)"], alpha_line=0.15
                    # , legend = ["W:%i" % (i+1)]
                    )  # legend = ["M: %.2e, std: %.2e" % (mu_W2[i], sigma_W2[i])])

    gl.scatter(mu_W, sigma_W, ax=ax2,
               labels=["", r"$\mu_w$", r"$\sigma_w$"],
               color=color, legend=legend_layer, alpha=0.3)

    if (plot_pdf):
        for i in range(sigma_b.size):
            x_grid, y_val = bMA.gaussian1D_points(mean=mu_b[i],
                                                  std=sigma_b[i], std_K=3)
            # color = gl.get_color(None)
            gl.plot(x_grid, y_val, ax=ax1, color=color, fill=1, alpha=0.3,
                    alpha_line=0.15, AxesStyle="Normal - No xaxis", ls="--"
                    # , legend = ["b:%i" % (i+1)]
                    )  # legend = ["M: %.2e, std: %.2e" % (mu_W2[i], sigma_W2[i])])

    gl.scatter(mu_b, sigma_b, ax=ax2, color=color, marker="s", alpha=0.3)
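# Hedged sketch of the bMA.gaussian1D_points helper used above, which is not
# defined in this snippet. Assumption (consistent with its usage): it returns
# a grid of x values covering mean +- std_K standard deviations together with
# the Gaussian pdf evaluated on that grid.
def gaussian1D_points_sketch(mean, std, std_K=3, Npoints=100):
    x_grid = np.linspace(mean - std_K * std, mean + std_K * std, Npoints)
    y_val = np.exp(-0.5 * ((x_grid - mean) / std) ** 2) \
            / (std * np.sqrt(2 * np.pi))
    return x_grid, y_val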
def plot_evolution_RMSE(tr_loss, val_loss, cf_a, folder_images):
    gl.init_figure()
    ax1 = gl.plot([], tr_loss, lw=3,
                  labels=["RMSE loss and parameters. Learning rate: %.3f"
                          % cf_a.lr, "", "RMSE"],
                  legend=["train"])
    gl.plot([], val_loss, lw=3, legend=["validation"])

    gl.set_fontSizes(ax=[ax1], title=20, xlabel=20, ylabel=20,
                     legend=20, xticks=12, yticks=12)
    gl.savefig(folder_images + 'Training_Example_Parameters.png',
               dpi=100, sizeInches=[14, 7])
def plot_BollingerBands(self, new_figure=0, L=21):
    if (new_figure == 1):
        self.new_plot(title="Bollinger Bands", xlabel="time", ylabel="Close")

    SMA = self.get_SMA(L=L)
    BB = self.get_BollingerBand(L=L)

    self.plot_timeSeries()
    gl.plot(self.dates, SMA + BB, legend=["SMA + BB"], nf=0)
    gl.plot(self.dates, SMA - BB, legend=["SMA - BB"], nf=0)
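# Hedged sketch of the two indicator helpers used by plot_BollingerBands,
# since they are not defined in this snippet. Assumptions: get_SMA is an
# L-sample simple moving average and get_BollingerBand is K rolling standard
# deviations (K = 2 is the conventional choice), so the curves drawn above
# are SMA +- K*std.
import pandas as pd

def sma_sketch(close, L=21):
    return pd.Series(np.asarray(close).ravel()).rolling(L).mean().values

def bollinger_band_sketch(close, L=21, K=2):
    return K * pd.Series(np.asarray(close).ravel()).rolling(L).std().values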
def IFE_a(self, year_start=1996, year_finish=2016, window=10):
    ## Basic, just look at the bloody graphs
    self.pf.set_interval(dt.datetime(year_start, 1, 1),
                         dt.datetime(year_finish, 1, 1))
    dates = self.get_dates()
    prices = self.pf.get_timeSeries(self.period)
    returns = self.get_Returns()
    # print(returns.shape)

    gl.plot(dates, prices,
            labels=["Monthly price of Symbols", "Time (years)",
                    "Price (dollar)"],
            legend=self.pf.symbols.keys(), loc=2)
    gl.savefig(folder_images + 'pricesAll.png',
               dpi=150, sizeInches=[2 * 8, 1.5 * 6])

    gl.plot(dates, returns,
            labels=["Monthly return of the Symbols", "Time (years)",
                    "Return (%)"],
            legend=self.pf.symbols.keys())
    gl.savefig(folder_images + 'returnsAll.png',
               dpi=150, sizeInches=[2 * 8, 1.5 * 6])

    ## Distribution obtaining
    gl.set_subplots(2, 2)
    for i in range(4):
        gl.histogram(returns[:, i], labels=[self.symbol_names[i]])
    gl.savefig(folder_images + 'returnDistribution.png',
               dpi=150, sizeInches=[2 * 8, 1.5 * 6])

    ############## Possible Transformations ##################
    ws = [3, 4, 6, 8]
    gl.set_subplots(2, 2)
    for w in ws:
        means, ranges = bMl.get_meanRange(prices[:, 1], w)
        gl.scatter(means, ranges, lw=4,
                   labels=["", "mean", "range"],
                   legend=["w = %i" % (w)])
    gl.savefig(folder_images + 'rangeMean.png',
               dpi=150, sizeInches=[2 * 8, 1.5 * 6])
def plot_single_trials(Nclasses, X_train, y_train, n_trials_to_show=2,
                       colors=["r", "k"]):
    gl.plot([0], [0])
    for i in range(Nclasses):
        X_train_class_i = [X_train[j]
                           for j in np.where(np.array(y_train) == i)[0]]
        max_val = 0
        if (i >= 1):
            max_val += np.max(np.abs(X_train_class_i[i - 1])) + \
                       np.max(np.abs(X_train_class_i[i]))
        for ntr in range(n_trials_to_show):
            gl.plot([], X_train_class_i[ntr] + max_val, color=colors[i], nf=0,
                    labels=["Some trials evolution", "time", "signal"])
def plot_allocations(self, allocations,
                     labels=['Portfolios', "Risk (std)", "Return"],
                     legend=["Portfolios"], lw=5, alpha=1.0, color=None, nf=1):
    ## Given a set of allocations, this function plots them into a graph.
    returns, risks = self.compute_allocations(allocations)

    ## Scatter the random portfolios
    gl.plot(risks, returns, labels=labels, legend=legend, nf=nf,
            lw=lw, alpha=alpha, color=color)
def plot_data_RNN_1d_2axes(X_data_tr, Y_data_tr, xgrid_real_func,
                           ygrid_real_func, X_data_val, Y_data_val,
                           x_grid, all_y_grid, most_likely_ygrid,
                           alpha_points, color_points_train, color_points_val,
                           color_most_likey, color_mean, color_truth,
                           ax1, ax2):
    """
    This function plots the outputs of the Regression model for the 1D example
    """
    ## Compute mean and std of regression
    std_samples_grid = np.std(all_y_grid, axis=1)
    mean_samples_grid = np.mean(all_y_grid, axis=1)

    ############## ax1: Data + Most likely + Real + Mean !! ########################
    # gl.scatter(X_data_tr, Y_data_tr, ax = ax1, lw = 3,  # legend = ["tr points"],
    #            labels = ["Data and predictions", "", "Y"],
    #            alpha = alpha_points, color = color_points_train)
    gl.plot([], x_grid.T, ax=ax1, lw=2, ls="--",
            alpha=0.8, color=color_points_train)
    # gl.plot(xgrid_real_func, ygrid_real_func, ax = ax1, alpha = 0.90,
    #         color = color_truth, legend = ["Truth"])
    gl.plot([], most_likely_ygrid.T, ax=ax1, alpha=0.90,
            color=color_most_likey, legend=["Most likely"], ls="--", lw=2)
    # gl.plot([], mean_samples_grid.T, ax = ax1, alpha = 0.90,
    #         color = color_mean, legend = ["Posterior mean"],
    #         AxesStyle = "Normal - No xaxis")

    ############## ax2: Data + Realizations of the function !! ######################
    gl.plot([], x_grid.T, ax=ax2, lw=2, ls="--",
            alpha=0.8, color=color_points_val)
    for y_grid_sample in all_y_grid:
        gl.plot([], y_grid_sample.T, ax=ax2, lw=2, ls="--",
                alpha=0.3, color="k")
def plot_decomposition(timeSeries):
    trend, seasonal, residual = tsa.seasonal_decompose(timeSeries)

    gl.set_subplots(4, 1)
    gl.plot([], timeSeries, labels=["", "time", "Original"],
            legend=['Original'], loc="best")
    gl.plot([], trend, labels=["", "time", "trend"],
            legend=['trend'], loc="best")
    gl.plot([], seasonal, labels=["", "time", "seasonal"],
            legend=['seasonal'], loc="best")
    gl.plot([], residual, labels=["", "time", "residual"],
            legend=['residual'], loc="best")
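# Note: the tuple unpacking above assumes tsa is a local wrapper that returns
# (trend, seasonal, residual). With statsmodels itself the decomposition comes
# back as a single result object, so an equivalent hedged sketch (the period
# of 12 is an illustrative assumption) would be:
def decompose_statsmodels_sketch(timeSeries, period=12):
    from statsmodels.tsa.seasonal import seasonal_decompose
    result = seasonal_decompose(timeSeries, period=period)
    # The components are attributes of the result, not a tuple:
    return result.trend, result.seasonal, result.resid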
def plot_TrCrMr(self):
    ## Plots the Three deadly cross thingy.
    timeSeries = self.get_timeSeries()
    TrCrMr = self.get_TrCrMr()
    labels = ["Trio de la Muerte", "Time", "Price"]
    gl.plot(self.dates, timeSeries, labels=labels,
            legend=["Price"], color="k", nf=1)
    gl.plot(self.dates, TrCrMr, labels=labels,
            legend=["Trio de la Muerte"], color="b", nf=0)
def marketTiming(self, returns=[], ind_ret=[], mode="Treynor-Mazuy"):
    # Investigate if the model is good.
    # We add a quadratic term to the market model.
    returns = ul.fnp(returns)
    ind_ret = ul.fnp(ind_ret)

    if (returns.size == 0):
        returns = self.get_PortfolioReturn()
    if (ind_ret.size == 0):
        ind_ret = self.get_indexReturns()

    # Instead of fitting a line, we fit a parabola, to try to see
    # if we do better than the market return: if when Rm is higher we have
    # higher beta, and when Rm is lower we have lower beta, the quadratic
    # coefficient captures that timing ability.
    gl.scatter(ind_ret, returns,
               labels=["Treynor-Mazuy", "Index Return", "Portfolio Return"],
               legend=["Returns"])

    ## Linear regression:
    Xres = ind_ret
    coeffs = bMl.get_linearRef(Xres, returns)

    Npoints = 10000
    x_grid = np.array(range(Npoints)) / float(Npoints)
    x_grid = x_grid * (max(ind_ret) - min(ind_ret)) + min(ind_ret)
    x_grid = x_grid.reshape(Npoints, 1)
    x_grid_2 = np.concatenate((np.ones((Npoints, 1)), x_grid), axis=1)
    y_grid = x_grid_2.dot(np.array(coeffs))

    gl.plot(x_grid, y_grid, legend=["Linear Regression"], nf=0)

    ## Quadratic regression:
    Xres = np.concatenate((ind_ret, np.power(ind_ret, 2)), axis=1)
    coeffs = bMl.get_linearRef(Xres, returns)

    x_grid_2 = np.concatenate((np.ones((Npoints, 1)), x_grid,
                               np.power(x_grid, 2).reshape(Npoints, 1)),
                              axis=1)
    y_grid = x_grid_2.dot(np.array(coeffs))
    # print(y_grid.shape)
    gl.plot(x_grid, y_grid, legend=["Quadratic Regression"], nf=0)

    print(coeffs)
    return 1
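# Hedged sketch of the same Treynor-Mazuy fit using plain numpy instead of
# the bMl.get_linearRef helper (whose implementation is not shown here). The
# model is R_p = a + b*R_m + c*R_m^2; a positive c suggests market-timing
# ability.
def treynor_mazuy_sketch(portfolio_returns, market_returns):
    c, b, a = np.polyfit(np.asarray(market_returns).ravel(),
                         np.asarray(portfolio_returns).ravel(), deg=2)
    return a, b, c   # intercept, market beta, timing coefficient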
def print_chain(nu):
    x_values, y_values = get_xy_chain_values(nu)
    lambda_0 = get_costate_value(nu_values, 0)
    ax1 = gl.plot(x_values, y_values, lw=3,
                  labels=["Suspended chains for different number of elements N",
                          "z", "y"],
                  legend=["N = %i" % (N) +
                          r". $\lambda_0 = [%.3f,%.3f]$"
                          % (lambda_0[0], lambda_0[1])],
                  AxesStyle="Normal ")
    return ax1
def print_chain(nu):
    s_values, x_s_values = get_values(nu)
    x_s_values = np.array(x_s_values)
    lambda_0 = get_costate_value(nu_values, 0)
    ax1 = gl.plot(x_s_values[:, 0], x_s_values[:, 1], lw=3,
                  labels=["Continuous version of the suspended chain",
                          "z", "y"],
                  legend=[r"$\lambda_0 = [%.3f,%.3f]$"
                          % (lambda_0[0], lambda_0[1])],
                  AxesStyle="Normal ")
    return ax1
def plot_reconstruction_data(Xtrain_sample_cpu, Xtrain_reconstruction,
                             Xtrain_reconstruction_samples, ax1, ax2):
    title = "Original sample and reconstructions"
    x = []
    alpha = 0.9
    cumulated_samples = 0

    gl.plot(x, Xtrain_sample_cpu, ax=ax1, legend=["Original"], color="k",
            alpha=alpha, labels=[title, "", r"Rate"],
            AxesStyle="Normal - No xaxis")
    gl.plot(x, Xtrain_reconstruction, ax=ax1, legend=["Reconstruction"],
            color="b", alpha=alpha, labels=[title, "", r"Rate"],
            AxesStyle="Normal - No xaxis")

    ### SEVERAL OF THE transformations ######
    gl.plot(x, Xtrain_sample_cpu, ax=ax2, legend=["Original"], color="k",
            alpha=alpha, labels=[title, "", r"Rate"],
            AxesStyle="Normal - No xaxis")
    alpha = 0.2
    gl.plot(x, Xtrain_reconstruction_samples, ax=ax2, legend=[], color="b",
            alpha=alpha, labels=[title, "", r"Rate"],
            AxesStyle="Normal - No xaxis")
def plot_corrab(self, symbol, nf=1):
    # This function plots the returns of a symbol compared
    # to the index, and computes the regression and correlation parameters.
    index = self.Sindex  # The index
    sym_ret = self.pf.symbols[symbol].TDs[self.period].get_timeSeriesReturn()
    ind_ret = self.get_indexReturns()

    # Mean and covariance (observations are in rows, so rowvar = False)
    data = np.concatenate((sym_ret, ind_ret), axis=1)
    means = np.mean(data, axis=0)
    cov = np.cov(data, rowvar=False)

    # Regression
    coeffs = bMl.get_linearRef(ind_ret, sym_ret)

    gl.scatter(ind_ret, sym_ret,
               labels=["Gaussianity study", "Index: " + self.Sindex, symbol],
               legend=["Returns"], nf=nf)

    ## Linear regression:
    Xres = ind_ret
    coeffs = bMl.get_linearRef(Xres, sym_ret)

    Npoints = 10000
    x_grid = np.array(range(Npoints)) / float(Npoints)
    x_grid = x_grid * (max(ind_ret) - min(ind_ret)) + min(ind_ret)
    x_grid = x_grid.reshape(Npoints, 1)
    x_grid_2 = np.concatenate((np.ones((Npoints, 1)), x_grid), axis=1)
    y_grid = x_grid_2.dot(np.array(coeffs))

    gl.plot(x_grid, y_grid,
            legend=["b: %.2f, a: %.2f" % (coeffs[1], coeffs[0])], nf=0)
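# Hedged sketch of the bMl.get_linearRef helper used above, which is not
# defined in this snippet. Assumption (consistent with how its output is
# indexed): it returns [intercept, slope, ...] from an ordinary least-squares
# fit of y on the columns of X.
def get_linearRef_sketch(X, y):
    X = np.asarray(X)
    X = X.reshape(X.shape[0], -1)
    X_design = np.concatenate((np.ones((X.shape[0], 1)), X), axis=1)
    coeffs, _, _, _ = np.linalg.lstsq(X_design,
                                      np.asarray(y).reshape(-1, 1),
                                      rcond=None)
    return coeffs.ravel()   # [a, b, ...] such that y ~ a + b*x + ...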
def print_half_chain(nu_z):
    x_values, y_values = get_xy_half_chain_values(nu_z)
    lambda_0 = get_costate_value_half(nu_values, 0)
    ax1 = gl.plot(x_values, y_values, lw=3,
                  labels=["Half suspended chains for different number of elements N",
                          "z", "y"],
                  legend=["N = %i" % (N) +
                          r". $\lambda_0 = [%.3f,%.3f]$"
                          % (lambda_0[0], lambda_0[1])],
                  AxesStyle="Normal ")
    return ax1
def print_chain(lambda_0):
    x_values, y_values = get_xy_chain_values(lambda_0)
    lambda_0 = get_costate_value(lambda_0, 0)
    ax1 = gl.plot(x_values, y_values, lw=3,
                  labels=["Suspended chains for different number of elements N (Pontryagin)",
                          "z", "y"],
                  legend=["N = %i" % (N) +
                          r". $\lambda_0 = [%.3f,%.3f]$"
                          % (lambda_0[0], lambda_0[1])],
                  AxesStyle="Normal ")
    return ax1
def plot_compound_understanding():
    years = 50  # Total duration of the investment
    years = range(1, years + 1)
    frequencies = [0.1, 0.5, 1.0, 2, 12, 52, 365]
    R = 0.05  # Annual Rate
    returns = []

    flag_plot = 1
    for freq in frequencies:
        returns_i = []
        for year in years:
            returns_i.append(compound_interest(R / freq, year * freq))
        gl.plot(years, returns_i,
                labels=["Bond compound Return", "Years", "Return"],
                legend=["Frequency %f" % freq],
                nf=flag_plot, loc=2)
        flag_plot = 0

    # We also plot the one with simple interest
    returns_i = []
    for year in years:
        returns_i.append(simple_interest(R / freq, year * freq))
    gl.plot(years, returns_i,
            labels=["Bond compound Return", "Years", "Return"],
            legend=["Simple interest"], lw=5,
            nf=flag_plot, loc=2)

    # We also plot the one with spot interest
    returns_i = []
    for year in years:
        returns_i.append(spot_interest(R / freq, year * freq))
    gl.plot(years, returns_i, legend=["Spot interest"], lw=5,
            nf=flag_plot, loc=2)

    ## The limit is the Spot Rate !! When we compound continuously,
    ## the return is e^(RT).
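# Small hedged check of the continuous-compounding limit mentioned above:
# (1 + R/n)^(n*T) -> e^(R*T) as the compounding frequency n grows.
if __name__ == "__main__":
    R, T = 0.05, 10
    for n in [1, 12, 365, 24 * 365]:
        print(n, (1 + R / n) ** (n * T))   # approaches the limit from below
    print("limit", np.exp(R * T))          # e^(R*T) ~ 1.6487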
def create_Bayesian_analysis_charts_simplified(model, train_dataset,
                                               validation_dataset, tr_loss,
                                               val_loss, KL_loss,
                                               folder_images, epoch_i=None):
    # Configurations of the plots
    alpha_points = 0.2
    color_points_train = "dark navy blue"
    color_points_val = "amber"
    color_train_loss = "cobalt blue"
    color_val_loss = "blood"
    color_truth = "k"
    color_mean = "b"
    color_most_likey = "y"

    ################################ Divide in plots ##############################
    gl.init_figure()
    ax1 = gl.subplot2grid((6, 3), (0, 0), rowspan=3, colspan=1)
    ax2 = gl.subplot2grid((6, 3), (3, 0), rowspan=3, colspan=1,
                          sharex=ax1, sharey=ax1)
    ax3 = gl.subplot2grid((6, 3), (0, 1), rowspan=2, colspan=1)
    ax4 = gl.subplot2grid((6, 3), (2, 1), rowspan=2, colspan=1, sharex=ax3)
    ax5 = gl.subplot2grid((6, 3), (4, 1), rowspan=2, colspan=1, sharex=ax3)
    ax6 = gl.subplot2grid((6, 3), (0, 2), rowspan=3, colspan=1)
    ax7 = gl.subplot2grid((6, 3), (3, 2), rowspan=3, colspan=1, sharex=ax6)

    ####### ax1, ax2: Get confusion matrices ##########
    labels_classes, confusion = model.get_confusion_matrix(train_dataset)
    plot_confusion_matrix(confusion, labels_classes, ax1)
    labels_classes, confusion = model.get_confusion_matrix(validation_dataset)
    plot_confusion_matrix(confusion, labels_classes, ax2)

    ############## ax3 ax4 ax5: Loss Evolution !! ######################
    ## ax3: Evolution of the data loss
    gl.plot([], tr_loss, ax=ax3, lw=3,
            labels=["Losses", "", "Data loss (MSE)"],
            legend=["train"], color=color_train_loss)
    gl.plot([], val_loss, ax=ax3, lw=3, legend=["validation"],
            color=color_val_loss, AxesStyle="Normal - No xaxis")

    ## ax4: The evolution of the KL loss
    gl.plot([], KL_loss, ax=ax4, lw=3, labels=["", "", "KL loss"],
            legend=["Bayesian Weights"],
            AxesStyle="Normal - No xaxis", color="k")

    ## ax5: Evolution of the total loss
    gl.plot([], tr_loss, ax=ax5, lw=3,
            labels=["", "epoch", "Total Loss (Bayes)"],
            legend=["train"], color=color_train_loss)
    gl.plot([], val_loss, ax=ax5, lw=3, legend=["validation"],
            color=color_val_loss)

    ############## ax6 ax7: Variational Weights !! ######################
    create_plot_variational_weights(model, ax6, ax7)
    gl.set_zoom(ax=ax6, ylim=[-0.1, 10])
    gl.set_zoom(ax=ax7, xlim=[-2.5, 2.5], ylim=[-0.1, 0.5])

    # Set final properties and save figure
    gl.set_fontSizes(ax=[ax1, ax2, ax3, ax4, ax5, ax6, ax7],
                     title=20, xlabel=20, ylabel=20,
                     legend=10, xticks=12, yticks=12)
    gl.subplots_adjust(left=.09, bottom=.10, right=.90, top=.95,
                       wspace=.30, hspace=0.10)

    if (epoch_i is None):
        gl.savefig(folder_images + 'Training_Example_Data_Bayesian.png',
                   dpi=100, sizeInches=[20, 10])
    else:
        gl.savefig(folder_images + '%i.png' % epoch_i,
                   dpi=100, sizeInches=[20, 10],
                   close=True, bbox_inches="tight")
# Create the t-values
tgrid = np.linspace(t0, tf, int(float(tf - t0) / delta_t))
tgrid = tgrid.reshape(tgrid.size, 1)
N = tgrid.size

# Create the signal
X = mean_function(tgrid, f1=1, f2=5, a1=0.4, a2=0.1,
                  phi2=2 * np.pi / 7, m=0.1)

if (plot_mean_signal and plot_flag):
    ## Plot the original function
    gl.scatter(tgrid, X, lw=1, alpha=0.9, color="k", nf=1,
               labels=["The true deterministic signal mu(t)", "t", "mu(t)"])
    gl.plot(tgrid, X, lw=2, color="k", ls="--", legend=["True signal"])
    gl.set_fontSizes(title=20, xlabel=20, ylabel=20,
                     legend=20, xticks=20, yticks=20)
    gl.savefig(folder_images + 'GP_mean.png',
               dpi=100, sizeInches=[2 * 8, 2 * 2])

###########################################################################
############### Generate the structural noise #############################
###########################################################################
"""
Now we generate the stochastic process that we add to X(t), generating the
noisy signal Y(t) = X(t) + e(t), where we assume e(t) is Gaussian with
mean 0: e(t) ~ N(0, sigma_t).

So we have a Gaussian process, since each set of samples forms a jointly
Gaussian distribution. The relation between the noises will be given by the
covariance matrix C. This will tell how big the noises are and how they relate
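# Hedged sketch of generating the structural noise described above: jointly
# Gaussian noise whose covariance matrix C couples nearby time points. The
# squared-exponential kernel and its parameters (sigma, length scale l) are
# illustrative assumptions, not taken from this snippet.
def sample_structural_noise(tgrid, sigma=0.1, l=0.5):
    d = tgrid.reshape(-1, 1) - tgrid.reshape(1, -1)   # pairwise t_i - t_j
    C = (sigma ** 2) * np.exp(-0.5 * (d / l) ** 2)    # covariance matrix C
    e = np.random.multivariate_normal(np.zeros(d.shape[0]), C)
    return e.reshape(-1, 1)                           # Y(t) = X(t) + e(t)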
gl.plot_timeSeriesRange(dates, EstimatedPrice, sigmaXhatList,
                        legend=["Estimate"], nf=0)

# Plot the one-step prediction
dates_OneStepPred = ul.fnp(range(0, dates.size + 1))
gl.plot_timeSeriesRange(dates_OneStepPred, PredictedPrice[:, :],
                        sigmaXpredList[:, :],
                        legend=["Prediction"], nf=0)

## Plot the future prediction
dates_test = ul.fnp(range(dates.size, dates.size + Ntest))
gl.plot_timeSeriesRange(dates_test, PredictedPriceTest[:-1, :],
                        sigmaXpredtestList[:-1, :],
                        legend=["Prediction future"], nf=0)

# Using the state formulation, plot the prediction line from each state point.
for n in range(0, Ns):
    start_point = XhatList[:, [n]]
    end_point = A.dot(start_point)
    gl.plot([dates_OneStepPred[n], dates_OneStepPred[n + 1]],
            [start_point[0, 0], end_point[0, 0]],
            nf=0, color="k")

###########################################################################
# TODO: Obtain the best components of a series: do PCA and then invert the
# transformation?
# Use a way of relating the time series to create many points from which the
# GP can then recover the initial signal?
# Use the piFilter library and the Gaussian processes from sklearn.
# TODO: Use this df["date"] = pd.to_datetime(df.index)
# Perform module for linear regression as in Time Series.
# Estimation and Prediction of Variance and True Value.
# Use ARMA models.
# Gaussian Process
# HP filtering
# Normal Averages and BB.
# Kalman Filter
### Set the initial parameters
theta_init = None
model_theta_init = None

############# PERFORM THE EM #############
logl, theta_list, model_theta_list = myEM.fit(
    Xdata, model_theta_init=model_theta_init, theta_init=theta_init)
spf.print_final_clusters(myDManager, clusters_relation,
                         theta_list[-1], model_theta_list[-1])

#######################################################################
#### Plot the evolution of the centroids likelihood !
#######################################################################
gl.init_figure()
gl.plot(range(1, np.array(logl).flatten()[1:].size + 1),
        np.array(logl).flatten()[1:],
        legend=["EM LogLikelihood"],
        labels=["Convergence of LL with generated data", "Iterations", "LL"],
        lw=2)
gl.savefig(folder_images + 'Likelihood_Evolution. K_G:' + str(K_G) +
           ', K_W:' + str(K_W) + ', K_vMF:' + str(K_vMF) + '.png',
           dpi=100, sizeInches=[12, 6])

if (perform_HMM_after_EM):
    Ninit = 1
    ############# Create the EM object and fit the data to it. #############
    clusters_relation = "MarkovChain1"   # MarkovChain1 / independent
    myEM = CEM.CEM(distribution=myDManager,
                   clusters_relation=clusters_relation,
                   T=T, Ninit=Ninit, delta_ll=delta_ll,
                   verbose=verbose, time_profiling=time_profiling)

    if (0):
        theta_init = theta_list[-1]
        A_init = np.concatenate([model_theta_list[-1][0] for k in range(K)],
                                axis=0)
    nfolds=5)
if (i == 0):
    ll_train_best, ll_test_best, All_Ks_params_best = \
        ll_train, ll_test, All_Ks_params
else:
    for K_i in range(len(Klusters)):
        for ic in range(Nclasses):
            if (ll_train[ic, K_i] > ll_train_best[ic, K_i]):
                ll_train_best[ic, K_i] = copy.deepcopy(ll_train[ic, K_i])
                ll_test_best[ic, K_i] = copy.deepcopy(ll_test[ic, K_i])
                All_Ks_params_best[K_i] = copy.deepcopy(All_Ks_params[K_i])

for ic in range(Nclasses):
    gl.plot(Klusters,
            np.array([ll_train_best[ic], ll_test_best[ic]]).T,
            legend=["tr", "Val"],
            labels=["EM class = " + str(ic), "States", "loglike"])
    gl.savefig(file_dir="./OnePerson_5fold_cluster" + str(ic) +
                        "/Iteration" + str(i) + ".png",
               bbox_inches='tight',
               sizeInches=[],   # The size in inches as a list
               close=True,      # If we close the figure once saved
               dpi=100)         # Density of pixels !! Same image but more quality !

loading_precomputed_centroids = 1
if (loading_precomputed_centroids):
    # pkl.store_pickle("./OnePerson1FoldEM.pkl",
    #                  [ll_train_best, ll_test_best, All_Ks_params_best])
    cosas = pkl.load_pickle("./OnePerson1FoldEM.pkl")
    class_i = 1
## Plot the loss function against the parameters !!
## Get the surface for the loss

####### PLOT THE DATA AND THE LEARNT LINE ############
gl.init_figure()
ax1 = gl.scatter(X_data_tr, Y_data_tr, lw=3, legend=["tr points"],
                 labels=["Data", "X", "Y"])
ax2 = gl.scatter(X_data_val, Y_data_val, lw=3, legend=["val points"])
gl.set_fontSizes(ax=[ax1, ax2], title=20, xlabel=20, ylabel=20,
                 legend=20, xticks=12, yticks=12)

x_grid = np.linspace(np.min([X_data_tr]) - 1, np.max([X_data_val]) + 1, 100)
y_grid = x_grid * W_values + b_values

gl.plot(x_grid, y_grid, legend=["training line"])
gl.savefig(folder_images + 'Training_Example_Data.png',
           dpi=100, sizeInches=[14, 4])

####### PLOT THE EVOLUTION OF RMSE AND PARAMETERS ############
gl.set_subplots(2, 1)
ax1 = gl.plot([], tr_loss, nf=1, lw=3,
              labels=["RMSE loss and parameters. Learning rate: %.3f"
                      % train_config.lr, "", "RMSE"],
              legend=["train"])
gl.plot([], val_loss, lw=3, legend=["validation"])

ax2 = gl.plot([], W_list, nf=1, lw=3, sharex=ax1,
              labels=["", "", "Parameters"], legend=["W"], color="b")
gl.plot([], b_list, lw=3, labels=["", "epochs", "Parameters"],
        legend=["b"], color="g")

gl.set_fontSizes(ax=[ax1, ax2], title=20, xlabel=20, ylabel=20,
                 legend=20, xticks=12, yticks=12)
def IFE_f2(self, ObjectiveRlist=[0.003], Rf=0.0,
           year_start=1996, year_finish=2016, window=10):
    ### The official one can be done executing the exercise c with another Rf
    ## Just another graph to show that now we should not use all the data.
    # Choose a desired return; using training samples, calculate with the
    # market line the optimal portfolio for it. Then calculate, for the next
    # year, the real return of that portfolio. Do this for several years.
    self.set_Rf(Rf)

    All_returns = []
    All_vars = []
    windowslist = range(1, 13)
    ObjectiveR = 0.03
    for window in windowslist:
        PortfolioReturns = []
        all_dates = []
        for year_test in range(year_start, year_finish - window + 1 - 1):  # +1 !!
            # Set the dates
            self.pf.set_interval(dt.datetime(year_test, 1, 1),
                                 dt.datetime(year_test + window, 1, 1))
            # Obtain the market line !!
            w = self.TangentPortfolio(Rf=Rf)  # Obtain allocation
            self.set_allocation(w)
            # Obtain the expected return and std when using all our money !
            expRet, stdRet = self.get_metrics(investRf="no")
            param = bMl.obtain_equation_line(Rf, expRet, stdRet)
            bias, slope = param
            X = (ObjectiveR - Rf) / (expRet - Rf)
            wdesired = w * X

            self.pf.set_interval(dt.datetime(year_test + window, 1, 1),
                                 dt.datetime(year_test + window + 1, 1, 1))
            self.set_allocation(wdesired)   # Set the allocation
            # Get the expected return for that year
            expRet, stdRet = self.get_metrics()
            PortfolioRet = self.yearly_Return(expRet)  # Get yearly returns
            PortfolioReturns.append(PortfolioRet)

            dates = self.get_dates()
            all_dates.append(dates[0])

        All_returns.append(np.mean(PortfolioReturns))
        All_vars.append(np.std(PortfolioReturns) / np.sqrt(np.sqrt(12 * 12)))

    # All_returns = np.array(All_returns).reshape(len(ObjectiveRlist), 10)
    # print(All_returns)
    All_means = All_returns
    print(All_returns)
    # All_means = np.mean(All_returns, axis = 1)
    print(ul.fnp(All_returns).shape)
    print(All_means)
    # print(All_means - ObjectiveRlist)
    # All_means = np.divide((All_means - ObjectiveRlist), ObjectiveRlist)
    # print(All_means)

    ## Graph with the desired and obtained returns and the returns of the index
    gl.bar(windowslist, All_means,
           labels=["Obtained returns", "Time (years)", "Return (%)"],
           legend=["Index Return"], alpha=0.8, nf=1)
    gl.plot(windowslist, All_vars,
            labels=["Obtained returns", "Time (years)", "Return (%)"],
            legend=["Index Return"], alpha=0.8, nf=0)

    gl.savefig(folder_images + 'best_Objective.png',
               dpi=150, sizeInches=[2 * 8, 2 * 6])
def IFE_f(self, ObjectiveR=0.003, Rf=0.0,
          year_start=1996, year_finish=2016, window=10):
    ### The official one can be done executing the exercise c with another Rf
    ## Just another graph to show that now we should not use all the data.
    # Choose a desired return; using training samples, calculate with the
    # market line the optimal portfolio for it. Then calculate, for the next
    # year, the real return of that portfolio. Do this for several years.
    self.set_Rf(Rf)

    nf_flag = 1
    All_stds = []
    PortfolioReturns = []
    IndexReturns = []
    all_dates = []
    for year_test in range(year_start, year_finish - window + 1 - 1):  # +1 !!
        # Set the dates
        self.pf.set_interval(dt.datetime(year_test, 1, 1),
                             dt.datetime(year_test + window, 1, 1))

        # Obtain the market line !!
        w = self.TangentPortfolio(Rf=Rf)  # Obtain allocation
        self.set_allocation(w)
        # Obtain the expected return and std when using all our money !
        expRet, stdRet = self.get_metrics(investRf="no")
        param = bMl.obtain_equation_line(Rf, expRet, stdRet)
        bias, slope = param
        X = (ObjectiveR - Rf) / (expRet - Rf)
        wdesired = w * X

        ## Check that the output of this portfolio is the desired one.
        self.set_allocation(wdesired)   # Set the allocation
        expRet, stdRet = self.get_metrics()  # Expected return for that year
        # print(ret)

        ## Now that we have the desired w*X, we calculate the return of the
        ## portfolio in the following year. To do so, we set the dates only
        ## to the next year, set the portfolio allocation and calculate the
        ## yearly expected return !! Also, one month before, in order to get
        ## the returns of the first month.
        self.pf.set_interval(dt.datetime(year_test + window, 1, 1),
                             dt.datetime(year_test + window + 1, 1, 1))
        self.set_allocation(wdesired)   # Set the allocation
        expRet, stdRet = self.get_metrics()  # Expected return for that year
        PortfolioRet = self.yearly_Return(expRet)  # Get yearly returns
        PortfolioReturns.append(PortfolioRet)

        All_stds.append(self.yearly_covMatrix(stdRet))

        indexRet = self.get_indexMeanReturn()
        indexRet = self.yearly_Return(indexRet)
        IndexReturns.append(indexRet)

        # dates = self.get_dates()
        all_dates.append(year_test + window + 1)

        ## Graph with the evolution of the portfolio price after the assignment
        gl.plot(range(1, 13), np.cumsum(self.get_PortfolioReturn()),
                nf=nf_flag,
                labels=["Evolution of returns by month", "Months passed",
                        "Cumulative Return"],
                legend=[str(year_test + window + 1)])
        nf_flag = 0
    # print(ret)
    gl.savefig(folder_images + 'returnsEvolMonth.png',
               dpi=150, sizeInches=[2 * 8, 2 * 6])

    ## Graph with the desired and obtained returns and the returns of the index
    gl.bar(all_dates[:], IndexReturns,
           labels=["Obtained returns", "Time (years)", "Return (%)"],
           legend=["Index Return"], alpha=0.8, nf=1)
    gl.bar(all_dates[:], PortfolioReturns,
           labels=["Returns of year", "Year", "Value"],
           legend=["Portfolio Return"], alpha=0.8, nf=0)

    gl.scatter(all_dates[:],
               self.yearly_Return(ObjectiveR) * np.ones((len(all_dates[:]), 1)),
               legend=["Objective Return"], nf=0)
    gl.scatter(all_dates[:], All_stds,
               legend=["Std of the portfolio return"], nf=0)

    gl.savefig(folder_images + 'returnsEvolYears.png',
               dpi=150, sizeInches=[2 * 8, 2 * 6])

    #### Let's also plot where the efficient frontier went
    nf_flag = 1
    PortfolioReturns = []
    IndexReturns = []
    all_dates = []

    gl.set_subplots(2, 3)
    for year_test in range(year_start, year_start + 6):  # +1 !!
        # Set the dates
        self.pf.set_interval(dt.datetime(year_test, 1, 1),
                             dt.datetime(year_test + window, 1, 1))

        optimal, portfolios = self.efficient_frontier(kind="Tangent")
        self.plot_allocations(
            portfolios,
            labels=["Evolution of the efficient frontier"],
            legend=["Frontier " + str(year_test + window) + " before"],
            color="k", nf=1)

        self.pf.set_interval(dt.datetime(year_test + window, 1, 1),
                             dt.datetime(year_test + window + 1, 1, 1))
        self.set_allocation(self.TangentPortfolio(Rf=Rf))
        self.plot_allocations(
            portfolios,
            legend=["Frontier " + str(year_test + window) + " after"],
            color="r", nf=0)

    gl.savefig(folder_images + 'effEvol.png',
               dpi=80, sizeInches=[4 * 8, 3 * 6])
Yjoint = Xjoint
cov = np.cov(Yjoint)
mu = np.mean(Yjoint, axis=1)

ax1 = gl.subplot2grid((1, 4), (0, 0), rowspan=1, colspan=1)
gl.scatter(Yjoint[0, :], Yjoint[1, :], alpha=0.5, ax=ax1, lw=4,
           AxesStyle="Normal", labels=["Original", "", ""],
           color="dark navy blue")

xx, yy, zz = bMA.get_gaussian2D_pdf(xbins=40j, ybins=40j, mu=mu, cov=cov,
                                    std_K=std_K, x_grid=None)
ax1.contour(xx, yy, zz, linewidths=3, linestyles="solid",
            alpha=0.8, colors=None, zorder=100)
gl.plot(square_unit[0, :], square_unit[1, :], color="y")

## 1 Rotation graph
Yjoint = Vherm.dot(Xjoint)
square_unit = Vherm.dot(square_unit)
cov = np.cov(Yjoint)
mu = np.mean(Yjoint, axis=1)
mu = mu.reshape(mu.size, 1)

ax1 = gl.subplot2grid((1, 4), (0, 1), rowspan=1, colspan=1,
                      sharex=ax1, sharey=ax1)
gl.scatter(Yjoint[0, :], Yjoint[1, :], alpha=0.5, ax=ax1, lw=4,
           AxesStyle="Normal", labels=["Rotation V", "", ""],
           color="dark navy blue")

xx, yy, zz = bMA.get_gaussian2D_pdf(xbins=40j, ybins=40j, mu=mu, cov=cov,
                                    std_K=std_K, x_grid=None)
ax1.contour(xx, yy, zz, linewidths=3, linestyles="solid",
            alpha=0.8, colors=None, zorder=100)
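# Note on the rotation step above: if Y = V X, the sample covariance
# transforms as cov(Y) = V cov(X) V^T, which is why the contours rotate with
# the point cloud. A tiny hedged check (V and X below are illustrative):
if __name__ == "__main__":
    theta = np.pi / 6
    V = np.array([[np.cos(theta), -np.sin(theta)],
                  [np.sin(theta), np.cos(theta)]])
    X = np.random.randn(2, 1000)
    print(np.allclose(np.cov(V.dot(X)), V.dot(np.cov(X)).dot(V.T)))  # True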
def yieldPriceStudy(self, initial_price=80):
    # The initial price is for the approximation of the
    # function with a quadratic equation at one point.

    #### Obtain the yield-price curve from the structure
    Np = 100
    ytm_list = np.linspace(0.001, 0.40, Np)
    prices = []
    mdurations = []
    convexities = []
    for ytm in ytm_list:
        price = self.get_price(ytm=ytm)
        mduration = self.get_mduration(price=price)
        convexity = self.get_convexity(price=price)
        prices.append(self.get_price(ytm=ytm))
        mdurations.append(mduration)
        convexities.append(convexity)

    gl.set_subplots(2, 2)
    gl.plot(ytm_list, prices,
            labels=["Yield curve", "Yield to maturity", "Price of Bond"],
            legend=["Yield curve"], loc=3)
    gl.plot(ytm_list, prices,
            labels=["Duration and Yield", "Yield to maturity", "Price of Bond"],
            legend=["Yield curve"], loc=3)
    gl.plot(ytm_list, mdurations, na=1, nf=0,
            legend=["Duration"], loc=1)
    gl.plot(ytm_list, prices,
            labels=["Convexity and Yield", "Yield to maturity", "Price of Bond"],
            legend=["Yield curve"], loc=3)
    gl.plot(ytm_list, convexities, na=1, nf=0,
            legend=["Convexity"], loc=1)

    ### Estimation of the yield curve around a point using the
    ## Duration and convexity.
    price = initial_price
    ytm = self.get_ytm(price)
    dytmList = np.linspace(-0.10, 0.10, 100)

    ## Obtain estimations
    estimations = []
    for dytm in dytmList:
        eprice = self.estimate_price_DC(price=price, dytm=dytm, dy=0.01)
        estimations.append(eprice)

    ## Obtain real values
    real_values = []
    for dytm in dytmList:
        rprice = self.get_price(ytm=ytm + dytm)
        real_values.append(rprice)

    # Calculate the error !
    real_values = ul.fnp(real_values)
    estimations = ul.fnp(estimations)
    error = np.abs(np.power(real_values - estimations, 1))

    gl.plot(ytm_list, prices,
            labels=["Convexity and Yield", "Yield to maturity", "Price of Bond"],
            legend=["Yield curve"], loc=3, lw=4, color="r")
    gl.scatter([ytm], [price], nf=0,
               legend=["Initial price"], loc=3, lw=4, color="g")
    gl.plot(dytmList + ytm, estimations, nf=0,
            legend=["Estimation"], loc=3, lw=2, color="b")
    gl.plot(dytmList + ytm, error, nf=0, na=1,
            legend=["Error"], loc=1, lw=2, color="b")
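# Hedged sketch of the duration/convexity price approximation that
# estimate_price_DC (not shown here) presumably implements: a second-order
# Taylor expansion of the bond price in the yield change dy,
#   P(y + dy) ~ P(y) - D_mod * P * dy + 0.5 * C * P * dy^2
def estimate_price_DC_sketch(price, mduration, convexity, dytm):
    return price * (1 - mduration * dytm + 0.5 * convexity * dytm ** 2)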
symbols = ["Amazon", "Alcoa_Inc"]
periods = [15]

######## SELECT DATE LIMITS ###########
sdate = dt.datetime.strptime("21-11-2016", "%d-%m-%Y")
edate = dt.datetime.strptime("25-11-2016", "%d-%m-%Y")

######## CREATE THE OBJECT AND LOAD THE DATA ##########
# Tell which company and which period we want
timeData = CTD.CTimeData(symbols[0], periods[0])
TD = DBl.load_TD_from_csv(storage_folder, symbols[1], periods[0])
timeData.set_csv(storage_folder)  # Load the data into the model
timeData.set_TD(TD)

############## Obtain time series ###########################
price = timeData.get_timeSeries(["Close", "Average"])

############# Plot time Series and save it to disk #########
gl.plot([], price)

datafolder = "../maildata/"
picdir = datafolder + "pene.png"
gl.savefig(picdir)

###########################################################################
############## BASIC PLOTTING FUNC ########################################
###########################################################################
user = "******"
pwd = "Goldenegg"
# user = "******"
# pwd = "manumon7g.@"
EM_list = []
F1_list = []
for i in range(len(DataSet_statistics_list)):
    EM_list.append(100 * np.mean(DataSet_statistics_list[i]["em"]))
    F1_list.append(100 * np.mean(DataSet_statistics_list[i]["f1"]))

gl.init_figure()
ax1 = gl.subplot2grid((3, 1), (0, 0), rowspan=1, colspan=1)
ax2 = gl.subplot2grid((3, 1), (1, 0), rowspan=1, colspan=1, sharex=ax1)
ax3 = gl.subplot2grid((3, 1), (2, 0), rowspan=1, colspan=1, sharex=ax1)

## Accuracies !!
gl.plot(trim_mu_sigma, EM_list, ax=ax1, legend=["EM"],
        labels=[r"Trimming of the weights and biases by $|\mu_w|/\sigma_w$",
                "", "Accuracy"])
gl.plot(trim_mu_sigma, F1_list, ax=ax1, legend=["F1"])

## Systems !
list_all_labels = ["$W_{E}$", "$W_{H}$", "$W_{S}$",
                   "$W_{(p^1)}$", "$W_{(p^2)}$"]
list_final_pcts_w = []
list_final_pcts_b = []
for i in range(len(list_all_labels)):
    list_final_pcts_w.append([])
    list_final_pcts_b.append([])
    for j in range(len(DataSet_statistics_list)):
timeData = CTD.CTimeData(symbols[0], periods[0])
timeData.set_csv(storage_folder)     # Load the data into the model
timeData.set_interval(sdate, edate)  # Set the interval period to be analysed

###########################################################################
############## TIME SERIES INDICATORS #####################################
###########################################################################
advanced_smoothing_f = 0
if (advanced_smoothing_f == 1):
    price = timeData.get_timeSeries(["Average"])
    casd = ul.get_Elliot_Trends(price, 10)
    timeData.plot_timeSeries()
    flag_p = 0
    for trend in casd:
        gl.plot(timeData.dates[trend], price[trend], lw=5, nf=flag_p)
        flag_p = 0

###########################################################################
############## Trials #####################################################
###########################################################################
try_ind_f = 0
if (try_ind_f == 1):
    price = timeData.get_timeSeries(["Average"])
    dates = timeData.dates

    a, b = indl.MDD(price, 100)
    gl.plot(dates, price,
            labels=["Maximum DrawDown", "", "Price"],
            legend=["Price"])
    gl.plot(dates, [a, b], nf=0, na=1,
            labels=["MDD", "", "MMDd"],
prob = Wad.Watson_pdf(Xsample, mu, kappa)

# Draw 2D samples as a transformation of the angle
Xalpha = np.linspace(0, 2*np.pi, Nsa)
Xdata = np.array([np.cos(Xalpha), np.sin(Xalpha)])

probs = []   # Vector with probabilities
for i in range(Nsa):
    probs.append(Wad.Watson_pdf(Xdata[:, i], mu, kappa))
probs = np.array(probs)

## Plotting
gl.set_subplots(1, 3)
## Plot it in terms of (angle, prob)
gl.plot(Xalpha, probs,
        legend = ["pdf k:%f, mu_angle: %f" % (kappa, mu_angle)],
        labels = ["Watson Distribution", "Angle (rad)", "pdf"],
        nf = 1, na = 1)
# Plot it in polar coordinates
X1 = probs * np.cos(Xalpha)
X2 = probs * np.sin(Xalpha)
gl.plot(X1, X2,
        legend = ["pdf k:%f, mu_angle: %f" % (kappa, mu_angle)],
        labels = ["Watson Distribution", "X1", "X2"],
        nf = 1, na = 1)

## Generate samples and re-estimate the parameters by ML
RandWatson = Was.randWatson(Nsampling, mu, kappa)
mu_est2, kappa_est2 = Wae.get_Watson_muKappa_ML(RandWatson)
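# For reference, the Watson density on the unit circle is proportional to
# exp(kappa * (mu^T x)^2). A minimal sketch with numeric normalization,
# independent of the Wad module used above:
import numpy as np

def watson_pdf_circle(alpha, mu_angle, kappa):
    # For unit vectors, mu^T x = cos(alpha - mu_angle)
    unnorm = np.exp(kappa * np.cos(alpha - mu_angle)**2)
    # Normalize numerically over one full turn
    grid = np.linspace(0, 2 * np.pi, 2000)
    Z = np.trapz(np.exp(kappa * np.cos(grid - mu_angle)**2), grid)
    return unnorm / Z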
price = timeData.get_timeSeries(["Close"])
dates = timeData.get_dates()
df = timeData.get_timeData()

# Momentum and Rate of Change
nMOM = 20
nROC = 20
MOM = timeData.MOM(n = nMOM)
ROC = timeData.ROC(n = nROC)

# Plotting
gl.set_subplots(2, 1)
gl.plot(dates, price, nf = 1,
        labels = ["Momentum Indicators MOM and ROC", "", "Price"],
        legend = ["Price", "Momentum", "ROC"])
gl.plot(dates, MOM, nf = 1, na = 0, legend = ["MOM(%i)" % nMOM])
# Rescale ROC to the range of MOM so both fit on one axis
ROC = ROC * np.max(np.abs(np.nan_to_num(MOM))) / np.max(np.abs(np.nan_to_num(ROC)))
gl.plot(dates, ROC, nf = 0, na = 0, legend = ["ROC(%i)" % nROC])
# The next plot is just so that the view starts at the first date
gl.plot(dates, np.zeros((dates.size, 1)), nf = 0, na = 0)

gl.subplots_adjust(left=.09, bottom=.10, right=.90, top=.95,
                   wspace=.20, hspace=0)
gl.savefig(folder_images + 'OscillatorsMOM.png',
           dpi = 100, sizeInches = [14, 7])
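# Both indicators reduce to lagged differences/ratios of the close price.
# A minimal pandas sketch under that assumption (the library's MOM/ROC may
# differ in detail):
import pandas as pd

def mom(close, n):
    # Momentum: absolute price change over the last n bars
    return close - close.shift(n)

def roc(close, n):
    # Rate of Change: percentage price change over the last n bars
    return 100.0 * (close / close.shift(n) - 1.0)

close = pd.Series([10.0, 10.5, 10.2, 10.8, 11.0, 10.9])
print(mom(close, 2))
print(roc(close, 2))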
##############################################################
alpha_stem = 0.5
marker_stem = [".", 1, None]
train_color = "b"
test_color = "r"

gl.init_figure()
ax1 = gl.subplot2grid((4, 1), (0, 0), rowspan=1, colspan=1)
ax2 = gl.subplot2grid((4, 1), (1, 0), rowspan=1, colspan=1, sharex=ax1)
ax3 = gl.subplot2grid((4, 1), (2, 0), rowspan=1, colspan=1, sharex=ax1)
ax4 = gl.subplot2grid((4, 1), (3, 0), rowspan=1, colspan=1, sharex=ax1)

## Ax1: Close price at the end of the sessions
gl.plot(days_keys, C, ax=ax1,
        labels=["Results " + key_classifier, "", "Close Rate"],
        AxesStyle="Normal - No xaxis",
        legend=["Close Rate"])

## Ax2: 1 if the stock has gone up, 0 if it has gone down
gl.stem(dates_train, Ytrain_reg, ax=ax2,
        labels=["", "", "Target_reg"], bottom=0.0,
        AxesStyle="Normal - No xaxis",
        alpha=alpha_stem, marker=marker_stem,
        color=train_color, legend=["tr"])
gl.stem(dates_test, Ytest_reg, ax=ax2, bottom=0.0,
        AxesStyle="Normal - No xaxis",
        alpha=alpha_stem, marker=marker_stem,
        color=test_color, legend=["tst"])
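# Ytrain_reg above encodes up/down moves of the close price. A minimal
# sketch of one plausible construction from next-bar returns (this exact
# definition is an assumption):
import numpy as np

def make_updown_target(close):
    # 1 if the next close is higher than the current one, else 0
    close = np.asarray(close, dtype=float).ravel()
    return (np.diff(close) > 0).astype(int)   # one element shorter than close

print(make_updown_target([100.0, 101.5, 101.0, 102.3]))   # -> [1 0 1]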
def create_Bayesian_analysis_charts(model,
                                    X_data_tr, Y_data_tr,
                                    X_data_val, Y_data_val,
                                    tr_loss, val_loss, KL_loss,
                                    final_loss_tr, final_loss_val,
                                    xgrid_real_func, ygrid_real_func,
                                    folder_images, epoch_i=None):

    # Configuration of the plots
    alpha_points = 0.2
    color_points_train = "dark navy blue"
    color_points_val = "amber"
    color_train_loss = "cobalt blue"
    color_val_loss = "blood"
    color_truth = "k"
    color_mean = "b"
    color_most_likely = "y"

    ############################# Data computation #######################
    if isinstance(X_data_tr, list):
        pass
    else:
        if (X_data_tr.shape[1] == 1):  # Regression example
            x_grid, all_y_grid, most_likely_ygrid = compute_regression_1D_data(
                model, X_data_tr, X_data_val, Nsamples=100)
        elif (X_data_tr.shape[1] == 2):  # Classification example
            xx, yy, all_y_grid, most_likely_ygrid = compute_classification_2D_data(
                model, X_data_tr, X_data_val, Nsamples=100)
        else:  # RNN
            x_grid, all_y_grid, most_likely_ygrid = compute_RNN_1D_data(
                model, X_data_tr, X_data_val, Nsamples=100)

    ################################ Divide in plots ##############################
    gl.init_figure()
    ax1 = gl.subplot2grid((6, 3), (0, 0), rowspan=3, colspan=1)
    ax2 = gl.subplot2grid((6, 3), (3, 0), rowspan=3, colspan=1, sharex=ax1, sharey=ax1)
    ax3 = gl.subplot2grid((6, 3), (0, 1), rowspan=2, colspan=1)
    ax4 = gl.subplot2grid((6, 3), (2, 1), rowspan=2, colspan=1, sharex=ax3)
    ax5 = gl.subplot2grid((6, 3), (4, 1), rowspan=2, colspan=1, sharex=ax3)
    ax6 = gl.subplot2grid((6, 3), (0, 2), rowspan=3, colspan=1)
    ax7 = gl.subplot2grid((6, 3), (3, 2), rowspan=3, colspan=1, sharex=ax6)

    if isinstance(X_data_tr, list):
        Xtrain = [
            torch.tensor(X_data_tr[i], device=model.cf_a.device, dtype=model.cf_a.dtype)
            for i in range(len(X_data_tr))
        ]
        Ytrain = torch.tensor(Y_data_tr, device=model.cf_a.device, dtype=torch.int64)
        Xval = [
            torch.tensor(X_data_val[i], device=model.cf_a.device, dtype=model.cf_a.dtype)
            for i in range(len(X_data_val))
        ]
        Yval = torch.tensor(Y_data_val, device=model.cf_a.device, dtype=torch.int64)

        confusion = model.get_confusion_matrix(Xtrain, Ytrain)
        plot_confusion_matrix(confusion, model.languages, ax1)
        confusion = model.get_confusion_matrix(Xval, Yval)
        plot_confusion_matrix(confusion, model.languages, ax2)
    else:
        if (X_data_tr.shape[1] == 1):  # Regression example
            plot_data_regression_1d_2axes(
                X_data_tr, Y_data_tr, xgrid_real_func, ygrid_real_func,
                X_data_val, Y_data_val,
                x_grid, all_y_grid, most_likely_ygrid,
                alpha_points, color_points_train, color_points_val,
                color_most_likely, color_mean, color_truth, ax1, ax2)
        elif (X_data_tr.shape[1] == 2):  # Classification example
            plot_data_classification_2d_2axes(
                X_data_tr, Y_data_tr, xgrid_real_func, ygrid_real_func,
                X_data_val, Y_data_val,
                xx, yy, all_y_grid, most_likely_ygrid,
                alpha_points, color_points_train, color_points_val,
                color_most_likely, color_mean, color_truth, ax1, ax2)
        else:  # RNN example
            plot_data_RNN_1d_2axes(
                X_data_tr, Y_data_tr, xgrid_real_func, ygrid_real_func,
                X_data_val, Y_data_val,
                x_grid, all_y_grid, most_likely_ygrid,
                alpha_points, color_points_train, color_points_val,
                color_most_likely, color_mean, color_truth, ax1, ax2)

    # gl.fill_between(x_grid, [mean_samples_grid + 2*std_samples_grid,
    #                          mean_samples_grid - 2*std_samples_grid],
    #                 ax=ax2, alpha=0.10, color="b", legend=["Mean realizations"])
    ## ax2: The uncertainty of the prediction
    # gl.plot(x_grid, std_samples_grid, ax=ax2,
    #         labels=["Std (%i)" % Nsamples, "X", "f(X)"],
    #         legend=["std predictions"], fill=1, alpha=0.3)

    ############## ax3 ax4 ax5: Loss evolution
    ######################
    ## ax3: Evolution of the data loss
    gl.plot([], tr_loss, ax=ax3, lw=3,
            labels=["Losses", "", "Data loss"],
            legend=["train"], color=color_train_loss)
    gl.plot([], val_loss, ax=ax3, lw=3,
            legend=["validation"], color=color_val_loss,
            AxesStyle="Normal - No xaxis")

    ## ax4: The evolution of the KL loss
    gl.plot([], KL_loss, ax=ax4, lw=3,
            labels=["", "", "KL loss"],
            legend=["Bayesian Weights"],
            AxesStyle="Normal - No xaxis", color="k")

    ## ax5: Evolution of the total loss
    gl.plot([], final_loss_tr, ax=ax5, lw=3,
            labels=["", "epoch", "Total Loss (Bayes)"],
            legend=["train"], color=color_train_loss)
    gl.plot([], final_loss_val, ax=ax5, lw=3,
            legend=["validation"], color=color_val_loss)

    ############## ax6 ax7: Variational weights ######################
    create_plot_variational_weights(model, ax6, ax7)
    ## Shade in chart 7 the region where |mu| < 2*sigma (i.e. sigma > |mu|/2):
    ## those weights are not significantly different from zero at the ~95% level.
    mu_grid = np.linspace(-3, 3, 100)
    y_grid = np.abs(mu_grid) / 2

    gl.fill_between(mu_grid, 10 * np.ones(mu_grid.size), y_grid,
                    alpha=0.2, color="r", ax=ax7,
                    legend=["95% non-significant"])

    gl.set_zoom(ax=ax6, ylim=[-0.1, 10])
    gl.set_zoom(ax=ax7, xlim=[-2.5, 2.5],
                ylim=[-0.05,
                      np.exp(model.cf_a.input_layer_prior["log_sigma2"]) * (1 + 0.15)])
    # gl.set_zoom(ax=ax7, xlim=[-2.5, 2.5], ylim=[-0.1, 2])

    # Set final properties and save figure
    gl.set_fontSizes(ax=[ax1, ax2, ax3, ax4, ax5, ax6, ax7],
                     title=20, xlabel=20, ylabel=20,
                     legend=10, xticks=12, yticks=12)
    gl.subplots_adjust(left=.09, bottom=.10, right=.90, top=.95,
                       wspace=.30, hspace=0.10)

    if epoch_i is None:
        gl.savefig(folder_images + "../" + 'Final_values_regression_1D_'
                   + str(model.cf_a.eta_KL) + '.png',
                   dpi=100, sizeInches=[20, 10])
    else:
        gl.savefig(folder_images + '%i.png' % epoch_i,
                   dpi=100, sizeInches=[20, 10],
                   close=True, bbox_inches="tight")
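# compute_regression_1D_data above is assumed to draw several stochastic
# forward passes from the Bayesian model; a minimal sketch of that sampling
# pattern (function and attribute names are illustrative):
import numpy as np
import torch

def sample_predictions(model, x_grid, Nsamples=100):
    # Each forward pass samples fresh weights from the variational
    # posterior, so repeated calls give different prediction curves.
    X = torch.tensor(x_grid, dtype=torch.float32).reshape(-1, 1)
    with torch.no_grad():
        all_y = np.stack([model(X).numpy().ravel() for _ in range(Nsamples)])
    return all_y.mean(axis=0), all_y.std(axis=0), all_y

# The KL loss plotted in ax4 has a closed form for a Gaussian posterior
# q = N(mu, sigma^2) against a Gaussian prior p = N(0, sigma_p^2); a
# minimal numpy sketch of that formula:
def kl_gaussian(mu, sigma, sigma_prior):
    # KL( N(mu, sigma^2) || N(0, sigma_prior^2) ), summed over weights
    kl = (np.log(sigma_prior / sigma)
          + (sigma**2 + mu**2) / (2.0 * sigma_prior**2) - 0.5)
    return kl.sum()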