def performance_plot(model, latent_dim, n):
    '''Plot ground-truth points next to points generated by the model.'''
    plt.clf()

    # Ground truth
    x_real, y_real = generate_real_samples(n)

    # Generated points
    with torch.no_grad():
        x_input = generate_latent_points(latent_dim, n).to(device)
        results = model(x_input).detach().cpu().numpy()

    # Normalize colors by the z-coordinate of each point set
    norm_ax1 = plt.Normalize(x_real[:, 2].min(), x_real[:, 2].max())
    norm_ax2 = plt.Normalize(results[:, 2].min(), results[:, 2].max())
    colors_ax1 = cm.viridis(norm_ax1(x_real[:, 2]))
    colors_ax2 = cm.viridis(norm_ax2(results[:, 2]))

    # Plot real (left) and generated (right) point clouds side by side
    fig = plt.figure()
    ax1 = fig.add_subplot(1, 2, 1, projection='3d')
    ax1.scatter(x_real[:, 0], x_real[:, 1], x_real[:, 2],
                facecolors=colors_ax1, linewidth=0, antialiased=False)
    ax2 = fig.add_subplot(1, 2, 2, projection='3d')
    ax2.scatter(results[:, 0], results[:, 1], results[:, 2],
                facecolors=colors_ax2, linewidth=0, antialiased=False)
    plt.title('Performance plot')
    plt.show()
def plot_ti_auc(fs, betas, name="", lower_bound=True):
    colors = cm.viridis(np.linspace(0, 1, len(betas)))
    fig, ax = plt.subplots(figsize=(6, 3))
    # One bar per beta interval, left-aligned on the interval edge
    patches = ax.bar(betas[:-1], fs, width=betas[1:] - betas[:-1],
                     align="edge")
    for i in range(len(betas) - 1):
        patches[i].set_facecolor(colors[i])
    if lower_bound:
        text = r"$\sum_k F_k = {:.2f}$".format(np.sum(fs))
        ylabel = r"$F_{k}$"
    else:
        text = r"$\sum_k \tilde{{F}}_k = {:.2f}$".format(np.sum(fs))
        ylabel = r"$\tilde{F}_{k}$"
    text_box = AnchoredText(text, frameon=False, loc=4, pad=0,
                            prop={"size": 12})
    plt.setp(text_box.patch, facecolor="white", alpha=0.5)
    plt.gca().add_artist(text_box)
    plt.xlabel(r"$\beta$")
    plt.ylabel(ylabel)
    plt.savefig("/tmp/{}.pdf".format(name), bbox_inches="tight", pad_inches=0)
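# Hypothetical usage sketch for plot_ti_auc (not from the original source):
# fs holds one free-energy estimate per beta interval, so
# len(fs) == len(betas) - 1, and the figure is written to /tmp/ti_demo.pdf.
betas = np.linspace(0.0, 1.0, 11)  # inverse-temperature grid
fs = np.random.default_rng(0).normal(1.0, 0.2, len(betas) - 1)
plot_ti_auc(fs, betas, name="ti_demo", lower_bound=True)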
def plot_timeseries(df, ITMs_list, directory, Date_Now):
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    color = iter(cm.viridis(np.linspace(0, 1, len(ITMs_list))))
    fig, ax = plt.subplots(1, 1)
    for ITMs in ITMs_list:
        # Copy the slice to avoid pandas' SettingWithCopyWarning
        df_ITM = df.loc[df['ITMs'] == ITMs].copy()
        df_ITM['necrotic norm'] = df_ITM['necrotic'] * 100 / 2500.
        c = next(color)
        necrotic = df_ITM.groupby('timestep').mean()['necrotic norm']
        necrotic_std = df_ITM.groupby('timestep').std()['necrotic norm']
        # Faint error band via errorbar, then the mean line on top
        ax.errorbar(x=range(len(necrotic)), y=necrotic, yerr=necrotic_std,
                    label='', color=c, alpha=0.05)
        ax.plot(range(len(necrotic)), necrotic,
                label=str(ITMs) + ' ITMs', color=c)
    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)
    ax.set_xlabel('Time Steps', fontsize=25)
    ax.set_ylabel(r'\% Necrosis', fontsize=25)
    ax.legend(fontsize=16)
    plt.savefig(directory + Date_Now + '_CA_timesteps_mean',
                dpi=300, bbox_inches="tight")
    plt.show()
def plot(database_dir, names, det_no):
    run = get_run_names(names, database_dir, run_database)
    plt.figure()
    colors = cm.viridis(np.linspace(0, 1, len(run)))
    for i, r in enumerate(run):
        if 'na_' in r:
            continue
        print(r)
        # Avoid shadowing: keep the file list separate from each file's data
        arrs = get_numpy_arr(database_dir, run_database, r, numpy_dir,
                             prefix, True)
        ql = []
        for data_index, datum in enumerate(arrs):
            print(data_index, datum)
            f_in = np.load(numpy_dir + datum)
            data = f_in['data']
            # pat/plastic det 0, bvert det 1, cpvert det 2
            ql_det = data['ql'][np.where(data['det_no'] == det_no)]
            ql.extend(ql_det)
        # normed= was removed from matplotlib; density= is the replacement
        plt.hist(ql, bins=1000, histtype='step', label=r, density=True,
                 color=colors[i])
        plt.plot([0.476] * 10, np.linspace(0, 4, 10), 'k--',
                 linewidth=0.5, alpha=0.25)
        plt.xlim(0, 1.3)
        plt.title(r)
    plt.legend()
def plot_results(*methods, true_minimum=None, max_n_calls=np.inf,
                 choice='x_error', x_mark='n', target_time=0, max_time=1000):
    ax = plt.gca()
    ax.set_title("Convergence plot")
    if x_mark == 'n':
        ax.set_xlabel("Number of calls $n$")
    else:
        ax.set_xlabel("Time Consumption (seconds)")
    ax.set_ylabel(choice)
    ax.grid()
    colors = cm.viridis(np.linspace(0.25, 1.0, len(methods)))
    for result, color in zip(methods, colors):
        name = result['name']
        records = result['result'].records
        n_calls = int(np.min([len(records), max_n_calls]))
        # Best-so-far value of the chosen metric after each call
        mins = [records[r['best']][choice] for r in records[:n_calls]]
        if x_mark == 'n':
            ax.plot(range(1, n_calls + 1), mins, c=color, marker=".",
                    markersize=12, lw=2, label=name)
        else:
            # Accumulate wall-clock time, discounting per-call overhead
            t0 = records[0]['output_time']
            time_consume = []
            for r in records[:n_calls]:
                time_consume.append(r['input_time'] - t0)
                t0 += r['output_time'] - r['input_time'] - target_time
                if time_consume[-1] > max_time:
                    mins = mins[:len(time_consume)]
                    break
            ax.plot(time_consume, mins, c=color, marker=".",
                    markersize=12, lw=2, label=name)
    if true_minimum is not None:
        ax.axhline(true_minimum, linestyle="--", color="r", lw=1,
                   label="True minimum")
    ax.legend(loc="best")
    return ax
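# Hypothetical usage sketch for plot_results (the record layout -- the
# 'best', 'input_time' and 'output_time' keys and the metric name -- is
# inferred from how the function indexes records, not from a documented API):
import types

rng = np.random.default_rng(1)
records = []
best = 0
t = 0.0
for k in range(20):
    err = float(rng.random())
    records.append({'x_error': err, 'best': best,
                    'input_time': t, 'output_time': t + 0.1})
    if err < records[best]['x_error']:
        best = k
    records[-1]['best'] = best  # index of the best record so far
    t += 0.2
method = {'name': 'demo', 'result': types.SimpleNamespace(records=records)}
plot_results(method, choice='x_error', x_mark='n')
plt.show()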
def solve_problem():
    rc = raycast3r(np.array([0, 0]), 10)

    # Plot the triangle
    A, B, C = rc.get_trinangle_points()
    pl = plot0r()
    pl.plot_line(A, B)
    pl.plot_line(A, C)
    pl.plot_line(B, C)

    # Plot the rays, stepping through the colormap once per segment
    bounces = 4
    color = iter(cm.viridis(np.linspace(0, 1, bounces + bounces)))
    for i in range(bounces):
        rays = rc.cast()
        for ray in rays:
            pl.plot_line(ray.points[0], ray.points[1], style="-",
                         color=next(color))
    plt.show()
def plot_activation_distributions_development(
        show_title=True, name_func=lambda name: name,
        **samples_all_time_step_activations):
    """
    Plot how the distributions of activation values change for multiple
    samples as a box-and-whiskers plot.
    """
    num_samples = len(samples_all_time_step_activations)
    num_timesteps = len(list(samples_all_time_step_activations.values())[0])
    fig, axes = plt.subplots(nrows=1, ncols=num_timesteps, sharey=True)
    colors = cm.viridis(np.linspace(0, 1, num_samples))

    bplots = []
    for t, axis in enumerate(axes):
        bplot = axis.boxplot(
            [all_time_step_activations[t]
             for all_time_step_activations
             in samples_all_time_step_activations.values()],
            vert=True, sym="", patch_artist=True,
            whis=10000)  # show min and max by setting whis very high
        axis.set_xlabel("t={}".format(t))
        axis.set_xticks([])
        bplots.append(bplot)

        # Coloring
        for patch, color in zip(bplot["boxes"], colors):
            patch.set_facecolor(color)

    # Add legend
    fig.subplots_adjust(bottom=0.2)
    axes[int(num_timesteps / 2)].legend(
        [bplot for bplot in bplots[0]["boxes"]],
        [name_func(name)
         for name in samples_all_time_step_activations.keys()],
        loc="lower left", bbox_to_anchor=(-2.75, -0.2),
        borderaxespad=0.1, ncol=num_samples)

    if show_title:
        fig.suptitle(
            "Distributions of activation values of {} samples over {} time steps"
            .format(num_samples, num_timesteps))

    plt.show()
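# Hypothetical usage sketch (the expected data shape -- one list of 1-D
# activation arrays per sample, indexed by time step -- is inferred from the
# function body):
rng = np.random.default_rng(0)
hidden = [rng.normal(0, 1 + 0.2 * t, 200) for t in range(5)]
cell = [rng.normal(0, 0.5 + 0.1 * t, 200) for t in range(5)]
plot_activation_distributions_development(hidden_states=hidden,
                                          cell_states=cell)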
def updateGraphs(self, *_):
    # Remove old plots
    while self.plotCurves:
        self.plotCurves.pop().remove()
    while self.plotFits:
        self.plotFits.pop().remove()
    self.mpl.relim()

    numPlottedCurves = 0
    # ... and create the new ones
    for i in self.results.curselection():
        # Extract the frame q value from the human-readable string
        d = int(re.search(r'(\d+)', self.results.get(i)).group(1))
        # Plot the q-vs-dt slice
        data = self.correlation[:, d]
        # Randomly choose a colour from the palette, seeded so it is
        # always the same for this curve
        random.seed(d)
        colour = cm.viridis(random.random())
        curveRef = self.mpl.plot(range(len(data)), data, '-',
                                 color=colour)[0]
        self.plotCurves.append(curveRef)
        # Too many legend entries break the layout
        if numPlottedCurves < 10:
            curveRef.set_label(f'q = {d}')
            numPlottedCurves += 1
        if self.fitCurves is not None:
            fitData = self.fitCurves[d, :]
            fitRef = self.mpl.plot(range(len(fitData)), fitData, '--',
                                   color=colour)[0]
            self.plotFits.append(fitRef)

    if len(self.mpl.get_lines()):  # avoid "no handles" MPL warning
        self.mpl.legend(loc='upper left', fontsize='x-small')
    self.mpl.relim()  # recalculate axis limits
    self.rerender()  # push the new plots to the rasterised UI element
def plot_dists(dists, betas, name=""):
    plt.figure(figsize=(6, 3))
    colors = cm.viridis(np.linspace(0, 1, len(dists)))
    for i, dist in enumerate(dists):
        mu, sigma = dist.mean, np.sqrt(dist.variance)
        x = np.linspace(-10, 10, 500)
        plt.plot(x, stats.norm.pdf(x, mu, sigma),
                 color=colors[i % len(colors)],
                 label="{:.02f}".format(betas[i]))
    plt.xlabel(r"$z$")
    plt.ylabel(r"$q(z)$")
    plt.ylim([-0.02, 1.0])
    plt.savefig("/tmp/{}.pdf".format(name), bbox_inches="tight", pad_inches=0)
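# Hypothetical usage sketch: plot_dists only touches .mean and .variance,
# so plain namespaces can stand in for real variational posteriors.
from types import SimpleNamespace

betas = np.linspace(0.0, 1.0, 5)
dists = [SimpleNamespace(mean=2.0 * b, variance=1.0 / (1.0 + b))
         for b in betas]
plot_dists(dists, betas, name="dists_demo")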
beta = 0.3    # infection rate
gamma = 0.05  # recovery rate
col = 10
for x in range(1, 11):
    # Reset the epidemic state for each vaccination level
    i = 1
    r = 0
    time = 0
    # N (total population) is assumed to be defined earlier in the script;
    # 1000 * x individuals are vaccinated and removed from the susceptibles
    s = N - i - 1000 * x
    if s > 0:
        I = [1]
        while time < 1000:
            mc = beta * (I[-1] / N)  # probability of infection
            a = sum(np.random.choice(range(2), s, p=[1 - mc, mc]))
            b = sum(np.random.choice(range(2), i, p=[1 - gamma, gamma]))
            s = s - a
            i = i + a - b
            r = r + b
            I.append(i)
            time = time + 1
    else:
        I = [0]
    plt.plot(I, label=str(x * 10) + '%', color=cm.viridis(col))
    col = col + 33  # step through the colormap's 0-255 lookup table
plt.legend()
plt.savefig("SIR_vaccination.png")
plt.show()
def feature_plot_3D(dataset, label, features=[0, 1, 2], lvals=['PEG', 'PS'],
                    randsel=True, randcount=200, **kwargs):
    """Plots three features against each other from feature dataset.

    Parameters
    ----------
    dataset : pandas.core.frames.DataFrame
        Must contain a group column and numerical features columns
    label : string or int
        Group column name
    features : list of int
        Names of columns to be plotted
    lvals : list of string
        Values of the group column to plot
    randsel : bool
        If True, downsamples from original dataset
    randcount : int
        Size of downsampled dataset
    **kwargs : variable
        figsize : tuple of int or float
            Size of output figure
        dotsize : float or int
            Size of plotting markers
        alpha : float or int
            Transparency factor
        xlim : list of float or int
            X range of output plot
        ylim : list of float or int
            Y range of output plot
        zlim : list of float or int
            Z range of output plot
        legendfontsize : float or int
            Font size of legend
        labelfontsize : float or int
            Font size of labels
        fname : string
            Filename of output figure
        dpi : int
            Resolution of saved figure
        noticks : bool
            If True, hides tick labels
        ticksize : float or int
            Font size of tick labels
    """
    defaults = {'figsize': (8, 8), 'dotsize': 70, 'alpha': 0.7, 'xlim': None,
                'ylim': None, 'zlim': None, 'legendfontsize': 12,
                'labelfontsize': 10, 'fname': None, 'dpi': 300,
                'noticks': True, 'ticksize': 10}
    for defkey in defaults:
        if defkey not in kwargs:
            kwargs[defkey] = defaults[defkey]

    # Four panels, each showing the same data from a different viewing angle
    axes = {}
    fig = plt.figure(figsize=(14, 14))
    axes[1] = fig.add_subplot(221, projection='3d')
    axes[2] = fig.add_subplot(222, projection='3d')
    axes[3] = fig.add_subplot(223, projection='3d')
    axes[4] = fig.add_subplot(224, projection='3d')
    angle1 = [60, 0, 0, 0]
    angle2 = [240, 240, 10, 190]

    # Split the dataset into one group per label value
    tgroups = {}
    counter = 0
    for lval in lvals:
        tgroups[counter] = dataset[dataset[label] == lval]
        counter = counter + 1

    N = len(tgroups)
    color = iter(cm.viridis(np.linspace(0, 0.9, N)))
    counter = 0
    for key in tgroups:
        c = next(color)
        xy = []
        if randsel:
            to_plot = random.sample(range(0, len(tgroups[key][0].tolist())),
                                    randcount)
            for key2 in features:
                xy.append(list(tgroups[key][key2].tolist()[i]
                               for i in to_plot))
        else:
            for key2 in features:
                xy.append(tgroups[key][key2])
        acount = 0
        for ax in axes:
            axes[ax].scatter(xy[0], xy[1], xy[2], c=c,
                             s=kwargs['dotsize'], alpha=kwargs['alpha'])
            if kwargs['xlim'] is not None:
                axes[ax].set_xlim3d(kwargs['xlim'][0], kwargs['xlim'][1])
            if kwargs['ylim'] is not None:
                axes[ax].set_ylim3d(kwargs['ylim'][0], kwargs['ylim'][1])
            if kwargs['zlim'] is not None:
                axes[ax].set_zlim3d(kwargs['zlim'][0], kwargs['zlim'][1])
            axes[ax].view_init(angle1[acount], angle2[acount])
            axes[ax].set_xlabel('{}'.format(features[0]),
                                fontsize=kwargs['labelfontsize'])
            axes[ax].set_ylabel('{}'.format(features[1]),
                                fontsize=kwargs['labelfontsize'])
            axes[ax].set_zlabel('{}'.format(features[2]),
                                fontsize=kwargs['labelfontsize'])
            if kwargs['noticks']:
                axes[ax].set_xticklabels('')
                axes[ax].set_yticklabels('')
                axes[ax].set_zticklabels('')
            else:
                axes[ax].xaxis.set_tick_params(labelsize=kwargs['ticksize'])
                axes[ax].yaxis.set_tick_params(labelsize=kwargs['ticksize'])
                axes[ax].zaxis.set_tick_params(labelsize=kwargs['ticksize'])
            acount = acount + 1
        counter = counter + 1

    axes[3].set_xticks([])
    axes[4].set_xticks([])
    if kwargs['fname'] is None:
        plt.show()
    else:
        plt.savefig(kwargs['fname'], dpi=kwargs['dpi'])
def feature_plot_2D(dataset, label, features=[0, 1], lvals=['PEG', 'PS'],
                    randsel=True, randcount=200, **kwargs):
    """Plots two features against each other from feature dataset.

    Parameters
    ----------
    dataset : pandas.core.frames.DataFrame
        Must contain a group column and numerical features columns
    label : string or int
        Group column name
    features : list of int
        Names of columns to be plotted
    lvals : list of string
        Values of the group column to plot
    randsel : bool
        If True, downsamples from original dataset
    randcount : int
        Size of downsampled dataset
    **kwargs : variable
        figsize : tuple of int or float
            Size of output figure
        dotsize : float or int
            Size of plotting markers
        alpha : float or int
            Transparency factor
        xlim : list of float or int
            X range of output plot
        ylim : list of float or int
            Y range of output plot
        legendfontsize : float or int
            Font size of legend
        labelfontsize : float or int
            Font size of labels
        fname : string
            Filename of output figure

    Returns
    -------
    xy : list of lists
        Coordinates of the last-plotted group
    """
    defaults = {'figsize': (8, 8), 'dotsize': 70, 'alpha': 0.7, 'xlim': None,
                'ylim': None, 'legendfontsize': 12, 'labelfontsize': 20,
                'fname': None, 'legendloc': 2}
    for defkey in defaults:
        if defkey not in kwargs:
            kwargs[defkey] = defaults[defkey]

    tgroups = {}
    counter = 0
    for lval in lvals:
        tgroups[counter] = dataset[dataset[label] == lval]
        counter = counter + 1

    N = len(tgroups)
    color = iter(cm.viridis(np.linspace(0, 0.9, N)))
    fig = plt.figure(figsize=kwargs['figsize'])
    ax1 = fig.add_subplot(111)
    counter = 0
    for key in tgroups:
        c = next(color)
        xy = []
        if randsel:
            to_plot = random.sample(range(0, len(tgroups[key][0].tolist())),
                                    randcount)
            for key2 in features:
                xy.append(list(tgroups[key][key2].tolist()[i]
                               for i in to_plot))
        else:
            for key2 in features:
                xy.append(tgroups[key][key2])
        # Label by the group actually plotted, so legend entries match lvals
        ax1 = plt.scatter(xy[0], xy[1], c=c, s=kwargs['dotsize'],
                          alpha=kwargs['alpha'], label=lvals[counter])
        counter = counter + 1

    if kwargs['xlim'] is not None:
        plt.xlim(kwargs['xlim'])
    if kwargs['ylim'] is not None:
        plt.ylim(kwargs['ylim'])
    plt.legend(fontsize=kwargs['legendfontsize'], frameon=False,
               borderaxespad=0., bbox_to_anchor=(1.05, 1))
    plt.xlabel('Prin. Component {}'.format(features[0]),
               fontsize=kwargs['labelfontsize'])
    plt.ylabel('Prin. Component {}'.format(features[1]),
               fontsize=kwargs['labelfontsize'])

    if kwargs['fname'] is None:
        plt.show()
    else:
        plt.savefig(kwargs['fname'])
    return xy
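# Hypothetical usage sketch: integer column names match the default
# features=[0, 1], and 'group' plays the role of the label column.
rng = np.random.default_rng(0)
demo = pd.DataFrame({0: rng.normal(size=100),
                     1: rng.normal(size=100) + 1,
                     'group': ['PEG'] * 50 + ['PS'] * 50})
xy = feature_plot_2D(demo, label='group', lvals=['PEG', 'PS'],
                     randsel=True, randcount=20)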
def plot_pca(datasets, figsize=(8, 8), lwidth=8.0,
             labels=['Sample1', 'Sample2'], savefig=True,
             filename='test.png', rticks=np.linspace(-2, 2, 5), dpi=300,
             labelsize=20):
    """Plots the average output features from a PCA analysis in polar
    coordinates.

    Parameters
    ----------
    datasets : dict of numpy.ndarray
        Dictionary with n samples and p features to plot. All entries are
        assumed to have the same length.
    figsize : tuple
        Dimensions of output figure e.g. (8, 8)
    lwidth : float
        Width of plotted lines in figure
    labels : list of str
        Labels to display in legend.
    savefig : bool
        If True, saves figure
    filename : str
        Desired output filename
    rticks : numpy.ndarray
        Radial tick positions
    dpi : int
        Resolution of saved figure
    labelsize : int
        Font size of tick and legend labels
    """
    fig = plt.figure(figsize=figsize)
    # All datasets are assumed to share the same number of features
    N = datasets[next(iter(datasets))].shape[0]
    color = iter(cm.viridis(np.linspace(0, 0.9, len(datasets))))
    theta = np.linspace(0.0, 2 * np.pi, N + 1, endpoint=True)

    radii = {}
    bars = {}
    ax = plt.subplot(111, polar=True)
    counter = 0
    for key in datasets:
        c = next(color)
        # Close the polygon by repeating the first point
        radii[key] = np.append(datasets[key], datasets[key][0])
        bars[key] = ax.plot(theta, radii[key], linewidth=lwidth, color=c,
                            label=labels[counter])
        counter = counter + 1

    plt.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.,
               frameon=False, fontsize=labelsize + 4)
    ax.set_xticks(np.pi / 180. * np.linspace(0, 360, N, endpoint=False))
    ax.set_xticklabels(list(range(0, N)), fontsize=labelsize)
    ax.set_ylim([min(rticks), max(rticks) + 1])
    ax.set_yticks(rticks)
    ax.yaxis.set_tick_params(labelsize=labelsize)

    if savefig:
        plt.savefig(filename, bbox_inches='tight', dpi=dpi)
    plt.show()
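# Hypothetical usage sketch: each entry holds the averaged PCA feature
# magnitudes for one sample; both entries share the same length.
rng = np.random.default_rng(0)
datasets = {'s1': rng.uniform(-1, 1, 8), 's2': rng.uniform(-1, 1, 8)}
plot_pca(datasets, labels=['Sample1', 'Sample2'], savefig=False)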
def plot_all_experiments(experiments, bucket='ccurtis.data', folder='test',
                         yrange=(10**-1, 10**1), fps=100.02,
                         xrange=(10**-2, 10**0), upload=True,
                         outfile='test.png', exponential=True):
    """Plots precision-weighted averages of MSD datasets.

    Plots pre-calculated precision-weighted averages of MSD datasets
    calculated from precision_averaging and stored in an AWS S3 bucket.

    Parameters
    ----------
    experiments : list of str
        List of experiment names to plot. Each experiment must have an MSD
        and SEM file associated with it in s3.
    bucket : str
        S3 bucket from which to download data.
    folder : str
        Folder in s3 bucket from which to download data.
    yrange : list of float
        Y range of plot
    fps : float
        Frames per second, used to convert frame indices to seconds.
    xrange : list of float
        X range of plot
    upload : bool
        True to upload to S3
    outfile : str
        Filename of output image
    exponential : bool
        If True, plots np.exp() of the stored values.
    """
    n = len(experiments)
    color = iter(cm.viridis(np.linspace(0, 0.9, n)))

    fig = plt.figure(figsize=(8.5, 8.5))
    plt.xlim(xrange[0], xrange[1])
    plt.ylim(yrange[0], yrange[1])
    plt.xlabel('Tau (s)', fontsize=25)
    plt.ylabel(r'Mean Squared Displacement ($\mu$m$^2$)', fontsize=25)

    geo = {}
    gstder = {}
    counter = 0
    for experiment in experiments:
        # Fetch the precomputed geometric mean and SEM curves from S3
        aws.download_s3('{}/geomean_{}.csv'.format(folder, experiment),
                        'geomean_{}.csv'.format(experiment),
                        bucket_name=bucket)
        aws.download_s3('{}/geoSEM_{}.csv'.format(folder, experiment),
                        'geoSEM_{}.csv'.format(experiment),
                        bucket_name=bucket)
        geo[counter] = np.genfromtxt('geomean_{}.csv'.format(experiment))
        gstder[counter] = np.genfromtxt('geoSEM_{}.csv'.format(experiment))
        geo[counter] = ma.masked_equal(geo[counter], 0.0)
        gstder[counter] = ma.masked_equal(gstder[counter], 0.0)

        frames = np.shape(gstder[counter])[0]
        xpos = np.linspace(0, frames - 1, frames) / fps
        c = next(color)
        # Solid line: mean; dashed lines: 95% confidence interval
        if exponential:
            plt.loglog(xpos, np.exp(geo[counter]), c=c, linewidth=6,
                       label=experiment)
            plt.loglog(xpos, np.exp(geo[counter] - 1.96 * gstder[counter]),
                       c=c, dashes=[6, 2], linewidth=4)
            plt.loglog(xpos, np.exp(geo[counter] + 1.96 * gstder[counter]),
                       c=c, dashes=[6, 2], linewidth=4)
        else:
            plt.loglog(xpos, geo[counter], c=c, linewidth=6,
                       label=experiment)
            plt.loglog(xpos, geo[counter] - 1.96 * gstder[counter], c=c,
                       dashes=[6, 2], linewidth=4)
            plt.loglog(xpos, geo[counter] + 1.96 * gstder[counter], c=c,
                       dashes=[6, 2], linewidth=4)
        counter = counter + 1

    plt.legend(frameon=False, prop={'size': 16})
    if upload:
        fig.savefig(outfile, bbox_inches='tight')
        aws.upload_s3(outfile, folder + '/' + outfile, bucket_name=bucket)
def plot_regret(*args, **kwargs):
    """Plot one or several cumulative regret traces.

    Parameters
    ----------
    args[i] : `OptimizeResult`, list of `OptimizeResult`, or tuple
        The result(s) for which to plot the cumulative regret trace.

        - if `OptimizeResult`, then draw the corresponding single trace;
        - if list of `OptimizeResult`, then draw the corresponding
          cumulative regret traces in transparency, along with the average
          cumulative regret trace;
        - if tuple, then `args[i][0]` should be a string label and
          `args[i][1]` an `OptimizeResult` or a list of `OptimizeResult`.

    ax : `Axes`, optional
        The matplotlib axes on which to draw the plot, or `None` to create
        a new one.

    true_minimum : float, optional
        The true minimum value of the function, if known.

    yscale : None or string, optional
        The scale for the y-axis.

    Returns
    -------
    ax : `Axes`
        The matplotlib axes.
    """
    # <3 legacy python: keyword-only arguments via **kwargs
    ax = kwargs.get("ax", None)
    true_minimum = kwargs.get("true_minimum", None)
    yscale = kwargs.get("yscale", None)

    if ax is None:
        ax = plt.gca()

    ax.set_title("Cumulative regret plot")
    ax.set_xlabel("Number of calls $n$")
    ax.set_ylabel(r"$\sum_{i=0}^n(f(x_i) - optimum)$ after $n$ calls")
    ax.grid()

    if yscale is not None:
        ax.set_yscale(yscale)

    colors = cm.viridis(np.linspace(0.25, 1.0, len(args)))

    if true_minimum is None:
        # Fall back to the best value seen across all traces
        results = []
        for res in args:
            if isinstance(res, tuple):
                res = res[1]
            if isinstance(res, OptimizeResult):
                results.append(res)
            elif isinstance(res, list):
                results.extend(res)
        true_minimum = np.min([np.min(r.func_vals) for r in results])

    for results, color in zip(args, colors):
        if isinstance(results, tuple):
            name, results = results
        else:
            name = None

        if isinstance(results, OptimizeResult):
            n_calls = len(results.x_iters)
            regrets = [np.sum(results.func_vals[:i] - true_minimum)
                       for i in range(1, n_calls + 1)]
            ax.plot(range(1, n_calls + 1), regrets, c=color, marker=".",
                    markersize=12, lw=2, label=name)
        elif isinstance(results, list):
            n_calls = len(results[0].x_iters)
            iterations = range(1, n_calls + 1)
            regrets = [[np.sum(r.func_vals[:i] - true_minimum)
                        for i in iterations] for r in results]
            for cr in regrets:
                ax.plot(iterations, cr, c=color, alpha=0.2)
            ax.plot(iterations, np.mean(regrets, axis=0), c=color,
                    marker=".", markersize=12, lw=2, label=name)

    if name:
        ax.legend(loc="best")
    return ax
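# Hypothetical usage sketch, assuming OptimizeResult here is scipy's
# OptimizeResult (a dict subclass with attribute access), which is the
# class skopt results are built on:
from scipy.optimize import OptimizeResult

rng = np.random.default_rng(0)
res = OptimizeResult(x_iters=list(range(15)),
                     func_vals=rng.uniform(0.0, 1.0, 15))
plot_regret(("random search", res), true_minimum=0.0)
plt.show()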
        # (fragment: the condition selecting this branch is defined earlier
        # in the script; both branches draw the trunk members the same way)
        ax.scatter(dataarr_acorns[0, A.forest[tree].trunk.cluster_members],
                   dataarr_acorns[1, A.forest[tree].trunk.cluster_members],
                   marker='o', s=5., c='None', edgecolors=c, alpha=0.9,
                   linewidth=0.8)
    else:
        ax.scatter(dataarr_acorns[0, A.forest[tree].trunk.cluster_members],
                   dataarr_acorns[1, A.forest[tree].trunk.cluster_members],
                   marker='o', s=5., c='None', edgecolors=c, alpha=0.9,
                   linewidth=0.8)

    # One colour per leaf of the tree
    n = len(A.forest[tree].leaves)
    col = iter(cm.viridis(np.linspace(0, 1, n)))
    for leaf in A.forest[tree].leaves:
        c = next(col)
        ax.scatter(dataarr_acorns[0, leaf.cluster_members],
                   dataarr_acorns[1, leaf.cluster_members],
                   marker='o', s=5., c=c, edgecolors='k', alpha=1.0,
                   linewidth=0.1)

ax.azim = 180
ax.elev = 0
plt.show()

fig = plt.figure(figsize=(8.0, 8.0))
ax = fig.add_subplot(111)
ax.set_xlim([-1, 25])
ax.set_ylim([-0.0001, 0.002])
def plot_convergence(*args, **kwargs):
    """Plot one or several convergence traces.

    Parameters
    ----------
    * `args[i]` [`OptimizeResult`, list of `OptimizeResult`, or tuple]:
        The result(s) for which to plot the convergence trace.

        - if `OptimizeResult`, then draw the corresponding single trace;
        - if list of `OptimizeResult`, then draw the corresponding
          convergence traces in transparency, along with the average
          convergence trace;
        - if tuple, then `args[i][0]` should be a string label and
          `args[i][1]` an `OptimizeResult` or a list of `OptimizeResult`.

    * `ax` [`Axes`, optional]:
        The matplotlib axes on which to draw the plot, or `None` to create
        a new one.

    * `true_minimum` [float, optional]:
        The true minimum value of the function, if known.

    * `yscale` [None or string, optional]:
        The scale for the y-axis.

    Returns
    -------
    * `ax`: [`Axes`]:
        The matplotlib axes.
    """
    # <3 legacy python: keyword-only arguments via **kwargs
    ax = kwargs.get("ax", None)
    true_minimum = kwargs.get("true_minimum", None)
    yscale = kwargs.get("yscale", None)

    if ax is None:
        ax = plt.gca()

    ax.set_title("Convergence plot")
    ax.set_xlabel("Number of calls $n$")
    ax.set_ylabel(r"$\min f(x)$ after $n$ calls")
    ax.grid()

    if yscale is not None:
        ax.set_yscale(yscale)

    colors = cm.viridis(np.linspace(0.25, 1.0, len(args)))

    for results, color in zip(args, colors):
        if isinstance(results, tuple):
            name, results = results
        else:
            name = None

        if isinstance(results, OptimizeResult):
            n_calls = len(results.x_iters)
            # Best value found after each of the first i calls; plot against
            # 1-based call counts so the x-axis matches its label
            mins = [np.min(results.func_vals[:i])
                    for i in range(1, n_calls + 1)]
            ax.plot(range(1, n_calls + 1), mins, c=color, marker=".",
                    markersize=12, lw=2, label=name)
        elif isinstance(results, list):
            n_calls = len(results[0].x_iters)
            iterations = range(1, n_calls + 1)
            mins = [[np.min(r.func_vals[:i]) for i in iterations]
                    for r in results]
            for m in mins:
                ax.plot(iterations, m, c=color, alpha=0.2)
            ax.plot(iterations, np.mean(mins, axis=0), c=color, marker=".",
                    markersize=12, lw=2, label=name)

    # Compare against None explicitly so a true minimum of 0.0 still draws
    if true_minimum is not None:
        ax.axhline(true_minimum, linestyle="--", color="r", lw=1,
                   label="True minimum")
    if true_minimum is not None or name:
        ax.legend(loc="best")
    return ax
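# Hypothetical usage sketch for the list-of-results branch: five mock runs
# are drawn in transparency around their average convergence trace (again
# assuming OptimizeResult is scipy's dict-with-attributes class):
from scipy.optimize import OptimizeResult

rng = np.random.default_rng(0)
runs = [OptimizeResult(x_iters=list(range(20)),
                       func_vals=rng.uniform(0.0, 1.0, 20))
        for _ in range(5)]
plot_convergence(("five runs", runs), true_minimum=0.0)
plt.show()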
fig.savefig("/home/lunet/gytm3/Everest2019/Research/Weather/Figures/Winds_during_climbs.png",dpi=300) t,p=stats.ttest_ind(spring_suc_wind,spring_fail_wind) print("Spring results...") print(np.mean(spring_suc_wind),np.mean(spring_fail_wind)) print(t,p) t,p=stats.ttest_ind(wint_suc_wind,wint_fail_wind) print("Winter results...") print(np.mean(wint_suc_wind),np.mean(wint_fail_wind)) print(t,p) # Plots and climatology (max winds +/- 12 hours) color=cm.viridis(np.linspace(0,1,len(pctls))) fig,ax=plt.subplots(2,1) fig.set_size_inches(7,10) month_dug_s.plot(ax=ax.flat[0],color='k',linestyle='--',linewidth=3); ax2=ax.flat[0].twinx() month_urc.plot(ax=ax2,color='grey',linestyle='--',linewidth=1.5,label="") #month_dug.plot(ax=ax) vs=mon_stats.columns for i in range(len(vs)): y=mon_stats[vs[i]] y.plot(ax=ax2,c=color[i],label="") print("Max for %.2f percentile = %.2f" % (vs[i],np.max(y) )) a1=ax2.scatter(summit_winds.index.dayofyear,summit_winds.values[:],color='green',\ alpha=0.3, s=summit_winds.values[:]/np.nanmax(summit_winds)*50,label="Summited") a5=ax2.scatter(turn_winds.index.dayofyear,turn_winds.values[:],color='blue',alpha=0.3, s=turn_winds.values[:]/np.nanmax(turn_winds)*50,label="Turned")
def plot_sed(method, components, flux_df, filter_df, pandas_dfs, gal_row,
             samples_df, filter_dict, units, distance):
    gal_name = flux_df['name'][gal_row]

    # Read in the DustEM grid
    sCM20_df, \
        lCM20_df, \
        aSilM5_df, \
        wavelength_df = pandas_dfs

    wavelength = wavelength_df['wavelength'].values.copy()

    # Take redshift into account
    z = z_at_value(Planck15.luminosity_distance,
                   flux_df['dist'][gal_row] * u.Mpc)
    wavelength_redshifted = wavelength * (1 + z)
    frequency = 3e8 / (wavelength * 1e-6)

    # Convert the samples dataframe back into an array for plotting
    samples = np.zeros(samples_df.shape)
    i = 0
    for col_name in samples_df.dtypes.index:
        samples[:, i] = samples_df[col_name].tolist()
        i += 1

    # Pull out fluxes, flagging the data excluded from the fit
    obs_flux = []
    obs_error = []
    obs_wavelength = []
    obs_flag = []
    keys = []

    for key in filter_dict:
        try:
            if not np.isnan(flux_df[key][gal_row]):
                if flux_df[key][gal_row] > 0:
                    # Fit only the data with no flags
                    try:
                        if pd.isnull(flux_df[key + '_flag'][gal_row]):
                            obs_wavelength.append(filter_df[key][0])
                            obs_flux.append(flux_df[key][gal_row])
                            obs_error.append(flux_df[key + '_err'][gal_row])
                            obs_flag.append(0)
                        else:
                            obs_wavelength.append(filter_df[key][0])
                            obs_flux.append(flux_df[key][gal_row])
                            obs_error.append(flux_df[key + '_err'][gal_row])
                            obs_flag.append(1)
                        keys.append(key)
                    except KeyError:
                        # No flag column at all: treat the point as unflagged
                        obs_wavelength.append(filter_df[key][0])
                        obs_flux.append(flux_df[key][gal_row])
                        obs_error.append(flux_df[key + '_err'][gal_row])
                        obs_flag.append(0)
                        keys.append(key)
        except KeyError:
            pass

    obs_flux = np.array(obs_flux)
    obs_error = np.array(obs_error)
    obs_wavelength = np.array(obs_wavelength)
    obs_flag = np.array(obs_flag)

    # Shortest unflagged wavelength, used to normalise the stellar component
    idx = np.where(obs_wavelength[obs_flag == 0] ==
                   np.min(obs_wavelength[obs_flag == 0]))
    filtered_keys = [keys[i] for i in range(len(obs_flag))
                     if not obs_flag[i]]
    idx_key = filtered_keys[idx[0][0]]

    # Generate stars
    stars = general.define_stars(flux_df, gal_row, filter_df, frequency,
                                 idx_key)

    samples_to_pull = 150

    # For errorbars
    y_to_percentile_stars = np.zeros([len(frequency), samples_to_pull])
    y_to_percentile_small = np.zeros([len(frequency), samples_to_pull,
                                      components])
    y_to_percentile_large = np.zeros([len(frequency), samples_to_pull,
                                      components])
    y_to_percentile_silicates = np.zeros([len(frequency), samples_to_pull,
                                          components])
    y_to_percentile_total = np.zeros([len(frequency), samples_to_pull,
                                      components])

    for i in range(samples_to_pull):
        idx = np.random.randint(len(samples))
        omega_star = samples[idx, 0]
        y_to_percentile_stars[:, i] = omega_star * stars
        if method == 'ascfree':
            alpha = samples[idx, 1]

        for component in range(components):
            # Per-component parameters; the sample layout depends on the
            # fitting method
            if method == 'default':
                isrf = samples[idx, 2 * component + 1]
                dust_scaling = samples[idx, 2 * component + 2]
                y_sCM20 = 1
                y_lCM20 = 1
                y_aSilM5 = 1
                alpha = 5
            if method == 'abundfree':
                isrf = samples[idx, 5 * component + 1]
                y_sCM20 = samples[idx, 5 * component + 2]
                y_lCM20 = samples[idx, 5 * component + 3]
                y_aSilM5 = samples[idx, 5 * component + 4]
                dust_scaling = samples[idx, 5 * component + 5]
                alpha = 5
            if method == 'ascfree':
                isrf = samples[idx, 5 * component + 2]
                y_sCM20 = samples[idx, 5 * component + 3]
                y_lCM20 = samples[idx, 5 * component + 4]
                y_aSilM5 = samples[idx, 5 * component + 5]
                dust_scaling = samples[idx, 5 * component + 6]

            small_grains, \
                large_grains, \
                silicates = general.read_sed(isrf, alpha, sCM20_df,
                                             lCM20_df, aSilM5_df)

            y_to_percentile_small[:, i, component] = \
                y_sCM20 * small_grains * dust_scaling
            y_to_percentile_large[:, i, component] = \
                y_lCM20 * large_grains * dust_scaling
            y_to_percentile_silicates[:, i, component] = \
                y_aSilM5 * silicates * dust_scaling

            # FIX THIS, the factor of 2 is wrong!
            y = dust_scaling * (y_sCM20 * small_grains +
                                y_lCM20 * large_grains +
                                y_aSilM5 * silicates) + \
                omega_star / 2 * stars
            y_to_percentile_total[:, i, component] = y

    y_upper_stars = np.percentile(y_to_percentile_stars, 84, axis=1)
    y_lower_stars = np.percentile(y_to_percentile_stars, 16, axis=1)
    y_upper_small = np.percentile(y_to_percentile_small, 84, axis=1)
    y_lower_small = np.percentile(y_to_percentile_small, 16, axis=1)
    y_upper_large = np.percentile(y_to_percentile_large, 84, axis=1)
    y_lower_large = np.percentile(y_to_percentile_large, 16, axis=1)
    y_upper_silicates = np.percentile(y_to_percentile_silicates, 84, axis=1)
    y_lower_silicates = np.percentile(y_to_percentile_silicates, 16, axis=1)
    y_upper_total = np.percentile(y_to_percentile_total, 84, axis=1)
    y_lower_total = np.percentile(y_to_percentile_total, 16, axis=1)

    # And calculate the median lines
    y_median_stars = np.percentile(y_to_percentile_stars, 50, axis=1)
    y_median_small = np.percentile(y_to_percentile_small, 50, axis=1)
    y_median_large = np.percentile(y_to_percentile_large, 50, axis=1)
    y_median_silicates = np.percentile(y_to_percentile_silicates, 50, axis=1)
    y_median_total = np.percentile(y_to_percentile_total, 50, axis=1)

    # Sum the per-component totals
    y_upper = np.zeros(len(frequency))
    y_lower = np.zeros(len(frequency))
    y_median = np.zeros(len(frequency))
    for i in range(components):
        y_upper += y_upper_total[:, i]
        y_lower += y_lower_total[:, i]
        y_median += y_median_total[:, i]

    # If outputting luminosity, convert all these fluxes accordingly
    if units in ['luminosity']:
        y_upper_stars = general.convert_to_luminosity(y_upper_stars,
                                                      distance, frequency)
        y_lower_stars = general.convert_to_luminosity(y_lower_stars,
                                                      distance, frequency)
        y_upper_small = general.convert_to_luminosity(y_upper_small,
                                                      distance, frequency)
        y_lower_small = general.convert_to_luminosity(y_lower_small,
                                                      distance, frequency)
        y_upper_large = general.convert_to_luminosity(y_upper_large,
                                                      distance, frequency)
        y_lower_large = general.convert_to_luminosity(y_lower_large,
                                                      distance, frequency)
        y_upper_silicates = general.convert_to_luminosity(y_upper_silicates,
                                                          distance, frequency)
        y_lower_silicates = general.convert_to_luminosity(y_lower_silicates,
                                                          distance, frequency)
        y_upper = general.convert_to_luminosity(y_upper,
                                                distance, frequency)
        # Was converting y_upper a second time: a copy-paste bug
        y_lower = general.convert_to_luminosity(y_lower,
                                                distance, frequency)
        y_median_stars = general.convert_to_luminosity(y_median_stars,
                                                       distance, frequency)
        y_median_small = general.convert_to_luminosity(y_median_small,
                                                       distance, frequency)
        y_median_large = general.convert_to_luminosity(y_median_large,
                                                       distance, frequency)
        y_median_silicates = general.convert_to_luminosity(y_median_silicates,
                                                           distance, frequency)
        y_median_total = general.convert_to_luminosity(y_median_total,
                                                       distance, frequency)

        # And the actual fluxes!
        obs_flux = general.convert_to_luminosity(
            obs_flux, distance, 3e8 / (obs_wavelength * 1e-6))
        obs_error = general.convert_to_luminosity(
            obs_error, distance, 3e8 / (obs_wavelength * 1e-6))

    # Calculate residuals
    flux_model = filter_convolve(y_median, wavelength, filter_dict, keys)
    residuals = (obs_flux - flux_model) / obs_flux
    residual_err = obs_error / obs_flux
    residuals = np.array(residuals) * 100
    residual_err = np.array(residual_err) * 100
    residual_upper = (y_upper - y_median) * 100 / y_median
    residual_lower = (y_lower - y_median) * 100 / y_median

    fig1 = plt.figure(figsize=(10, 6))
    frame1 = fig1.add_axes((.1, .3, .8, .6))

    # Plot the best fit and errorbars. The flux errors here are only
    # RMS, so include overall calibration from Clark et al. (2018).
    # Per-filter calibration uncertainties, defined once rather than
    # rebuilding the dictionary on every loop iteration
    calib_uncert = {'Spitzer_3.6': 0.03, 'Spitzer_4.5': 0.03,
                    'Spitzer_5.8': 0.03, 'Spitzer_8.0': 0.03,
                    'Spitzer_24': 0.05, 'Spitzer_70': 0.1,
                    'Spitzer_160': 0.12, 'WISE_3.4': 0.029,
                    'WISE_4.6': 0.034, 'WISE_12': 0.046, 'WISE_22': 0.056,
                    'PACS_70': 0.07, 'PACS_100': 0.07, 'PACS_160': 0.07,
                    'SPIRE_250': 0.055, 'SPIRE_350': 0.055,
                    'SPIRE_500': 0.055, 'Planck_350': 0.064,
                    'Planck_550': 0.061, 'Planck_850': 0.0078,
                    'SCUBA2_450': 0.12, 'SCUBA2_850': 0.08,
                    'IRAS_12': 0.2, 'IRAS_25': 0.2, 'IRAS_60': 0.2,
                    'IRAS_100': 0.2}
    for i in range(len(keys)):
        obs_error[i] += calib_uncert[keys[i]] * obs_flux[i]

    # Stellar component
    plt.fill_between(wavelength, y_lower_stars, y_upper_stars,
                     facecolor='m', interpolate=True, lw=0.5,
                     edgecolor='none', alpha=0.3)
    plt.plot(wavelength, y_median_stars, c='m', ls='--', label='Stars')

    # Dust component models
    plt.fill_between(wavelength, y_lower, y_upper,
                     facecolor='k', interpolate=True, lw=0.5,
                     edgecolor='none', alpha=0.4)
    plt.plot(wavelength, y_median, c='k', label='Total')

    if components == 1:
        plt.fill_between(wavelength, y_lower_small[:, 0],
                         y_upper_small[:, 0],
                         facecolor='b', interpolate=True, lw=0.5,
                         edgecolor='none', alpha=0.3)
        plt.fill_between(wavelength, y_lower_large[:, 0],
                         y_upper_large[:, 0],
                         facecolor='g', interpolate=True, lw=0.5,
                         edgecolor='none', alpha=0.3)
        plt.fill_between(wavelength, y_lower_silicates[:, 0],
                         y_upper_silicates[:, 0],
                         facecolor='r', interpolate=True, lw=0.5,
                         edgecolor='none', alpha=0.3)
        plt.plot(wavelength, y_median_small[:, 0], c='b', ls='-.',
                 label='sCM20')
        plt.plot(wavelength, y_median_large[:, 0], c='g',
                 dashes=[2, 2, 2, 2], label='lCM20')
        plt.plot(wavelength, y_median_silicates[:, 0], c='r',
                 dashes=[5, 2, 10, 2], label='aSilM5')
    else:
        # One viridis colour per component
        plot_colour = iter(cm.viridis(np.linspace(0, 1, components)))
        for i in range(components):
            c = next(plot_colour)
            plt.fill_between(wavelength, y_lower_total[:, i],
                             y_upper_total[:, i],
                             facecolor=c, interpolate=True, lw=0.5,
                             edgecolor='none', alpha=0.4)
            plt.plot(wavelength, y_median_total[:, i], c=c, ls='--',
                     label='Component ' + str(i + 1))

    # Observed fluxes: filled markers were fitted, open markers were flagged
    plt.errorbar(obs_wavelength[obs_flag == 0], obs_flux[obs_flag == 0],
                 yerr=obs_error[obs_flag == 0], c='r', marker='o',
                 markersize=4, ls='none', zorder=99)
    plt.errorbar(obs_wavelength[obs_flag == 1], obs_flux[obs_flag == 1],
                 yerr=obs_error[obs_flag == 1], c='r', mfc='white',
                 marker='o', markersize=4, ls='none', zorder=98)

    plt.xscale('log')
    plt.yscale('log')
    plt.xlim([1, 1000])
    plt.ylim([0.5 * 10**np.floor(np.log10(np.min(obs_flux[obs_flag == 0])) - 1),
              10**np.ceil(np.log10(np.max(obs_flux[obs_flag == 0])) + 1)])

    # Move the legend outside of the plot so it doesn't overlap with anything
    plt.subplots_adjust(left=0.1, right=0.75)
    plt.legend(loc=2, fontsize=14, frameon=False,
               bbox_to_anchor=(1.01, 0.5))

    if units in ['flux']:
        plt.ylabel(r'$F_\nu$ (Jy)', fontsize=14)
    elif units in ['luminosity']:
        plt.ylabel(r'$\lambda L_\lambda$ ($L_\odot$)', fontsize=14)
    else:
        print('Unknown unit type specified! Defaulting to Jy')
        plt.ylabel(r'$F_\nu$ (Jy)', fontsize=14)

    plt.yticks(fontsize=14)
    plt.tick_params(labelbottom=False)

    # Add in residuals
    frame2 = fig1.add_axes((.1, .1, .8, .2))

    # Include the one-sigma errors in the residuals
    plt.fill_between(wavelength, residual_lower, residual_upper,
                     facecolor='k', interpolate=True, lw=0.5,
                     edgecolor='none', alpha=0.4)
    plt.errorbar(obs_wavelength[obs_flag == 0], residuals[obs_flag == 0],
                 yerr=residual_err[obs_flag == 0], c='r', marker='o',
                 markersize=4, ls='none', zorder=99)
    plt.errorbar(obs_wavelength[obs_flag == 1], residuals[obs_flag == 1],
                 yerr=residual_err[obs_flag == 1], c='r', mfc='white',
                 marker='o', markersize=4, ls='none', zorder=98)
    plt.axhline(0, ls='--', c='k')

    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.xscale('log')
    plt.xlabel(r'$\lambda$ ($\mu$m)', fontsize=14)
    plt.ylabel('Residual (%)', fontsize=14)
    plt.xlim([1, 1000])
    plt.ylim([-100, 100])

    plt.savefig('../plots/sed/' + gal_name + '_' + method + '_' +
                str(components) + 'comp.png',
                bbox_inches='tight', dpi=150)
    plt.savefig('../plots/sed/' + gal_name + '_' + method + '_' +
                str(components) + 'comp.pdf',
                bbox_inches='tight')
proj_choice = ccrs.PlateCarree()  # Mollweide is the closest to aitoff
plt.figure(figsize=(10, 6))
# PlateCarree and Mercator have functioning gridlines
ax = plt.axes(projection=proj_choice)
ax.stock_img()
plt.title('Plate Carree Projection')
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, zorder=2)
gl.xlabels_top = False  # renamed to top_labels in newer cartopy
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER

color_cm = iter(cm.viridis(np.linspace(0, 1, len(data_df))))
for i in range(len(data_df)):
    color_choice = next(color_cm)
    plt.plot(list(data_df['long'])[i], list(data_df['lat'])[i], 'o',
             color='red', transform=ccrs.Geodetic(), markersize=2, zorder=3)
    # The original snippet is truncated here; presumably it draws the
    # geodesic between the in and out cities, coloured by color_choice:
    plt.plot([list(data_df['city_long_in'])[i],
              list(data_df['city_long_out'])[i]],
             [list(data_df['city_lat_in'])[i],
              list(data_df['city_lat_out'])[i]],
             color=color_choice, transform=ccrs.Geodetic())
    wlr, eff = np.loadtxt(f, unpack=True)
    wls.append(wlr)
    wl = np.concatenate((wl, wlr))
    filts.append(eff)
    ax1.fill(wlr, eff, label=f.split('.')[0], edgecolor="none",
             color=filtercolor[i])

ax1.axhline(spec, color="black", lw=3, alpha=.5)
ax1.set_ylabel("Throughput")
ax1.axes.get_xaxis().set_visible(False)
wl = np.sort(wl)

corrections = np.empty((len(filters), len(coords)))
mags_notred = np.empty(len(filters))
mags_red = np.empty((len(filters), len(coords)))
alambdas = [[[] for _ in coords] for _ in filts]

color = cm.viridis(np.linspace(0, 1, len(coords)))
for i, c in enumerate(coords):
    C = coord.SkyCoord(str(c[0]) + " " + str(c[1]), unit="deg", frame="fk5")
    table = IrsaDust.get_query_table(C, radius=None)
    eb_v = table["ext SandF mean"]
    # F99 extinction curve scaled by E(B-V) * R_V, with R_V = 3.1
    al_plot = f99(wl, eb_v.data[0] * 3.1)
    for j, f in enumerate(filts):
        alambdas[j][i] = f99(wls[j], eb_v.data[0] * 3.1)
    ax2.plot(wl, al_plot, label=str(c[0])[:6] + " " + str(c[1])[:4],
             color=color[i])

ax2.set_xlabel(r"$\lambda$ in $\rm \AA$")
ax2.set_ylabel("Extinction in magnitudes")
ax2.set_ylim([0, 0.07])

alambdas = np.array(alambdas)
for j, f in enumerate(filts):
    # plt.axis([1536, 1542, -2, 0])
    plt.xlabel("Wavelength [nm]")
    plt.ylabel("Transmission")
    plt.legend(loc="upper right")
    plt.show()

##############################################################################

if args.Data == 'EO':
    # EO modulation performance
    sheet = xl.parse(5)
    WL = np.array(sheet[['WL[nm]']])
    WL = Range(WL)
    EDFA = np.array(sheet[['EDFA[W]']]) * 10**9
    T_arr = getArray3(101, 851, sheet, args.pos)

    # One colour per plotted trace (every 5th row of T_arr)
    color = iter(cm.viridis(np.linspace(0, 1, T_arr.shape[0] // 5 + 1)))
    for ii in range(0, T_arr.shape[0], 5):
        spec = T_arr[ii].reshape((851, 1))
        spec = norm(spec, EDFA)
        plt.plot(WL, spec, c=next(color), label='V' + str(ii // 5),
                 linewidth=2)

    # plt.axis([1536, 1542, -2, 0])
    plt.xlabel("Wavelength [nm]")
    plt.ylabel("Transmission")
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.show()