def plotFromPath(path, npy_file, y_lim=[0.5, 1]):
    # Polish axis labels: "Indeks Zgodności" = "Concordance Index",
    # "Korygowany Współczynnik Przekrywania" = "Corrected Overlap Coefficient"
    trans_dict = dict([("accord.npy", u"Indeks Zgodności"),
                       ("neigh.npy", u"Korygowany Współczynnik Przekrywania")])
    data_file = open(pathJoin(path, "data.txt"))
    matrix = np.load(pathJoin(path, npy_file))
    if npy_file == "accord.npy":
        matrix = matrix[:, :20]
    if npy_file == "neigh.npy":
        matrix = matrix[:, :190]
    means = np.mean(matrix, axis=1)
    errors = np.std(matrix, axis=1)
    data_dict = {}
    for line in data_file:
        parameter, value = line.rstrip("\n").split("\t")
        data_dict[parameter] = value
    plt.errorbar(np.arange(len(matrix)), means, yerr=errors)
    # title_string = ("Algorithm: {algorithm}, n_iter: {n_iter},"
    #                 "throw away: {throw_away}").format(**data_dict)
    # plt.ylim(y_lim)
    # plt.title(title_string)
    plt.xlim(-1, len(matrix) + 1)
    plt.ylabel(trans_dict[npy_file], fontsize=16)
    plt.xlabel(u"Ilość skupisk", fontsize=16)  # "Number of clusters"
    plt.tick_params(labelsize=14)
    # eval() parses the stored Python list literal from data.txt
    plt.xticks(np.arange(len(matrix)), eval(data_dict["list_of_cluster_n"]))
def plot_latent_old(self, filename=False):
    pl.figure()
    pl.clf()
    pl.plot(self.training_nodes, self.training_values, "r+", ms=20)
    # pl.gca().fill_between(
    #     self.test_nodes, self.fstar - self.s, self.fstar + self.s, color="#dddddd"
    # )
    pl.errorbar(
        self.test_nodes,
        self.fstar,
        self.s,
        barsabove=True,
        ecolor="black",
        linewidth=1,
        capsize=5,
        fmt="o",
    )
    pl.plot(self.test_nodes, self.fstar, "ro", ms=4)
    # pl.plot(self.test_nodes, self.fstar, "r--", lw=2)
    loglikelihood = -self.logPosterior(self.theta, self.training_nodes,
                                       self.training_values)
    # pl.title(
    #     "Latent Process Mean and Variance \n(length scale: %.3f , constant scale: %.3f , noise variance: %.3f )\n Log-Likelihood: %.3f"
    #     % (self.theta[1], self.theta[0], self.theta[2], loglikelihood)
    # )
    pl.title("Latent Process Mean and Variance")
    pl.xlabel("nodes")
    pl.ylabel("values")
    if type(filename) is str:
        pl.savefig(filename, bbox_inches="tight")
    # pl.axis([-5, 5, -3, 3])
    return self
def plot_best(self, extra=""): b = [ x for x in self.rez['models'] if x['period'] == self.rez['best_period'] ][0] from matplotlib import pylab as plt tt = (self.t / self.rez['best_period']) % 1 s = tt.argsort() x = tt[s] y = self.y[s] z = self.dy[s] if self.mag: mm = where(y == max(y)) else: mm = where(y == min(y)) pmm = x[mm] tt = (((self.t / self.rez['best_period']) % 1.0) - pmm + 0.5) % 1.0 s = tt.argsort() x = tt[s] - 0.5 y = self.y[s] z = self.dy[s] plt.errorbar(x, y, z, fmt='o', c="r") plt.plot(b['phase'], b['f'], c="b") plt.ylim(self.y.max() + 0.05, self.y.min() - 0.05) plt.xlabel("phase") plt.ylabel("flux/mag") plt.title("Best p = %.6f (chi2 = %.3f)" % (self.rez['best_period'], self.rez['best_chi2'])) plt.text(-0.2, self.y.max() - 0.05, "%s" % extra, ha='center', alpha=0.5)
def SNPhot_plotter_filt(obs, gp, filt=b'r'):
    df = obs[obs.FLT == filt]
    x = df.MJD.values
    y = df.FLUXCAL.values
    dy = df.FLUXCALERR.values
    colors = {b'g': 'g', b'r': 'r', b'i': 'c', b'z': 'm'}
    # pred_sig initialised too, so the ylim computation below also works
    # when gp is None
    pred = pred_var = pred_sig = 0.
    if gp is not None:
        x_pred = np.linspace(x.min() - 100., x.max() + 100., 1000)
        pred, pred_var = gp.predict(y, x_pred, return_var=True)
        # pred, pred_cov = gp.predict(y, x_pred, return_cov=True)
        pred_sig = np.sqrt(np.abs(pred_var))
        plt.fill_between(x_pred, pred - 2 * pred_sig, pred + 2 * pred_sig,
                         color=colors[filt], alpha=0.2)
        plt.plot(x_pred, pred, "k", lw=1.5, alpha=0.5)
    plt.ylim(
        np.append(np.append(y - dy * 1.1, [0]), [pred - pred_sig]).min(),
        np.append(y + dy * 1.1, [pred + pred_sig]).max())
    plt.errorbar(x, y, yerr=dy, fmt=".k", capsize=0)
    plt.xlim(x.min() - 30, x.max() + 30)
    plt.title(filt)
    return plt
def plot(self, outf=None, dosave=True, savedir="Plot/", show=True): if outf is None: outf = self.outf # print outf oo = mlab.csv2rec(outf, delimiter=" ") # print oo plt.errorbar(oo["time"] % self.period, oo["magnitude"], oo["error"], fmt="b.") plt.plot(oo["time"] % self.period, oo["model"], "ro") plt.title( "#%i P=%f d (chisq/dof = %f) r1+r2=%f" % (self.dotastro_id, self.period, self.outrez["chisq"], self.outrez.get("r1") + self.outrez.get("r2")) ) ylim = plt.ylim() # print ylim if ylim[0] < ylim[1]: plt.ylim(ylim[1], ylim[0]) plt.draw() if show: plt.show() if dosave: if not os.path.isdir(savedir): os.mkdir(savedir) plt.savefig("%splot%i.png" % (savedir, self.dotastro_id)) # ,self.period)) print("Saved", "%splot%i.png" % (savedir, self.dotastro_id)) # ,self.period) plt.clf()
def main(root):
    sims = glob(f"{root}/*/algo_stats.json")
    for sim in sims:
        N, T, mu, r_hgt, r_indel, r_xpose = params(rm_prefix(sim, root))
        # NOTE: this is a temporary hack
        if not (N == 100 and mu == 3e-4):
            continue
        with open(sim) as fd:
            d = json.load(fd)
        mus, mean_lens, sdev_lens, mean_deps, sdev_deps = [], [], [], [], []
        for i, (key, val) in enumerate(d.items()):
            mu, beta = coeffs(key)
            x, y = np.array(val['length']), np.array(val['depth'])
            mus.append(mu)
            mean_lens.append(np.mean(x))
            sdev_lens.append(np.std(x))
            mean_deps.append(np.mean(y))
            sdev_deps.append(np.std(y))
        mean_lens, sdev_lens, mean_deps, sdev_deps = as_array(
            mean_lens, sdev_lens, mean_deps, sdev_deps)
        idx = sorted(range(len(mus)), key=lambda i: mus[i])
        mean_lens = mean_lens[idx]
        sdev_lens = .5 * sdev_lens[idx]
        mean_deps = mean_deps[idx]
        sdev_deps = .5 * sdev_deps[idx]
        plt.errorbar(mean_lens, mean_deps, xerr=sdev_lens, yerr=sdev_deps)
        plt.xlabel("length")
        plt.ylabel("depth")
        plt.show()
def gp_regression(x, y_ave, y_std, points):
    # Training set
    X = np.atleast_2d(x).T
    y = y_ave
    dy = y_std
    noise = np.random.normal(0, dy)
    y += noise
    # Test set
    x = np.atleast_2d(np.linspace(x[0], x[-1], points)).T
    kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
    gp = GaussianProcessRegressor(kernel=kernel, alpha=(dy / y)**2,
                                  n_restarts_optimizer=10)
    gp.fit(X, y)
    y_pred, sigma = gp.predict(x, return_std=True)
    if 0:  # debug plot, disabled
        fig = plt.figure()
        plt.errorbar(X, y, dy, fmt='r.', markersize=10, label=u'Observations')
        plt.plot(x, y_pred, 'b-', label=u'Prediction')
        plt.fill(np.concatenate([x, x[::-1]]),
                 np.concatenate([y_pred - 1.9600 * sigma,
                                 (y_pred + 1.9600 * sigma)[::-1]]),
                 alpha=.5, fc='b', ec='None', label='95% confidence interval')
        plt.xlabel('$x$')
        plt.ylabel('$f(x)$')
        plt.ylim(-1, 1)
        plt.legend(loc='upper left')
    return y_pred, sigma
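# Hypothetical usage sketch for gp_regression (not from the original source;
# the data are made up). Assumes the module-level imports the function itself
# relies on: numpy as np, and sklearn's GaussianProcessRegressor / RBF / C.
def demo_gp_regression():
    x_obs = np.linspace(0.0, 10.0, 25)
    y_mean = 2.0 + np.sin(x_obs)       # kept away from zero: alpha divides by y
    y_sigma = 0.1 * np.ones_like(x_obs)
    # pass a copy: gp_regression adds noise to y_ave in place
    y_pred, sigma = gp_regression(x_obs, y_mean.copy(), y_sigma, points=200)
    return y_pred, sigma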
def plot_hypothesis5():
    weights = [158.0, 164.2, 160.3, 159.9, 162.1, 164.6,
               169.6, 167.4, 166.4, 171.0, 171.2, 172.6]
    xs = range(1, len(weights) + 1)
    line = np.poly1d(np.polyfit(xs, weights, 1))
    with figsize(y=2.5):
        plt.figure()
        plt.errorbar(range(1, 13), weights, label='weights',
                     yerr=5, fmt='o', capthick=2, capsize=10)
        plt.plot(xs, line(xs), c='r', label='hypothesis')
        plt.xlim(0, 13)
        plt.ylim(145, 185)
        plt.xlabel('day')
        plt.ylabel('weight (lbs)')
        book_plots.show_legend()
        plt.grid(False)
def errorbar_plot(data, x_spec, y_spec, fname):
    """ Dynamically create errorbar plot """
    x_label, x_func = x_spec
    y_label, y_func = y_spec

    # compute data
    points = collections.defaultdict(list)
    for syst, mat, _ in data:
        x_value = x_func(syst, mat)
        y_value = y_func(syst, mat)
        if x_value is None or y_value is None:
            continue
        points[x_value].append(y_value)

    # plot figure
    densities = []
    averages = []
    errbars = []
    for dens, avgs in points.items():
        densities.append(dens)
        averages.append(np.mean(avgs))
        errbars.append(np.std(avgs))

    plt.errorbar(densities, averages, yerr=errbars, fmt='o', clip_on=False)
    plt.title('')
    plt.xlabel(x_label)
    plt.ylabel(y_label)

    plt.tight_layout()
    save_figure('images/%s' % fname, bbox_inches='tight')
    plt.close()
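# Usage sketch for errorbar_plot (illustrative only; the records and
# accessors are made up). Each x_spec / y_spec is an (axis label, extractor)
# pair, and `save_figure` is assumed to be this module's own helper.
def demo_errorbar_plot():
    # three (syst, mat, extra) records with a fake 'density' attribute
    data = [({'density': 0.1}, 1.0, None),
            ({'density': 0.1}, 1.2, None),
            ({'density': 0.2}, 2.1, None)]
    x_spec = ('density', lambda syst, mat: syst['density'])
    y_spec = ('score', lambda syst, mat: mat)
    errorbar_plot(data, x_spec, y_spec, 'demo_errorbar.pdf')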
def choose_gamma_knn(X, y, range_gamma, plot_color):
    '''Implement 5 fold cv to determine optimal gamma'''
    # Param setup
    kf = KFold(n_splits=5)
    mean_error = []
    std_error = []
    for gammaX in range_gamma:
        # Params
        mse_temp = []

        # Set up gaussian kernel function
        def gaussian_kernel(distances):
            weights = np.exp(-gammaX * (distances**2))
            return weights / np.sum(weights)

        for train, test in kf.split(X):
            # Model
            model = KNeighborsRegressor(n_neighbors=len(train),
                                        weights=gaussian_kernel)
            model.fit(X[train], y[train])
            ypred = model.predict(X[test])
            mse = mean_squared_error(y[test], ypred)
            mse_temp.append(mse)
        # Get mean & variance
        mean_error.append(np.array(mse_temp).mean())
        std_error.append(np.array(mse_temp).std())
    # Plot
    fig = plt.figure(figsize=(15, 12))
    plt.errorbar(range_gamma, mean_error, yerr=std_error, color=plot_color)
    plt.xlabel('gamma')
    plt.ylabel('Mean square error')
    plt.title('Choice of gamma in kernelised knn - 5 fold CV')
    plt.show()
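# Usage sketch for choose_gamma_knn with synthetic data (not from the
# original source). Assumes numpy as np plus the sklearn imports the
# function itself uses (KFold, KNeighborsRegressor, mean_squared_error).
def demo_choose_gamma_knn():
    rng = np.random.RandomState(0)
    X = rng.uniform(-1, 1, size=(100, 1))
    y = X[:, 0]**2 + 0.05 * rng.randn(100)
    choose_gamma_knn(X, y, range_gamma=[0.1, 1, 5, 10, 25], plot_color='b')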
def compareMassRatioVsRedshift(his, hiserr, mine, myerr, hisfield, hisid,
                               myfield, myid, z):
    import matplotlib.pylab as plt
    a = []
    b = []
    c = []
    d = []
    e = []
    z_list = []
    for i in range(0, np.shape(mine)[0]):
        if mine[i] != 0:
            # his_id_idx = np.where(hisid == myid[i])[0]
            pos_id_idx = np.where(hisid == myid[i])[0]
            if len(pos_id_idx) == 1:
                his_id_idx = int(pos_id_idx)
            else:
                pos_field_idx = np.where(hisfield == myfield[i])[0]
                for ii in range(0, len(pos_field_idx)):
                    for iii in range(0, len(pos_id_idx)):
                        if pos_field_idx[ii] == pos_id_idx[iii]:
                            his_id_idx = int(pos_id_idx[iii])
            # print(str(myid[i]), str(myfield[i]), str(hisid[his_id_idx]), str(hisfield[his_id_idx]))
            assert myfield[i] == hisfield[his_id_idx]
            assert myid[i] == hisid[his_id_idx]
            if his[his_id_idx] != -999:
                a.append(his[his_id_idx])
                b.append(hiserr[his_id_idx])
                c.append(mine[i])        # -np.log10(((1.0+z[i])))
                d.append(myerr[i, 0])    # -np.log10(((1.0+z[i])))
                e.append(myerr[i, 1])    # -np.log10(1.0+z[i])
                z_list.append(z[i])
    # since errors are just percentile, need difference from median
    d = np.asarray(c) - np.asarray(d)
    e = np.asarray(e) - np.asarray(c)
    c_a = 10**np.array(c)
    a_a = 10**np.array(a)
    ratio = c_a / a_a
    plt.errorbar(z_list, ratio, fmt='o', color='b', capsize=0, alpha=0.50)
    # plot the y=x line
    x = np.linspace(np.min(z_list), np.max(z_list), 10)
    plt.plot(x, np.ones(len(x)), 'k--')
    plt.yscale('log')
    plt.xlabel("Redshift")
    plt.ylabel("MCSED / Alex's [note: ratio of actual masses]")
    plt.title("Redshift vs Mass Ratio (MCSED/Alex)")
    # plt.legend(['With Neb. Emis.'], loc=0)  # , 'W/o Neb. Emis'], loc=0)
    plt.show()
def main():
    # epochs == 100
    bz = np.array([2, 4, 8, 16, 32, 64, 128, 256, 512, 1024,
                   2048, 4096, 8192, 16384])
    sc = np.array([0.97174, 0.96976, 0.96876, 0.96734, 0.96596, 0.96332,
                   0.95684, 0.94564, 0.93312, 0.91788, 0.90046, 0.87318,
                   0.82926, 0.76104])
    ec = np.array([0.00006, 0.00034, 0.00007, 0.00012, 0.00011, 0.00040,
                   0.00042, 0.00040, 0.00038, 0.00040, 0.00078, 0.00086,
                   0.00203, 0.00591])
    # minibatches = 8192
    sc0 = np.array([0.93658, 0.94556, 0.94856, 0.94916, 0.95012, 0.94946,
                    0.95068, 0.95038, 0.95112, 0.95030, 0.95066, 0.95028,
                    0.94992, 0.94994])
    ec0 = np.array([0.00214, 0.00078, 0.00070, 0.00115, 0.00025, 0.00028,
                    0.00053, 0.00041, 0.00045, 0.00023, 0.00032, 0.00058,
                    0.00044, 0.00022])
    plt.errorbar(bz, sc, ec, marker='o', color='red')
    plt.errorbar(bz, sc0, ec0, marker='s', color='blue')
    plt.xlabel("Minibatch Size")
    plt.ylabel("Mean Score")
    plt.tight_layout()
    plt.savefig("mnist_nn_experiments_batch_size_plot.pdf", format="pdf",
                dpi=600)
    plt.show()
def choose_alpha_ridge(X, y, range_C, gammaX, plot_color):
    '''Implement 5 fold cv to determine optimal C'''
    # Param setup
    kf = KFold(n_splits=5)
    mean_error = []
    std_error = []
    for C in range_C:
        # Params
        mse_temp = []
        # Model
        model = KernelRidge(alpha=1.0 / (2 * C), kernel='rbf', gamma=gammaX)
        # 5 fold CV
        for train, test in kf.split(X):
            model.fit(X[train], y[train])
            ypred = model.predict(X[test])
            mse = mean_squared_error(y[test], ypred)
            mse_temp.append(mse)
        # Get mean & variance
        mean_error.append(np.array(mse_temp).mean())
        std_error.append(np.array(mse_temp).std())
    # Plot
    fig = plt.figure(figsize=(15, 12))
    plt.errorbar(range_C, mean_error, yerr=std_error, color=plot_color)
    plt.xlabel('C')
    plt.ylabel('Mean square error')
    plt.title('Choice of C in kernelised Ridge Regression - 5 fold CV, '
              'gamma = {}'.format(gammaX))
    plt.show()
def transits(pipe, mode='transit-times'):
    _transits = pipe.transits
    if mode == 'transit-times':
        y = _transits.omc
        yerr = _transits.ut0
        ylabel = 'O - C'
    elif mode == 'transit-rp':
        y = _transits.rp
        yerr = _transits.urp
        ylabel = 'Rp/Rstar'
    else:
        # guard added: y/yerr would otherwise be undefined below
        raise ValueError("unknown mode: %s" % mode)
    x = _transits.transit_id
    xl = x.min() - 1, x.max() + 1
    xlabel = 'transit-id'
    yspan = y.ptp()
    yl = y.min() - 1 * yspan, y.max() + 1 * yspan
    plt.errorbar(x, y, yerr=yerr, fmt='o', ms=5, mew=0, capsize=0)
    plt.xlim(*xl)
    plt.ylim(*yl)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    AddAnchored(mode, prop=tprop, frameon=True, loc=2)
def extractEffectiveMass(df, cut_low=0, cut_right=None, n_cuts=5,
                         makePlot=False):
    if cut_right is None:
        cut_right = max(df["t"])
    window = (cut_right - cut_low) / n_cuts
    slope = np.zeros(n_cuts)
    intercept = np.zeros(n_cuts)
    slopeError = np.zeros(n_cuts)
    interceptError = np.zeros(n_cuts)
    for i in range(0, n_cuts):
        # select the right intervals for the linear fit
        cut_low_current = cut_low + i * window
        cut_high_current = cut_low + (i + 1) * window
        df1 = df[(df["t"] > cut_low_current) & (df["t"] < cut_high_current)]
        if len(df1["t"]) <= 3:
            raise not_enough_data()
        params, covs = curve_fit(linear, df1["t"], df1["W"],
                                 sigma=df1["deltaW"], maxfev=100000)
        slope[i] = params[0]
        intercept[i] = params[1]
        slopeError[i] = sqrt(covs[0][0])
        interceptError[i] = sqrt(covs[1][1])
        if makePlot:
            up = (slope[i] + slopeError[i]) + (intercept[i] + interceptError[i]) / df1["t"]
            down = (slope[i] - slopeError[i]) + (intercept[i] - interceptError[i]) / df1["t"]
            plt.fill_between(df1["t"], up, down, alpha=0.4)
            plt.errorbar(np.array(df1["t"]),
                         np.array(df1["W"]) / np.array(df1["t"]),
                         np.array(df1["deltaW"]) / np.array(df1["t"]),
                         fmt="or")
    return np.array([slope.mean(), sqrt(slopeError.mean()**2 + slope.var())])
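# Usage sketch for extractEffectiveMass with fake data (not from the
# original source). Assumes pandas as pd, numpy as np, and that `linear`
# and `not_enough_data` are defined in this module as the function expects.
def demo_extractEffectiveMass():
    t = np.linspace(1.0, 50.0, 200)
    df = pd.DataFrame({"t": t,
                       "W": 0.3 * t + 1.5 + np.random.normal(0, 0.01, t.size),
                       "deltaW": np.full(t.size, 0.01)})
    # returns [mean slope, combined slope error] over the 5 fit windows
    return extractEffectiveMass(df, cut_low=0, cut_right=50.0, n_cuts=5)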
def parse_error(d):
    # NOTE: relies on `errors`, `full_marsis`, `fmt`, `kwargs`, and `self`
    # from the enclosing scope (this is a closure inside a plotting method).
    if not np.isfinite(d.fp_local):
        return
    f = ais_code.fp_to_ne(d.fp_local)
    f0 = ais_code.fp_to_ne(d.fp_local + d.fp_local_error)
    f1 = ais_code.fp_to_ne(d.fp_local - d.fp_local_error)
    if errors:
        plt.plot((d.time, d.time), (f0, f1), color='lightgrey',
                 linestyle='solid', marker='None', zorder=-1000, **kwargs)
    plt.plot(d.time, f, fmt, ms=self.marker_size, zorder=1000, **kwargs)
    if full_marsis and hasattr(d, 'maximum_fp_local'):
        plt.plot(d.time, ais_code.fp_to_ne(d.maximum_fp_local), 'b.',
                 ms=self.marker_size, zorder=900, **kwargs)
    if full_marsis:
        if np.isfinite(d.morphology_fp_local):
            v, e = ais_code.fp_to_ne(d.morphology_fp_local,
                                     d.morphology_fp_local_error)
            plt.errorbar(float(d.time), v, yerr=e, marker='x', ms=1.3,
                         color='purple', zorder=1e90, capsize=0.,
                         ecolor='plum')
        if np.isfinite(d.integrated_fp_local):
            v, e = ais_code.fp_to_ne(d.integrated_fp_local,
                                     d.integrated_fp_local_error)
            plt.errorbar(float(d.time), v, yerr=e, marker='x', ms=1.3,
                         color='blue', zorder=1e99, capsize=0.,
                         ecolor='cyan')
def plot_source_radius(cat):
    mw = []
    mw_std = []
    source_radius = []
    source_radius_std = []
    plt.figure(figsize=(10, 4.5))
    # Read the source radius.
    for event in cat:
        mag = event.magnitudes[1]
        if len(mag.comments) != 2:
            continue
        mw.append(mag.mag)
        mw_std.append(mag.mag_errors.uncertainty)
        sr, std = mag.comments[1].text.split(";")
        _, sr = sr.split("=")
        _, std = std.split("=")
        sr = float(sr[:-1])
        std = float(std)
        source_radius.append(sr)
        source_radius_std.append(std)
    plt.errorbar(mw, source_radius, yerr=source_radius_std, fmt="o",
                 linestyle="None")
    plt.xlabel("Mw", fontsize="x-large")
    plt.ylabel("Source Radius [m]", fontsize="x-large")
    plt.grid()
    plt.savefig("/Users/lion/Desktop/SourceRadius.pdf")
def plotting_multiple_files(dict_of_files, title='Title', check='F'):
    linestyles = {'fil': '--', 'raw': '-', 'vic': '-'}
    for reps in sorted(list(dict_of_files)):
        Sample_Type = reps.split(' ')[0]
        if not isinstance(dict_of_files[reps], pd.DataFrame):
            df = pd.read_csv(dict_of_files[reps], skiprows=0, delimiter='\t')
        else:
            df = dict_of_files[reps]
        df.reset_index(inplace=True, drop=False)
        df.rename(columns={'c_mean': 'mean', 'a_mean': 'mean',
                           'c_std': 'std', 'a_std': 'std'}, inplace=True)
        plt.plot('wl', 'mean', data=df, label=reps,
                 linestyle=linestyles[Sample_Type])
        plt.errorbar('wl', 'mean', yerr='std', fmt='k-', linewidth=0.5,
                     data=df)
    plt.ylabel('[1/m]')
    plt.xlabel('Wavelength (nm)')
    plt.title(title)
    plt.legend(sorted(list(dict_of_files)))
    return plt
def check_model(self):
    gamma11, M11, N11 = map(
        lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
        zip(*np.percentile(self.samples, [16, 50, 84], axis=0)))
    means = [gamma11[0], M11[0], N11[0]]
    ups = [gamma11[1], M11[1], N11[1]]
    downs = [gamma11[2], M11[2], N11[2]]
    print(means, ups, downs)
    dict_obs = self.load()
    dict_SMHM = dict(gamma10=0.57, gamma11=gamma11[0], beta10=None,
                     beta11=None, M10=11.95, SHMnorm10=None, M11=M11[0],
                     SHMnorm11=N11[0])
    maker = make_satellites_only(use_peak=True, use_Multidark=False)
    df_sat = maker.make_sat(dict_SMHM=dict_SMHM, Mstar_low=11.2,
                            Mstar_up=12, mu=2.5, AK=0.013, sigmaK=0.1,
                            M0=1.5)
    # restore original bins
    bins = np.append(dict_obs['rbins'] - dict_obs['rbinwidth'] / 2,
                     dict_obs['rbins'][-1] + dict_obs['rbinwidth'] / 2)
    Vol = (250 / 0.7)**3
    model = (np.histogram(df_sat['Re_sat'], bins=bins)[0]
             / Vol / dict_obs['rbinwidth'])
    plt.errorbar(dict_obs['rbins'], dict_obs['phi'], yerr=dict_obs['err'],
                 label='SDSS sat', markersize=15, color='navy', fmt='v',
                 zorder=1)
    plt.plot(dict_obs['rbins'], model, lw=4, label='satellites, MCMC',
             color='navy', ls='-', zorder=2)
    plt.yscale('log')
    plt.ylabel(r'$\phi(R_e)$')
    plt.xlabel(r'$\log{R_e} \ [kpc]$')
    plt.xlim(0.25, 2.5)
    plt.ylim(3.e-9, 5.e-4)
    # plt.ylim(5.e-9, 1.e-4)
    plt.legend(frameon=False, fontsize=30)
    plt.title('Evolving SMHM, MQGs')
    plt.savefig('./Pictures/modelMCMC.pdf', bbox_inches='tight')
    plt.close()
def plot(self, outf=None, dosave=True, savedir="Plot/", show=True): if outf is None: outf = self.outf #print outf oo = mlab.csv2rec(outf, delimiter=" ") #print oo plt.errorbar(oo['time'] % self.period, oo['magnitude'], oo['error'], fmt="b.") plt.plot(oo['time'] % self.period, oo['model'], "ro") plt.title("#%i P=%f d (chisq/dof = %f) r1+r2=%f" % (self.dotastro_id,self.period,self.outrez['chisq'],\ self.outrez.get('r1') + self.outrez.get('r2'))) ylim = plt.ylim() #print ylim if ylim[0] < ylim[1]: plt.ylim(ylim[1], ylim[0]) plt.draw() if show: plt.show() if dosave: if not os.path.isdir(savedir): os.mkdir(savedir) plt.savefig("%splot%i.png" % (savedir, self.dotastro_id)) #,self.period)) print "Saved", "%splot%i.png" % (savedir, self.dotastro_id ) #,self.period) plt.clf()
def get_fitting_isochrone(self, isochrone_fitter, ax=None,
                          spectrum_ids=[0, 1]):
    combined_stellar_params = self.get_combined_stellar_parameters(
        spectrum_ids=spectrum_ids)
    (age, metallicity), closest_isochrone = \
        isochrone_fitter.find_closest_isochrone(
            teff=combined_stellar_params['teff'],
            logg=combined_stellar_params['logg'],
            feh=combined_stellar_params['feh'])
    if ax is None:
        return closest_isochrone
    ax.plot(closest_isochrone['teff'], closest_isochrone['logg'])
    ax.errorbar([combined_stellar_params['teff']],
                [combined_stellar_params['logg']],
                xerr=[combined_stellar_params['teff_uncertainty']],
                yerr=[combined_stellar_params['logg_uncertainty']],
                label='Teff={0:.2f}+-{1:.2f}\n logg={2:.2f}+-{3:.2f}'
                .format(combined_stellar_params['teff'],
                        combined_stellar_params['teff_uncertainty'],
                        combined_stellar_params['logg'],
                        combined_stellar_params['logg_uncertainty']))
    ax.set_title('Age = {0:.2g} Gyr [Fe/H] = {1:.2g}'.format(age,
                                                             metallicity))
    ax.invert_xaxis()
    ax.invert_yaxis()
def do_plot_obs(self):
    """ Plot the observed radial velocities as a function of time.
    Data from each file are color coded and labeled.
    """
    # import pyqtgraph as pg
    colors = 'bgrcmykw'  # let's hope for less than 9 data-sets
    t, rv, err = self.time, self.vrad, self.error  # temporaries
    plt.figure()
    # p = pg.plot()
    # plot each files' values
    for i, (fname, [n, nout]) in enumerate(sorted(self.provenance.items())):
        m = n - nout  # how many values are there after restriction
        # e = pg.ErrorBarItem(x=t[:m], y=rv[:m],
        #                     height=err[:m], beam=0.5,
        #                     pen=pg.mkPen(None))
        #                     # pen={'color': 0.8, 'width': 2})
        # p.addItem(e)
        # p.plot(t[:m], rv[:m], symbol='o')
        plt.errorbar(t[:m], rv[:m], yerr=err[:m],
                     fmt='o' + colors[i], label=fname)
        t, rv, err = t[m:], rv[m:], err[m:]
    plt.xlabel('Time [days]')
    plt.ylabel('RV [km/s]')
    plt.legend()
    plt.tight_layout()
    plt.show()
def display_vecstascollector_summary(stcol): n = stcol.getStat("N[0]") print n mean = stcol.getMean() stddev = stcol.getStdDev() ucov = (1.0 / n) * stcol.getXtX() cov = stcol.getCovariance() corr = stcol.getCorrelation() pxy_px_py = ucov - outer(mean, mean) plt.subplot(4, 2, 1) plt.errorbar(arange(len(mean)), mean, yerr=stddev) plt.title("activations mean and stddev") plt.subplot(4, 2, 2) plot_histogram(mean, "activations mean") plt.subplot(4, 2, 3) plot_offdiag_histogram(ucov, "uncentered covariances") plt.subplot(4, 2, 4) plot_diag_histogram(ucov, "uncentered variances") plt.subplot(4, 2, 5) plot_offdiag_histogram(cov, "covariances") plt.subplot(4, 2, 6) plot_diag_histogram(cov, "variances") plt.subplot(4, 2, 7) plot_offdiag_histogram(corr, "correlations") plt.subplot(4, 2, 8) plot_histogram(stddev, "stddevs") plt.show()
def plot(scores, scores2=None):
    import matplotlib.pylab as pl
    from matplotlib.ticker import FuncFormatter

    def percentages(x, pos=0):
        return "%2.2f%%" % (100 * x)

    ax1 = pl.subplot(211)
    # NOTE: the original used `scores2` unconditionally here, which crashes
    # when scores2 is None; use `scores` as in the other panels.
    pl.errorbar(scores[:, 1], scores[:, 2], yerr=scores[:, 5],
                c="k", marker="o")
    # if scores2 is not None:
    #     pl.errorbar(scores2[:, 1] + 0.02, scores2[:, 2], yerr=scores2[:, 5],
    #                 c='0.5', marker='s')
    pl.ylabel("Singular acc.")
    ax1.yaxis.set_major_formatter(FuncFormatter(percentages))
    pl.xlabel("Proportion of training set used")

    ax2 = pl.subplot(212, sharex=ax1)
    pl.errorbar(scores[:, 1], scores[:, 3], yerr=scores[:, 6],
                c="k", marker="o")
    if scores2 is not None:
        pl.errorbar(scores2[:, 1], scores2[:, 3], yerr=scores2[:, 6],
                    c="k", marker="s")
    ax2.yaxis.set_major_formatter(FuncFormatter(percentages))
    # ax3 = pl.subplot(313, sharex=ax2)
    pl.errorbar(scores[:, 1] + 0.02, scores[:, 4], yerr=scores[:, 7],
                c="0.5", marker="o")
    if scores2 is not None:
        pl.errorbar(scores2[:, 1] + 0.02, scores2[:, 4], yerr=scores2[:, 7],
                    c="0.5", marker="s")
    pl.ylabel("Plural and combined acc.")
    # ax3.yaxis.set_major_formatter(FuncFormatter(percentages))
    # pl.setp(ax3.get_xticklabels(), visible=False)
    # pl.show()
    for ext in ("pdf", "svg", "png"):
        pl.savefig("train_size-i.%s" % ext)
def display_vecstascollector_summary(stcol): n = stcol.getStat("N[0]") print n mean = stcol.getMean() stddev = stcol.getStdDev() ucov = (1.0/n)*stcol.getXtX() cov = stcol.getCovariance() corr = stcol.getCorrelation() pxy_px_py = ucov-outer(mean,mean) plt.subplot(4,2,1) plt.errorbar(arange(len(mean)),mean,yerr=stddev) plt.title("activations mean and stddev") plt.subplot(4,2,2) plot_histogram(mean, "activations mean") plt.subplot(4,2,3) plot_offdiag_histogram(ucov, "uncentered covariances") plt.subplot(4,2,4) plot_diag_histogram(ucov, "uncentered variances") plt.subplot(4,2,5) plot_offdiag_histogram(cov, "covariances") plt.subplot(4,2,6) plot_diag_histogram(cov, "variances") plt.subplot(4,2,7) plot_offdiag_histogram(corr, "correlations") plt.subplot(4,2,8) plot_histogram(stddev, "stddevs") plt.show()
def plot_avg(measure_type, tofile=None):
    fig, ax = plt.subplots()
    for name in system_names:
        Y = []
        syst, fmt = systems[name]
        for op in op_types:
            if measure_type == 'latency':
                latencies = get_datapoints_latency(op)
                Y_op, _ = latencies[name]
            elif measure_type == 'throughput':
                throughputs = get_datapoints_throughput(op)
                Y_op = throughputs[name]
            Y.append(Y_op)
        Y = np.mean(Y, axis=0)
        plt.errorbar(X, Y, fmt=fmt, label=name)
    ax.set_xticks(X)
    ax.set_xlabel('Number of concurrent nodes.')
    ax.set_xlim([0, 17])
    if measure_type == 'throughput':
        ax.set_ylabel('Average throughput in KOps per second.')
    elif measure_type == 'latency':
        ax.set_ylabel('Average latency in ms.')
    lgd = plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                     ncol=4, mode="expand", borderaxespad=0.)
    plt.grid(True)
    if tofile is not None:
        plt.savefig(tofile, bbox_extra_artists=(lgd,), bbox_inches='tight')
    else:
        plt.show()
def plot_latent(self, filename=False):
    pl.figure(figsize=[15, 9])
    pl.clf()
    errorbar_df = self.df.iloc[list(self.test_nodes)].sort_values(
        by=["pvtdist"])
    pl.errorbar(
        errorbar_df["pvtdist"].values,
        errorbar_df["fstar"].values,
        errorbar_df["variance_s"].values,
        barsabove=True,
        ecolor="black",
        linewidth=1,
        capsize=5,
        fmt="o",
    )
    plot_df = self.df.iloc[list(self.training_nodes)].sort_values(
        by=["pvtdist"])
    pl.plot(plot_df["pvtdist"].values, plot_df["train_vals"].values,
            "r+", ms=20)
    if self.pivot_flag:
        pvt_dist_df = self.df.sort_values(by=["pvtdist"])
        pl.plot(pvt_dist_df["pvtdist"].values, pvt_dist_df["pvtdist"].values)
    pl.title("Latent Process Mean and Variance")
    pl.xlabel("pivot distance")
    pl.ylabel("values")
    if type(filename) is str:
        pl.savefig(filename, bbox_inches="tight")
    # pl.axis([-5, 5, -3, 3])
    return self
def plot_runs(runs):
    """ Plot population evolutions """
    ts = range(len(runs[0]))
    cmap = plt.get_cmap('viridis')
    for i, r in enumerate(runs):
        mean, var = zip(*r)
        bm, cm = zip(*mean)
        bv, cv = zip(*var)
        color = cmap(float(i) / len(runs))
        plt.errorbar(ts, bm, fmt='-', yerr=bv, c=color)
        plt.errorbar(ts, cm, fmt='--', yerr=cv, c=color)
    plt.title('population evolution overview')
    plt.xlabel('time')
    plt.ylabel('value')
    plt.ylim((0, 1))
    plt.plot(0, 0, '-', c='black', label='benefit value')
    plt.plot(0, 0, '--', c='black', label='cost value')
    plt.legend(loc='best')
    plt.savefig('result.pdf')
    plt.show()
def plot_energy_curve(y_min, y_eb, mit_min, mit_eb, error_mit=False):
    fig = plt.figure(0)
    plt.plot(r_values, energies)  # Theory curve
    error_labels = ['Total duration = 1,700 ns', 'Total duration = 2,740 ns']
    for j in range(len(y_min)):
        plt.errorbar(r_values, y_min[j], marker='o', markersize=1,
                     linestyle='None', yerr=y_eb[j], label=error_labels[j])
    if error_mit:
        plt.errorbar(r_values, mit_min, marker='o', markersize=1,
                     linestyle='None', yerr=mit_eb,
                     label="Error mitigated result")
    plt.legend()
    plt.xlabel(r'Bond distance (Angstrom)', fontsize=20)
    plt.ylabel(r'Total Energy (Hartree)', fontsize=20)
    plt.show()
def main():
    parser = OptionParser(
        description='Fitting to a noisy data generated by a known function')
    parser.add_option("--npoints", type="int", help="number of data points")
    parser.add_option("--low", type="float", help="smallest data point")
    parser.add_option("--high", type="float", help="highest data point")
    parser.add_option("--sigma", type="float", help="std of noise")
    (options, args) = parser.parse_args()

    pl.figure(1, (7, 6))
    ax = pl.subplot(1, 1, 1)
    pl.connect('key_press_event', kevent.press)

    sigma = options.sigma
    Ls = np.append(np.linspace(options.low, options.high, options.npoints), 46)
    nLs = np.linspace(min(Ls), max(Ls), 100)
    Mis = HalfLog(Ls, .5, 0.5)
    errs = np.random.normal(0, sigma, len(Mis))
    Mis = Mis + errs
    pl.errorbar(Ls, Mis, errs, ls='', marker='s', color='b')
    print(sigma / Mis)

    coeff, var_matrix = curve_fit(FreeLog, Ls, Mis, (1.0, 1.0, 1.0))
    err = np.sqrt(np.diagonal(var_matrix))
    dof = len(Ls) - len(coeff)
    chisq = sum(((Mis - FreeLog(Ls, coeff[0], coeff[1], coeff[2])) / sigma)**2)
    cdf = special.chdtrc(dof, chisq)
    print('Free: a = %0.2f(%0.2f); b = %0.2f(%0.2f); c = %0.2f(%0.2f); p-value = %0.2f '
          % (coeff[0], err[0], coeff[1], err[1], coeff[2], err[2], cdf))
    pl.plot(nLs, FreeLog(nLs, coeff[0], coeff[1], coeff[2]),
            label='Free', color='y')

    coeff, var_matrix = curve_fit(ZeroLog, Ls, Mis, (1.0, 1.0))
    err = np.sqrt(np.diagonal(var_matrix))
    dof = len(Ls) - len(coeff)
    chisq = sum(((Mis - ZeroLog(Ls, coeff[0], coeff[1])) / sigma)**2)
    cdf = special.chdtrc(dof, chisq)
    print('Zero: a = %0.2f(%0.2f); c = %0.2f(%0.2f); p-value = %0.2f'
          % (coeff[0], err[0], coeff[1], err[1], cdf))
    pl.plot(nLs, ZeroLog(nLs, coeff[0], coeff[1]), label='Zero', color='g')
    pl.tight_layout()

    coeff, var_matrix = curve_fit(HalfLog, Ls, Mis, (1.0, 1.0))
    err = np.sqrt(np.diagonal(var_matrix))
    dof = len(Ls) - len(coeff)
    chisq = sum(((Mis - HalfLog(Ls, coeff[0], coeff[1])) / sigma)**2)
    cdf = special.chdtrc(dof, chisq)
    print('Half: a = %0.2f(%0.2f); c = %0.2f(%0.2f); p-value = %0.2f'
          % (coeff[0], err[0], coeff[1], err[1], cdf))
    pl.plot(nLs, HalfLog(nLs, coeff[0], coeff[1]), label='Half', color='b')
    pl.tight_layout()

    coeff, var_matrix = curve_fit(OneLog, Ls, Mis, (1.0, 1.0))
    err = np.sqrt(np.diagonal(var_matrix))
    dof = len(Ls) - len(coeff)
    chisq = sum(((Mis - OneLog(Ls, coeff[0], coeff[1])) / sigma)**2)
    cdf = special.chdtrc(dof, chisq)
    print('Unity: a = %0.2f(%0.2f); c = %0.2f(%0.2f); p-value = %0.2f'
          % (coeff[0], err[0], coeff[1], err[1], cdf))
    pl.plot(nLs, OneLog(nLs, coeff[0], coeff[1]), label='Unity', color='r')
    pl.tight_layout()

    pl.legend()
    pl.show()
def stellar_distribution_plot(frame, fit):
    # Plot the data and model
    plt.errorbar(frame["bins"], frame["bin_count"],
                 xerr=frame["bins_error"], yerr=frame["count_error"],
                 fmt="o", ms=0.5, color="black", ecolor="grey",
                 elinewidth=0.25, label="Gaia DR2", zorder=1)
    # Density distribution model
    summation_s_0 = frame["bin_count"].values.max()
    l_c, r_b = 3, 2.5
    xb = np.linspace(0, 16, 50)
    yb = (summation_s_0 * l_c) / np.sqrt(((xb - r_b) ** 2) + (l_c ** 2))
    xp = np.array([frame['bins'].iloc[0], frame['bins'].iloc[14],
                   frame['bins'].iloc[-1]])
    yp = np.array([frame['bin_count'].iloc[0], frame['bin_count'].iloc[14],
                   frame['bin_count'].iloc[-1]])
    params = fix_points(fit, xb, yb, xp, yp)
    ppoly = np.polynomial.Polynomial(params)
    pdata = np.linspace(0, 16, 50)
    plt.plot(pdata, ppoly(pdata), ms=1, color="blue",
             label="Best fit density distribution model", zorder=2)
    # Plot characteristics
    plt.title("Stellar Density Distribution")
    plt.legend(loc="upper right")
    plt.xlabel("Galactic radius ($kpc$)")
    plt.ylabel("Density distribution ($arbitrary$ $units$)")
    plt.xlim(0, 16)  # Adjustment for galactic radius
    plt.ylim(0, frame["bin_count"].values.max()
             + (1 / 10 * frame["bin_count"].values.max()))
    plt.show()  # Stellar disc model against observational data (Gaia)
    return
def nova_plot():
    erg2mev = 624151.

    fig = plot.figure()
    yrange = [1e-6, 2e-4]
    xrange = [1e-1, 1e5]
    plot.fill_between([0.2, 10e3], [yrange[1], yrange[1]],
                      [yrange[0], yrange[0]], facecolor='yellow',
                      interpolate=True, color='yellow', alpha=0.5)
    plot.annotate('AMEGO', xy=(3, 9e-5), xycoords='data',
                  fontsize=26, color='black')

    lat = ascii.read("data/NMon2012.LAT.dat",
                     names=['energy', 'en_low', 'en_high', 'flux',
                            'flux_err', 'tmp'])
    plot.scatter(lat['energy'], lat['flux'] * erg2mev, color='red')
    plot.errorbar(lat['energy'], lat['flux'] * erg2mev,
                  xerr=[lat['en_low'], lat['en_high']],
                  yerr=lat['flux_err'] * erg2mev,
                  ecolor='red', capsize=0, fmt='none')

    latul = ascii.read("data/NMon2012.LAT.limits.dat",
                       names=['energy', 'en_low', 'en_high', 'flux',
                              'tmp1', 'tmp2', 'tmp3', 'tmp4'])
    plot.errorbar(latul['energy'], latul['flux'] * erg2mev,
                  xerr=[latul['en_low'], latul['en_high']],
                  yerr=0.5 * latul['flux'] * erg2mev,
                  uplims=True, ecolor='red', capsize=0, fmt='none')
    plot.scatter(latul['energy'], latul['flux'] * erg2mev, color='red')

    leptonic = ascii.read("data/sp-NMon12-IC-best-fit-1MeV-30GeV.txt",
                          names=['energy', 'flux'], data_start=1)
    hadronic = ascii.read("data/sp-NMon12-pi0-and-secondaries.txt",
                          names=['energy', 'flux1', 'flux2'], data_start=1)
    plot.plot(leptonic['energy'], leptonic['flux'] * erg2mev, 'r--',
              color='black', lw=2, label='Leptonic')
    plot.plot(hadronic['energy'], hadronic['flux2'] * erg2mev,
              color='black', lw=2, label='Hadronic+Secondary Leptons')

    plot.legend(loc='upper right', fontsize='small', frameon=False,
                framealpha=0.5)
    plot.xscale('log')
    plot.yscale('log')
    plot.ylim(yrange)
    plot.xlim(xrange)
    plot.xlabel(r'Energy (MeV)')
    plot.ylabel(r'Energy$^2 \times $ Flux (Energy) (erg cm$^{-2}$ s$^{-1}$)')
    plot.title('Nova V339 Del 2013')
    plot.savefig('Nova_SED.png', bbox_inches='tight')
    plot.savefig('Nova_SED.eps', bbox_inches='tight')
    plot.show()
    plot.close()
def process_mmd_experiment(width_class):
    results_file_name = (mmd_experiment.results_file_stub + "_"
                         + width_class + ".pickle")
    results = pickle.load(open(results_file_name, 'rb'))
    callibration_mmds = np.loadtxt('results/callibration_mmds.csv')
    mean_callibration = np.mean(callibration_mmds)

    mmd_squareds = results['mmd_squareds']
    hidden_layer_numbers = results['hidden_layer_numbers']
    hidden_unit_numbers = results['hidden_unit_numbers']
    num_repeats = mmd_squareds.shape[2]

    mean_mmds = np.mean(mmd_squareds, axis=2)
    std_mmds = np.std(mmd_squareds, axis=2) / np.sqrt(num_repeats)

    plt.figure()
    for hidden_layer_number, index in zip(hidden_layer_numbers,
                                          range(len(hidden_layer_numbers))):
        if hidden_layer_number == 1:
            layer_string = ' hidden layer'
        else:
            layer_string = ' hidden layers'
        line_name = str(hidden_layer_number) + layer_string
        plt.errorbar(hidden_unit_numbers, mean_mmds[:, index],
                     yerr=2. * std_mmds[:, index], label=line_name)
    plt.xlabel('Number of hidden units per layer')
    plt.xlim([0, 60])
    plt.ylabel('MMD SQUARED(GP, NN)')
    plt.ylim([0., 0.02])
    plt.axhline(y=mean_callibration, color='r', linestyle='--')
    plt.legend()
    output_file_name = "../figures/mmds_" + width_class + ".pdf"
    plt.savefig(output_file_name)
    embed()
    plt.show()
def sanity_2dCircularFit(self):
    import numpy as np
    import matplotlib.pylab as plt
    from PyAstronomy import funcFit as fuf

    # Get the circular model and assign
    # parameter values
    c = fuf.Circle2d()
    c["r"] = 1.0
    c["t0"] = 0.0
    c["per"] = 3.0

    # Evaluate the model at a number of
    # time stamps
    t = np.linspace(0.0, 10.0, 20)
    pos = c.evaluate(t)

    # Add some error to the "measurement"
    pos += np.reshape(np.random.normal(0.0, 0.2, pos.size), pos.shape)
    err = np.reshape(np.ones(pos.size), pos.shape) * 0.2

    # Define free parameters and fit the model
    c.thaw(["r", "t0", "per"])
    c.fit(t, pos, yerr=err)
    c.parameterSummary()

    # Evaluate the model at a larger number of
    # points for plotting
    tt = np.linspace(0.0, 10.0, 200)
    model = c.evaluate(tt)

    # Plot the result
    plt.errorbar(pos[::, 0], pos[::, 1], yerr=err[::, 1],
                 xerr=err[::, 0], fmt='bp')
    plt.plot(model[::, 0], model[::, 1], 'r--')
def plotRocCurves(file_legend):
    pylab.clf()
    pylab.figure(1)
    pylab.xlabel('1 - Specificity', fontsize=12)
    pylab.ylabel('Sensitivity', fontsize=12)
    pylab.title("Need for Referral")
    pylab.grid(True, which='both')
    pylab.xticks([i / 10.0 for i in range(1, 11)])
    pylab.yticks([i / 10.0 for i in range(0, 11)])
    pylab.tick_params(axis="both", labelsize=15)

    for file, legend in file_legend:
        # read in text mode so split()/float() receive str, not bytes
        points = open(file, "r").readlines()
        x = [float(p.split()[0]) for p in points]
        y = [float(p.split()[1]) for p in points]
        dev = [float(p.split()[2]) for p in points]

        x = [0.0] + x
        y = [0.0] + y
        dev = [0.0] + dev

        auc = np.trapz(y, x) * 100
        aucDev = np.trapz(dev, x) * 100

        pylab.grid()
        pylab.errorbar(x, y, yerr=dev, fmt='-')
        pylab.plot(x, y, '-', linewidth=1.5,
                   label=legend + u" (AUC = {0:0.1f}% \xb1 {1:0.1f}%)".format(
                       auc, aucDev))

    pylab.legend(loc=4, borderaxespad=0.4, prop={'size': 12})
    pylab.savefig("referral/referral-curves.pdf", format='pdf')
def sanity_example_binningx0dt_example2(self):
    """
    Checking `binningx0dt` example 2.
    """
    import numpy as np
    import matplotlib.pylab as plt
    from PyAstronomy.pyasl import binningx0dt

    # Generate some data
    x = np.arange(-100, 999)
    # Create some holes in the data
    x = np.delete(x, range(340, 490))
    x = np.delete(x, range(670, 685))
    x = np.delete(x, range(771, 779))
    y = np.sin(x / 100.)
    y += np.random.normal(0, 0.1, len(x))

    # Bin using bin width of 27 and starting at minimum x-value.
    # Use beginning of bin as starting value.
    r1, dt1 = binningx0dt(x, y, dt=27, x0=min(x), useBinCenter=True)

    # As previously, but use the mean x-value in the bins to produce the
    # rebinned time axis.
    r2, dt2 = binningx0dt(x, y, dt=27, x0=min(x), useMeanX=True)

    print("Median shift between the time axes: ",
          np.median(r1[::, 0] - r2[::, 0]))
    print(" -> Time bins are not aligned due to 'forced' positioning of")
    print("    the first axis.")

    # Plot the output
    plt.plot(x, y, 'b.-')
    plt.errorbar(r1[::, 0], r1[::, 1], yerr=r1[::, 2], fmt='kp--')
    plt.errorbar(r2[::, 0], r2[::, 1], yerr=r2[::, 2], fmt='rp--')
def method_errorbar(data, xlabels, line_color=default_color, med_color=None,
                    legend=None, y_offset=0.0, alpha=0.05):
    # NOTE: relies on a module-level `ax` (and the percentile/ppl imports)
    # being in scope, as in the original.
    if not med_color:
        med_color = line_color
    ax.grid(axis='x', color='0.9', linestyle='-', linewidth=0.2)
    ax.set_axisbelow(True)
    n, m = data.shape
    medians = [percentile(data[:, i], 50) for i in range(m)]
    # asymmetric (1 - alpha) confidence interval around each median
    xerr = [[medians[i] - percentile(data[:, i], 100 * (alpha / 2.)),
             percentile(data[:, i], 100 * (1 - alpha / 2.)) - medians[i]]
            for i in range(m)]
    xerr = np.array(xerr).transpose()
    y_marks = np.array(range(len(xlabels))) - y_offset
    plt.errorbar(y=y_marks, x=medians, xerr=xerr, fmt='|', capsize=0,
                 color=line_color, ecolor=line_color, elinewidth=0.3,
                 markersize=2)
    plt.xlabel('% cases used', fontsize=8)
    ax.tick_params(axis='x', which='both', labelsize=8)
    ax.set_yticks(np.array(range(len(xlabels))))
    ax.set_yticklabels(xlabels, fontsize=6)
    plt.ylim((min(y_marks) - 0.5, max(y_marks) + 0.5))
    spines_to_remove = ['top', 'right', 'left']
    for spine in spines_to_remove:
        ax.spines[spine].set_visible(False)
    ppl.utils.remove_chartjunk(ax, ['top', 'right', 'bottom'],
                               show_ticks=False)
    if legend:
        rect = legend.get_frame()
        rect.set_facecolor(light_grey)
        rect.set_linewidth(0.0)
def plot_table(*positional_parameters, errorbars=None, xrange=None,
               yrange=None, title="", xtitle="", ytitle="", show=1,
               legend=None, color=None):
    n_arguments = len(positional_parameters)
    if n_arguments == 0:
        return

    fig = plt.figure()
    if n_arguments == 1:
        y = positional_parameters[0]
        x = np.arange(y.size)
        plt.plot(x, y, label=legend)
    elif n_arguments == 2:
        x = positional_parameters[0]
        y = positional_parameters[1]
        if len(y.shape) == 1:
            y = np.reshape(y, (1, y.size))
        if isinstance(legend, str):
            legend = [legend]
        if isinstance(color, str):
            color = [color]
        for i in range(y.shape[0]):
            if legend is None:
                ilegend = None
            else:
                ilegend = legend[i]
            if color is None:
                icolor = None
            else:
                icolor = color[i]
            if errorbars is None:
                plt.plot(x, y[i], label=ilegend, color=icolor)
            else:
                plt.errorbar(x, y[i], yerr=errorbars[i], label=ilegend,
                             color=icolor)
    else:
        raise Exception("Incorrect number of arguments")

    plt.xlim(xrange)
    plt.ylim(yrange)
    if legend is not None:
        ax = plt.subplot(111)
        ax.legend(bbox_to_anchor=(1.1, 1.05))
    plt.title(title)
    plt.xlabel(xtitle)
    plt.ylabel(ytitle)
    if show:
        plt.show()
    return fig
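# Usage sketch for plot_table (made-up data): two curves over a shared x
# axis, each row of `y` plotted with its own error bars, legend, and color.
def demo_plot_table():
    x = np.linspace(0, 1, 20)
    y = np.vstack([np.sin(x), np.cos(x)])
    errs = np.full_like(y, 0.05)
    return plot_table(x, y, errorbars=errs, legend=['sin', 'cos'],
                      color=['b', 'r'], title='demo', xtitle='x',
                      ytitle='y', show=0)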
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('losses', nargs='+', type=str)
    parser.add_argument('-o', '--output', default='log-loss', type=str)
    # parser.add_argument('--dataset', default='Test', type=str)
    parser.add_argument('--captions', nargs='+', type=str)
    args = parser.parse_args()

    rs = np.random.RandomState(0)
    vz = VzLog(args.output)

    plt.figure()
    for i, loss_fn in enumerate(args.losses):
        print('file', loss_fn)
        data = dd.io.load(loss_fn)
        iters = data['iterations'][0]  # renamed from `iter` (shadows builtin)
        losses = data['losses'].mean(0)
        losses_std = data['losses'].std(0)
        if args.captions:
            caption = args.captions[i]
        else:
            caption = loss_fn
        caption = '{} ({:.3f})'.format(caption, losses[-1])
        plt.errorbar(iters, losses, yerr=losses_std, label=caption)
    plt.legend()
    plt.ylabel('Loss')
    plt.xlabel('Iteration')
    vz.savefig()
    plt.close()

    plt.figure()
    for i, loss_fn in enumerate(args.losses):
        print('file', loss_fn)
        data = dd.io.load(loss_fn)
        iters = data['iterations'][0]
        rates = 100 * (1 - data['rates'].mean(0))
        rates_std = 100 * data['rates'].std(0)
        if args.captions:
            caption = args.captions[i]
        else:
            caption = loss_fn
        caption = '{} ({:.2f}%)'.format(caption, rates[-1])
        plt.errorbar(iters, rates, yerr=rates_std, label=caption)
    plt.legend()
    plt.ylabel('Error rate (%)')
    plt.xlabel('Iteration')
    vz.savefig()
    plt.close()
def errorplot(x, y, minconf, maxconf, **kwargs):
    '''
    e.g.
    g = sns.FacetGrid(attr, col='run', hue='subj_pos', col_wrap=5)
    g = g.map(errorplot, 'n_diff_intervening', 'errorprob',
              'minconf', 'maxconf').add_legend()
    '''
    plt.errorbar(x, y, yerr=[y - minconf, maxconf - y], fmt='o-', **kwargs)
def plot(x, y, error):
    plt.plot(x, y, color='blue', linestyle='dashed')
    plt.errorbar(x, y, yerr=error)
    plt.title('Mean Number of Inflammation Bouts Per Day Of Trial')
    plt.ylabel('Mean Number of Inflammation Bouts Reported Per Patient')
    plt.xlabel('Days in Trial')
    plt.grid(color='k', linestyle='--', linewidth=0.1)
    plt.show()
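# Usage sketch for the trial plot above (synthetic numbers; assumes
# numpy as np is available at module level).
def demo_plot():
    days = np.arange(1, 11)
    mean_bouts = np.array([1.2, 1.5, 2.1, 2.8, 3.0, 2.9, 2.4, 1.8, 1.1, 0.7])
    stderr = np.full(days.size, 0.3)
    plot(days, mean_bouts, stderr)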
def compareMasses(his, hiserr, mine, myerr, hisfield, hisid, myfield, myid, z):
    import matplotlib.pylab as plt
    a = []
    b = []
    c = []
    d = []
    e = []
    for i in range(0, np.shape(mine)[0]):
        if mine[i] != 0:
            # his_id_idx = np.where(hisid == myid[i])[0]
            pos_id_idx = np.where(hisid == myid[i])[0]
            if len(pos_id_idx) == 1:
                his_id_idx = int(pos_id_idx)
            else:
                pos_field_idx = np.where(hisfield == myfield[i])[0]
                for ii in range(0, len(pos_field_idx)):
                    for iii in range(0, len(pos_id_idx)):
                        if pos_field_idx[ii] == pos_id_idx[iii]:
                            his_id_idx = int(pos_id_idx[iii])
            # print(str(myid[i]), str(myfield[i]), str(hisid[his_id_idx]), str(hisfield[his_id_idx]))
            assert myfield[i] == hisfield[his_id_idx]
            assert myid[i] == hisid[his_id_idx]
            if his[his_id_idx] != -999:
                a.append(his[his_id_idx])
                b.append(hiserr[his_id_idx])
                c.append(mine[i] - np.log10(1.0 + z[i]))      # -np.log10(2.75)
                d.append(myerr[i, 0] - np.log10(1.0 + z[i]))  # -np.log10(2.75)
                e.append(myerr[i, 1] - np.log10(1.0 + z[i]))  # -np.log10(2.75)
    # since errors are just percentile, need difference from median
    d = np.asarray(c) - np.asarray(d)
    e = np.asarray(e) - np.asarray(c)
    plt.errorbar(a, c, xerr=b, yerr=[d, e], fmt='o', color='b',
                 capsize=0, alpha=0.50)
    # plot the y=x line
    x = np.linspace(np.min((a, c)), np.max((a, c)), 10)
    plt.plot(x, x, 'k--')
    # # plot the best fit line
    # m, b = np.polyfit(a, c, 1)
    # plt.plot(x, m*x + b, 'r--')
    plt.xlabel("Alex's log10(mass)")
    plt.ylabel('MCSED log10(mass) / (1+z)')
    plt.title("Alex's Masses vs MCSED Masses/(1+z)")
    # plt.legend(['With Neb. Emis.'], loc=0)  # , 'W/o Neb. Emis'], loc=0)
    plt.show()
def plot_points(data, xpower=0): """Make a nice plot """ import matplotlib.pylab as plt x = data[0] y = data[1] * x ** xpower ydn = data[2] * x ** xpower yup = data[3] * x ** xpower plt.errorbar(x, y, [ydn, yup], fmt='o', color='k')
def plot_barchart(values, main='', labels=None, errors=None):
    """Plot a bar chart and put labels (list) on the x-axis.
    If 'errors' are given, show an error bar for each value.
    """
    from matplotlib.pylab import errorbar
    from numpy import arange  # import added: `arange` is used below
    create_barchart(values, main=main, labels=labels)
    if errors is not None:
        errorbar(arange(values.size) + 0.5, values, errors, fmt='o')
    show_plots()
def plot_points(data, xpower=0): """Make a nice plot """ import matplotlib.pylab as plt x = data[0] y = data[1] * x**xpower ydn = data[2] * x**xpower yup = data[3] * x**xpower plt.errorbar(x, y, [ydn, yup], fmt='o', color='k')
def plot_pop_size_across_time(params, ymin=ymin, ymax=ymax):
    offset = step_size / 250.
    x_offsets = [0, offset, 2 * offset]
    num_xticks = 11
    ax = sns.tsplot(time="t", value="log2_pop_size", unit="sim_num",
                    condition="policy", color=policy_colors,
                    err_style=None, data=df)
    for policy_num, policy in enumerate(policy_colors):
        error_df = summary_df[summary_df["policy"] == policy]
        c = policy_colors[policy]
        assert (len(error_df["t"]) == len(time_obj.t) ==
                len(error_df["log2_pop_size"]["mean"]))
        plt.errorbar(error_df["t"] + x_offsets[policy_num],
                     error_df["log2_pop_size"]["mean"],
                     yerr=error_df["log2_pop_size"]["std"].values,
                     color=c, linestyle="none", marker="o",
                     capsize=2, capthick=1, clip_on=False)
    plt.xlabel(r"$t$")
    plt.ylabel(r"Pop. size ($\log_{2}$)")
    # #\theta_{\mathsf{Gal}\rightarrow\mathsf{Glu}}
    print("PARAMS: ", params)
    plt.title(r"$P_{0} = %d$, $\theta_{\mathsf{Glu}\rightarrow \mathsf{Glu}} = %.2f$, "
              r"$\theta_{\mathsf{Gal}\rightarrow \mathsf{Glu}} = %.2f$, "
              r"$\mu_{\mathsf{Glu}} = %.2f, \mu_{\mathsf{Gal}} = %.2f$, "
              r"$\mu_{\mathsf{Mis}} = %.2f$, lag = %d, "
              r"%d iters" % (sum(params["init_pop_size"]),
                             params["true_gluc_to_gluc"],
                             params["true_galac_to_gluc"],
                             params["gluc_growth_rate"],
                             params["galac_growth_rate"],
                             params["mismatch_growth_rate"],
                             params["decision_lag_time"],
                             params["num_sim_iters"]),
              fontsize=8)
    c = 0.5
    plt.xlim([min(df["t"]) - c, max(df["t"]) + c])
    if ymin is None:
        ymin = int(np.log2(sum(params["init_pop_size"])))
    if ymax is None:
        ymax = int(error_df["log2_pop_size"]["mean"].max() +
                   error_df["log2_pop_size"]["std"].max()) + 1
    plt.ylim([ymin, ymax])
    plt.xlim([time_obj.t.min(), time_obj.t.max()])
    # plt.yticks(range(ymin, ymax + 1))
    plt.xticks(np.linspace(time_obj.t.min(), time_obj.t.max(), num_xticks))
    sns.despine(trim=True, offset=time_obj.step_size * 2)
def run_correlation_model(gp, mp, Xs, Ys1, Ys2, Ys1err, Ys2err,
                          mcmc_samples=500):
    xbins = np.linspace(0.0, 2.0, 11)
    xbins = [[xbins[i], xbins[i + 1]] for i in range(len(xbins) - 1)]
    rmed = []
    rmin = []
    rmax = []
    x = []
    for xbin in xbins:
        mask = Xs.T[0] < xbin[1]
        mask *= Xs.T[0] > xbin[0]
        r, sig_1, sig_2 = estimate_property_covariance(
            gp, mp, Xs[mask], Ys1[mask], Ys2[mask], Ys1err[mask],
            Ys2err[mask], nu=4.0, mcmc_samples=mcmc_samples)
        rmed += [np.percentile(r, 50)]
        rmin += [np.percentile(r, 50) - np.percentile(r, 16)]
        rmax += [np.percentile(r, 84) - np.percentile(r, 50)]
        x += [(xbin[1] + xbin[0]) / 2.0]
    plt.errorbar(np.array(x), np.array(x) * 0.0 + 0.5, color='orange',
                 lw=5.0, label='Input Correlation')
    plt.errorbar(np.array(x), rmed, yerr=[rmin, rmax], fmt='o',
                 label='Inferred')
    plt.legend(loc=4, prop={'size': 18})
    plt.xlabel('r', size=23)
    plt.ylabel('correlation', size=23)
    plt.ylim([-1, 1])
    plt.grid()
    check_directory('./plots/')
    plt.savefig('./plots/inferred_correlations_fake_sim.png',
                bbox_inches='tight')
def dynamo_spread(self, lethality_percent):
    infected_percentage_points = 51
    iterations = 200
    results = list()
    percent_dead_results = [[] for i in range(infected_percentage_points)]
    percent_infected_results = [[] for i in range(infected_percentage_points)]
    for iter in range(iterations):
        dead = list()
        infected = list()
        num = random.randint(self.min, self.max)
        if self.graph_type == "BA":
            edges = self.gen_Barabasi_Albert(num, 4, 6)
        else:
            edges = self.gen_Watts_Strogatz(num, 10, 0.5)
        for pre_infection_percent in range(infected_percentage_points):
            percent = pre_infection_percent / float(infected_percentage_points - 1)
            verts = [0] * num
            if self.infection_type == "degree":
                verts = self.gen_infection_degreeranked(verts, edges,
                                                        int(percent * num))
            elif self.infection_type == "random":
                verts = self.gen_infection_random(verts, edges,
                                                  int(percent * num))
            elif self.infection_type == "eigen":
                verts = self.gen_infection_eigenranked(verts, edges,
                                                       int(percent * num))
            elif self.infection_type == "betweenness":
                verts = self.gen_infection_betweennessranked(verts, edges,
                                                             int(percent * num))
            ftime, verts_out = self.spread(verts, edges, lethality_percent)
            percent_dead = len([vertex for vertex in verts_out
                                if vertex == 3]) / float(num)
            percent_affect = len([vertex for vertex in verts_out
                                  if vertex != 0]) / float(num)
            percent_dead_results[pre_infection_percent].append(percent_dead)
            percent_infected_results[pre_infection_percent].append(percent_affect)
    # percent_dead_results = [percent_dead_results[m].append(dead[m]) for m in range(infected_percentage_points)]
    # percent_infected_results = [percent_infected_results[m].append(infected[m]) for m in range(infected_percentage_points)]
    # print(percent_dead_results)
    # print(percent_infected_results)
    if self.plot:
        x = [location / float(infected_percentage_points - 1)
             for location in range(0, infected_percentage_points)]
        figure = plt.figure(1)
        ax1 = figure.add_axes((.1, .4, .8, .5))
        plt.clf()
        yone = [mean(item) for item in percent_dead_results]
        ytwo = [mean(item) for item in percent_infected_results]
        print(percent_infected_results)
        errorone = [2.58 * std(item) / sqrt(len(item))
                    for item in percent_dead_results]
        errortwo = [2.58 * std(item) / sqrt(len(item))
                    for item in percent_infected_results]
        print(x)
        print(ytwo, errortwo)
        one = pylab.errorbar(x, yone, yerr=errorone, fmt='ro')
        two = pylab.errorbar(x, ytwo, yerr=errortwo, fmt='bo')
        txt = ", ".join(["Lethality of Infection: " + str(percent),
                         "Averaged over " + str(iterations) + " distinct graphs\n",
                         "99% Confidence Interval",
                         "Min Graph Size: " + str(self.min),
                         "Max Graph Size: " + str(self.max),
                         "Infecting type: " + self.infection_type + "\n",
                         "Graph type: " + self.graph_type,
                         "Mortality and Spread vs Infected Size"])
        plt.title(txt, fontsize=10)
        plt.legend([one, two], ["Dead", "Infected"])
        plt.xlabel("Percent Infected")
        plt.ylabel("Percent of Graph")
        figure.savefig(", ".join([str(dt.now()), self.infection_type,
                                  self.graph_type]) + "_dynamo_spread.png")
        # plt.show()
    return percent_dead_results, percent_infected_results
def plotData(date, loc, uncert, fil, dir):
    os.chdir(dir)
    fig = plt.figure()
    plt.scatter(date, loc)
    uncert = [u * 3 for u in uncert]
    plt.errorbar(date, loc, uncert, ls='none')
    plt.savefig(fil + '.png')
    plt.close(fig)
    os.chdir('..')
    return
def plotMultiStab(data_file, matrix_file):
    data_dict = {}
    stab_mat = np.load(matrix_file)
    for line in open(data_file, "r"):
        key, val = line.rstrip("\n").split("\t")
        data_dict[key] = val
    accord_err = np.std(stab_mat, axis=1)
    accord_mean = np.mean(stab_mat, axis=1)
    plt.errorbar(np.arange(len(stab_mat)), accord_mean, yerr=accord_err)
    plt.show()
def clusterRTToverTime(rttEstimates, timeBin=60,
                       outputDirectory="./rttDistributions/",
                       minEstimates=10, plot=True, logNormal=True):
    """For each IP address, find the different RTT distributions for each
    time bin and plot the average value of each distribution.
    """
    # NOTE: timeBin default changed from the string "60" to the int 60;
    # it is used as a range() step below, where a string raises TypeError.
    # for each IP in the traffic
    ips = rttEstimates.index.unique()
    for ip in ips:
        start = rttEstimates[rttEstimates.index == ip].start_sec.min()
        end = rttEstimates[rttEstimates.index == ip].start_sec.max()
        dataIP = rttEstimates[rttEstimates.index == ip]
        x = []
        y = []
        z = []
        i = 0
        for ts in range(start, end, timeBin):
            if logNormal:
                data = np.log10(dataIP[(dataIP.start_sec >= ts) &
                                       (dataIP.start_sec < ts + timeBin)].rtt)
            else:
                data = dataIP[(dataIP.start_sec >= ts) &
                              (dataIP.start_sec < ts + timeBin)].rtt
            # Look only at flows containing a certain number of RTT estimates
            if len(data) < minEstimates:
                sys.stderr.write("Ignoring data!! not enough samples!")
                continue
            # Cluster the data
            vdp = dpgmm(data)
            if vdp is None:
                continue
            params = NIWparam2Nparam(vdp)
            if logNormal:
                mean, std = logNormalMeanStdDev(params[0, :], params[1, :])
            else:
                mean = params[0, :]
                std = params[1, :]
            for mu, sig in zip(mean, std):
                y.append(mu)
                z.append(sig)
                x.append(ts)
        # Plot the clusters characteristics in a file
        plt.figure()
        plt.errorbar(x, y, yerr=z, fmt="o")
        plt.grid(True)
        if logNormal:
            plt.savefig("{0}/{1}_timeBin{2}sec_logNormal.eps".format(
                outputDirectory, ip, timeBin))
        else:
            plt.savefig("{0}/{1}_timeBin{2}sec_normal.eps".format(
                outputDirectory, ip, timeBin))
def plot_hypothesis2():
    with book_plots.figsize(y=2.5):
        plt.figure()
        plt.errorbar(range(1, 11),
                     [169, 170, 169, 171, 170, 171, 169, 170, 169, 170],
                     xerr=0, yerr=6, fmt='bo', capthick=2, capsize=10)
        plt.plot([1, 10], [169, 170.5], color='g', ls='--')
        plt.xlim(0, 11)
        plt.ylim(150, 185)
        plt.xlabel('day')
        plt.ylabel('lbs')
def plot_table(table):
    table1 = np.array(table)
    # check the shape before any 2-D indexing (the original sorted first,
    # and indexed the raw `table` argument rather than the array copy)
    if len(table1.shape) != 2:
        raise bad_table
    table1 = table1[table1[:, 0].argsort()]  # sort rows by the x column
    if table1.shape[1] >= 3:
        plt.errorbar(table1[:, 0], table1[:, 1], yerr=table1[:, 2], fmt='o')
    elif table1.shape[1] >= 2:
        plt.plot(table1[:, 0], table1[:, 1], marker='o')
    else:
        raise bad_table
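# Usage sketch for this plot_table variant: an N x 3 table of (x, y, yerr)
# rows, sorted by the function before plotting (fake values).
def demo_plot_table_rows():
    import numpy as np
    table = np.array([[2.0, 4.1, 0.2],
                      [1.0, 1.1, 0.1],
                      [3.0, 8.9, 0.3]])
    plot_table(table)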
def plot_vector(plt, title, na, label=''):
    # plt is the plot object
    # title is the plot title
    # na is the numpy array to be plotted
    avgIndex = 0
    stdIndex = 1
    y = na[:, avgIndex]
    err = na[:, stdIndex]
    plt.title(title)
    plt.errorbar(range(1, len(y) + 1), y, yerr=err)
    plt.xlabel(label)
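# Usage sketch for plot_vector: column 0 holds the means and column 1 the
# standard deviations (illustrative values; assumes numpy as np and
# matplotlib.pylab as plt are imported at module level).
def demo_plot_vector():
    na = np.array([[1.0, 0.1],
                   [1.4, 0.2],
                   [1.9, 0.15]])
    plot_vector(plt, 'demo means', na, label='index')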