def sanity_exampleExoplanetEU2(self):
    """
    Example of ExoplanetEU2
    """
    from PyAstronomy import pyasl
    import matplotlib.pylab as plt

    # Instantiate the ExoplanetEU2 object
    v = pyasl.ExoplanetEU2()
    # Show the available data
    v.showAvailableData()
    print()

    # Get a list of all available column names
    acs = v.getColnames()
    print("Available column names: " + ", ".join(acs))
    print()

    # Select data by planet name (returns a dictionary)
    print(v.selectByPlanetName("CoRoT-2 b"))
    print()

    # Get all data as an astropy table
    at = v.getAllDataAPT()

    # Export all data as a pandas DataFrame (named df to avoid shadowing
    # the conventional `pd` alias for pandas)
    df = v.getAllDataPandas()

    # Plot mass vs. SMA
    plt.title("Mass vs. SMA")
    plt.xlabel("[" + v.getUnitOf("mass") + "]")
    plt.ylabel("[" + v.getUnitOf("semi_major_axis") + "]")
    plt.loglog(at["mass"], at["semi_major_axis"], 'b.')
def plot_ncp_MQI_size(isoperimetry_R_vs_size_R, isoperimetry_MQI_vs_size_R): """ Plots the MQI Network Community Profile, i.e., minimum isoperimetry vs size. """ lists = sorted(isoperimetry_R_vs_size_R.items()) x, y = zip(*lists) fig = plt.figure() ax = fig.add_subplot(111) plt.loglog(x, y) lists = sorted(isoperimetry_MQI_vs_size_R.items()) x, y = zip(*lists) plt.loglog(x, y) ax.legend(["For given sets", "After applying MQI"]) ax.set_xlabel('Size') ax.set_ylabel('Minimum isoperimetry') ax.set_title('Min. Isoperimetry vs. Size NCP') plt.show()
def plot_refinement(sets, xkey, ykey, name):
    '''
    Plot computational time for jacobi, seidel and sor
    '''
    fig, ax = plt.subplots()  # plt.figure() before plt.subplots() created a redundant empty figure
    ax.spines['right'].set_visible(True)
    ax.spines['top'].set_visible(True)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    #plt.axis([100, 10000, 1.0e-5, 1.0e-1])

    # plot solutions on the same graph
    for key, data in sets.items():
        plt.loglog(data[xkey], data[ykey], '-', lw=3, mec='black', label=key)

    # Axis formatting
    plt.legend(loc='lower left')
    plt.xlabel(xkey)
    plt.ylabel(ykey)
    plt.savefig(name, bbox_inches='tight', pad_inches=0.05)
def CCDF(G, gType='loglog', fmt='', xlabel='Degree, k', ylabel='Pr(K>=k)',
         title=None, **kwargs):
    degs = nx.degree(G)
    kmax = 0
    karr = []
    for _, k in degs:
        karr.append(k)
        if k > kmax:
            kmax = k
    c, b = np.histogram(karr, bins=[i for i in range(kmax + 2)], density=True)
    a = np.cumsum(c)
    a = np.insert(a, 0, 0)
    # 1 - a[k] is the complementary CDF, Pr(K >= k)
    if gType == 'loglog':
        plt.loglog(b[1:-1], 1 - a[1:-1], fmt, **kwargs)
    elif gType == 'semilogx':
        plt.semilogx(b[1:-1], 1 - a[1:-1], fmt, **kwargs)
    elif gType == 'semilogy':
        plt.semilogy(b[1:-1], 1 - a[1:-1], fmt, **kwargs)
    elif gType == 'linear':
        plt.plot(b[1:-1], 1 - a[1:-1], fmt, **kwargs)
    else:
        raise ValueError(
            'gType was specified incorrectly. Please specify loglog, '
            'semilogx, semilogy, or linear.')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.show()
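# A minimal usage sketch for CCDF above, assuming networkx as nx and
# matplotlib.pylab as plt are imported as in the snippet. A Barabási–Albert
# graph has a heavy-tailed degree distribution, so its degree CCDF should be
# roughly a straight line on log-log axes; the graph parameters here are
# illustrative, not from the original source.
import networkx as nx

G = nx.barabasi_albert_graph(10000, m=3, seed=42)
CCDF(G, gType='loglog', fmt='b.', title='BA graph degree CCDF')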
def error(f, fdy, x0, y0, xn, maxExp, sol):
    res = np.zeros([len(Method), maxExp + 1])
    if sol is None:
        # no analytic solution given: use a fine BDF6 run as the reference
        intervals = 2**(maxExp + 4)
        sol = solve(f, fdy, x0, y0, (xn - x0) / intervals, intervals + 1,
                    Method.BDF6)[0::int(2**4)]
    else:
        sol = sol(np.linspace(x0, xn, 2**maxExp + 1))
    threads = []
    q = mp.Queue()
    for md in Method:
        t = ErrorProcess(f, fdy, x0, y0, xn, maxExp, sol, md, md.value - 1, q)
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
    while not q.empty():
        (idx, rs) = q.get()
        res[idx] = rs.transpose()[0]
    plt.loglog(2**np.linspace(0, maxExp, maxExp + 1), np.transpose(res))
    plt.legend([md.name for md in Method])  # use the enum member names as labels
    plt.tight_layout(pad=0)
    plt.show()
def experiment_plot(ctr, trials, success):
    """
    Pass in the ctr, trials and success returned by the `experiment` function
    and plot the Cumulative Number of Turns For Each Arm and the CTR's
    Convergence Plot side by side
    """
    T, K = trials.shape
    n = np.arange(T) + 1
    fig = plt.figure(figsize=(14, 7))

    plt.subplot(121)
    for i in range(K):
        plt.loglog(n, trials[:, i], label="arm {}".format(i + 1))

    plt.legend(loc="upper left")
    plt.xlabel("Number of turns")
    plt.ylabel("Number of turns/arm")
    plt.title("Cumulative Number of Turns For Each Arm")

    plt.subplot(122)
    for i in range(K):
        plt.semilogx(n, np.zeros(T) + ctr[i], label="arm {}'s CTR".format(i + 1))

    # note: this line assumes two arms (K = 2)
    plt.semilogx(n, (success[:, 0] + success[:, 1]) / n, label="CTR at turn t")

    # start the x-range at 1: zero is not representable on a log axis
    plt.axis([1, T, 0, 1])
    plt.legend(loc="upper left")
    plt.xlabel("Number of turns")
    plt.ylabel("CTR")
    plt.title("CTR's Convergence Plot")

    return fig
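# The `experiment` function referenced in the docstring above is not part of
# this collection. Purely as an illustration, here is a hypothetical
# epsilon-greedy stand-in that returns arrays with the (T, K) shapes
# experiment_plot expects; the original implementation may differ.
import numpy as np

def experiment(ctr, T, epsilon=0.1, seed=0):
    rng = np.random.default_rng(seed)
    K = len(ctr)
    trials = np.zeros((T, K))
    success = np.zeros((T, K))
    pulls = np.zeros(K)
    wins = np.zeros(K)
    for t in range(T):
        # explore until every arm has been tried, then mostly exploit
        if pulls.min() == 0 or rng.random() < epsilon:
            arm = int(rng.integers(K))
        else:
            arm = int(np.argmax(wins / pulls))
        pulls[arm] += 1
        wins[arm] += rng.random() < ctr[arm]
        trials[t] = pulls
        success[t] = wins
    return ctr, trials, success

fig = experiment_plot(*experiment([0.3, 0.5], T=10000))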
def plot_Category(Category, start, end, fres, freq_tot, total, figval=1): fig = plt.figure(figval) status = 0 for name, Items in Category.items(): cattotal = np.zeros(len(freq_tot)) for subname, conf in Items.items(): if 'equation' in conf: ans = plot_singleTheoN(conf, freq_tot, cattotal) status = ans[0] if status == 1: return status, ans[1] else: cattotal = ans[1] elif 'chan' in conf: ans = plot_singleRTN(conf, start, end, fres, freq_tot, cattotal) status = ans[0] if status == 1: return status, ans[1] else: cattotal = ans[1] plt.figure(figval) plt.loglog(freq_tot, cattotal, label=name) plt.ylabel(r'Displacement [m/$\sqrt{\rm Hz}$]') plt.xlabel('Frequency [Hz]') plt.legend() plt.grid(True) total = np.sqrt(total * total + cattotal * cattotal) return status, total
def fit_sim_frb(dm=1000.0, nfreq=1536, freq=(1219.70092773, 1519.50561523),
                scat_tau_ref=0.001, spec_ind=0.0, dt=8.192e-5, width=0.0001,
                save_data=False):
    # np.int was removed from NumPy; the builtin int does the same here
    ntime = int(2 * 4148 * dm * np.abs(freq[0]**-2 - freq[-1]**-2) / dt)
    print(nfreq, ntime)

    undispersed_arrival_time = 0.5 * ntime * dt
    undispersed_arrival_time -= 4148 * dm * (max(freq)**-2)

    sp = simpulse.single_pulse(ntime, nfreq, min(freq), max(freq),
                               dm, scat_tau_ref, width, 10.0, spec_ind,
                               undispersed_arrival_time)
    data_simpulse = np.zeros([nfreq, ntime])
    sp.add_to_timestream(data_simpulse, 0.0, ntime * dt)
    data_event = data_simpulse[::-1]

    data_dedisp = tools.dedisperse(data_event, dm, freq=(freq[1], freq[0]))

    # plt.imshow(data_event, aspect='auto')
    # plt.show()

    if save_data:
        np.save('./herial.npy', data_dedisp)

    f, s = fit_width_freq(data_dedisp, freq=(freq[1], freq[0]), plot=True)
    s = np.array(s)
    plt.plot(f, s / s[len(s) // 2], color='k', lw=3)
    plt.plot(f, (f / np.median(f))**-2.0)
    # plt.plot(f, (f/np.median(f))**-3.0)
    plt.plot(f, (f / np.median(f))**-4.0)
    plt.legend(['data', '-2', '-4'])
    plt.loglog()
    plt.show()
def listDistribution(self, lista, disfigdatafilepath=None, xlabel='Amount',
                     ylabel='Frequency', showfig=True, binsdivide=1):
    """IN: one list of numbers; save figure data or not; x/y labels; show the fig or not.
    OUT: the distribution data [x, y]"""
    # the bin count must be an integer in Python 3 (/ yields a float)
    hist = numpy.histogram(lista, bins=int(numpy.max(lista) // binsdivide))
    print(hist)
    x = hist[1][:len(hist[1]) - 1]
    y = hist[0]
    if disfigdatafilepath:
        self.savefigdata(disfigdatafilepath, x, y, errorbarlist=None,
                         title='', xlabel=xlabel, ylabel=ylabel, leglend=None)
    if showfig:
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.loglog(x, y, marker='o', linestyle='')  # semilogy
        plt.show()
        plt.close()
    return [x, y]
def plot_results(xdata, ydata, labels, xlabel='', ylabel='', title='',
                 linestyles=None, logy=False, logx=False, legend_loc=0,
                 ylim=None, save_plot=False, filename=''):
    if linestyles is None:
        linestyles = ['-'] * len(xdata)  # zip() with None would raise a TypeError
    for x, y, linestyle in zip(xdata, ydata, linestyles):
        # pick the scale per curve; the original re-plotted the whole
        # transposed arrays on every pass through the loop
        if logy and not logx:
            plt.semilogy(x, y, linestyle=linestyle)
        elif logx and not logy:
            plt.semilogx(x, y, linestyle=linestyle)
        elif logy and logx:
            plt.loglog(x, y, linestyle=linestyle)
        else:
            plt.plot(x, y, linestyle=linestyle)
    plt.legend(labels=labels, loc=legend_loc, frameon=False)
    if ylim is None:
        ylim = [np.min(ydata), np.max(ydata)]
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.ylim(ylim)
    if save_plot:
        plt.savefig(filename, transparent=True)
    plt.show()
def plotpowerspectrum(t, y, t1, t2, Fs, fileprefix):
    '''
    plot power spectrum of time-series data between t1 and t2
    and save the plot to a .png file
    '''
    filename = fileprefix + '_powerspectrum.png'

    # find indices for tlow, thigh
    n1 = np.where(t >= t1)[0]
    n2 = np.where(t >= t2)[0]

    # calculate Welch estimate of the power spectrum
    # (np.int was removed from NumPy; 'hanning' is now spelled 'hann' in SciPy)
    N = n2[0] - n1[0] + 1
    seglength = int(N // 8)
    f, P = scipy.signal.welch(y[n1[0]:n2[0]], fs=Fs, window='hann',
                              nperseg=seglength)

    # plot power spectrum
    plt.figure()
    plt.rc('text', usetex=True)
    plt.tick_params(labelsize=20)
    plt.loglog(f, P, linewidth=2)
    plt.xlabel('frequency (Hz)', size=22)
    plt.ylabel('power spectral density (1/Hz)', size=22)
    plt.grid(True)
    plt.savefig(filename, bbox_inches='tight', dpi=400)
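# A quick smoke test for plotpowerspectrum, with made-up parameters: a 50 Hz
# tone in white noise sampled at 1 kHz should give a clear peak at 50 Hz in
# the Welch estimate. The file prefix is hypothetical.
import numpy as np

Fs = 1000.0
t = np.arange(0, 60, 1.0 / Fs)
y = np.sin(2 * np.pi * 50 * t) + 0.5 * np.random.randn(len(t))
plotpowerspectrum(t, y, t1=0.0, t2=59.0, Fs=Fs, fileprefix='sine_test')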
def Ms_f_Mh(Mh0, Ms0, folder):
    Mh = physique.m2mo(Mh0, folder)
    Ms = physique.m2mo(Ms0, folder)

    """
    b = 2
    n0, bins0 = np.histogram(Mh, bins=b)
    n1, bins1 = np.histogram(Ms, bins=b)
    x = np.zeros(b)
    y = np.zeros(b)
    for i in range(b):
        x[i] = n0[i] * bins0[i]  #+ ( bins0[i+1] - bins0[i] )/2.
        y[i] = n1[i] * bins1[i]  #+ ( bins1[i+1] - bins1[i] )/2.
    """

    plt.clf()
    plt.loglog(Mh, Ms, '.', label='halos')
    plt.title("Stellar mass as a function of halo mass")
    plt.xlabel(r'halo mass ($M_{\odot}$)')
    plt.ylabel(r'stellar mass ($M_{\odot}$)')
    plt.legend()
    plt.show(block=False)
def plot_ncp_MQI_vol(conductance_R_vs_vol_R, conductance_MQI_vs_vol_R): """ Plots the Network Community Profile, i.e., minimum conductance vs volume. """ lists = sorted(conductance_R_vs_vol_R.items()) x, y = zip(*lists) fig = plt.figure() ax = fig.add_subplot(111) plt.loglog(x, y) lists = sorted(conductance_MQI_vs_vol_R.items()) x, y = zip(*lists) plt.loglog(x, y) ax.legend(["For given sets", "After applying MQI"]) ax.set_xlabel('Volume') ax.set_ylabel('Minimum conductance') ax.set_title('Min. Conductance vs. Volume NCP') plt.show()
def gerador_de_sinais_colored_noise(num_valores_por_sinal, num_sinais,
                                    nome_arquivo_colorednoise, betas,
                                    is_plotar_exemplo_familia=False):
    df_todos_sinais = None
    for beta in betas:
        for sinal in range(num_sinais):
            for num_valores in num_valores_por_sinal:
                sinais = cn.powerlaw_psd_gaussian(beta, num_valores)
                df = pd.DataFrame()
                df['valor'] = sinais
                # the families are the colors (one per beta)
                df['familia'] = beta
                df['sinal'] = sinal + 1
                df_todos_sinais = pd.concat([df_todos_sinais, df])
        if is_plotar_exemplo_familia:
            # plot the power spectral density of the last signal of this family
            from matplotlib import mlab
            from matplotlib import pylab as plt
            s, f = mlab.psd(sinais, NFFT=2**13)
            plt.loglog(f, s)
            plt.grid(True)
            plt.xlabel("log frequency")
            plt.ylabel("log power spectrum value")
            plt.title("Power spectral density for beta = {}".format(beta))
            plt.savefig("./power_spec_beta_{}.png".format(beta))
            plt.show()
    df_todos_sinais.to_csv(nome_arquivo_colorednoise, index=False)
    return df_todos_sinais
def plot_perf(nlist, err, color, label, errbar=False, perc=20): pl.loglog(nlist, err.mean(0), label=label, color=color) if errbar: pl.fill_between(nlist, np.percentile(err, perc, axis=0), np.percentile(err, 100 - perc, axis=0), alpha=0.2, facecolor=color)
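# Hypothetical data to exercise plot_perf above: 20 repetitions of an
# estimator whose error decays like n**-0.5 with multiplicative noise, on a
# log-spaced grid. The label and color are placeholders.
import numpy as np
import matplotlib.pylab as pl

nlist = np.logspace(1, 4, 10)
err = nlist[None, :]**-0.5 * np.exp(0.3 * np.random.randn(20, len(nlist)))
plot_perf(nlist, err, color='C0', label='n**-1/2 estimator', errbar=True)
pl.legend()
pl.show()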
def _test_convergence(self, fun, x, *args): for i in x: y = fun(i, *args) plt.figure('res') plt.plot(abs(i), y, '.') plt.loglog()
def plotFreq(freq_dict):
    # renamed the parameter to avoid shadowing the built-in `dict`
    sortedTuples = sorted(freq_dict.items(), key=lambda x: x[1], reverse=True)
    sortedFreq = [tup[1] for tup in sortedTuples]
    # integer ranks 1..n; np.linspace(1, n + 1, n) gives non-integer steps
    x = np.arange(1, len(sortedFreq) + 1)
    y = sortedFreq
    plt.loglog(x, y)
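# A usage sketch for plotFreq with synthetic input: counts of samples drawn
# from a Zipf distribution, whose rank-frequency curve is close to a straight
# line on log-log axes. The distribution parameters are illustrative.
import numpy as np
import matplotlib.pylab as plt
from collections import Counter

rng = np.random.default_rng(0)
word_ids = rng.zipf(a=2.0, size=100000)
plotFreq(Counter(word_ids))
plt.xlabel('rank')
plt.ylabel('frequency')
plt.show()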
def plot_norm_error(tab, max_iter, title, file_name):
    # start at 1: a zero x-value is silently dropped on a log-log plot
    x = np.linspace(1, max_iter, len(tab))
    pl.loglog(x, tab)
    pl.title(title)
    pl.xlabel('iter')
    pl.ylabel('norm error')
    pl.savefig('imgs/' + file_name)
    pl.show()
def plot_age_velocity_relation(s, limits=pynbody.filt.SolarNeighborhood(7.5, 8.5, .2)):
    import matplotlib.pylab as plt
    from numpy import polyfit  # scipy's polyfit alias has been removed

    prof = pynbody.analysis.profile.Profile(s.s[limits], calc_x=lambda x: x['age'],
                                            type='log', nbins=10, min=1)

    plt.plot(prof['rbins'], np.sqrt(prof['vr_disp']**2 + prof['vt_disp']**2 +
                                    prof['vz_disp']**2),
             'k-', label=r'$\sigma_{tot}$', linewidth=2)
    plt.plot(prof['rbins'], prof['vr_disp'], 'k--', label=r'$\sigma_R$', linewidth=2)
    plt.plot(prof['rbins'], prof['vt_disp'], 'k-.', label=r'$\sigma_{\phi}$', linewidth=2)
    plt.plot(prof['rbins'], prof['vz_disp'], 'k:', label=r'$\sigma_z$', linewidth=2)
    plt.loglog()
    plt.xlabel('Age [Gyr]')
    plt.ylabel(r'$\sigma$ [km/s]')
    plt.legend(loc='upper left', prop=dict(size='small'))

    # power-law fits in log-log space
    fittot = polyfit(np.log10(prof['rbins']),
                     np.log10(np.sqrt(prof['vr_disp']**2 + prof['vt_disp']**2 +
                                      prof['vz_disp']**2)), 1)
    fitr = polyfit(np.log10(prof['rbins']), np.log10(prof['vr_disp']), 1)
    fitt = polyfit(np.log10(prof['rbins']), np.log10(prof['vt_disp']), 1)
    fitz = polyfit(np.log10(prof['rbins']), np.log10(prof['vz_disp']), 1)

    plt.plot(prof['rbins'], 10**fitr[1] * prof['rbins']**fitr[0], 'r--')
    plt.plot(prof['rbins'], 10**fitt[1] * prof['rbins']**fitt[0], 'r--')
    plt.plot(prof['rbins'], 10**fitz[1] * prof['rbins']**fitz[0], 'r--')

    print(fittot)
    print(fitr)
    print(fitt)
    print(fitz)
def FitResult(freq, Z, Zfit_tot, chipName, file_i): plt.scatter(freq, Z, label="experiment", color="blue") plt.loglog(freq, Zfit_tot, label="fitting", color="red") plt.xlabel("Frequency [Hz]") plt.ylabel("|Z| [Ω]") plt.legend(loc='upper right', fontsize=10) plt.savefig("Result_" + str(chipName) + "/" + "FittingResult" + str(file_i)) plt.close()
def testBoundFreeClassical():
    energies = 10.**numpy.linspace(-4, 3, 101)
    sigma = []
    n = 1
    for en in energies:
        sigma.append(crossSectionBoundFreeClassical(1.0, n, en))
    pylab.loglog(energies, sigma)
    pylab.show()
def filter_and_logplot(d, value=100, title=None): if title is None: title = "LogLog Plot for Code Samples by Extension Type, > %s samples" % value filtered = filter_by_value(d, value=value) sorted_counts = sort_dict(filtered) x, y = zip(*sorted_counts) plt.loglog(list(range(1, len(y) + 1)), y) plt.title(title) plt.show() return filtered
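# filter_and_logplot above relies on filter_by_value and sort_dict, which are
# not shown in this collection. Minimal stand-ins consistent with how they
# are called (the real implementations may differ):
def filter_by_value(d, value=100):
    # keep only entries whose count exceeds the threshold
    return {k: v for k, v in d.items() if v > value}

def sort_dict(d):
    # (key, count) pairs sorted by descending count
    return sorted(d.items(), key=lambda kv: kv[1], reverse=True)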
def make_final_plots(NTS, points):
    # Tmax is assumed to be defined at module level
    hs = [[Tmax / NT for NT in nts] for nts in NTS]
    for probe in points:
        for nts, p in zip(NTS, probe):
            plt.plot(nts, p)
        plt.figure()
        # treat the finest-resolution result as the reference value
        best = probe[-1][-1]
        for h, p in zip(hs, probe):
            plt.loglog(h, [np.abs(y - best) for y in p], '-+')
        plt.figure()
def plot_obs_pred(obs_pred_data, dest_file='./obs_pred.png'): plot_color_by_pt_dens(obs_pred_data['pred'], obs_pred_data['obs'], 3, loglog=1) plt.loglog([min(obs_pred_data['pred']), max(obs_pred_data['pred'])], [min(obs_pred_data['pred']), max(obs_pred_data['pred'])], 'k-') plt.savefig(dest_file, dpi=400)
def draw_fibo():
    na = range(1, 1000, 50)  # we don't go all the way to 10000 since Python blows up before that...
    vals = np.zeros(len(na))  # array holding the results (all zeros for now)
    for i in range(len(na)):
        vals[i] = fibo_Dico(na[i])  # loop filling in the results
    plt.loglog(na, vals, '-', label="Fibonacci(n)")  # add the curve on log axes for readability
    plt.xlabel("n")  # x-axis title
    plt.ylabel("Fibonacci(n)")  # y-axis title
    plt.legend()  # add the legend
    plt.show()  # display the graph
def plot_obs_pred(obs_pred_data, yvar, dest_file='./obs_pred.png'): plot_color_by_pt_dens(10**obs_pred_data[yvar + 'pred'], 10**obs_pred_data[yvar], 3, loglog=1) plt.loglog( [min(10**obs_pred_data[yvar + 'pred']), max(10**obs_pred_data[yvar])], [min(10**obs_pred_data[yvar + 'pred']), max(10**obs_pred_data[yvar])], 'k-') plt.savefig(dest_file, dpi=400)
def EachFitResult_woCell(freq, freq_high, freq_low, Z, Z_high, Z_low, chipName, file_i):
    plt.scatter(freq, Z, label="experiment", color="blue")
    plt.loglog(freq_high, Z_high, label="fitting", color="red")
    plt.loglog(freq_low, Z_low, label="_nolegend_", color="red")  # avoid a duplicate legend entry
    plt.xlabel("Frequency [Hz]")
    plt.ylabel("|Z| [Ω]")
    plt.legend(loc='upper right', fontsize=10)
    plt.savefig("Result_" + str(chipName) + "/" + "HighMidLow Fitting" + str(file_i))
    plt.close()
def make_plots(all_series):
    for g in range(len(probes)):  # xrange is Python 2 only
        plt.figure()
        for series in all_series:
            for ts, ys, o, h in series[1]:
                plt.plot(ts, ys[:, g])
        plt.figure()
        exact = all_series[-1][1][-1][1][-1, g]
        for series in all_series:
            plt.loglog([x[2] for x in series[1]],
                       [np.abs(x[1][-1, g] - exact) for x in series[1]], '-+')
def p1():
    from scipy.sparse import diags
    Nv = 2**np.arange(3, 17)
    for n in Nv:
        h = 2 * np.pi / n  # pi must be qualified; a bare `pi` is undefined here
        x = -np.pi + h * np.arange(n)
        u = np.exp(np.sin(x))
        uprime = np.cos(x) * u  # du/dx
        # 4th-order centered difference matrix (periodic)
        d1 = np.array([2 / 3.])
        d2 = 1 / 12. + np.zeros(2)
        d3 = 1 / 12. + np.zeros(n - 2)
        d4 = 2 / 3. + np.zeros(n - 1)
        D = diags([d1, -d2, d3, -d4, d4, -d3, d2, -d1],
                  [-n + 1, -n + 2, -2, -1, 1, 2, n - 2, n - 1])
        #print(np.round(D.toarray(), decimals=2))
        D = D / h
        error = np.linalg.norm(D * u - uprime)
        pl.loglog(n, error, 'o', c='#1f77b4')
        # 2nd-order centered difference matrix (periodic)
        d1 = np.array([0.5])
        d2 = 0.5 + np.zeros(n - 1)
        D = diags([d1, -d2, d2, -d1], [-n + 1, -1, 1, n - 1])
        D = D / h
        error = np.linalg.norm(D * u - uprime)
        pl.loglog(n, error, 'o', c='#2ca02c')
        #d=[np.zeros(n-i-1)+1./(i+1) for i in range(n-1)]
        #diag=[d1 if i%2 else -d1 for i, d1 in enumerate(reversed(d))]+\
        #     [-d1 if i%2 else d1 for i, d1 in enumerate(d)]
        #loc=[-i for i in range(n-1,0,-1)]+list(range(1,n))
        #D=diags(diag,loc).toarray()
        #if n==16: print(np.round(D.toarray(), decimals=3))
        #D=D/h
        #error=np.linalg.norm(np.dot(D,u)-uprime)
        #pl.loglog(n,error,'o',c='#bcbd22')
    pl.semilogy(Nv, 1. / Nv**4, '--', c='#1f77b4')
    pl.semilogy(Nv, 1. / Nv**2, '--', c='#2ca02c')
    pl.title('Convergence of 2nd- & 4th-order finite differences')
    pl.xlabel('N')
    pl.grid(ls='--', which='both')
    pl.text(20, 5e-8, r'N$^{-4}$', fontsize=14)
    pl.text(2000, 1e-6, r'N$^{-2}$', fontsize=14)
    pl.ylabel('error')
    pl.show()
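# Instead of eyeballing the slopes in p1() against the dashed N**-2 and N**-4
# guide lines, the observed order can be estimated from the (N, error) pairs
# directly. A small helper, assuming the errors are collected into an array:
import numpy as np

def observed_order(Nv, errors):
    # least-squares slope of log(error) vs log(N); its negative is the
    # empirical convergence order
    slope = np.polyfit(np.log(np.asarray(Nv, dtype=float)),
                       np.log(np.asarray(errors, dtype=float)), 1)[0]
    return -slope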
def testBoundFreeGauntFactor():
    energies = 10.**numpy.linspace(-4, 3, 101)
    gbf = []
    n = 1
    l = 0
    for en in energies:
        g = abs(gauntFactorBoundFree(1.0, n, l, en))
        print(en, g)
        gbf.append(g)
    pylab.loglog(energies, gbf)
    pylab.show()
def plot_degree_dist(degree, value, xlabel, ylabel, color_marker='b.', title=None, outfn=None): fig = plt.figure() plt.loglog(degree, value, color_marker, markersize=2, markeredgecolor=None) plt.xlabel(xlabel, linespacing=12, fontsize=18) plt.ylabel(ylabel, linespacing=12, fontsize=18) plt.tight_layout() if title is not None: plt.title(title, fontsize=18, y=1.05) if outfn is not None: fig.savefig(outfn) plt.close() return fig
def in_degrees_dist_plot(in_degrees_dist, num_nodes): import matplotlib.pylab as plt x_axis = [] y_axis = [] for node, degree in in_degrees_dist.items(): if node != 0: distribution = float(degree) / float(num_nodes) x_axis.append(node) y_axis.append(distribution) plt.loglog(x_axis, y_axis, 'ro') plt.xlabel('In-degrees') plt.ylabel('Distribution') plt.title('In degrees Distribution (log/log Plot)') plt.show()
def plotsa():
    """
    Plots Simulated annealing example
    """
    fig1 = pl.figure()
    data = np.loadtxt("simulatedannealing1.csv", skiprows=1, delimiter=",")
    fvals = data[:, 0]
    # start the count at 1: a zero on a log axis is silently dropped
    nevals = np.arange(1, len(fvals) + 1)
    pl.loglog(nevals, fvals, 'b-')
    pl.xlabel("function evaluations", fontsize=20)
    pl.ylabel("cost function value", fontsize=20)
    pl.ylim([50, 5000])
    ax = fig1.gca()
    ax.tick_params(axis='x', labelsize=16)
    ax.tick_params(axis='y', labelsize=16)
    pl.savefig("simulatedannealing1.png", bbox_inches='tight')
def plotga():
    """
    Plots genetic algorithm
    """
    fig1 = pl.figure()
    data = np.loadtxt("geneticalgorithm.csv", skiprows=1, delimiter=",")
    fvals = data[:, 0]
    # start the count at 1: a zero on a log axis is silently dropped
    nevals = np.arange(1, len(fvals) + 1)
    pl.loglog(nevals, fvals, 'b-')
    pl.xlabel("function evaluations", fontsize=20)
    pl.ylabel("cost function value", fontsize=20)
    pl.ylim([50, 3000])
    ax = fig1.gca()
    ax.tick_params(axis='x', labelsize=16)
    ax.tick_params(axis='y', labelsize=16)
    # pl.tight_layout()
    pl.savefig("geneticalgorithm.png", bbox_inches='tight')
def plot_convergence(self, x, y, rate=None, log=True, figname='_plot', case='',
                     title='', tolatex=False):
    self.create_dir(self.plotdir)
    log_name = ''
    log_label = ''
    log_title = ''
    if not tolatex:
        log_title = 'Log Log '
    if log:
        log_name = 'loglog_'

    plt.figure()
    if rate is not None:
        p = self.p_line_range
        m = rate[0]
        c = rate[1]
        # plt.hold() was removed from Matplotlib; repeated plot calls
        # already draw onto the same axes
        plt.loglog(x[p[0]:p[1]], 10.0**(m * np.log10(x[p[0]:p[1]]) + c - 2.0), 'r')
    if log:
        plt.loglog(x, y, 'bo--')
    else:
        plt.plot(x, y, 'o:')

    if not tolatex:
        plt.title(title + log_title + case + ' convergence')
    plt.xlabel('$' + log_label + 'mx$')
    plt.ylabel(r'$' + log_label + r'\Delta q$')
    plt.grid(True)
    plt.draw()
    case = case.replace(" ", "_")
    fig_save_name = figname + '_' + log_name + case + '.' + self.plot_format
    figpath = os.path.join(self.plotdir, fig_save_name)
    plt.savefig(figpath, format=self.plot_format, dpi=320, bbox_inches='tight')
    plt.close()
    if tolatex:
        caption = ''
        if log:
            caption = 'Log Log '
        caption = caption + 'plot of ' + case.replace("_", " ") + ' convergence test'
        caption = caption.capitalize()  # capitalize() returns a new string
        self.gen_latex_fig(figpath, caption=caption)
def sanity_example(self):
    """
    Exoplanet EU example
    """
    from PyAstronomy import pyasl
    import matplotlib.pylab as plt

    eu = pyasl.ExoplanetEU()
    # See what information is available
    cols = eu.availableColumns()
    print(cols)
    print()

    # Get all data and plot planet mass vs.
    # semi-major axis in a log-log plot
    dat = eu.getAllData()
    plt.xlabel("Planet Mass [RJ]")
    plt.ylabel("Semi-major axis [AU]")
    plt.loglog(dat.plMass, dat.sma, 'b.')
def log_datafit(x, y, deg):
    z = np.polyfit(np.log10(x), np.log10(y), deg)
    p = np.poly1d(z)
    # reorder the coefficients so that A[0] is the highest-order term
    A = np.zeros(np.shape(p)[0])
    for i in range(np.shape(p)[0]):
        A[::-1][i] = p[i]
    yvals = 0.
    for j in range(np.shape(p)[0]):
        yvals += ((np.log10(x))**j) * A[::-1][j]
    plt.ion()
    plt.loglog(x, y, 'bo', label='Data')
    plt.loglog(x, 10**(yvals), 'g--', lw=2, label='Best Fit')
    plt.legend(loc='best')
    plt.grid(which='minor')
    plt.minorticks_on()
    print("Ax+B")
    print("A = ", A[0])
    print("B = ", A[1])
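# A sanity check for log_datafit with synthetic data: y ~ x**-1.5 with
# log-normal scatter, so the fitted slope A should come out near -1.5.
import numpy as np

x = np.logspace(0, 3, 50)
y = x**-1.5 * np.exp(0.1 * np.random.randn(50))
log_datafit(x, y, deg=1)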
def plot_regularization_curve(self, name_add=''): # Matplotlib is loaded selectively as it is requires # libraries that are often not installed on clusters import matplotlib.pylab as plt an_name = self.settings.get('an_name', 'xx') filename = an_name + '_regu_curve' + name_add + '.png' sort_order = np.argsort(self.omega2_list) omega2_arr = np.array(self.omega2_list).take(sort_order) epe_arr = np.array(self.epe_list).take(sort_order) err_arr = np.array(self.err_list).take(sort_order) ERR_arr = np.array(self.ERR_list).take(sort_order) plt.title('omega2: %.2e Neff: %.1f' % (self.opt_omega2, self.n_eff)) plt.loglog(omega2_arr, epe_arr, '-', label='EPE') plt.loglog(omega2_arr, np.sqrt(err_arr), '--', label='err') plt.loglog(omega2_arr, np.sqrt(ERR_arr), '-.', label='ERR') plt.legend(loc=2) plt.xlabel('Omega2') plt.ylabel('eV') # plt.show() plt.savefig(filename) plt.clf()
def calculate_speed_up():
    N = np.array([80, 160, 320, 640, 1280])
    cpu_time = np.array([8.97, 33.28, 141.02, 554.96, 2164.34])
    gpu_time = np.array([1.4877, 3.1019, 10.46, 33.3699, 125.660])
    speedup = np.array([6.25, 11.43, 13.48, 16.63, 17.22])

    plt.figure()
    plt.loglog(N, cpu_time, 'ro-', label='Xeon E5-2650 2.60GHz')
    plt.loglog(N, gpu_time, 'b+-', label='Tesla K40')
    plt.loglog(N, (cpu_time[0] * 1.2) * (N / N[0])**2, 'k-', label='2nd order')
    #legend(handles=[numerical_solution, order5])
    plt.grid(True)
    plt.ylabel('Runtime for 100 time steps (second)')
    plt.xlabel('Domain size')
    plt.title("CPU and GPU Performance")
    plt.xlim([80, 2560])
    plt.xticks([20, 40, 80, 160, 320, 640, 1280, 2560],
               [20, 40, 80, 160, 320, 640, 1280, 2560])
    plt.legend(bbox_to_anchor=(0., 1, 0.5, .0))

    plt.figure()
    plt.plot(N, speedup, 'b+-', label='Tesla K40')
    #plt.loglog(N, (speedup[0]*1.2)*(N/N[0])**2, 'k-', label='2nd order')
    plt.grid(True)
    plt.ylabel('Speedup ($t_{cpu}/t_{gpu}$)')
    plt.xlabel('domain size')
    plt.title("GPU Speedup")
    plt.xticks([80, 160, 320, 640, 1280], [80, 160, 320, 640, 1280])
    plt.legend(bbox_to_anchor=(0., 1, 0.3, .0))
    plt.show()
def many_spectra():
    import healpy as hp
    chunks = ['f', 'g', 'h']
    thresh = {'f': 1.0, 'g': 1.0, 'h': 1.8}
    fwhm_deg = {'f': 7.0, 'g': 7.0, 'h': 5.0}
    final_fwhm_deg = {'f': 7.0, 'g': 7.0, 'h': 7.0}
    cl = {}
    pl.figure(1)
    pl.clf()
    xlim = [10, 700]
    ylim = [1e-7, 3e-4]
    for k in chunks:
        print(k)
        nmap = get_hpix(quick=True, name=k)
        mask = mask_from_map(nmap, fwhm_deg=fwhm_deg[k],
                             final_fwhm_deg=final_fwhm_deg[k], thresh=thresh[k])
        # mean_nmap = np.mean(nmap[np.where(mask!=0)[0]])
        mean_nmap = np.mean(nmap[np.where(mask > 0.5)[0]])
        delta = (nmap - mean_nmap) / mean_nmap
        hp.mollview(hp.smoothing(delta * mask, fwhm=1. * np.pi / 180.),
                    title=k, min=-0.3, max=0.3)
        continue  # NB: skips the spectrum code below; remove to compute spectra
        this_cl = hp.anafast(delta * mask) / np.mean(mask**2.)
        l = np.arange(len(this_cl))
        # smooth in l*cl
        lcl = this_cl * l
        sm_lcl = np.zeros_like(lcl)
        rbox = 15
        for i in l:
            imin = np.max([0, i - rbox])
            imax = np.min([np.max(l), i + rbox])
            sm_lcl[i] = np.mean(lcl[imin:imax])
        # cl[k] = this_cl
        # pl.loglog(l, this_cl, linewidth=2)
        pl.loglog(l, sm_lcl, linewidth=2)
        pdb.set_trace()
    pl.xlim(xlim)
    # pl.ylim(ylim)
    pl.legend(chunks)
    pl.xlabel('L')
    pl.ylabel('L*CL')
def plot(out_filepath, graph): fig = pylab.figure(1) pylab.subplot(211) networkx.draw_spring(graph, node_size = 8, with_labels = False) deg = {} for d in [ len(graph.edges(n)) for n in graph.nodes() ]: try: deg[d] += 1 except KeyError: deg[d] = 1 print(deg) pylab.subplot(212) plot = deg.items() pylab.loglog([ x[0] for x in plot ], [ x[1] for x in plot ], '.') pylab.savefig(out_filepath + '.png')
def expt1(): """ Experiment 1: Chooses the result files and generates figures """ # filename = sys.argv[1] result_file = "./expt1.txt" input_threads, input_sizes, throughputs, resp_times \ = parse_output(result_file) throughputs_MiB = [tp/2**20 for tp in throughputs] fig1 = pl.figure() fig1.set_tight_layout(True) fig1.add_subplot(221) pl.semilogx(input_sizes, throughputs_MiB, 'bo-', ms=MARKER_SIZE, mew=0, mec='b') pl.xlabel("fixed file size (Bytes)") pl.ylabel("throughput (MiB/sec)") pl.text(2E3, 27, "(A)") fig1.add_subplot(222) pl.loglog(input_sizes, resp_times, 'mo-', ms=MARKER_SIZE, mew=0, mec='m') pl.xlabel("fixed file size (Bytes)") pl.ylabel("response time (sec)") pl.text(2E3, 500, "(B)") fig1.add_subplot(223) pl.semilogx(resp_times, throughputs_MiB, 'go-', ms=MARKER_SIZE, mew=0, mec='g') pl.xlabel("response time(sec)") pl.ylabel("throughput (MiB/sec)") pl.text(0.2, 27, "(C)") pl.tight_layout() pl.savefig("./figures/%s" % result_file.replace(".txt", ".pdf"))
def plot_reusage(db, keynames, save_path,
                 attr_name=['linkWeightDistr_x', 'linkWeightDistr_y']):
    plt.clf()
    plt.figure(figsize=(8, 5))
    plt.loglog(db[keynames['mog']][attr_name[0]], db[keynames['mog']][attr_name[1]],
               'b-', lw=5, label='fairyland')
    plt.loglog(db[keynames['mblg']][attr_name[0]], db[keynames['mblg']][attr_name[1]],
               'r:', lw=5, label='twitter')
    plt.loglog(db[keynames['im']][attr_name[0]], db[keynames['im']][attr_name[1]],
               'k--', lw=5, label='yahoo')
    plt.xlabel('Usage (days)')
    plt.ylabel('CCDF')
    plt.title('Usage of Links')
    plt.grid(True)
    plt.legend(('fairyland', 'twitter', 'yahoo'), loc='best')
    # save_dir is assumed to be defined at module level
    plt.savefig(os.path.join(save_dir, save_path))
def plot_ngals_vs_match_radius():
    f = open(datapath + 'num_sources_v_match_radius.txt', 'r')
    next(f)  # skip the header line (f.next() is Python 2 only)
    rad = []
    n = []
    for line in f:
        tmp = line.split()
        rad.append(float(tmp[0]))
        n.append(int(tmp[1]))
    f.close()
    pl.clf()
    pl.loglog(rad, n, 'bo')
    pl.loglog(rad, n, 'b')
    pl.loglog(rad, 550. * (np.array(rad) / 15.)**2. + 990., 'r')
    pl.ylim(800, np.max(n))
def createAndSaveLogLogPlot(xData, yData, figFileRoot, xLabel="", yLabel="",
                            fileExt='.png', xMin=-1, xMax=-1, yMin=-1, yMax=-1,
                            plotType='bo', axisFontSize=20, tickFontSize=16,
                            svgFlag=0):
    figFileName = figFileRoot + fileExt
    xData = convert_list_to_array(xData)
    yData = convert_list_to_array(yData)
    # the `hold` keyword was removed from Matplotlib
    plt.loglog(xData, yData, plotType)
    plt.xlabel(xLabel, fontsize=axisFontSize)
    plt.ylabel(yLabel, fontsize=axisFontSize)
    ax = plt.gca()
    for tick in ax.xaxis.get_major_ticks():
        tick.label1.set_fontsize(tickFontSize)
    for tick in ax.yaxis.get_major_ticks():
        tick.label1.set_fontsize(tickFontSize)
    if xMin == -1:
        xMin = min(xData.tolist())
    if xMax == -1:
        xMax = max(xData.tolist())
    if yMin == -1:
        yMin = min(yData.tolist())
    if isnan(yMin):
        yMin = 0
    if yMax == -1:
        yMax = 0
        yDataList = yData.tolist()
        for item in yDataList:
            if not isnan(item):
                if item > yMax:
                    yMax = item
    plt.xlim(xMin, xMax)
    plt.ylim(yMin, yMax)
    plt.savefig(figFileName, dpi=150)
    if svgFlag == 1:
        figFileName = figFileRoot + '.svg'
        plt.savefig(figFileName, dpi=150)
    plt.clf()
#!env python3 import numpy as np import matplotlib.pylab as plt data = np.loadtxt("errors.txt") h = data[:,0] # h=first column errL2 = data[:,1] # errL2 = 2nd column errH1 = data[:,2] # errH1 = 3rd column plt.loglog(h,errL2,"-rs",label="Error L2") plt.loglog(h,errH1,"-b^",label="Error H1") plt.legend() plt.show()
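# On a plot like the one above, convergence orders appear as slopes. A hedged
# extension of the same script (same assumed errors.txt layout) that
# estimates the orders by a least-squares fit in log-log space and puts them
# in the legend:
import numpy as np
import matplotlib.pylab as plt

data = np.loadtxt("errors.txt")
h, errL2, errH1 = data[:, 0], data[:, 1], data[:, 2]
pL2 = np.polyfit(np.log(h), np.log(errL2), 1)[0]  # empirical L2 order
pH1 = np.polyfit(np.log(h), np.log(errH1), 1)[0]  # empirical H1 order
plt.loglog(h, errL2, "-rs", label="Error L2 (order %.2f)" % pL2)
plt.loglog(h, errH1, "-b^", label="Error H1 (order %.2f)" % pH1)
plt.legend()
plt.show()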
vals = [int(v) for v in vals if int(v) > 0]
total = float(len(vals))
deg = {}
for v in vals:
    try:
        deg[v] += 1
    except KeyError:
        deg[v] = 1
for v in deg.keys():
    deg[v] = float(deg[v]) / total
"""
range_step = 0.01
total = float(len(vals))
plots = {}
for x in numpy.arange(1.0, step=range_step):
    plots[x] = float(len([s for s in vals if x <= s < x + range_step])) / total
print(plots.values())
"""
#plt.plot(numpy.arange(1.0, step=range_step), plots.values())
#plt.plot(deg.keys(), deg.values(), ',')
# materialize the dict views as lists so Matplotlib can plot them
plt.loglog(list(deg.keys()), list(deg.values()), ',')
#plt.xlim(-range_step, 1.0+range_step)
plt.savefig(sys.argv[2] + '.png')
def cone_beam_example():
    # get_stored_materials returns an iterator of materials already stored in
    # the application.
    materials_dict = {mat.name: mat for mat in get_stored_materials()}
    materials = list(materials_dict.values())

    print('Listing imported materials:')
    for ind, m in enumerate(materials):
        print('{0}: Name: {1}, Density {2}[g/cm3]'.format(ind, m.name, m.density))

    # Let's create a water box surrounded by air; we need a numpy array of
    # material indices and a material table.
    # First we specify the dimensions of the voxelized box
    N = np.array([128, 128, 128], dtype='int32')
    # Then the spacing of each voxel in cm
    spacing = np.array([0.1, 0.1, 0.1], dtype='float64')
    # Let's specify a lookup table as a dictionary for the materials we are
    # using. The key in the dictionary corresponds to the values in the
    # material_indices array
    material_lut = {0: 'air', 1: 'water'}
    material_indices = np.zeros(N, dtype='int32')
    # Let's fill a region of the box with water
    material_indices[20:-20, 20:-20, 20:-20] = 1
    # Now we create a density array with the same shape as the
    # material_indices array. We could specify different densities for each
    # voxel, but we are just going to give each material its room temperature
    # default density
    air_material = materials_dict['air']
    water_material = materials_dict['water']
    densities = np.empty(N, dtype='float64')
    densities[:, :, :] = air_material.density
    densities[20:-20, 20:-20, 20:-20] = water_material.density
    # Next we need to get the attenuation lookup table for the specified
    # materials. This is a numpy float64 array with shape:
    # [number_of_materials, 5, number_of_energies_we_have_interaction_constants_at]
    # The generate_attinuation_lut function is a convenience function that
    # will generate this LUT
    lut = generate_attinuation_lut([air_material, water_material], material_lut)

    # Now from the LUT we can plot the attenuation coefficients for water:
    plt.subplot(2, 2, 1)
    plt.loglog(lut[1, 0, :], lut[1, 1, :], label='Total')
    plt.loglog(lut[1, 0, :], lut[1, 2, :], label='Rayleigh scattering')
    plt.loglog(lut[1, 0, :], lut[1, 3, :], label='Photoelectric effect')
    plt.loglog(lut[1, 0, :], lut[1, 4, :], label='Compton scattering')
    plt.legend()
    plt.title('Attenuation coefficients for water')
    plt.ylabel('Attenuation coefficient [$cm^2 / g$]')
    plt.xlabel('Energy [$eV$]')

    # Now we are ready to set up a simulation:
    # initializing the monte carlo engine
    engine = Engine()

    # In the simulation geometry the first voxel will have coordinates (0, 0, 0),
    # so we can specify an offset to place the center of our box at the origin
    offset = -N * spacing / 2.
    # we also need the lut shape as an array
    lut_shape = np.array(lut.shape, dtype='int32')
    # and an array to store imparted energy
    energy_imparted = np.zeros_like(densities)

    simulation = engine.setup_simulation(N, spacing, offset, material_indices,
                                         densities, lut_shape, lut,
                                         energy_imparted,
                                         use_siddon=np.zeros(1, dtype='int'))

    # Next we set up a beam source
    source_position = np.array([-20, 0, 0], dtype='float64')
    source_direction = np.array([1, 0, 0], dtype='float64')  # this needs to be a unit vector
    scan_axis = np.array([0, 0, 1], dtype='float64')  # this needs to be a unit vector orthonormal to the source direction
    # The fan angle of the beam in the scan_axis direction is given as angle = arctan(collimation / sdd)
    sdd = np.array([1], dtype='float64')
    collimation = np.array([0.25], dtype='float64') / 4
    # the fan angle of the beam in source_direction cross scan_axis is given as angle = arctan(2 * fov / sdd)
    fov = np.array([0.125], dtype='float64') / 4
    # To define which photon energies we will draw from, we need to specify a
    # specter cumulative probability distribution and the corresponding
    # energies. To only draw three equally probable energies, we may specify
    # this as follows
    specter_probabilities = np.array([.25, .25, .50], dtype='float64')
    specter_probabilities /= specter_probabilities.sum()  # normalize to be certain we get a valid cumulative probability distribution
    specter_energies = np.array([30e3, 60e3, 90e3], dtype='float64')  # energy in eV, here 30, 60 and 90 keV
    specter_cpd = np.cumsum(specter_probabilities)
    # Last we may specify a weight factor for the source. This should be 1
    # unless you create multiple sources and want to apply different weights
    # to each source/beam
    weight = np.array([1], dtype='float64')

    # We now have all we need to specify a beam source, let's create one:
    n_specter = np.array(specter_cpd.shape, dtype='int32')
    beam = engine.setup_source(source_position, source_direction, scan_axis,
                               sdd, fov, collimation, weight, specter_cpd,
                               specter_energies, n_specter)

    # Running the simulation (time.clock() was removed in Python 3.8;
    # the stray pdb.set_trace() debug breakpoint is also removed)
    n_histories = 1000000
    t0 = time.perf_counter()
    engine.run(beam, n_histories, simulation)
    print('Simulated {0} photons in {1} seconds'.format(n_histories,
                                                        time.perf_counter() - t0))

    # let's add one more beam to the simulation
    source_position_2 = np.array([0, -20, 0], dtype='float64')
    source_direction_2 = np.array([0, 1, 0], dtype='float64')
    beam2 = engine.setup_source(source_position_2, source_direction_2,
                                scan_axis, sdd, fov, collimation, weight,
                                specter_cpd, specter_energies, n_specter)
    engine.run(beam2, n_histories, simulation)
    print('Simulated another {0} photons in {1} seconds'.format(n_histories,
                                                                time.perf_counter() - t0))

    # cleanup of simulation and sources; the monte carlo engine will leak
    # memory if these functions are not called
    engine.cleanup(simulation=simulation)
    engine.cleanup(source=beam)
    engine.cleanup(source=beam2)

    plt.subplot(2, 2, 2)
    plt.imshow(energy_imparted[:, :, N[2] // 2])
    plt.title('Energy Imparted [eV]')
    plt.subplot(2, 2, 3)
    plt.imshow(material_indices[:, :, N[2] // 2], cmap='gray')
    plt.title('Material index')
    plt.subplot(2, 2, 4)
    dose = energy_imparted / (np.prod(spacing) * densities)
    plt.imshow(np.log(dose[:, :, N[2] // 2]))
    plt.title('Logarithm of dose [eV / grams]')
    plt.show()
# # Plot mesh # # plot(u,wireframe=True,scalarbar=False) # plot(mesh) # if xx == 1: # l2uorder[xx-1] = 0 # else: # l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1])) # l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1])) # print errL2u[xx-1] # print errL2p[xx-1] if (ShowErrorPlots == 'yes'): plt.loglog(NN,errL2u) plt.title('Error plot for CG2 elements - Velocity L2 convergence = %f' % np.log2(np.average((errL2u[0:m-2]/errL2u[1:m-1])))) plt.xlabel('N') plt.ylabel('L2 error') plt.figure() plt.loglog(NN,errL2p) plt.title('Error plot for CG1 elements - Pressure L2 convergence = %f' % np.log2(np.average((errL2p[0:m-2]/errL2p[1:m-1])))) plt.xlabel('N') plt.ylabel('L2 error') plt.show()
def FPP(log=Logger.logger(0), N = 10000, dt = 1./24000, distributionParameter = [30], plotAll = True, efield = False): #check if rate file or rate is present if len(distributionParameter) == 1: try: data = np.loadtxt(distributionParameter[0],delimiter = ' ') log.info("Rate data loaded") BGsim = True STNdata = [] tick = [] for n in data: STNdata.append(n[1]) tick.append(n[0]) Ratetime = pylab.cumsum(tick) BGdt = tick[1] timeSteps = int(Ratetime[-1]/dt) except: float(distributionParameter[0]) Ratetime = 1. timeSteps = int(Ratetime/dt) BGsim = False else: Ratetime = 1. BGsim = False timeSteps = int(Ratetime/dt) maxrate = 1./0.009 times = [] for n in range(timeSteps): times.append(dt*n) # check for current file, if none present use impules try: It = np.loadtxt('C:\\Users\\Kristian\\Dropbox\\phd\\Data\\apcurrent24k.dat',delimiter = ',') #/home/uqkweegi/Documents/Data/apcurrent24k.dat',delimiter = ',') except: log.error('no current file present') It = np.array(1) log.info('Current loaded') It = np.multiply(np.true_divide(It,It.min()),250e-9) #normalize currentLength = len(It) #calculate extracellular effects epsilon = 8.85e-12 #Permitivity of free space rho = 10.**5 * 10.**6 #density of neurons in STN m^-3 r = np.power(np.multiply(3./4*N/(np.pi*rho),np.array([random.uniform(0,1) for _ in range(N)])),1./3) #create a power law distribution of neuron radii r.sort() if efield: rijk = [[random.uniform(0,1)-0.5 for _ in range(N)],[random.uniform(0,1)-0.5 for _ in range(N)],[random.uniform(0,1)-0.5 for _ in range(N)]] #create vector direction of field #if plotAll: # vi = pylab.plot(rijk[0]) # vj = pylab.plot(rijk[1]) # vk = pylab.plot(rijk[2]) # pylab.show() R3 = 0.96e3 C3 = 2.22e-6 C2 = 9.38e-9 C3 = 1.56e-6 C2 = 9.38e-9 R4 = 100.e6 R2N = np.multiply(1./(4*np.pi*epsilon),r) R1 = 2100.; t_impulse = np.array([dt*n for n in range(100)]) log.info('initialization complete') Vt = pylab.zeros(len(times)) Vi = Vt Vj = Vt Vk = Vt # start simulation #-------------------------------------------------------------------------------# for neuron in range(N): R2 = R2N[neuron] ppwave = pylab.zeros(len(times)) if BGsim: absoluteTimes = np.random.exponential(1./(maxrate*STNdata[0]),1) else: if len(distributionParameter) == 1: absoluteTimes = np.random.exponential(1./(distributionParameter[0]),1) else: absoluteTimes = [random.weibullvariate(distributionParameter[0],distributionParameter[1])] while absoluteTimes[-1] < times[-1]-currentLength*dt: wave_start = int(absoluteTimes[-1]/dt) wave_end = wave_start+currentLength if wave_end > len(times): break ppwave[wave_start:wave_end] = np.add(ppwave[wave_start:wave_end],It) if BGsim: isi = np.random.exponential(1./(maxrate*STNdata[int(absoluteTimes[-1]/BGdt)]),1) else: if len(distributionParameter) == 1: isi = np.random.exponential(1./(distributionParameter[0]),1) else: isi = random.weibullvariate(distributionParameter[0],distributionParameter[1]) absoluteTimes = np.append(absoluteTimes,[absoluteTimes[-1]+isi]) # calculate neuron contribution #------------------------------------------------------------------------------# extracellular_impulse_response = np.multiply(np.multiply(np.exp(np.multiply(t_impulse,-20*17*((C2*R1*R2 + C2*R1*R3 + C2*R1*R4 - C3*R1*R3 + C3*R2*R3 + C3*R3*R4))/(2*C2*C3*R1*R3*(R2 + R4)))),(np.add(np.cosh(np.multiply(t_impulse,(C2**2*R1**2*R2**2 + 2*C2**2*R1**2*R2*R3 + 2*C2**2*R1**2*R2*R4 + C2**2*R1**2*R3**2 + 2*C2**2*R1**2*R3*R4 + C2**2*R1**2*R4**2 + 2*C2*C3*R1**2*R2*R3 - 2*C2*C3*R1**2*R3**2 + 2*C2*C3*R1**2*R3*R4 - 2*C2*C3*R1*R2**2*R3 - 2*C2*C3*R1*R2*R3**2 - 
4*C2*C3*R1*R2*R3*R4 - 2*C2*C3*R1*R3**2*R4 - 2*C2*C3*R1*R3*R4**2 + C3**2*R1**2*R3**2 - 2*C3**2*R1*R2*R3**2 - 2*C3**2*R1*R3**2*R4 + C3**2*R2**2*R3**2 + 2*C3**2*R2*R3**2*R4 + C3**2*R3**2*R4**2)**(1/2)/(2*C2*C3*R1*R3*(R2 + R4)))),np.divide(np.sinh(np.multiply(t_impulse,(C2**2*R1**2*R2**2 + 2*C2**2*R1**2*R2*R3 + 2*C2**2*R1**2*R2*R4 + C2**2*R1**2*R3**2 + 2*C2**2*R1**2*R3*R4 + C2**2*R1**2*R4**2 + 2*C2*C3*R1**2*R2*R3 - 2*C2*C3*R1**2*R3**2 + 2*C2*C3*R1**2*R3*R4 - 2*C2*C3*R1*R2**2*R3 - 2*C2*C3*R1*R2*R3**2 - 4*C2*C3*R1*R2*R3*R4 - 2*C2*C3*R1*R3**2*R4 - 2*C2*C3*R1*R3*R4**2 + C3**2*R1**2*R3**2 - 2*C3**2*R1*R2*R3**2 - 2*C3**2*R1*R3**2*R4 + C3**2*R2**2*R3**2 + 2*C3**2*R2*R3**2*R4 + C3**2*R3**2*R4**2)**(1/2)/(2*C2*C3*R1*R3*(R2 + R4))))*(C2*R1*R2 - C2*R1*R3 + C2*R1*R4 + C3*R1*R3 - C3*R2*R3 - C3*R3*R4),(C2**2*R1**2*R2**2 + 2*C2**2*R1**2*R2*R3 + 2*C2**2*R1**2*R2*R4 + C2**2*R1**2*R3**2 + 2*C2**2*R1**2*R3*R4 + C2**2*R1**2*R4**2 + 2*C2*C3*R1**2*R2*R3 - 2*C2*C3*R1**2*R3**2 + 2*C2*C3*R1**2*R3*R4 - 2*C2*C3*R1*R2**2*R3 - 2*C2*C3*R1*R2*R3**2 - 4*C2*C3*R1*R2*R3*R4 - 2*C2*C3*R1*R3**2*R4 - 2*C2*C3*R1*R3*R4**2 + C3**2*R1**2*R3**2 - 2*C3**2*R1*R2*R3**2 - 2*C3**2*R1*R3**2*R4 + C3**2*R2**2*R3**2 + 2*C3**2*R2*R3**2*R4 + C3**2*R3**2*R4**2)**(1/2))))),-R4/(C2*(R2 + R4)))
        electrode_ppwave = np.convolve(ppwave, extracellular_impulse_response, 'same')
        if efield:
            # add fields: normalize the direction vector, then project
            amp = 1/np.sqrt((np.square(rijk[0][neuron])+np.square(rijk[1][neuron])+np.square(rijk[2][neuron])))
            rijk[0][neuron] = rijk[0][neuron]*amp
            rijk[1][neuron] = rijk[1][neuron]*amp
            rijk[2][neuron] = rijk[2][neuron]*amp
            Vi = np.add(Vi, np.multiply(electrode_ppwave, rijk[0][neuron]))
            Vj = np.add(Vj, np.multiply(electrode_ppwave, rijk[1][neuron]))
            Vk = np.add(Vk, np.multiply(electrode_ppwave, rijk[2][neuron]))
        else:
            # add scalar
            Vt = np.add(Vt, electrode_ppwave)
        if np.mod(neuron, 1000) == 999:
            log.info(str(neuron+1) + " neurons calculated")
    #------------------------------------------------------------------------------#
    # end simulation
    log.info('neuron contribution to MER complete')
    # remove bias
    if efield:
        # sum all three field components in quadrature; the original
        # three-argument np.add(a, b, c) treats c as the output array
        # and silently drops the Vk term
        Vt = np.sqrt(np.square(Vi) + np.square(Vj) + np.square(Vk))
    Vt = np.subtract(Vt, np.mean(Vt))
    # apply hardware filters
    flow = 5500*2.
    fhigh = 500.
    b, a = signal.butter(18, flow*dt, 'low')
    Vt = signal.lfilter(b, a, Vt)
    b, a = signal.butter(1, fhigh*dt, 'high')
    Vt = signal.lfilter(b, a, Vt)
    # produce plots
    if plotAll:
        volts = pylab.plot(times, Vt)
        if BGsim:
            stnrate = pylab.plot(Ratetime, np.multiply(STNdata, 200))
        pylab.show()
        nfft = 2**int(math.log(len(Vt), 2)) + 1
        sr = 1/dt
        # NFFT must be an integer, so use floor division
        Pxi, freqs = pylab.psd(x=Vt, Fs=sr, NFFT=nfft//10, window=pylab.window_none, noverlap=100)
        pylab.show()
        psd = pylab.loglog(freqs, Pxi)  # moved before the return, which had made it unreachable
        pylab.show()
        return freqs, Pxi
    return Vt, times
#lun_lin = np.dot(u_lin-usol,F_lin); # value of l(u_n)
#en_quad = sqrt(abs(lun_lin));
#discret_err_lin[j] = en_quad;

# QUADRATIC FINITE ELEMENTS
lun_quad = np.dot(u[:], F[:])  # value of l(u_n)
en_quad = sqrt(abs(lu - lun_quad))  # energy norm of the error vector
discret_err_quad[j] = en_quad

#lun_quad = np.dot(u-usol,F[0:N]); # value of l(u_n)
#en_quad = sqrt(abs(lun_quad));
#discret_err_quad[j] = en_quad;

a = discret_err_quad[3] / mmw[3]**2
b = discret_err_lin[3] / mmw[3]
plt.loglog(mmw, discret_err_lin, ':*', label='energy error for linear FE')
plt.loglog(mmw, b * mmw[:], label='linear slope')
plt.loglog(mmw, discret_err_quad, ':o', label='energy error for quadratic FE')
plt.loglog(mmw, a * mmw[:]**2, label='quadratic slope')
plt.title('energy error as a function of mesh width')
plt.legend()
plt.show()

## Visualization:
#FEM.plot(p, t, u); # approximated solution
#plt.title('Approximation quadratic');
#FEM.plot(p, t, u-u_lin); # approximated solution
#plt.title('Approximation linear error');
#FEM.plot(p, t, u-usol); # approximation error
#plt.title('Approximation Error');
#plt.show()
# -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib.pylab as plt

os.chdir('/Users/ronaldholt/Desktop/ORNL/SANS_NCBD_Reduced')
for filename in os.listdir("."):
    if filename.endswith(".txt"):
        x, y = np.loadtxt(filename, skiprows=1, usecols=(0, 1),
                          delimiter=",", unpack=True)
        x = x[27:91]
        z = y[27:91]
        plt.loglog(x, z, label=filename)
plt.legend(loc='lower left')
plt.title("I(Q) vs. Q")
plt.xlabel("Q")
plt.xlim([.1, .3])
plt.ylabel("I(Q)")
plt.show()
dx=dx, detrend=True, tap=tap)
    # on the first pass SS_obs / SS_model do not exist yet, so catch the
    # NameError instead of using a bare except
    try:
        SS_obs = SS_obs + numpy.interp(f0, ffo, PSD_obs)
        SS_model = SS_model + numpy.interp(f0, ffm, PSD_model)
    except NameError:
        SS_obs = numpy.interp(f0, ffo, PSD_obs)
        SS_model = numpy.interp(f0, ffm, PSD_model)
    nr += 1
SS_obs /= nr
SS_model /= nr
ff = f0
dff = ff[1] - ff[0]
plt.close()
plt.figure()
plt.loglog(ff, SS_model, color='red', lw=2, label='ssh_model')
plt.loglog(ff, SS_obs, color='k', label='ssh_obs')
plt.grid()
#plt.axis([5e-3,0.25,1e-4,1e3])
plt.xlabel(u'cy/km')
plt.ylabel(u'm²/(cy/km)')
plt.legend()
plt.title('SSH spectra for SWOT-like data and model data interpolated on the swath')
plt.savefig('{}_ssh_spectra.png'.format(p.config))
plt.show()
''' Roll and gyro power spectrum as a function of wavenumber''' import matplotlib.pylab as plt import numpy import matplotlib as mpl import swotsimulator.rw_data as rw_data import params as p file_instr = rw_data.file_instr(file=p.file_inst_error) file_instr.read_var(spatial_frequency=[], rollPSD=[], gyroPSD=[]) ## - Cut frequencies larger than Nyquist frequency and cut long wavelengths (larger than p.lambda_max) ind=numpy.where((file_instr.spatial_frequency<1./float(2*p.delta_al)) & (file_instr.spatial_frequency>0) & (file_instr.spatial_frequency>1./p.lambda_max))[0] freq=file_instr.spatial_frequency[ind] freq2=file_instr.spatial_frequency fig = plt.figure(figsize=(12,9)) tloc=0.11 tfont=20 stitle = 'Roll and gyro power spectrum' print(freq, file_instr.rollPSD) #plt.loglog(freq, file_instr.rollPSD[ind], label='roll') #plt.loglog(freq, file_instr.gyroPSD[ind], label='Gyro') plt.loglog(freq2, file_instr.rollPSD, label='roll') plt.loglog(freq2, file_instr.gyroPSD, label='Gyro') plt.ylabel('Power(asec**2/(cy/km))') plt.xlabel('Wavenumber (cy/km)') plt.legend() plt.grid() plt.title(stitle, y=-tloc, fontsize=tfont) #size[1]) plt.savefig('Fig7.png')
ff, PSD_obs = myspectools.psd1d(hh=data.SSH_obs[:, iac], dx=dx, detrend=True, tap=tap)
ff, PSD_model = myspectools.psd1d(hh=data.SSH_model[:, iac], dx=dx, detrend=True, tap=tap)
# SS_obs / SS_model are undefined on the first pass, so catch the NameError
# rather than using a bare except
try:
    SS_obs = SS_obs + numpy.interp(f0, ff, PSD_obs)
    SS_model = SS_model + numpy.interp(f0, ff, PSD_model)
except NameError:
    SS_obs = numpy.interp(f0, ff, PSD_obs)
    SS_model = numpy.interp(f0, ff, PSD_model)
nr += 1
SS_obs /= nr
SS_model /= nr
ff = f0
dff = ff[1] - ff[0]
plt.close()
plt.loglog(ff, SS_obs, color='k', label='SSH_obs')
plt.grid()
plt.loglog(ff, SS_model, color='red', lw=2, label='SSH_model')
#plt.axis([5e-3,0.25,1e-4,1e3])
plt.xlabel(u'cy/km')
plt.ylabel(u'm²/(cy/km)')
plt.legend()
plt.title('SSH spectra for SWOT-like data and model data interpolated on the swath')
plt.savefig('SSH_spectra.png')
plt.show()
def compareModelKinetics(model1, model2):
    """
    Compare the kinetics of :class:`ReactionModel` objects `model1` and
    `model2`, printing the results to stdout.
    """
    from matplotlib import pylab
    # Determine reactions that both models have in common
    commonReactions = {}
    for rxn1 in model1.reactions:
        for rxn2 in model2.reactions:
            if rxn1.isIsomorphic(rxn2):
                commonReactions[rxn1] = rxn2
                model2.reactions.remove(rxn2)
                break
    uniqueReactions1 = [rxn for rxn in model1.reactions if rxn not in commonReactions]
    uniqueReactions2 = model2.reactions

    logging.info('{0:d} reactions were found in both models:'.format(len(commonReactions)))
    for rxn in commonReactions:
        logging.info('    {0!s}'.format(rxn))
    logging.info('{0:d} reactions were only found in the first model:'.format(len(uniqueReactions1)))
    for rxn in uniqueReactions1:
        logging.info('    {0!s}'.format(rxn))
    logging.info('{0:d} reactions were only found in the second model:'.format(len(uniqueReactions2)))
    for rxn in uniqueReactions2:
        logging.info('    {0!s}'.format(rxn))

    from rmgpy.kinetics import Chebyshev

    T = 1000
    P = 1e5
    kinetics1 = []
    kinetics2 = []
    for rxn1, rxn2 in commonReactions.items():  # iteritems() is Python 2 only
        kinetics1.append(rxn1.getRateCoefficient(T, P))
        if rxn1.isIsomorphic(rxn2, eitherDirection=False):
            kinetics2.append(rxn2.getRateCoefficient(T, P))
        else:
            kinetics2.append(rxn2.getRateCoefficient(T, P) / rxn2.getEquilibriumConstant(T))

    fig = pylab.figure(figsize=(8, 6))
    ax = pylab.subplot(1, 1, 1)
    pylab.loglog(kinetics1, kinetics2, 'o', picker=5)
    xlim = pylab.xlim()
    ylim = pylab.ylim()
    lim = (min(xlim[0], ylim[0]), max(xlim[1], ylim[1]))
    ax.loglog(lim, lim, '-k')
    pylab.xlabel('Model 1 rate coefficient (SI units)')
    pylab.ylabel('Model 2 rate coefficient (SI units)')
    pylab.title('T = {0:g} K, P = {1:g} bar'.format(T, P / 1e5))
    pylab.xlim(lim)
    pylab.ylim(lim)

    def onpick(event):
        xdata = event.artist.get_xdata()
        ydata = event.artist.get_ydata()
        for ind in event.ind:
            logging.info(list(commonReactions)[ind])  # dict keys are not indexable in Python 3
            logging.info('k(T,P) = {0:9.2e} from model 1'.format(xdata[ind]))
            logging.info('k(T,P) = {0:9.2e} from model 2'.format(ydata[ind]))
            logging.info('ratio = 10**{0:.2f}'.format(math.log10(xdata[ind] / ydata[ind])))

    connection_id = fig.canvas.mpl_connect('pick_event', onpick)
    pylab.show()
# -*- coding: utf8 -*-
"""
Script to plot some absorption coefficients from NIST.
The absorption coefficient data was downloaded as an ASCII format table from
http://physics.nist.gov/PhysRefData/XrayMassCoef/tab4.html as "material.dat"
for some materials.
"""
import os
import glob
import matplotlib.pylab as plt
import numpy as np

BaseDir = os.path.join(os.getcwd(), 'nist', '*')
print('Found', len(glob.glob(BaseDir)), 'files with data from NIST')
for item in glob.glob(BaseDir):
    print('loading', os.path.basename(item))
    # Skip lines in which there's more than the info we need (K, L, M, etc.)
    # http://stackoverflow.com/a/17151323
    with open(item) as f:
        lines = (line for line in f if len(line.split()) < 4)
        Data = np.loadtxt(lines)
    plt.loglog(Data[:, 0], Data[:, 1],
               label=os.path.splitext(os.path.basename(item))[0])
plt.rc('text', usetex=True)
plt.title(r"$\mu/\rho$ [$\textrm{cm}^{2}$/g]")
plt.legend()
plt.show()