def labor_data_graphs(weighted, output_dir):
    '''
    ------------------------------------------------------------------------
    Plot graphs
    ------------------------------------------------------------------------
    '''
    import matplotlib
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D

    domain = np.linspace(20, 80, S_labor)
    Jgrid = np.linspace(1, 100, J_labor)
    X, Y = np.meshgrid(domain, Jgrid)
    cmap2 = matplotlib.cm.get_cmap('summer')

    plt.plot(domain, weighted, color='black', label='Data')
    plt.plot(np.linspace(76, 100, 23), extension, color='black',
             linestyle='-.', label='Extrapolation')
    plt.plot(np.linspace(65, 76, 11), to_dot, linestyle='--', color='black')
    plt.axvline(x=76, color='black', linestyle='--')
    plt.xlabel(r'age-$s$')
    plt.ylabel(r'individual labor supply $\bar{l}_s$')
    plt.legend()
    plt.savefig(os.path.join(
        baseline_dir, 'Demographics/labor_dist_data_withfit.png'))

    fig10 = plt.figure()
    ax10 = fig10.gca(projection='3d')
    ax10.plot_surface(X, Y, lab_mat_basic.T, rstride=1, cstride=2, cmap=cmap2)
    ax10.set_xlabel(r'age-$s$')
    ax10.set_ylabel(r'ability type -$j$')
    ax10.set_zlabel(r'labor $e_j(s)$')
    plt.savefig(os.path.join(baseline_dir, 'Demographics/data_labor_dist'))
def plotISVar():
    plt.figure()
    plt.title('Variance minimization problem (call).\nVertical lines mark the minima.')
    for K in [0.6, 0.8, 1.0, 1.2]:
        theta = np.linspace(-0.6, 2)
        var = [BS.exactCallVar(K*s0, th) for th in theta]
        minth = theta[np.argmin(var)]
        line, = plt.plot(theta, var, label=str(K))
        plt.axvline(minth, color=line.get_color())
    plt.xlabel(r'$\theta$')
    plt.ylabel('call variance')
    plt.legend(title=r'$K/s_0$', loc='upper left')
    plt.autoscale(tight=True)

    plt.figure()
    plt.title('Variance minimization problem (put).\nVertical lines mark the minima.')
    for K in [0.8, 1.0, 1.2, 1.4]:
        theta = np.linspace(-2, 0.5)
        var = [BS.exactPutVar(K*s0, th) for th in theta]
        minth = theta[np.argmin(var)]
        line, = plt.plot(theta, var, label=str(K))
        plt.axvline(minth, color=line.get_color())
    plt.xlabel(r'$\theta$')
    plt.ylabel('put variance')
    plt.legend(title=r'$K/s_0$', loc='upper left')
    plt.autoscale(tight=True)
def plot_ekf_vs_mc():

    def fx(x):
        return x**3

    def dfx(x):
        return 3*x**2

    mean = 1
    var = .1
    std = math.sqrt(var)

    data = normal(loc=mean, scale=std, size=50000)
    d_t = fx(data)

    mean_ekf = fx(mean)
    slope = dfx(mean)
    std_ekf = abs(slope*std)

    norm = scipy.stats.norm(mean_ekf, std_ekf)
    xs = np.linspace(-3, 5, 200)
    plt.plot(xs, norm.pdf(xs), lw=2, ls='--', color='b')
    # `normed` was removed from matplotlib; `density` is the replacement
    plt.hist(d_t, bins=200, density=True, histtype='step', lw=2, color='g')

    actual_mean = d_t.mean()
    plt.axvline(actual_mean, lw=2, color='g', label='Monte Carlo')
    plt.axvline(mean_ekf, lw=2, ls='--', color='b', label='EKF')
    plt.legend()
    plt.show()

    print('actual mean={:.2f}, std={:.2f}'.format(d_t.mean(), d_t.std()))
    print('EKF mean={:.2f}, std={:.2f}'.format(mean_ekf, std_ekf))
def plot_scatter(points, rects, level_id, fig_area=FIG_AREA, grid_area=GRID_AREA,
                 with_axis=False, with_img=True, img_alpha=1.0):
    rect = rects[level_id]
    top_lat, top_lng, bot_lat, bot_lng = get_rect_bounds(rect)

    plevel = get_points_level(points, rects, level_id)
    ax = plevel.plot('lng', 'lat', 'scatter')
    plt.xlim(left=top_lng, right=bot_lng)
    plt.ylim(top=top_lat, bottom=bot_lat)

    if with_img:
        img = plt.imread('/data/images/level%s.png' % level_id)
        plt.imshow(img, zorder=0, alpha=img_alpha,
                   extent=[top_lng, bot_lng, bot_lat, top_lat])

    width, height = get_rect_width_height(rect)
    fig_width, fig_height = get_fig_width_height(width, height, fig_area)
    plt.gcf().set_size_inches(fig_width, fig_height)

    if grid_area:
        grid_horiz, grid_vertic = get_grids(rects, level_id, grid_area, fig_area)
        for lat in grid_horiz:
            plt.axhline(lat, color=COLOR_GRID, lw=GRID_LW)
        for lng in grid_vertic:
            plt.axvline(lng, color=COLOR_GRID, lw=GRID_LW)

    if not with_axis:
        ax.set_axis_off()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

    return ax
def draw_img_for_viewing_ice(self):
    #print("Press 'p' to save PNG.")
    global colmax
    global colmin
    fig = P.figure(num=None, figsize=(13.5, 5), dpi=100,
                   facecolor='w', edgecolor='k')
    cid1 = fig.canvas.mpl_connect('key_press_event', self.on_keypress_for_viewing)
    cid2 = fig.canvas.mpl_connect('button_press_event', self.on_click)
    canvas = fig.add_subplot(121)
    canvas.set_title(self.filename)
    self.axes = P.imshow(self.inarr, origin='lower', vmax=colmax, vmin=colmin)
    self.colbar = P.colorbar(self.axes, pad=0.01)
    self.orglims = self.axes.get_clim()
    canvas = fig.add_subplot(122)
    canvas.set_title("Angular Average")
    maxAngAvg = (self.inangavg).max()
    numQLabels = len(eDD.iceHInvAngQ) + 1
    labelPosition = maxAngAvg / numQLabels
    for i, j in eDD.iceHInvAngQ.items():
        P.axvline(j, 0, colmax, color='r')
        P.text(j, labelPosition, str(i), rotation="45")
        labelPosition += maxAngAvg / numQLabels
    P.plot(self.inangavgQ, self.inangavg)
    P.xlabel("Q (A-1)")
    P.ylabel("I(Q) (ADU/srad)")
    pngtag = original_dir + "peakfit-gdvn_%s.png" % (self.filename)
    P.savefig(pngtag)
    print("%s saved." % (pngtag))
    P.close()
def fluence_dist(self):
    """ Plots the fluence distribution and gives the mean and median fluence
    values of the sample """
    fluences = []
    for i in range(len(self.fluences)):
        try:
            fluences.append(float(self.fluences[i]))
        except ValueError:
            continue

    fluences = np.array(fluences)
    mean_fluence = np.mean(fluences)
    median_fluence = np.median(fluences)
    print('Mean Fluence =', mean_fluence, '(15-150 keV) [10^-7 erg cm^-2]')
    print('Median Fluence =', median_fluence, '(15-150 keV) [10^-7 erg cm^-2]')

    plt.figure()
    plt.xlabel('Fluence (15-150 keV) [$10^{-7}$ erg cm$^{-2}$]')
    plt.ylabel('Number of GRBs')
    plt.xscale('log')
    minimum, maximum = min(fluences), max(fluences)
    plt.axvline(mean_fluence, color='red', linestyle='-')
    plt.axvline(median_fluence, color='blue', linestyle='-')
    plt.hist(fluences,
             bins=10**np.linspace(np.log10(minimum), np.log10(maximum), 20),
             color='grey', alpha=0.5)
    plt.show()
def plotresult(i=0, j=101, step=1):
    import matplotlib.pyplot as mpl
    from numpy import arange
    res = getevaluation(i, j, step)
    x = [k / 100.0 for k in range(i, j, step)]
    nbcurve = len(res[0])
    nres = [[] for _ in range(nbcurve)]
    mres = []
    maxofmin = -1, 0.01
    for kindex, kres in enumerate(res):
        minv = min(kres.values())
        if minv > maxofmin[1]:
            maxofmin = kindex, minv
        # sort the (key, value) pairs by key
        lres = sorted(kres.items())
        for n, (_, v) in enumerate(lres):
            nres[n].append(v)
        mres.append(sum(v for _, v in lres) / nbcurve)
    print(maxofmin)
    for y in nres:
        mpl.plot(x, y)
    mpl.plot(x, mres, linewidth=2)
    mpl.ylim(0.5, 1)
    mpl.xlim(0, 1)
    mpl.axhline(0.8)
    mpl.axvline(0.77)
    mpl.xticks(arange(0, 1.1, 0.1))
    mpl.yticks(arange(0.5, 1.04, 0.05))
    mpl.show()
def periodograms(id, x, y, yerr, path, plot=False, savepgram=False):
    """
    Takes the id of the star, returns an array of period measurements and
    saves the results.
    id: star id.
    x, y, yerr: time, flux and error arrays.
    path: path where you want to save the output.
    """
    ps = np.linspace(2, 100, 1000)
    model = LombScargle().fit(x, y, yerr)
    pgram = model.periodogram(ps)

    # find peaks
    peaks = np.array([i for i in range(1, len(ps)-1)
                      if pgram[i-1] < pgram[i] and pgram[i+1] < pgram[i]])
    if len(peaks):
        period = ps[pgram == max(pgram[peaks])][0]
    else:
        period = 0

    if plot:
        plt.clf()
        plt.plot(ps, pgram)
        plt.axvline(period, color="r")
        plt.savefig("{0}/{1}_pgram".format(path, str(int(id)).zfill(4)))

    if savepgram:
        np.savetxt("{0}/{1}_pgram.txt".format(path, str(int(id)).zfill(4)),
                   np.transpose((ps, pgram)))

    np.savetxt("{0}/{1}_pgram_result.txt".format(path, str(int(id)).zfill(4)),
               np.ones(2).T * period)
    return period
def t90_dist(self):
    """ Plots the T90 distribution, gives the mean and median T90 values of
    the sample and calculates the number of short and long bursts in the
    sample """
    t90s = []
    for i in range(len(self.t90s)):
        try:
            t90s.append(float(self.t90s[i]))
        except ValueError:
            continue

    t90s = np.array(t90s)
    mean_t90 = np.mean(t90s)
    median_t90 = np.median(t90s)
    print('Mean T90 time =', mean_t90, 's')
    print('Median T90 time =', median_t90, 's')
    # bursts shorter than 2 s are short GRBs, the rest are long GRBs
    short_t90s = t90s[t90s < 2]
    long_t90s = t90s[t90s >= 2]
    print('Number of Short/Long GRBs =', len(short_t90s), '/', len(long_t90s))

    plt.figure()
    plt.xlabel('T$_{90}$ (s)')
    plt.ylabel('Number of GRBs')
    plt.xscale('log')
    minimum, maximum = min(short_t90s), max(long_t90s)
    plt.axvline(mean_t90, color='red', linestyle='-')
    plt.axvline(median_t90, color='blue', linestyle='-')
    plt.hist(t90s,
             bins=10**np.linspace(np.log10(minimum), np.log10(maximum), 20),
             color='grey', alpha=0.5)
    plt.show()
def fit_and_plot(cand):
    data = cand.profile
    n = len(data)
    xs = np.linspace(0.0, 1.0, n, endpoint=False)
    G = gauss._compute_data(cand)
    print("k: %g, log(k): %g" % (G.k, np.log10(G.k)))
    # np.logspace expects an integer sample count
    test_ks = np.logspace(np.log10(G.k)-2, np.log10(G.k)+1, 1000)
    #test_ks = np.exp(np.linspace(np.log(1e-1), np.log(1e3), 1000))

    plt.figure(1)
    resids = [gauss._rms_residual(k, data) for k in test_ks]
    plt.loglog(test_ks, resids, color="green", label="_nolabel_")
    #plt.axvline(true_k, color="red", label="true k")
    best_k = test_ks[np.argmin(resids)]
    plt.axvline(best_k, color="green", label="best k")
    plt.axvline(G.k, color="cyan", label="k from fit")
    plt.ylabel("RMS of residuals")
    plt.xlabel("Value of k used (i.e. held fixed) when fitting")
    plt.legend(loc="best")

    plt.figure(2)
    mue, ae, be = gauss._fit_all_but_k(best_k, data)
    #plt.plot(xs, true_prof, color="red", label="true")
    plt.plot(xs, data, color="black", label="data")
    plt.plot(xs, ae*utils.vonmises_histogram(best_k, mue, n)+be,
             color="green", label="exhaustive best fit")
    plt.plot(xs, G.histogram(n), color="cyan", label="best fit")
    plt.legend(loc="best")
    plt.show()
def plot_monthly_dollars_by_party(party, color='blue', start_date=None,
                                  end_date=None, election_date=None):
    monthly_dollars = monthly_dollars_by_party(party, start_date, end_date)
    # materialize the dict views so they can be indexed and sliced
    months, dollars = list(monthly_dollars.keys()), list(monthly_dollars.values())
    plt.plot(range(len(months)), dollars, 'o-', color=color, label=party)

    # label every other month
    xtick_locs = range(0, len(months), 2)
    xtick_labels = [d.strftime('%B %Y') for d in months[::2]]
    plt.xticks(xtick_locs, xtick_labels, rotation=70)

    # format for dollars
    a = plt.gca()
    a.get_yaxis().set_major_formatter(
        matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))

    # show election day
    if election_date:
        first_date = months[0]
        date_diff = election_date - first_date
        months_diff = date_diff.days / 30.0
        plt.axvline(months_diff, color='black', ls='dashed', label='Election Day')

    plt.xlim((0, len(months)-1))
    plt.ylabel('Total Monthly Contributions ($)')
    plt.title('Total Monthly Contributions')
    plt.legend(loc='upper right')
def test_blc2(oversample=2, verbose=True, wavelength=2e-6, angle=0,
              kind='circular', sigma=1.0, loc=0.3998):
    import scipy
    x = np.linspace(-5, 5, 401)
    sigmar = sigma * x
    if kind == 'circular':
        trans = (1 - (2*scipy.special.jn(1, sigmar)/sigmar)**2)**2
    else:
        trans = (1 - (np.sin(sigmar)/sigmar)**2)**2
    plt.clf()
    plt.plot(x, trans)
    plt.axhline(0.5, ls='--', color='k')
    plt.axvline(loc, ls='--', color='k')
    #plt.gca().set_xbound(loc*0.98, loc*1.02)
    wg = np.where(sigmar > 0.01)
    intfn = scipy.interpolate.interp1d(x[wg], trans[wg])
    print("Value at %.4f :\t%.4f" % (loc, intfn(loc)))

    # figure out the FWHM
    # cut out the portion of the curve from the origin to the
    # first positive maximum
    wp = np.where(x > 0)
    xp = x[wp]
    transp = trans[wp]
    wm = np.argmax(transp)
    wg = np.where((x > 0) & (x < xp[wm]))
    interp = scipy.interpolate.interp1d(trans[wg], x[wg])
    print("For sigma = %.4f, HWHM occurs at %.4f" % (sigma, interp(0.5)))
def createHistogram(df, pic, bins=45, rates=False):
    data = mergeMatrix(df, pic)
    matrix = sortMatrix(df, pic)

    density = gaussian_kde(data)
    xs = np.linspace(min(data), max(data), int(max(data)))
    density.covariance_factor = lambda: .25
    density._compute_covariance()
    #xs = np.linspace(min(data), max(data), 1000)

    fig, ax1 = plt.subplots()
    #plt.xlim([0, 4000])
    plt.hist(data, bins=bins, range=[-500, 4000], histtype='stepfilled',
             color='grey', alpha=0.5)
    lims = plt.ylim()
    height = lims[1] - 2
    for i in range(len(matrix)):
        currentRow = matrix[i][np.nonzero(matrix[i])]
        plt.plot(currentRow, np.ones(len(currentRow))*height, '|', color='black')
        height -= 2

    plt.axvline(x=0, color='red', linestyle='dashed')
    #plt.axvline(x=1000, color='black', linestyle='dashed')
    #plt.axvline(x=2000, color='black', linestyle='dashed')
    #plt.axvline(x=3000, color='black', linestyle='dashed')

    if rates:
        rates = get_rate(df, pic)
        ax1.text(-250, 4, str(rates[0]), size=15, ha='center', va='center', color='green')
        ax1.text(500, 4, str(rates[1]), size=15, ha='center', va='center', color='green')
        ax1.text(1500, 4, str(rates[2]), size=15, ha='center', va='center', color='green')
        ax1.text(2500, 4, str(rates[3]), size=15, ha='center', va='center', color='green')
        ax1.text(3500, 4, str(rates[4]) + r' $\frac{\mathsf{Spikes}}{\mathsf{s}}$',
                 size=15, ha='center', va='center', color='green')

    plt.ylim([0, lims[1]+5])
    plt.xlim([0, 4000])
    plt.title('Histogram for ' + str(pic))
    ax1.set_xticklabels([-500, 'Start\nStimulus', 500, 1000, 1500, 2000,
                         2500, 3000, 3500, 4000])
    plt.xlabel('Time (ms)')
    plt.ylabel('Counts (Spikes)')
    print(lims)

    arr_hand = getPic(pic)
    imagebox = OffsetImage(arr_hand, zoom=.3)
    xy = [3200, lims[1]+5]  # coordinates to position this image
    ab = AnnotationBbox(imagebox, xy, xybox=(30., -30.),
                        xycoords='data', boxcoords="offset points")
    ax1.add_artist(ab)

    ax2 = ax1.twinx()  # necessary for multiple y-axes
    # use ax2.plot to draw the density estimate on the second axis
    ax2.plot(xs, density(xs), 'g', drawstyle='steps')
    plt.ylim([0, 0.001])
    plt.yticks([0.0001, 0.0002, 0.0003, 0.0004, 0.0005,
                0.0006, 0.0007, 0.0008, 0.0009])
    ax2.set_yticklabels([1, 2, 3, 4, 5, 6, 7, 8, 9])
    plt.ylabel(r'Density ($\cdot \mathsf{10^{-4}}$)', color='green')
    plt.gcf().subplots_adjust(right=0.89)
    plt.gcf().subplots_adjust(bottom=0.2)
    plt.savefig(pic, dpi=150)
def make_heatmap(n, ratings):
    """Makes a 2d histogram of n vs ratings
    """
    # File name
    vline = "_cutoff-%s" % args.n if args.n else ""
    bins = "_nbin-%i_rbin-%i" % (args.n_bins, args.rating_bins)
    outfile = args.outbase + "_heatmap_n-vs-rating%s%s.pdf" % (vline, bins)

    # Bin points
    heatmap, n_edges, rating_edges = \
        np.histogram2d(n, ratings, bins=(args.n_bins, args.rating_bins))
    heatmap = np.log10(heatmap + 1)  # log space to see full spectrum
    extent = [n_edges[0], n_edges[-1], 0, rating_edges[-1]]

    # plot
    fig, ax = plt.subplots()
    img = ax.imshow(heatmap.T, extent=extent, aspect="auto",
                    origin="lower", cmap="RdBu")
    cbar = plt.colorbar(img, ticks=[0, 1, 2, 3])
    cbar.set_ticklabels([0, 1000, 2000, 3000])  # easier to read
    cbar.set_label("# businesses")
    ax.set_xlabel("Number of reviews (n)")
    ax.set_ylabel("Average rating (stars)")
    if args.n:  # vline if specified
        plt.axvline(args.n, color="white", linestyle="--")
        ax.text(args.n + 50, 0.5, "cutoff", color="white", fontweight="bold")
    fig.savefig(outfile)
    print("2D histogram made at \n%s" % outfile)
    return
def bFctV0(n1, n2, rho, b, V0, modes, delta):
    NA = sqrt(n1**2 - n2**2)

    pyplot.figure()
    sim = Simulator(delta=delta)
    sim.setWavelength(Wavelength(k0=(v0 / b / NA)) for v0 in V0)
    sim.setMaterials(Fixed, Fixed, Fixed)
    sim.setRadii((rho * b,), (b,))
    sim.setMaterialsParams((n2,), (n1,), (n2,))

    fiber = fixedFiber(0, [rho * b, b], [n2, n1, n2])

    for m in modes:
        neff = sim.getNeff(m)
        bnorm = (neff - n2) / (n1 - n2)
        pyplot.plot(V0, bnorm, color=COLORS[m.family], label=str(m))
        c = fiber.cutoffV0(m)
        pyplot.axvline(c, color=COLORS[m.family], ls='--')

    pyplot.xlim((0, V0[-1]))
    pyplot.title("$n_1 = {}, n_2 = {}, \\rho = {}$".format(n1, n2, rho))
    pyplot.xlabel("Normalized frequency ($V_0$)")
    pyplot.ylabel("Normalized propagation constant ($\\widetilde{\\beta}$)")
def _update(num, data):
    nonlocal cmap1, bins, ax
    # clear axes, load data to refresh
    plt.cla()
    data = np.loadtxt(core_dict['DataFolder'] + "/data0.txt", float)
    # plots
    plt.axvline(x=np.average(data), color=cmap1(0.5), ls="--", linewidth=1.7)
    # `normed` was removed from matplotlib; `density` is the replacement
    plt.hist(data, bins, alpha=0.6, density=True, facecolor=cmap1(0.8),
             label="X ~ Beta(2,5)")
    # labels
    legend = plt.legend(loc='upper right', framealpha=1.0)
    legend.get_frame().set_linewidth(1)
    plt.title(core_dict['PlotTitle'], style='italic')
    plt.xlabel('Regret')
    plt.ylabel('Frequency')
    ax.set_ylim([0, 0.2])
def make_scatter(n, ratings):
    """Makes a scatter plot of n vs ratings
    """
    # File name
    vline = "_cutoff-%s" % args.n if args.n else ""
    log = "_log" if args.log else ""
    outfile = args.outbase + "_scatter_n-vs-rating%s%s.pdf" % (vline, log)

    fig, ax = plt.subplots()
    ax.scatter(n, jitter(ratings), color="#e34a33", alpha=0.3)
    if args.log:
        ax.set_xscale("log")
    ax.set_xlim([0, np.max(n)])
    ax.set_ylim([0, 5.5])
    ax.set_xlabel("Number of reviews (n)")
    ax.set_ylabel("Average rating (stars)")
    if args.n:  # vline if specified
        plt.axvline(args.n, color="black", linestyle="--")
        ax.text(args.n + 50, 0.5, "cutoff", fontweight="bold")
    fig.savefig(outfile)
    print("Scatter plot made at \n%s" % outfile)
    return
def plot_trade_windows(self, dt_s, dt_e, f_width):
    ts_signal = self.getBollingerValue(dt_s, dt_e)
    dt_previous = ''
    s_color_previous = ''
    for i in range(len(ts_signal)):
        # get values for the current date
        dt = ts_signal.index[i]
        s = ts_signal[dt]
        s_color = 'r' if s >= f_width else 'g' if s <= -f_width else ''
        # update the figure: on change and on the last day
        if s_color != s_color_previous or (i == len(ts_signal)-1):
            # if we are ending a trade opportunity window
            if s_color_previous != '':
                # shade the trade opportunity window
                plt.axvspan(dt_previous, dt, color=s_color_previous, alpha=0.25)
                # draw the end line
                plt.axvline(x=dt, color=s_color_previous, alpha=0.5)
            # if we are starting a new trade opportunity window
            if s_color != '':
                # draw the start line
                plt.axvline(x=dt, color=s_color, alpha=0.5)
            # save the last event
            s_color_previous = s_color
            dt_previous = dt
    return valor


def Plotar(self, serie=None, deteccoes=None):
    '''
    This method plots the final series with each concept separated by a
    red vertical line.
    '''
    if serie is None:
        plt.plot(self.serie_final, label='Series', color='Blue')
        contador = len(self.serie_dividida[0])
        for i in range(len(self.serie_dividida)):
            plt.axvline(contador, linewidth=1, color='r', zorder=-1)
            if i > 0:
                contador += len(self.serie_dividida[i])
        plt.title('Series with %s concepts' % (self.qtd_conceitos))
        plt.legend()
        plt.tight_layout()
        #plt.axis([0, 10000, -50, 50])
        plt.show()
    else:
        plt.plot(serie, label='Series', color='Blue')
        contador = deteccoes[0]
        for i in range(len(deteccoes)):
            plt.axvline(contador, linewidth=1, color='r', zorder=-1)
            if i > 0:
                contador = deteccoes[i]
def main():
    ht = 50
    hr = 2
    f = 900 * 10**6
    c = 3 * 10**8
    gr = [1, 0.316, 0.1, 0.01]
    gl = 1
    r = -1
    distance = np.arange(1, 100001, 1, dtype=float)
    lambd = c / float(f)
    # critical distance; cast to int so it can be used as an array index
    dc = int(4 * ht * hr / lambd)
    reflect = (distance**2 + (ht + hr)**2)**0.5
    los = (distance**2 + (ht - hr)**2)**0.5
    phi = 2 * pi * (reflect - los) / lambd
    flat = distance[:ht]
    decline = distance[ht:(dc + 1)]
    steep = distance[dc:]
    for i in range(len(gr)):
        temp = gl**0.5 / los + r * (gr[i]**0.5) * np.exp(phi * -1J) / reflect
        pr = (lambd / 4 / pi)**2 * (abs(temp)**2)
        plt.subplot(220 + i + 1)
        plt.plot(10*log(distance), 10*log(pr)-10*log10(pr[0]), 'b',
                 10*log(flat), np.zeros(len(flat)), 'y',
                 10*log(decline), -20*log(decline), 'g',
                 10*log(steep), -40*log(steep), 'r')
        plt.axvline(x=10 * log10(ht), linestyle='-.')
        plt.axvline(x=10 * log10(dc), linestyle='-.')
        plt.title("Gr = %s" % gr[i])
    plt.show()
def msepath(X, y):
    print(X.shape, y.shape)
    # Compute paths
    print("Computing regularization path using the coordinate descent lasso...")
    model = LassoCV(cv=10, max_iter=3000).fit(X, y)

    # Display results
    m_log_alphas = -np.log10(model.alphas_)
    plt.figure()
    plt.plot(m_log_alphas, model.mse_path_, ':')
    plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
             label='Average across the folds', linewidth=2)
    plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
                label='alpha: CV estimate')
    plt.legend()
    plt.xlabel('-log(alpha)')
    plt.ylabel('Mean square error')
    plt.title('Mean square error on each fold: coordinate descent')
    plt.axis('tight')
    plt.show()

    fields = iot.read_fields()
    for i in range(len(fields)):
        print(str(fields[i]) + '\t' + str(model.coef_[i]))
def test_airy_1d(display=False):
    """ Compare analytic airy function results to the expected locations
    for the first three dark rings and the FWHM of the PSF."""
    lam = 1.0e-6
    D = 1.0
    r, airyprofile = airy_1d(wavelength=lam, diameter=D, length=20480,
                             pixelscale=0.0001)

    # convert to units of lambda/D
    r_norm = r * _ARCSECtoRAD / (lam/D)
    if display:
        plt.semilogy(r_norm, airyprofile)
        plt.axvline(1.028/2, color='k', ls=':')
        plt.axhline(0.5, color='k', ls=':')
        plt.ylabel('Intensity relative to peak')
        plt.xlabel(r'Separation in $\lambda/D$')
        for rad in airy_zeros:
            plt.axvline(rad, color='red', ls='--')

    airyfn = scipy.interpolate.interp1d(r_norm, airyprofile)
    # test FWHM occurs at 1.028 lam/D, i.e. HWHM is at 0.514
    assert (airyfn(0.5144938) - 0.5) < 1e-5

    # test first minima occur near 1.22 lam/D, 2.23, 3.24 lam/D
    # TODO investigate/improve numerical precision here?
    for rad in airy_zeros:
        #print(rad, airyfn(rad), airyfn(rad+0.005))
        assert airyfn(rad) < airyfn(rad+0.0003)
        assert airyfn(rad) < airyfn(rad-0.0003)
def mcgehee():
    st = TransferMatrix()
    st.add_layer(0, 1.4504)
    st.add_layer(110, 1.7704 + 0.01161j)
    st.add_layer(35, 1.4621 + 0.04426j)
    st.add_layer(220, 2.12 + 0.3166016j)
    st.add_layer(7, 2.095 + 2.3357j)
    st.add_layer(200, 1.20252 + 7.25439j)
    st.add_layer(0, 1.20252 + 7.25439j)

    st.set_vacuum_wavelength(600)
    st.set_polarization('s')
    st.set_field('E')
    st.set_incident_angle(0, units='degrees')
    st.info()

    # Do calculations
    result = st.calc_field_structure()
    z = result['z']
    y = result['field_squared']

    # Plot results
    plt.figure()
    plt.plot(z, y)
    for boundary in st.get_layer_boundaries()[:-1]:
        plt.axvline(x=boundary, color='k', lw=2)
    plt.xlabel('Position in Device (nm)')
    plt.ylabel('Normalized |E|$^2$ Intensity ($|E(z)/E_0(0)|^2$)')
    if SAVE:
        plt.savefig('../Images/McGehee structure.png', dpi=300)
    plt.show()
def guiding_electric_field():
    # Create structure
    st = TransferMatrix()
    st.set_vacuum_wavelength(lam0)
    st.add_layer(1.5 * lam0, air)
    st.add_layer(lam0, si)
    st.add_layer(1.5 * lam0, air)
    st.info()

    st.set_polarization('TM')
    st.set_field('H')
    st.set_leaky_or_guiding('guiding')
    alpha = st.calc_guided_modes(normalised=True)

    plt.figure()
    for i, a in enumerate(alpha):
        st.set_guided_mode(a)
        result = st.calc_field_structure()
        z = result['z']
        # z = st.calc_z_to_lambda(z)
        E = result['field']
        # Normalise fields
        # E /= max(E)
        plt.plot(z, abs(E) ** 2, label=i)

    for boundary in st.get_layer_boundaries()[:-1]:
        # boundary = st.calc_z_to_lambda(boundary)
        plt.axvline(x=boundary, color='k', lw=1, ls='--')

    plt.legend(title='Mode index')
    if SAVE:
        plt.savefig('../Images/guided fields.png', dpi=300)
    plt.show()
def test():
    # Create structure
    st = LifetimeTmm()
    st.set_vacuum_wavelength(lam0)
    # st.add_layer(1e3, si)
    st.add_layer(1900, sio2)
    st.add_layer(100, si)
    st.add_layer(20, sio2)
    st.add_layer(100, si)
    # st.add_layer(1900, sio2)
    st.add_layer(1e3, air)
    st.info()

    st.set_polarization('TM')
    st.set_field('H')
    st.set_leaky_or_guiding('guiding')
    alpha = st.calc_guided_modes(normalised=True)
    st.set_guided_mode(alpha[0])
    result = st.calc_field_structure()
    z = result['z']
    z = st.calc_z_to_lambda(z)
    E = result['field']
    # Normalise fields
    # E /= max(E)

    plt.figure()
    plt.plot(z, abs(E) ** 2)
    for boundary in st.get_layer_boundaries()[:-1]:
        boundary = st.calc_z_to_lambda(boundary)
        plt.axvline(x=boundary, color='k', lw=1, ls='--')
    plt.show()
def multi_plot_grid(sig, num=4, path=None, changes=None):
    """
    Plot in a grid structure.
    If a path "/.../Plot.png" is provided, the output plot will be saved
    to that file. If the list changes is provided, vertical red lines will
    be plotted to mark those locations.
    """
    n = len(sig)
    md = n % num
    wdth = n // num  # integer division so the x-limits stay integers
    for i in range(1, num+1):
        plt.subplot(num//2, num//2, i)
        plt.plot(sig)
        if changes is not None:
            for j, e in enumerate(changes):
                plt.axvline(x=e, color="red")
        if i == num:
            plt.xlim((i-1)*wdth, i*wdth + md)
        else:
            plt.xlim((i-1)*wdth, i*wdth)
        plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
    if path:
        plt.savefig(path, dpi=700)
        print("Figure is saved into " + path)
    else:
        plt.show()
def psplot(pslist, nbins=0, filename=None, figsize=(12, 8), showlegend=True):
    """
    Plots a list of PS objects.
    If the PS has a slope, it is plotted as well.
    If nbins > 0, I bin the spectra.

    add option for linear plot ?
    """
    plt.figure(figsize=figsize)

    for ps in pslist:
        if not np.all(np.isfinite(np.log10(ps.p))):
            print("No power to plot (probably flat curve !), skipping this one.")
            continue
        # We bin the points
        if nbins > 0:
            logf = np.log10(ps.f[1:])  # we remove the first one
            logbins = np.linspace(np.min(logf), np.max(logf), nbins+1)  # So nbins+1 numbers here.
            bins = 10**logbins
            bincenters = 0.5*(bins[:-1] + bins[1:])  # nbins centers
            logbins[0] -= 1.0
            logbins[-1] += 1.0
            binindexes = np.digitize(logf, logbins)  # binindexes go from 1 to nbins+1
            binvals = []
            binstds = []
            for i in range(1, nbins+1):
                vals = ps.p[1:][binindexes == i]
                binvals.append(np.mean(vals))
                binstds.append(np.std(vals)/np.sqrt(vals.size))

            bincenters = np.array(bincenters)
            binvals = np.array(binvals)
            binstds = np.array(binstds)

            plt.loglog(bincenters, binvals, marker=".", linestyle="-",
                       color=ps.plotcolour, label="%s" % (ps))
        else:
            plt.loglog(ps.f, ps.p, marker=".", linestyle="None",
                       color=ps.plotcolour, label="%s" % (ps))
        if ps.slope is not None:
            plt.loglog(ps.slope["f"], ps.slope["p"], marker="None",
                       color=ps.plotcolour,
                       label="Slope %s = %.3f" % (ps, ps.slope["slope"]))
            plt.axvline(ps.slope["fmin"], color=ps.plotcolour, dashes=(5, 5))
            plt.axvline(ps.slope["fmax"], color=ps.plotcolour, dashes=(5, 5))

    plt.xlabel("Frequency [1/days]")
    plt.ylabel("Power")

    if showlegend:
        plt.legend()

    #plt.text(np.min(10**fitx), np.max(10**pfit), "Log slope : %.2f" % (popt[0]), color="red")

    if filename:
        plt.savefig(filename)  # plt.save does not exist; savefig writes the file
    else:
        plt.show()
def test_airy_2d(display=False):
    """ Test 2D airy function vs 1D function; both should yield
    the exact same results for a 1D cut across the 2d function.
    And we've already tested the 1D above...
    """
    fn2d = airy_2d(diameter=1.0, wavelength=1e-6, shape=(511, 511),
                   pixelscale=0.010)
    r, fn1d = airy_1d(diameter=1.0, wavelength=1e-6, length=256,
                      pixelscale=0.010)

    cut = fn2d[255, 255:].flatten()
    print(cut.shape)

    if display:
        plt.subplot(211)
        plt.semilogy(r, fn1d, label='1D')
        plt.semilogy(r, cut, label='2D', color='black', ls='--')
        plt.legend(loc='upper right')
        plt.axvline(0.251643, color='red', ls='--')
        plt.ylabel('Intensity relative to peak')
        plt.xlabel(r'Separation in $\lambda/D$')

        ax = plt.subplot(212)
        plt.plot(r, cut-fn1d)
        ax.set_ylim(-1e-8, 1e-8)
        plt.ylabel('Difference')
        plt.xlabel(r'Separation in $\lambda/D$')

    #print(fn1d[0], cut[0])
    #print(np.abs(fn1d-cut))  #< 1e-9
    assert np.all(np.abs(fn1d-cut) < 1e-9)
def TCP_plot(no_ind_plots, label):
    #no_ind_plots = 50
    ## number of individual plots cannot exceed the total number of patients
    if no_ind_plots > n:
        no_ind_plots = n

    ## select the individual plots randomly from those calculated...
    ind_plots = np.random.choice(len(TCPs), no_ind_plots, replace=False)

    ## individuals (specified number of plots chosen)
    for i in ind_plots:
        plt.plot(nom_doses, TCPs[i], color='grey', alpha=0.5)
    ## population
    plt.plot(nom_doses, TCP_pop, color='black', linewidth=2, alpha=0.5)
    plt.plot(nom_doses, TCP_pop, marker='o', ls='none', label=label)

    ## plot formatting
    plt.xlim(0, max(nom_doses))
    plt.ylim(0, 1.0)
    plt.xlabel('Dose (Gy)')
    plt.ylabel('TCP')
    plt.title('TCPs')
    #plt.legend(loc='best', fontsize='medium', framealpha=1)
    plt.axvline(d_interest, color='black', ls='--')
    plt.axhline(TCP_pop[frac_interest-1], color='black', ls='--')

    ## add labels with TCP at dose of interest
    text_string = ('Pop. TCP = ' + str(round(TCP_cure_at_d_interest, 2)) +
                   ' % at ' + str(d_interest) + 'Gy')
    plt.text(5, 0.4, text_string, backgroundcolor='white')
    plt.legend(loc='lower left', numpoints=1)
    plt.show()
def find_k(self, rank=None, max_clusters=1, vertline=None):
    if rank is not None:
        svd = TruncatedSVD(rank)
        self.X = svd.fit_transform(self.X)
        self.X = Normalizer(copy=False).fit_transform(self.X)

    k_range = range(1, max_clusters)
    clusters = [KMeans(n_clusters=k).fit(self.X) for k in k_range]
    centroids = [cluster.cluster_centers_ for cluster in clusters]

    k_cosine = [cdist(self.X, cent, metric='cosine') for cent in centroids]
    dist = [np.min(k_cos, axis=1) for k_cos in k_cosine]

    # Within-cluster sum of squares
    wcss = [sum(d[~np.isnan(d)]**2) for d in dist]
    # Total sum of squares
    tss = sum(pdist(self.X)**2) / self.X.shape[0]
    # Explained variance (tss is a scalar, so subtract an array, not a list)
    bss = tss - np.array(wcss)

    fig, (ax1, ax2) = plt.subplots(1, 2)
    fig.set_size_inches(10, 3)
    plt.tight_layout()
    ax1.set_title('BSS')
    ax1.plot(np.arange(1, len(bss)+1), bss)
    ax1.scatter(np.arange(1, len(bss)+1), bss)
    ax2.set_title('WCSS')
    ax2.plot(np.arange(1, len(wcss)+1), wcss)
    ax2.scatter(np.arange(1, len(wcss)+1), wcss)
    if vertline is not None:
        plt.axvline(vertline, c='red', alpha=0.75)
    plt.show()
def get_distribution():
    all_data, all_labels = extract_csv('../data/comments.csv', LABELS, CATEGORY)
    all_data = np.asarray([[x] for x in all_data], dtype="S1000")
    all_labels = np.asarray(all_labels)
    num_trials = TRIALS
    accuracies = []
    for trial in range(num_trials):
        if (trial % 10) == 0:
            print("Trial:", trial)
        randomization_scheme = np.arange(len(all_data))
        np.random.shuffle(randomization_scheme)
        randomized_data = all_data[randomization_scheme]
        randomized_labels = all_labels[randomization_scheme]
        train_messages = randomized_data[len(all_data) // VAL_SPLIT:]
        train_labels = randomized_labels[len(all_data) // VAL_SPLIT:]
        val_messages = randomized_data[:len(all_data) // VAL_SPLIT]
        val_labels = randomized_labels[:len(all_data) // VAL_SPLIT]
        dictionary = create_dictionary(train_messages)
        train_matrix = transform_text(train_messages, dictionary)
        val_matrix = transform_text(val_messages, dictionary)
        # compare strings with ==, not the identity operator `is`
        if MODEL_CHOICE == "LOGREG":
            logreg = LogisticRegression()
            logreg.fit(train_matrix, train_labels)
            logistic_regression_predictions = logreg.predict(val_matrix)
            logistic_regression_accuracy = np.mean(
                logistic_regression_predictions == val_labels)
            accuracies.append(logistic_regression_accuracy)
        elif MODEL_CHOICE == "NAIVE":
            naive_bayes_model = fit_naive_bayes_model(train_matrix,
                                                      train_labels, LABELS)
            naive_bayes_predictions = predict_from_naive_bayes_model(
                naive_bayes_model, val_matrix)
            naive_bayes_accuracy = np.mean(naive_bayes_predictions == val_labels)
            accuracies.append(naive_bayes_accuracy)
        elif MODEL_CHOICE == "OFFSHELF":
            sid = SentimentIntensityAnalyzer()
            converted = [x[0].decode('utf-8') for x in val_messages]
            sid_predictions = predict_from_off_shelf_model(sid, converted)
            sid_accuracy = np.mean(sid_predictions == val_labels)
            accuracies.append(sid_accuracy)

    plt.figure()
    plt.hist(accuracies, bins=BINS, label='data',
             weights=np.ones(num_trials) / num_trials)
    plt.gca().yaxis.set_major_formatter(PercentFormatter(1))
    plt.title("Accuracy Distribution for " + MODEL_CHOICE[0] +
              MODEL_CHOICE[1:].lower() + " Model")
    plt.xlabel("Accuracy")
    plt.ylabel("Percentage")
    accuracies = np.asarray(accuracies)
    plt.axvline(x=np.mean(accuracies), color='red', linestyle='--', label='mean')
    plt.savefig(MODEL_CHOICE.lower() + "_acc_dist.png")
def get_crypto_arb(bitstamp, luno, ice3x, AMT=30000,
                   plt_results=False, plt_rev_arb=False):
    CT = datetime.datetime.now()
    zarusd = get_zar_usd()
    usdzar = 1 / zarusd

    luno_tickers = np.array(['BTC/ZAR'])
    bitstamp_tickers = np.array(['BTC/USD', 'LTC/USD'])
    ice3x_tickers = np.array(['BTC/ZAR', 'LTC/ZAR'])

    #luno = ccxt.luno()
    #bitstamp = ccxt.bitstamp()
    #ice3x = ccxt.ice3x()

    luno_rates = get_ticker_rates(luno, luno_tickers)
    bitstamp_rates = get_ticker_rates(bitstamp, bitstamp_tickers)
    ice3x_rates = get_ticker_rates(ice3x, ice3x_tickers)

    luno_btc_arb = calc_arb(AMT, luno_rates[0]['bid'],
                            bitstamp_rates[0]['ask'], usdzar)
    ice3x_btc_arb = calc_arb(AMT, ice3x_rates[0]['last'],
                             bitstamp_rates[0]['ask'], usdzar)
    ice3x_ltc_arb = calc_arb(AMT, ice3x_rates[1]['last'],
                             bitstamp_rates[1]['ask'], usdzar)

    luno_btc_revarb = calc_rev_arb(ASK=luno_rates[0]['ask'],
                                   BID=bitstamp_rates[0]['bid'], BASE=zarusd)
    ice3x_btc_revarb = calc_rev_arb(ASK=ice3x_rates[0]['last'],
                                    BID=bitstamp_rates[0]['bid'], BASE=zarusd)
    ice3x_ltc_revarb = calc_rev_arb(ASK=ice3x_rates[1]['last'],
                                    BID=bitstamp_rates[1]['bid'], BASE=zarusd)

    # print('The ARB ratio is: {}'.format(luno_btc_arb))

    if plt_results:
        AMT_VEC = range(0, 35000, 100)
        LUNO_BTC_VEC = [calc_arb(float(x), luno_rates[0]['bid'],
                                 bitstamp_rates[0]['ask'], usdzar)
                        for x in AMT_VEC]
        ICE3X_BTC_VEC = [calc_arb(float(x), ice3x_rates[0]['last'],
                                  bitstamp_rates[0]['ask'], usdzar)
                         for x in AMT_VEC]
        ICE3X_LTC_VEC = [calc_arb(float(x), ice3x_rates[1]['last'],
                                  bitstamp_rates[1]['ask'], usdzar)
                         for x in AMT_VEC]

        ymax = np.max([luno_btc_arb, ice3x_btc_arb, ice3x_ltc_arb])
        ymin = np.min([luno_btc_arb, ice3x_btc_arb, ice3x_ltc_arb])

        plt.clf()
        plt.plot(AMT_VEC, LUNO_BTC_VEC)
        plt.plot(AMT_VEC, ICE3X_BTC_VEC)
        plt.plot(AMT_VEC, ICE3X_LTC_VEC)
        plt.axhline(y=0, color='dimgrey')
        plt.axvline(x=0, color='dimgrey')
        plt.legend(['LUNO_BTC: ' + str(np.round(luno_btc_arb * 100, 4)) + '%',
                    'ICE3X_BTC: ' + str(np.round(ice3x_btc_arb * 100, 4)) + '%',
                    'ICE3X_LTC: ' + str(np.round(ice3x_ltc_arb * 100, 4)) + '%'],
                   loc='lower right')
        plt.ylim((ymin - 0.03, ymax + 0.02))
        plt.text(35000 / 2, ymax + 0.015, str(CT.strftime('%Y-%m-%d %H:%M')),
                 fontsize=12, bbox=dict(facecolor='red', alpha=0.5),
                 horizontalalignment='center', verticalalignment='center')
        plt.title('ARB Comparison')
        plt.xlabel('ZAR')
        plt.ylabel('Arb %')
        plt.grid(which='both')
        # plt.show()
        plt.savefig('images/ARB.png')
        plt.close()

    if plt_rev_arb:
        AMT_VEC = range(0, 35000, 100)
        LUNO_BTC_REV_VEC = [calc_rev_arb(ASK=luno_rates[0]['ask'],
                                         BID=bitstamp_rates[0]['bid'],
                                         AMT=x, BASE=zarusd)
                            for x in AMT_VEC]
        ICE3X_BTC_REV_VEC = [calc_rev_arb(ASK=ice3x_rates[0]['last'],
                                          BID=bitstamp_rates[0]['bid'],
                                          AMT=x, BASE=zarusd)
                             for x in AMT_VEC]
        ICE3X_LTC_REV_VEC = [calc_rev_arb(ASK=ice3x_rates[1]['last'],
                                          BID=bitstamp_rates[1]['bid'],
                                          AMT=x, BASE=zarusd)
                             for x in AMT_VEC]

        ymax = np.max([luno_btc_revarb, ice3x_btc_revarb, ice3x_ltc_revarb])
        ymin = np.min([luno_btc_revarb, ice3x_btc_revarb, ice3x_ltc_revarb])

        plt.clf()
        plt.plot(AMT_VEC, LUNO_BTC_REV_VEC)
        plt.plot(AMT_VEC, ICE3X_BTC_REV_VEC)
        plt.plot(AMT_VEC, ICE3X_LTC_REV_VEC)
        plt.axhline(y=0, color='dimgray')
        plt.axvline(x=0, color='dimgray')
        plt.legend(['LUNO_BTC: ' + str(np.round(luno_btc_revarb * 100, 4)) + '%',
                    'ICE3X_BTC: ' + str(np.round(ice3x_btc_revarb * 100, 4)) + '%',
                    'ICE3X_LTC: ' + str(np.round(ice3x_ltc_revarb * 100, 4)) + '%'],
                   loc='lower right')
        plt.title('Reverse ARB Comparison')
        plt.text(35000 / 2, ymax + 0.015, str(CT.strftime('%Y-%m-%d %H:%M')),
                 fontsize=12, bbox=dict(facecolor='red', alpha=0.5),
                 horizontalalignment='center', verticalalignment='center')
        plt.xlabel('ZAR')
        plt.ylabel('Arb %')
        plt.ylim((ymin - 0.03, ymax + 0.02))
        plt.grid(which='both')
        # plt.show()
        plt.savefig('images/REV_ARB.png')
        plt.close()

    return (luno_btc_arb, ice3x_btc_arb, ice3x_ltc_arb,
            luno_btc_revarb, ice3x_btc_revarb, ice3x_ltc_revarb, zarusd)
def Candle_plot(Stock_data, stadate, enddate, trade_dates, trade_prices,
                buy_dates, buy_prices, sell_dates, sell_prices):
    # Take Stock_data, set the index, parse dates and sort by the date index
    Stock_data = Stock_data.set_index('date')
    Stock_data.index = pd.to_datetime(Stock_data.index)
    Stock_data = Stock_data.sort_index(axis=0, ascending=True)

    # Risk decision indicator
    Risk_Ratio = Risk_Ratio_Functions.Risk_Ratio(Stock_data)
    Risk_Ratio = Risk_Ratio[['Decision', 'MAJCQX']]

    # YCWW (strategy) indicator: construction
    DA = 6
    ZFXF_YCWW = pd.DataFrame()
    ZFXF_YCWW['LLV_min'] = Stock_data['close'].rolling(window=DA).min()
    ZFXF_YCWW['HHV_max'] = Stock_data['close'].rolling(window=DA).max()
    ZFXF_YCWW['ABS'] = np.abs(ZFXF_YCWW['HHV_max']/ZFXF_YCWW['LLV_min']-1)
    ZFXF_YCWW['MAX'] = ZFXF_YCWW['ABS'].rolling(window=DA).max()
    ZFXF_YCWW['DA'] = DA
    ZFXF_YCWW.loc[ZFXF_YCWW['MAX'] > 0.1, 'DA'] = 3
    ZFXF_YCWW['Tomorrow'] = Stock_data['close'].rolling(window=DA).mean()
    ZFXF_YCWW.loc[ZFXF_YCWW['DA'] == 3, 'Tomorrow'] = \
        Stock_data['close'].rolling(window=3).mean()
    ZFXF_YCWW['YCWW'] = ZFXF_YCWW['Tomorrow'].shift(1)

    # YCWW indicator: build buy/sell signal points
    ZFXF_YCWW['Buy_point'] = 0
    ZFXF_YCWW.loc[Stock_data['close'] > ZFXF_YCWW['YCWW'], 'Buy_point'] = 1
    ZFXF_YCWW['Sell_point'] = 0
    ZFXF_YCWW.loc[Stock_data['close'] < ZFXF_YCWW['YCWW'], 'Sell_point'] = -1
    ZFXF_YCWW['Points'] = ZFXF_YCWW['Buy_point'] + ZFXF_YCWW['Sell_point']

    # YCWW indicator: form the rise/hold/fall/empty (2, 1, -2, -1) signal points
    g = len(ZFXF_YCWW)
    ZFXF_YCWW['order'] = np.arange(0, g, 1)
    ZFXF_YCWW.loc[g-1:g, 'Points'] = -1
    ZFXF_YCWW['BS_point'] = ZFXF_YCWW['Points'].shift(1)
    ZFXF_YCWW['BS_point'] = ZFXF_YCWW['BS_point'].fillna(-1)
    ZFXF_YCWW['BS_point'] = (ZFXF_YCWW['Points'] - ZFXF_YCWW['BS_point'])/2 \
        + ZFXF_YCWW['Points']

    # Merge the two indicators
    Stock_Risk_Ratio_ZFXF_YCWW = pd.concat(
        [Stock_data, Risk_Ratio, ZFXF_YCWW[['YCWW', 'Tomorrow', 'BS_point']]],
        axis=1)

    # Restrict to the target date range
    Stock_Risk_Ratio_ZFXF_YCWW = Stock_Risk_Ratio_ZFXF_YCWW[
        Stock_Risk_Ratio_ZFXF_YCWW.index >= pd.to_datetime(stadate)]
    Stock_Risk_Ratio_ZFXF_YCWW = Stock_Risk_Ratio_ZFXF_YCWW[
        Stock_Risk_Ratio_ZFXF_YCWW.index <= pd.to_datetime(enddate)]
    k = len(Stock_Risk_Ratio_ZFXF_YCWW)

    # Set an id column as the x-axis data; dates are used for the tick labels
    Stock_Risk_Ratio_ZFXF_YCWW['date'] = Stock_Risk_Ratio_ZFXF_YCWW.index
    Stock_Risk_Ratio_ZFXF_YCWW = Stock_Risk_Ratio_ZFXF_YCWW.reset_index(drop=True)
    Stock_Risk_Ratio_ZFXF_YCWW['id'] = Stock_Risk_Ratio_ZFXF_YCWW.index + 1

    # Prepare the ochl data and the date-indexed data
    Stock_Risk_Ratio_ZFXF_YCWW_ochl = \
        Stock_Risk_Ratio_ZFXF_YCWW[['id', 'open', 'close', 'high', 'low']]
    Stock_Risk_Ratio_ZFXF_YCWW_date = Stock_Risk_Ratio_ZFXF_YCWW.set_index('date')

    # Set the times and prices of the trade points
    Trades = pd.DataFrame()
    Trades['price'] = trade_prices
    Trades['date'] = 0
    Trades['id'] = 0
    for t in range(len(trade_dates)):
        T_date = Stock_Risk_Ratio_ZFXF_YCWW.loc[
            Stock_Risk_Ratio_ZFXF_YCWW['date'] == pd.to_datetime(trade_dates[t]),
            'date'].values
        T_id = Stock_Risk_Ratio_ZFXF_YCWW.loc[
            Stock_Risk_Ratio_ZFXF_YCWW['date'] == pd.to_datetime(trade_dates[t]),
            'id'].values
        Trades.loc[t:t+1, 'date'] = T_date
        Trades.loc[t:t+1, 'id'] = T_id
    Trades = Trades.set_index('id')
    tp = trade_prices
    idt = Trades.index.values

    # Set the times and prices for the buy/sell connecting lines
    Buys = pd.DataFrame()
    Buys['price'] = buy_prices
    Buys['date'] = 0
    Buys['id'] = 0
    Sells = pd.DataFrame()
    Sells['price'] = sell_prices
    Sells['date'] = 0
    Sells['id'] = 0
    for p in range(len(buy_dates)):
        Buys_date = Stock_Risk_Ratio_ZFXF_YCWW.loc[
            Stock_Risk_Ratio_ZFXF_YCWW['date'] == pd.to_datetime(buy_dates[p]),
            'date'].values
        Buys_id = Stock_Risk_Ratio_ZFXF_YCWW.loc[
            Stock_Risk_Ratio_ZFXF_YCWW['date'] == pd.to_datetime(buy_dates[p]),
            'id'].values
        Buys.loc[p:p+1, 'date'] = Buys_date
        Buys.loc[p:p+1, 'id'] = Buys_id
    Buys = Buys.set_index('id')
    by = buy_prices
    idby = Buys.index.values
    for q in range(len(sell_dates)):
        Sells_date = Stock_Risk_Ratio_ZFXF_YCWW.loc[
            Stock_Risk_Ratio_ZFXF_YCWW['date'] == pd.to_datetime(sell_dates[q]),
            'date'].values
        Sells_id = Stock_Risk_Ratio_ZFXF_YCWW.loc[
            Stock_Risk_Ratio_ZFXF_YCWW['date'] == pd.to_datetime(sell_dates[q]),
            'id'].values
        Sells.loc[q:q+1, 'date'] = Sells_date
        Sells.loc[q:q+1, 'id'] = Sells_id
    Sells = Sells.set_index('id')
    sl = sell_prices
    idsl = Sells.index.values

    # Trade returns
    prf = pd.DataFrame()
    prf['sl'] = sell_prices
    prf['by'] = buy_prices
    # Return of each trade
    prf['prf'] = (prf['sl'] - prf['by']) / prf['by']
    # Convert sell dates to computable date2num values
    prf['sd'] = sell_dates
    prf['sd'] = pd.to_datetime(prf['sd']).astype(np.object)
    prf['sd'] = mdates.date2num(prf['sd'])
    # Convert buy dates to computable date2num values
    prf['bd'] = buy_dates
    prf['bd'] = pd.to_datetime(prf['bd']).astype(np.object)
    prf['bd'] = mdates.date2num(prf['bd'])
    # Holding period
    prf['ri'] = prf['sd'] - prf['bd']
    # Annualized return of each trade
    prf['Yprf'] = prf['prf'] / prf['ri'] * 365
    # Format as percentages
    prf['prf'] = prf['prf'].apply(lambda x: format(x, '.2%'))
    prf['Yprf'] = prf['Yprf'].apply(lambda x: format(x, '.2%'))
    prfs = prf['prf'].values
    Yprfs = prf['Yprf'].values

    # Data array for the candlestick chart,
    # data for the risk decision curves (Id, De), (Id, JCQX),
    # and date data (idx, date) used to label the x axis
    datarray = Stock_Risk_Ratio_ZFXF_YCWW_ochl.values
    De = Stock_Risk_Ratio_ZFXF_YCWW['Decision'].values
    JCQX = Stock_Risk_Ratio_ZFXF_YCWW['MAJCQX'].values
    Id = Stock_Risk_Ratio_ZFXF_YCWW['id'].values
    idx = np.arange(0, k, 2)
    date = Stock_Risk_Ratio_ZFXF_YCWW_date.index[idx].date

    # Data for the YCWW buy/sell signal points (idb, buy), (ids, sell)
    buy_point = Stock_Risk_Ratio_ZFXF_YCWW.loc[
        Stock_Risk_Ratio_ZFXF_YCWW['BS_point'] == 2.0, ['YCWW', 'BS_point']]
    sell_point = Stock_Risk_Ratio_ZFXF_YCWW.loc[
        Stock_Risk_Ratio_ZFXF_YCWW['BS_point'] == -2.0, ['YCWW', 'BS_point']]
    buy = buy_point['YCWW'].values.round(3)
    idb = buy_point.index + 1
    sell = sell_point['YCWW'].values.round(3)
    ids = sell_point.index + 1

    # The first connecting point should be a buy point; if the series starts
    # with a sell point, drop the first sell_point so the first buy_point
    # starts the line
    if buy_point.index[0] > sell_point.index[0]:
        sell_point = sell_point.iloc[1:, :]
    sell2 = sell_point['YCWW'].values.round(3)
    ids2 = sell_point.index + 1

    # YCWW indicator forecast values
    idp = Stock_Risk_Ratio_ZFXF_YCWW.index[-1]
    # Today's YCWW
    ptd = Stock_Risk_Ratio_ZFXF_YCWW['YCWW'].tail(1).values.round(4)
    ptd = float(ptd[0])
    # Tomorrow's YCWW
    ptmr = Stock_Risk_Ratio_ZFXF_YCWW['Tomorrow'].tail(1).values.round(4)
    ptmr = float(ptmr[0])

    # Plotting
    plt.figure(2, figsize=(14, 6), dpi=300)
    ax1 = plt.subplot(111)
    # Draw the risk decision curves first, at the bottom layer; add the grid
    plt.plot(Id, De, color='Orange', linewidth=2.0, alpha=0.8)
    plt.plot(Id, JCQX, color='Brown', linewidth=2.0, alpha=0.8)
    plt.axhline(y=20, color='cyan', linewidth=1.0, alpha=1.0)
    plt.axhline(y=60, color='yellow', linewidth=1.0, alpha=1.0)
    plt.axhline(y=70, color='gold', linewidth=1.0, alpha=1.0)
    plt.axvline(x=Id[0], color='LightPink', linestyle='--', linewidth=1.0, alpha=1.0)
    plt.axvline(x=Id[-1], color='MediumOrchid', linestyle='--', linewidth=1.0, alpha=1.0)
    plt.axvline(x=Id[-1]+1, color='RosyBrown', linestyle='--', linewidth=1.0, alpha=1.0)
    plt.grid(linestyle=':', alpha=0.5)
    # Set the x axis and the y axis of the risk decision curves
    plt.xticks(idx, date)
    plt.xticks(rotation=45)
    plt.ylim(10, 80, 10)
    # Set up candlestick_ochl
    ax2 = ax1.twinx()
    mplf.candlestick_ochl(ax2, datarray, width=0.8, colorup='red',
                          colordown='green', alpha=0.6)
    # Draw the buy/sell signal points
    plt.scatter(idb, buy, color='blue', s=50, alpha=1.0, zorder=2)
    plt.scatter(ids, sell, color='brown', s=50, alpha=1.0, zorder=3)
    # Draw the trade price points
    plt.scatter(idt, tp, color='black', s=80, alpha=1.0, zorder=4)
    plt.scatter(idt, tp, color='yellow', s=30, alpha=1.0, zorder=5)
    # Draw the forecast points
    plt.scatter(idp+1, ptd, color='black', s=80, alpha=1.0, zorder=6)
    plt.scatter(idp+1, ptd, color='cyan', s=30, alpha=1.0, zorder=7)
    plt.scatter(idp+2, ptmr, color='black', s=80, alpha=1.0, zorder=6)
    plt.scatter(idp+2, ptmr, color='Lime', s=30, alpha=1.0, zorder=7)
    # Annotate sell-point prices and label style
    for a, b in zip(ids, sell):
        ax2.text(a, b*1.01, b, ha='center', va='center',
                 bbox=dict(facecolor="blue", alpha=0.2))
    # Annotate buy-point prices and label style
    for c, d in zip(idb, buy):
        ax2.text(c, d*0.995, d, ha='center', va='center',
                 bbox=dict(facecolor="magenta", alpha=0.2))
    # Annotate trade-point prices and label style
    for e, f in zip(idt, tp):
        ax2.text(e, f*1.01, f, ha='center', va='center',
                 bbox=dict(facecolor="yellow", alpha=0.5))
    # Annotate forecast prices and label style
    ax2.text(idp+1, ptd*1.004, ptd, ha='center', va='center',
             bbox=dict(facecolor="cyan", alpha=0.2))
    ax2.text(idp+2, ptmr*1.004, ptmr, ha='center', va='center',
             bbox=dict(facecolor="Lime", alpha=0.2))
    # Connect predicted buy/sell points
    for m, n, p, q in zip(idb, ids2, buy, sell2):
        ax2.add_line(Line2D((m, n), (p, q), linewidth=3, color='magenta', zorder=1))
    # Connect executed buy/sell points
    for u, v, h, g in zip(idby, idsl, by, sl):
        ax2.add_line(Line2D((u, v), (h, g), linewidth=3, color='blue', zorder=1))
    # Trade returns and annualized returns
    for r, s, t in zip(idsl, sl, prfs):
        ax2.text(r, s*1.024, t, ha='center', va='center',
                 bbox=dict(facecolor="magenta", alpha=0.5))
    for x, y, z in zip(idsl, sl, Yprfs):
        ax2.text(x, y*1.037, z, ha='center', va='center',
                 bbox=dict(facecolor="cyan", alpha=0.5))

    return plt.show()
plt.xlabel("Date", fontsize=12) plt.ylabel("Price", fontsize=12) plt.show() df['Log_Returns'].plot(figsize=(12, 6)) plt.title("Log Returns", fontsize=18, fontweight='bold') plt.xlabel("Date", fontsize=12) plt.ylabel("Price", fontsize=12) plt.show() plt.figure(figsize=(16, 10)) plt.hist(df['Daily_Returns'].dropna(), bins=100, label='Daily Returns data') # Drop NaN plt.title("Histogram of Daily Returns", fontsize=18, fontweight='bold') plt.axvline(df['Daily_Returns'].mean(), color='r', linestyle='dashed', linewidth=2) # Shows the average line plt.xlabel("Probability", fontsize=12) plt.ylabel("Daily Returns", fontsize=12) plt.show() plt.figure(figsize=(16, 10)) Daily_Drawdown.plot() Negative_Drawdown.plot(color='r', grid=True) plt.title("Maximum Drawdown", fontsize=18, fontweight='bold') plt.xlabel("Date", fontsize=12) plt.ylabel("Price", fontsize=12) plt.show() DIV = pdr.get_data_yahoo(stock, start, end, actions='only') print(DIV)
# Display observed difference in converted rate
obs_diff

# j. What proportion of the **p_diffs** are greater than the actual difference observed in **ab_data.csv**?

# In[354]:

# create distribution under the null hypothesis
null_vals = np.random.normal(0, p_diffs.std(), p_diffs.size)

# In[355]:

# Plot null distribution
plt.hist(null_vals)
# Plot vertical line for observed statistic
plt.axvline(x=obs_diff, color='red')

# In[375]:

# Compute the proportion of p_diffs greater than the actual difference observed in ab_data.csv
(null_vals > obs_diff).mean()

# k. In words, explain what you just computed in part **j.**. What is this value called in scientific studies? What does this value mean in terms of whether or not there is a difference between the new and old pages?

# With a Type I error rate of 5%, and p_old > alpha, we fail to reject the null.
# Therefore, the data show, with a Type I error rate of 0.05, that the old page has a higher probability of conversion than the new page.
# P-value: the probability of observing our statistic, or a more extreme one, under the null hypothesis.

# l. We could also use a built-in to achieve similar results. Though using the built-in might be easier to code, the above portions are a walkthrough of the ideas that are critical to correctly thinking about statistical significance. Fill in the below to calculate the number of conversions for each page, as well as the number of individuals who received each page. Let `n_old` and `n_new` refer to the number of rows associated with the old page and new pages, respectively.
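# A minimal sketch of the built-in route, under assumptions: it presumes the
# cleaned ab_data.csv lives in a DataFrame `df2` with `landing_page` and
# `converted` columns (hypothetical names inferred from the text above, not
# confirmed here).

import statsmodels.api as sm

convert_old = df2.query("landing_page == 'old_page'")['converted'].sum()
convert_new = df2.query("landing_page == 'new_page'")['converted'].sum()
n_old = df2.query("landing_page == 'old_page'").shape[0]
n_new = df2.query("landing_page == 'new_page'").shape[0]

# two-proportion z-test; 'larger' tests whether the new page converts better
z_score, p_value = sm.stats.proportions_ztest(
    [convert_new, convert_old], [n_new, n_old], alternative='larger')
print(z_score, p_value)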
locale.setlocale(locale.LC_ALL, "")

x = [1, 2, 3, 4, 5, 6, 7]
x_labels = [1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2]
y = [1.875, 2.5, 3.625, 3.875, 4.875, 5.0, 6.25]
n = [1.875, 2.5, 3.625, 3.875, 4.875, 5.0, 6.25]
y_labels = [locale.format("%.3f", 1.875), locale.format("%.1f", 2.5),
            locale.format("%.3f", 3.625), locale.format("%.3f", 3.875),
            locale.format("%.3f", 4.875), locale.format("%.1f", 5.0),
            locale.format("%.2f", 6.25)]

labels = ['MK-100', 'MK-10', 'MK-4', 'NaiveBayes', 'OFS-10', 'OFS-100', 'OFS-4']

# You can specify a rotation for the tick labels in degrees or with keywords.
plt.xticks(x, labels, rotation=30)
plt.axvline(x=1, linewidth=2, color='r', ymin=0.0, ymax=0.45)
plt.axvline(x=2, linewidth=2, color='r', ymin=0.0, ymax=0.51)
plt.axvline(x=3, linewidth=2, color='r', ymin=0.09, ymax=0.62)
plt.axvline(x=4, linewidth=2, color='r', ymin=0.12, ymax=0.65)
plt.axvline(x=5, linewidth=2, color='r', ymin=0.22, ymax=0.75)
plt.axvline(x=6, linewidth=2, color='r', ymin=0.23, ymax=0.76)
plt.axvline(x=7, linewidth=2, color='r', ymin=0.36, ymax=0.88)
plt.plot(x, y, 'ko')

legend_elements = [Line2D([0], [0], marker='o', color='k',
                          label='Mean Ranking', markersize=8, linestyle=''),
                   Line2D([0], [0], color='r', linewidth=3,
                          label='Critical Difference')]
        eps_1[i, :], sigma_N[i, :], sigma_T[i, :])
    sigma_kk[i + 1] = trace(sigma[i, :])

#======================
# plotting
#======================
# stress-strain
plt.subplot(221)
plt.plot(eps_1[1:, 0, 0], sigma[1:, 0, 0], color='k',
         linewidth=1, label='sigma_11_(monotonic)')
plt.title(r'$\sigma - \epsilon$')
plt.xlabel('Strain')
plt.ylabel('Stress(MPa)')
plt.axhline(y=0, color='k', linewidth=1, alpha=0.5)
plt.axvline(x=0, color='k', linewidth=1, alpha=0.5)
plt.legend()

plt.subplot(222)
plt.plot(eps_1[:, 0, 0], EN[1:, 0], color='k',
         linewidth=1, label='sigma_11_(monotonic)')
plt.title(r'$\sigma - \epsilon$')
plt.xlabel('Strain')
plt.ylabel('Stress(MPa)')
plt.axhline(y=0, color='k', linewidth=1, alpha=0.5)
plt.axvline(x=0, color='k', linewidth=1, alpha=0.5)
plt.legend()

plt.show()
ax = fig.add_subplot(422)
plt.plot(trange, p_forget, lw=2)
ax.set_title('e) forget-node for evidence networks')
ax.set_ylim([-0.1, 1.1])
plt.setp(ax.get_xticklabels(), visible=False)

ax = fig.add_subplot(423)
plt.plot(trange, p_diff, lw=2)
plt.legend(labels=diff_labels)
ax.set_title('b) pairwise difference of x-positions')
plt.setp(ax.get_xticklabels(), visible=False)

ax = fig.add_subplot(424)
plt.plot(trange, p_evidence, lw=2)
plt.legend(labels=labels, loc=2)
plt.axvline(time[middle], color="k")
ax.set_title('f) evidence for target object')
plt.setp(ax.get_xticklabels(), visible=False)

ax = fig.add_subplot(425)
plt.plot(trange, p_neg_min, lw=2)
ax.set_title('c) negative minimum ensemble')
ax.set_ylim([-0.1, 1.1])
plt.setp(ax.get_xticklabels(), visible=False)

ax = fig.add_subplot(426)
plt.plot(trange, p_evidence_left, lw=2)
plt.legend(labels=labels, loc=2)
plt.axvline(time[middle], color="k")
ax.set_title('g) evidence for left neighbour object')
plt.setp(ax.get_xticklabels(), visible=False)
                                   absolute_sigma=True, p0=[150, 28, -30, 20])
err_bragg = np.sqrt(np.diag(pcovBragg))

plt.plot(theta_bragg, N_bragg, 'kx', label='Measured values')
x_plot = np.linspace(26, 30, 1000)
plt.plot(x_plot, Gauß(x_plot, *popBragg), linestyle='-', label='Fit curve')
plt.xlabel(r'Angle $\theta\;[\degree]$')
plt.ylabel(r'Counts [s$^{-1}$]')
ymax_bragg = np.max(N_bragg)
xmax_bragg = theta_bragg[22]
plt.axvline(x=xmax_bragg, color='r', linestyle='--', label='Maximum')
plt.legend(loc="best")
plt.grid()
plt.tight_layout()  # call the function; a bare `plt.tight_layout` does nothing
plt.savefig('build/plotBragg.pdf')
plt.close()

a = ufloat(popBragg[0], err_bragg[0])
b = ufloat(popBragg[1], err_bragg[1])
c = ufloat(popBragg[2], err_bragg[2])
d = ufloat(popBragg[3], err_bragg[3])

theta_bragg_err = abs(xmax_bragg - theta_bragg_lit) / theta_bragg_lit

# tex file for theta_bragg_err
diff = (maxv - minv)
if form == 1:
    minv = minv - diff
elif form == 2:
    minv = minv - diff/4

# LINES
if args.l:
    wavelength_range = (wavlim[0] * u.angstrom, wavlim[1] * u.angstrom)
    elelist = args.e
    i = 0
    for ele in elelist:
        try:
            linelist = AtomicLineList.query_object(wavelength_range,
                                                   wavelength_type='air',
                                                   wavelength_accuracy=20,
                                                   element_spectrum=ele)
            for l in linelist['LAMBDA AIR ANG']:
                try:
                    # np.float is deprecated; the built-in float suffices
                    plt.axvline(float(l)*(1.0-rv/c), color="C"+str(i), ls="dotted")
                    if form == 1:
                        ax.text(l*(1.0-rv/c), diff*0.0+minv,
                                " "+ele+"\n "+str(l)+"$\\AA$",
                                color="C"+str(i), rotation="vertical",
                                verticalalignment="bottom")
                    elif form == 2:
                        ax.text(l*(1.0-rv/c), diff*0.0+minv, " "+ele,
                                color="C"+str(i), rotation="vertical",
                                verticalalignment="bottom")
                except:
                    print("Ignore ", ele, l)
        except:
            print("Failed for the element ", ele)
        i = i+1
###########################

plt.ylim(np.max([minv, 0.0]), maxv)
plt.savefig(object_name+"_spec.pdf", bbox_inches="tight", pad_inches=0.0)
plt.savefig(object_name+"_spec.png", bbox_inches="tight", pad_inches=0.0)
plt.show()
def __init__(self, G, n=2000, portion=0.02, vectors=[13],
             spectrum_disp=False, cut_disp=False, vectors_disp=False,
             k_means_disp=False):
    iters = []
    spectrum = []
    accs = []
    number_of_edges = []

    n = G.number_of_nodes()
    # laplacian_matrix = nx.normalized_laplacian_matrix(G)
    laplacian_matrix = nx.adjacency_matrix(G)
    vals, vecs = sparse.linalg.eigs(laplacian_matrix.asfptype(),
                                    k=int(portion * (G.n_1 + G.n_2)),
                                    which='LM')
    ground_labels = nx.get_node_attributes(G, 'ground_label')

    optimal_val = n * (G.real_a - G.real_b)
    step = n/400
    if len(vectors) <= 0:
        vec_idxs = [i for i in range(int(n * portion))
                    if optimal_val - 3 * step < vals[i] < optimal_val + 3 * step]
    else:
        vec_idxs = vectors

    exact_spectrum = []
    for k in range(n):
        j = 1
        temp_sum = 0
        while j < G.real_b * n:
            temp_sum += np.cos(2 * np.pi * k * j / n)
            j += 1
        j = int((G.real_b * n + 1) / 2)
        while j < G.real_a * n / 2:
            temp_sum += np.cos(4 * np.pi * k * j / n)
            j += 1
        exact_spectrum += [2 * temp_sum]

    if spectrum_disp:
        sns.set()
        plt.rcParams['figure.figsize'] = [14, 7]
        asymp_spectrum = [(np.sin(2 * np.pi * k * G.real_a)
                           + np.sin(2 * np.pi * k * G.real_b)) * n / (2 * np.pi * k)
                          for k in range(int(n))]
        plt.scatter(vals, [1 for i in range(len(vals))],
                    marker='o', facecolors='none', edgecolors='b')
        plt.scatter(asymp_spectrum, [1 for i in range(int(n))],
                    marker='o', facecolors='none', edgecolors='r')
        plt.scatter(exact_spectrum, [1 for i in range(int(n))],
                    marker='o', facecolors='none', edgecolors='g')
        plt.axvline(x=optimal_val, linewidth=2, color='black')
        plt.xlabel(r"spectrum")
        plt.ylabel(r"iterations")
        plt.show()

    if cut_disp:
        for i in vec_idxs:
            vector = vecs[:, i]
            vector = vector.astype('float64')
            labels_pred_spectral = checkSign(vector)
            accuracy = max(accuracy_score(labels_pred_spectral, G.ground_labels),
                           1 - accuracy_score(labels_pred_spectral, G.ground_labels))
            accs += [accuracy]

            labels_dict = dict(zip(list(G.nodes), labels_pred_spectral))
            nx.set_node_attributes(G, labels_dict, "label")

            sns.distplot(vector, kde=False, bins=50)
            plt.show()

            sns.distplot([G.nodes[node]["coordinate"] for node in G.nodes
                          if G.nodes[node]['label'] == 0],
                         label="Cluster 0", kde=False, bins=50)
            sns.distplot([G.nodes[node]["coordinate"] for node in G.nodes
                          if G.nodes[node]['label'] == 1],
                         label="Cluster 1", kde=False, bins=50)
            plt.title("i = " + str(i) + ", eigenvalue = " + str(vals[i])
                      + ", accuracy = " + str(accuracy))
            plt.show()

            coordinates0 = [G.nodes[node]["coordinate"] for node in G
                            if G.nodes[node]['ground_label'] == 0]
            coordinates1 = [G.nodes[node]["coordinate"] for node in G
                            if G.nodes[node]['ground_label'] == 1]
            plt.scatter(coordinates0, vector[:int(n/2)])
            plt.scatter(coordinates1, vector[int(n/2):])
            plt.title("i = " + str(i) + ", eigenvalue = " + str(vals[i])
                      + ", accuracy = " + str(accuracy))
            plt.show()

            dist_dict = nx.shortest_path_length(G, source=0)
            dist = [dist_dict[x] for x in sorted(dist_dict)]
            plt.scatter(dist[1:int(n/2)], vector[1:int(n/2)])
            plt.scatter(dist[int(n/2):], vector[int(n/2):])
            plt.title("i = " + str(i) + ", eigenvalue = " + str(vals[i])
                      + ", accuracy = " + str(accuracy))
            plt.show()

    if vectors_disp:
        accs = []
        c_norms = []
        spectra = []
        for i in vec_idxs:
            vector = vecs[:, i]
            vector = vector.astype('float64')
            km = k_means([vector], n)
            labels_pred = dict(zip(list(G.nodes), km['labels']))
            accuracy = G.GetAccuracy(labels_pred)
            accs += [accuracy]
            spectra += [vals[i]]
            c_norms += [np.linalg.norm(sum(km['centers']))]
        sns.set()
        plt.plot(vec_idxs, accs, marker='o')
        plt.xlabel("Order of eigenvector")
plt.ylabel("Accuracy") plt.show() # plt.plot(vec_idxs, c_norms, marker='o', label = 'Iteration ' + str(i)) # plt.show() plt.plot(vec_idxs, spectra, marker='o') plt.axhline(y = optimal_val, linewidth = 2, color='black') plt.show() if k_means_disp: k = len(vec_idxs) accs = [] c_norms = [] inerts = [] min_dists = [] sum_dists = [] balances = [] for j in range(1,k+1): # print([vec_idxs[i] for i in range(j)]) # km_vectors = [vecs[:,vectors[1]], vecs[:,vectors[j]]] km_vectors = [vecs[:,vec_idxs[i]] for i in range(j)] km = k_means(km_vectors, n) accuracy = max(accuracy_score(km['labels'], G.ground_labels), 1 - accuracy_score(km['labels'], G.ground_labels)) accs += [accuracy] c_norms += [np.linalg.norm(sum(km['centers']), ord = 2)] inerts += [km['inertia']/j] min_dists += [min(km['dists'])] sum_dists += [sum(km['dists'])] balances += [abs(sum(km['labels']) - n/2)] plt.plot(vec_idxs[:k], accs, marker='o') plt.show() plt.plot(vec_idxs[:k], c_norms, marker='o', color = 'red') plt.show() # plt.plot(vec_idxs[:k], min_dists, marker='o', color = 'red') # plt.show() # plt.plot(vec_idxs[:k], sum_dists, marker='o', color = 'green') # plt.show() plt.plot(vec_idxs[:k], balances, marker='o', color = 'orange') plt.show() # k_means_vectors = [vecs[:,i] for i in vec_idxs] # labels_k_means = k_means(k_means_vectors, n)['labels'] # print(k_means(k_means_vectors, n)['labels'][:100]) # accuracy = max(accuracy_score(labels_k_means, G.ground_labels), 1 - accuracy_score(labels_k_means, G.ground_labels)) # print("Total accuracy after k-means = %.3f" % accuracy) self.n_edges = number_of_edges self.spectrum = vals self.accs = accs
                          rotation=0, color='gray', horizontalalignment='right')
            sns.violinplot(stat_slope, color='mistyrose',
                           linewidth=0.85, inner="box")
        else:
            ax.set_ylabel(st, size=6, rotation=0, horizontalalignment='right')
            sns.violinplot(stat_slope, color='lightgray',
                           linewidth=0.85, inner="box")

        plt.axvline(x=slopeO, color='royalblue', linestyle='--', linewidth=2)
        ax.get_yaxis().set_label_coords(-0.01, -0.03)
        ax.set_xlim([-1.25, 1.25])
        ax.yaxis.set_ticks_position('none')
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        if num != len(microbe_list) - 1:
            plt.tick_params(axis='x', labelbottom='off', bottom='off', top='off')
            ax.spines['bottom'].set_visible(False)
        if num == len(microbe_list) - 1:
            ax.set_xlabel('Slope', size=7)
            plt.tick_params(axis='x', top='off')

    print('sample size for this host = ', sample_size)
    print('----------------------------------------------------------')
def save_influmax_labels_values(subject, modality, atlas, event_name, connectivity_method, band, inverse_method='dSPM', extract_mode='mean_flip'): file_name = '{}_{}_{}_{}_{}'.format(modality, event_name, band, graph_func, connectivity_method) con_fol = op.join(MMVT_DIR, subject, 'connectivity') event_vals = np.load(op.join(con_fol, '{}.npy'.format(file_name))) max_event_vals = np.max(event_vals, axis=0) max_t = np.argmax(max_event_vals) data = event_vals # [:, max_t] fol = utils.make_dir(op.join(MMVT_DIR, subject, 'labels', 'labels_data')) d = np.load( op.join( con_fol, '{}_{}_{}_{}.npz'.format(modality, event_name, band, connectivity_method))) labels_names = d['labels'] np.savez(op.join(fol, '{}_mean.npz'.format(file_name)), names=labels_names, atlas=atlas, data=data, title='influmax', data_min=np.min(data), data_max=np.max(data), cmap='YlOrRd') labels_data_template = op.join(MMVT_DIR, subject, 'meg', 'labels_data_{}'.format(event_name)) labels_data_template += '_{}_{}_{}_{}_{}.npz' # task, atlas, inverse_method, em, hemi for hemi in utils.HEMIS: hemi_inds = [ ind for (ind, label_name) in enumerate(labels_names) if label_name.endswith(hemi) ] hemi_data = {extract_mode: data[hemi_inds]} meg.save_labels_data(hemi_data, hemi, labels_names[hemi_inds], atlas, ['epilepsy'], [extract_mode], [inverse_method], labels_data_template, task='epilepsy') meg_labels_fname = op.join( MMVT_DIR, subject, 'meg', 'labels_data_{}-epilepsy-{}-{}-{}_{}_{}_{}.npz'.format( subject, inverse_method, modality, event_name, atlas, extract_mode, hemi)) meg_data = np.load(meg_labels_fname) label_name = 'middletemporal_4-rh' if label_name not in meg_data['names']: continue ind = list(meg_data['names']).index(label_name) label_data = meg_data['data'][ind] t_axis = np.linspace(-2, 5, label_data.shape[0]) plt.plot(t_axis, label_data) plt.axvline(x=0, linestyle='--', color='k') plt.xlabel('Time(s)', fontsize=18) plt.ylabel(inverse_method, fontsize=18) plt.savefig( op.join(MMVT_DIR, subject, modality, '{}.jpg'.format(label_name))) plt.close()
for entry in plotdata_4["20"]: values.append(plotdata_4["20"][entry]) list_of_datetimes.append( datetime.datetime.strptime(entry, datetimeFormat).date()) dates = matplotlib.dates.date2num(list_of_datetimes) matplotlib.pyplot.plot_date(dates, values, '-', label="Germany", color="#E5C35E") plt.xlabel("Time", fontsize=13) plt.ylabel("Share of users having a streak of length > 20 days", fontsize=13) plt.axvline(x=datetime.datetime.strptime("2016-05-19", datetimeFormat).date(), color='#D3685D', label="Design change", linewidth=2.5) plt.axvline(x=datetime.datetime.strptime("2016-12-25", datetimeFormat).date(), color='#8C8C8C', ls=":", label="Christmas") plt.axvline(x=datetime.datetime.strptime("2017-12-25", datetimeFormat).date(), color='#8C8C8C', ls=":") plt.axvline(x=datetime.datetime.strptime("2015-12-25", datetimeFormat).date(), color='#8C8C8C', ls=":") plt.axvline(x=datetime.datetime.strptime("2016-07-04", datetimeFormat).date(), color="#8C8C8C", ls=":", label="Independence Day")
def plot_crosscorr(scale):
    """Plot the cross-correlation coefficient as a function of k for neutrinos and DM."""
    cc_fast_sims = ["b300p512nu0.4p1024", ]  # ,"b300p512nu0.4p"]
    # Shot noise = box volume / number of tracers.
    # The second entry previously read (512 - 46462529), which is negative;
    # the particle count should be subtracted from 512**3.
    shots = {"b300p512nu0.4p1024": 300**3 / (1024**3 - 371714852),
             "b300p512nu0.4p": 300**3 / (512**3 - 46462529)}
    for ss in cc_fast_sims:
        genpk_neutrino = os.path.join(datadir, ss, "output/power-fast-nu-%.4f.txt" % scale)
        (_, pk_nu) = load_genpk(genpk_neutrino)
        genpk_dm = os.path.join(datadir, ss, "output/power-DM-%.4f.txt" % scale)
        (_, pk_dm) = load_genpk(genpk_dm)
        genpk_cross = os.path.join(datadir, ss, "output/power-fast-nuDM-%.4f.txt" % scale)
        (k_cross, pk_cross) = load_genpk(genpk_cross)
        shot = shots[ss] * np.ones_like(pk_nu)
        # shot-noise-subtracted denominator; floor non-positive values
        pksq = pk_dm * (pk_nu - shot)
        pksq[np.where(pksq <= 0)] = shots[ss] * 0.03
        corr_coeff = pk_cross / np.sqrt(pksq)
        ii = np.where(k_cross > 1)
        # corr_coeff[ii] = smooth(corr_coeff[ii])
        plt.semilogx(k_cross, corr_coeff, ls="-.",
                     label="PARTICLE 1024 (fast)", color="blue")
    cc_sims = ["b300p512nu0.4hyb850", "b300p512nu0.4p1024"]
    shots = {"b300p512nu0.4p1024": (300 / 1024)**3,
             "b300p512nu0.4hyb850": (300 / 512)**3,
             "b300p512nu0.4p": (300 / 512)**3}
    for ss in cc_sims:
        genpk_neutrino = os.path.join(datadir, ss, "output/power-nu-%.4f.txt" % scale)
        (_, pk_nu) = load_genpk(genpk_neutrino)
        genpk_dm = os.path.join(datadir, ss, "output/power-DM-%.4f.txt" % scale)
        (_, pk_dm) = load_genpk(genpk_dm)
        genpk_cross = os.path.join(datadir, ss, "output/power-DMnu-%.4f.txt" % scale)
        (k_cross, pk_cross) = load_genpk(genpk_cross)
        shot = shots[ss] * np.ones_like(pk_nu)
        pksq = pk_dm * (pk_nu - shot)
        pksq[np.where(pksq <= 0)] = shots[ss] * 0.03
        corr_coeff = pk_cross / np.sqrt(pksq)
        ii = np.where(k_cross > 1)
        corr_coeff[ii] = smooth(corr_coeff[ii])
        plt.semilogx(k_cross, corr_coeff, ls=lss[ss], label=labels[ss], color=colors[ss])
    plt.axvline(x=1.2, ls="-", color="black")
    plt.ylim(0.9, 1.02)
    plt.xlim(0.01, 10)
    plt.legend(frameon=False, loc='lower left', fontsize=12)
    plt.xlabel(r"k (h/Mpc)")
    plt.ylabel(r"Cross-correlation coefficient")
    plt.tight_layout()
    plt.savefig(os.path.join(savedir, "corr_coeff-" + munge_scale(scale) + ".pdf"))
    plt.clf()
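# A self-contained illustration (not the genpk pipeline above) of the
# quantity being plotted: the cross-correlation coefficient
# r(k) = P_cross / sqrt(P_dm * P_nu), which approaches 1 when two fields
# share the same modes. Synthetic 1-D Gaussian fields stand in for the
# simulation outputs here.
import numpy as np

rng = np.random.default_rng(0)
npts = 4096
common = rng.standard_normal(npts)
dm_field = common + 0.1 * rng.standard_normal(npts)   # well-resolved tracer
nu_field = common + 0.5 * rng.standard_normal(npts)   # noisier tracer

f_dm = np.fft.rfft(dm_field)
f_nu = np.fft.rfft(nu_field)
pk_dm = np.abs(f_dm)**2
pk_nu = np.abs(f_nu)**2
pk_cross = (f_dm * np.conj(f_nu)).real

r = pk_cross / np.sqrt(pk_dm * pk_nu)
print("mean r(k) =", r[1:].mean())   # close to, but below, 1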
def main(): # Choose what scenario to test #irreps = GT.irrep_list() irreps = ['A1+'] shell = 'all' l = 'both' E_MIN = 4.24 E_MAX = 4.24 L = 5 show_eigs = True find_root = False E0 = 4.25 # initial guess for root-finder order = 1 # polynomial order used for fit new_plots = False temp_plots = False (Ymin, Ymax) = (-1e6, 1e6) (ymin, ymax) = (-1e4, 1e4) (xmin, xmax) = (4.22, 4.5) # only uncomment/edit if you want to zoom in # Separate energies by # of active shells breaks = defns.shell_breaks(E_MAX, L) breaks.append(E_MAX) for b in range(1, len(breaks) - 1): # skip first window Emin = breaks[b] + 1e-8 Emax = breaks[b + 1] - 1e-8 if breaks[b + 1] != E_MAX else breaks[b + 1] if Emax < E_MIN or Emin > E_MAX: continue else: Emin = max(Emin, E_MIN) Emax = min(Emax, E_MAX) ######################################################### # Define parameters (necessary for several functions) # K2 parameters # a0=-10; r0=0.5; P0=0.5; a2=-1 # K2_dir = 'a0=m10_r0=0.5_P0=0.5_a2=m1/' #a0=0.1; r0=0; P0=0; a2=0 #a0=0.1; r0=0; P0=0; a2=0.1 a0 = 0.1 r0 = 0 P0 = 0 a2 = 0.3 #a0=0.1; r0=0; P0=0; a2=0.5 #a0=0.1; r0=0; P0=0; a2=0.7 #a0=0.1; r0=0; P0=0; a2=0.9 #a0=0.1; r0=0; P0=0; a2=1 #a0=0; r0=0; P0=0; a2=0.1 #a0=0; r0=0; P0=0; a2=0.3 #a0=0; r0=0; P0=0; a2=0.5 #a0=0; r0=0; P0=0; a2=0.7 #a0=0; r0=0; P0=0; a2=0.9 #a0=0; r0=0; P0=0; a2=1 # F2_KSS parameter alpha = 0.5 # so far I've ALWAYS set alpha=0.5 # Data & plot directories K2_dir = 'L=' + str(L) + '/a0=' + str(a0) + '_r0=' + str( r0) + '_P0=' + str(P0) + '_a2=' + str(a2) + '/' data_dir = '../Data/' + K2_dir plot_dir = '../Plots/' + K2_dir if not os.path.exists(data_dir): os.makedirs(data_dir) if not os.path.exists(plot_dir): os.makedirs(plot_dir) # Total CM energy & lattice size (lists) E_list = [ 2.9, 2.95, #2.99,2.995,2.999,2.9999, 3.0001, #3.001,3.005,3.01, #3.0318609166490167, # A1+ single root 3.03186092, 3.05, 3.1, 3.15, #3.16303178, #3.1630317882, # T1+ triple root 3.16303179, 3.2, 3.25, 3.3, #3.30679060158175, # T2- triple root 3.30679061, 3.35, 3.4, 3.45, 3.5, 3.55, 3.6, 3.61, 3.62, 3.63, 3.65, 3.67, 3.68, 3.69, 3.7, 3.75, 3.8, 3.81, 3.82, 3.827, 3.82755621, # just below where (1,1,0) turns on # ################################# # Begin 3-shell regime 3.84, 3.85, 3.86, 3.87, 3.88, 3.89, #3.8950494797495034, # T2+ triple root 3.89504948, 3.9, 3.91, 3.92, 3.93, #3.9360802243295736, # E- double root 3.93608023, 3.94, #3.9485642787554895, # A2- single root 3.94856428, 3.95, 3.96, #3.963,3.965,3.967,3.96798888, #3.9679888998546713, # old root of Ftilde before removing q's #3.96798890,3.968,3.969, 3.97, #3.973,3.975,3.977, 3.98, 3.99, 4.0, 4.05, 4.1, #4.105402464984292, # T2- triple root 4.10540247, 4.15, 4.16, 4.17, 4.19, 4.195, 4.2, 4.205, #4.209892475540663, # T1+ triple root 4.20989248, 4.21, 4.211, 4.2112, 4.2114, 4.2116, 4.2117, 4.2118, 4.21182, 4.21184, 4.21186, 4.21188, 4.2119, 4.21191, 4.21192, 4.211925, 4.21193, 4.211933, 4.211935, #4.2119352,4.2119354,4.2119356,4.2119358, 4.211936, #4.2119361, 4.2119362, 4.2119364, 4.2119366, 4.2119368, 4.211937, 4.2119372, 4.2119374, 4.2119376, 4.2119378, 4.211938, 4.21193816, 4.21193817, #4.211938171368993, # non-interacting energy E1 ############# 4.21193818, 4.2119385, 4.211939, 4.21193948, # A1+ single root for a0=0, a2=0.1 ? 4.21194, 4.211941, 4.211942, 4.211944, 4.211946, 4.211948, 4.21195, 4.211955, 4.21196, #4.21196290, # E+ double root for a0=0, a2=0.1 ? 
4.21196291, 4.211965, 4.21197, 4.211975, 4.21198, 4.21199, 4.212, 4.2121, 4.2122, 4.2125, 4.213, 4.2135, 4.214, 4.2145, 4.215, 4.22, 4.23, 4.24, #4.2421632582173645, # E+ double root for a0=0.1, a2=0 (r0=P0=0) 4.24216326, 4.25, 4.27, #4.2784624639738, # A1+ single root for a0=0.1, a2=0 (r0=P0=0) 4.27846247, 4.3, 4.32, 4.33, #4.33374164, #4.333741640225551, # A1+ single root 4.33374165, 4.335, 4.338, 4.34, #4.341716880828459, # T1- triple root 4.34171689, 4.342, 4.345, 4.35, 4.37, 4.4, 4.43, #4.441146045889443, # T2- triple root 4.44114605, 4.45, #4.486997310056035, # T1+ triple root 4.48699732, 4.5, 4.55, 4.58101788 # just below where (1,1,1) turns on ] E_list = [x for x in E_list if Emin <= x <= Emax] L_list = [5.0] # print('E='+str(E)+', L='+str(L)+'\n') # print('Shells: ', defns.shell_list(E,L), '\n') # print('Matrix dimension: ', len(defns.list_nnk(E,L))*6, '\n') #################################################################################### # Load F3 matrix from file if exists, otherwise compute from scratch & save to file for L in L_list: F3_list = [] for E in E_list: if a2 == 0: datafile = data_dir + 'F3_00_E' + str(E) + '_L' + str( L) + '.dat' elif a0 == 0: datafile = data_dir + 'F3_22_E' + str(E) + '_L' + str( L) + '.dat' else: datafile = data_dir + 'F3_E' + str(E) + '_L' + str( L) + '.dat' try: with open(datafile, 'rb') as fp: F3 = pickle.load(fp) #F3 = pickle.loads(fp.read()) print('F3 loaded from ' + datafile + '\n') except IOError: print(datafile + ' not found; computing F3 from scratch...') t0 = time.time() if a2 == 0: F3 = F3_mat.F3mat00(E, L, a0, r0, P0, a2, alpha) elif a0 == 0: F3 = F3_mat.F3mat22(E, L, a0, r0, P0, a2, alpha) else: F3 = F3_mat.F3mat(E, L, a0, r0, P0, a2, alpha) t1 = time.time() print('Calculation complete (time:', t1 - t0, ')') with open(datafile, 'wb') as fp: pickle.dump(F3, fp, protocol=4, fix_imports=False) print('F3 saved to ' + datafile + '\n') F3_list.append(F3) ################################################### # Create list of inputs needed by several functions inputs = [L, a0, r0, P0, a2, alpha] # Free energies E_free = defns.E_free_list(L, 3, 1).values() E_free = [e for e in E_free if Emin < e < Emax] ################################################## # Project onto chosen irrep for irrep in irreps: # General irrep if irrep in GT.irrep_list(): I = irrep inputs.append(I) if a2 == 0: if sum([ GT.subspace_dim_o_l(s, I, 0) for s in defns.shell_list(Emax, L) ]) == 0: print("0-dim l'=l=0 subspace for " + I + ' for E<' + str(round(Emax, 4))) continue irrep_eigs_array_list = AD.F3i_00_I_eigs_list( E_list, L, F3_list, I) irrep_eigs_array_list_flip = AD.F3i_00_I_eigs_list( E_list, L, F3_list, I, flip=True) f_eigs = F3_mat.F3i_00_I_eigs elif a0 == 0: if sum([ GT.subspace_dim_o_l(s, I, 2) for s in defns.shell_list(Emax, L) ]) == 0: print("0-dim l'=l=2 subspace for " + I + ' for E<' + str(round(Emax, 4))) continue irrep_eigs_array_list = AD.F3i_22_I_eigs_list( E_list, L, F3_list, I) irrep_eigs_array_list_flip = AD.F3i_22_I_eigs_list( E_list, L, F3_list, I, flip=True) f_eigs = F3_mat.F3i_22_I_eigs else: if sum([ GT.subspace_dim_o(s, I) for s in defns.shell_list(Emax, L) ]) == 0: print('0-dim subspace for ' + I + ' for E<' + str(round(Emax, 4))) continue irrep_eigs_array_list = AD.F3i_I_eigs_list( E_list, L, F3_list, I) irrep_eigs_array_list_flip = AD.F3i_I_eigs_list( E_list, L, F3_list, I, flip=True) f_eigs = F3_mat.F3i_I_eigs if show_eigs == True: for i in range(len(E_list)): E = E_list[i] x = irrep_eigs_array_list[i] #x = [y for y in x if abs(y)<1e2] 
print(E, x)
                    # print(E, min(x, key=abs))

            if find_root:
                root = AD.root_finder_secant(E_list, irrep_eigs_array_list,
                                             f_eigs, inputs, E0, order)
                print(root)
                # irrep_roots_list = AD.root_finder(E_list, irrep_eigs_array_list, f_eigs, inputs)
                # irrep_roots_file = data_dir + I + '_roots_L=' + str(L) + '.dat'
                # with open(irrep_roots_file, 'w') as fp:
                #     fp.write(str(irrep_roots_list))

        #################################################
        # Full matrix, no projections (all eigenvalues present --> very messy plots)
        elif irrep == 'full':
            eigs_array_list = AD.F3i_eigs_list(E_list, L, F3_list)
            # roots_list = AD.root_finder(E_list, eigs_array_list, F3_mat.F3i_eigs, inputs)
            # I can't see any reason why we'd ever want to do this
            # print(roots_list)
            # roots_file = data_dir + 'roots_L=' + str(L) + '.dat'
            # with open(roots_file, 'w') as fp:
            #     fp.write(str(roots_list))

        #################################################
        # Plot F3i eigenvalues vs. E
        # (note: the plot below uses irrep_eigs_array_list, which is only
        # defined for projected irreps, not for irrep == 'full')
        if new_plots or temp_plots:
            plotfile1 = plot_dir + str(irrep) + '_' + str(b + 1) + 'shells.pdf'
            plotfile2 = plot_dir + str(irrep) + '_' + str(b + 1) + 'shells_zoom.pdf'
            if len(irrep) == 2:
                irrep_tex = '$' + irrep[0] + '^' + irrep[1] + '$'
            elif len(irrep) == 3 and irrep != 'iso':
                irrep_tex = '$' + irrep[0] + '_' + irrep[1] + '^' + irrep[2] + '$'
            else:
                irrep_tex = irrep  # fall back to the plain name (e.g. 'full' or 'iso')
            plt.plot(E_list, irrep_eigs_array_list, '.')
            plt.plot(E_list, irrep_eigs_array_list_flip, 'o', mfc='none')
            plt.xlabel('E')
            plt.ylabel(r'$\lambda$')
            plt.title(r'$F_3^{-1}$ eigenvalues, ' + irrep_tex + ' irrep, '
                      + str(b + 1) + ' shells')
            for e0 in E_free:
                plt.axvline(x=e0, c='k', ls='--', lw=1)
            plt.xlim((Emin, Emax))
            plt.ylim((Ymin, Ymax))
            plt.tight_layout()
            plt.grid(True)
            if new_plots:
                plt.savefig(plotfile1)
            elif temp_plots:
                plt.savefig('temp.pdf')
            plt.ylim((ymin, ymax))
            if new_plots:
                plt.savefig(plotfile2)
            elif temp_plots:
                plt.xlim((xmin, xmax))
                plt.savefig('temp_zoom.pdf')
            plt.close()
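# AD.root_finder_secant is the project's own routine. As a hedged
# illustration of the idea it implements (follow an eigenvalue of
# F3^{-1}(E) and find the energy where it crosses zero), here is a minimal
# generic secant-method root finder; names and signature are assumptions,
# not the project's API.
def secant_root(f, e0, e1, tol=1e-10, max_iter=50):
    """Find a root of the scalar function f near e0, e1 by the secant method."""
    f0, f1 = f(e0), f(e1)
    for _ in range(max_iter):
        if f1 == f0:  # flat secant; cannot proceed
            break
        e2 = e1 - f1 * (e1 - e0) / (f1 - f0)
        if abs(e2 - e1) < tol:
            return e2
        e0, f0 = e1, f1
        e1, f1 = e2, f(e2)
    return e1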
plt.xlabel('time t')
plt.ylabel('EPSP kernel')

# plot activation function
plt.figure()
plt.plot(simulation.neuron_functions["sigma"](np.arange(0, 100)))
plt.title('Sigmoidal activation function')
plt.xlabel('input MB')
plt.ylabel('FR probability')

# plot Bernoulli spike trains of neurons
sim = 0
fig = plt.figure()
for neuron in range(p):
    nAP = training_samples["action_potentials"][sim][neuron, :]
    plt.subplot(p, 1, neuron + 1)
    spikeIDX = np.nonzero(nAP)[0]
    for idx in np.arange(np.size(spikeIDX)):
        plt.axvline(x=spikeIDX[idx], color='b')
    plt.ylabel('spike')
    plt.title('Neuron %s: %d events (%i Hz)'
              % (neuron, np.sum(nAP > 0), np.round(np.sum(nAP > 0) / time_period)))
    plt.gca().set_xlim([0, time_period / time_step])
plt.xlabel('time t (msec)')

# plot stats for neurons
neurons = [0, 1]
for neuron in neurons:
    fig = plt.figure()
    nAP = training_samples["action_potentials"][sim][neuron, :]
    preAP = np.dot(adj_mat[:, neuron] > 0, training_samples["action_potentials"][sim])
    nMB = training_samples["membrane_potentials"][sim][neuron, :]
    nFR = training_samples["firing_probabilities"][sim][neuron, :]
    plt.subplot(211)
def calc_score(event_fname, baseline_fnames, input_template, band_name,
               t_start, t_end, half_window, output_fname, band_fol,
               figure_name, do_plot):
    baseline_values = []
    if do_plot:
        fig = plt.figure(figsize=(10, 10))
        axes = fig.add_subplot(111)
    for fname in baseline_fnames:
        val_fname = fname if input_template == '' else input_template.format(
            file_name=utils.namebase(fname))
        vals = np.load(val_fname)
        # per-timepoint maximum over nodes
        max_vals = np.max(vals, axis=0)
        baseline_values.append(max_vals)
    baseline_values = np.array(baseline_values)

    event_fname = event_fname if input_template == '' else input_template.format(
        file_name=utils.namebase(event_fname), band_name=band_name)
    event_vals = np.load(event_fname)
    t_axis = np.linspace(t_start + half_window, t_end - half_window,
                         event_vals.shape[1])
    max_event_vals = np.max(event_vals, axis=0)
    max_t = np.argmax(max_event_vals)
    max_node = np.argmax(event_vals[:, max_t])

    # align the event curve with the baseline curves (crop or NaN-pad)
    if max_event_vals.shape[0] > baseline_values[0].shape[0]:
        max_event_vals = max_event_vals[:baseline_values[0].shape[0]]
    else:
        max_event_vals = np.pad(
            max_event_vals,
            (0, baseline_values[0].shape[0] - max_event_vals.shape[0]),
            'constant', constant_values=np.nan)

    threshold_max = baseline_values.max(axis=0)
    threshold_ci = baseline_values.mean(axis=0) + 2 * baseline_values.std(axis=0)
    # np.where returns a tuple; take the index array itself, otherwise the
    # len() check below is always true
    cross_indices = np.where((max_event_vals > threshold_ci)
                             & (max_event_vals > threshold_max))[0]
    score = sum(max_event_vals[cross_indices] - threshold_ci[cross_indices]) \
        if len(cross_indices) > 0 else 0

    if do_plot:
        plt.plot(t_axis, max_event_vals, 'b-', label='epi-event')
        plt.plot(t_axis, threshold_max, 'g--', label='baseline max')
        plt.plot(t_axis, threshold_ci, 'y--', label=r'baseline $\mu+2\sigma$')
        plt.axvline(x=0, linestyle='--', color='k')
        plt.xlabel('Time(s)', fontsize=18)
        plt.ylabel('max(Eigencentrality)', fontsize=16)
        plt.title('Eigencentrality ({})'.format(band_name), fontsize=16)
        plt.legend(loc='upper right', fontsize=16)
        plt.scatter(t_axis[cross_indices], max_event_vals[cross_indices],
                    marker='x', c='r')
        print('Saving figure to {}'.format(op.join(band_fol, figure_name)))
        plt.savefig(op.join(band_fol, figure_name))
        plt.close()
    return score
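# Hedged usage sketch: the score above is the area by which the event curve
# exceeds both baseline envelopes. Synthetic arrays stand in for the .npy
# files the real call loads; only the thresholding logic is illustrated.
import numpy as np

rng = np.random.default_rng(1)
baseline = rng.random((10, 100))                        # 10 baseline windows
event = baseline.mean(axis=0) + np.linspace(0, 1, 100)  # drifts above baseline
thr_max = baseline.max(axis=0)
thr_ci = baseline.mean(axis=0) + 2 * baseline.std(axis=0)
idx = np.where((event > thr_ci) & (event > thr_max))[0]
score = (event[idx] - thr_ci[idx]).sum() if len(idx) else 0
print('score =', score)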
# in :math:`\$/hours/(years\ of\ education)`.

# This representation of the coefficients has the advantage of making clear
# the practical predictions of the model:
# an increase of :math:`1` year in AGE means a decrease of :math:`0.030867`
# dollars/hour, while an increase of :math:`1` year in EDUCATION means an
# increase of :math:`0.054699` dollars/hour.

# On the other hand, categorical variables (such as UNION or SEX) are
# dimensionless numbers taking the value either 0 or 1. Their coefficients
# are expressed in :math:`\$/hours`. We therefore cannot compare the
# magnitude of different coefficients, since the features have different
# natural scales (and hence value ranges) because of their different units
# of measure. This is more evident if we plot the coefficients.

coefs.plot(kind='barh', figsize=(9, 7))
plt.title('Ridge model, small regularization')
plt.axvline(x=0, color='.5')
plt.subplots_adjust(left=.3)

###############################################################################
# Indeed, from the plot above the most important factor in determining WAGE
# appears to be the variable UNION, even though it is plausible that variables
# like EXPERIENCE should have more impact.
# Looking at the coefficient plot to extrapolate feature importance can be
# misleading, since some features vary on a small scale while others, like
# AGE, span several decades.
# This is evident if we compare the feature standard deviations.

X_train_preprocessed = pd.DataFrame(
    model.named_steps['columntransformer'].transform(X_train),
    columns=feature_names)
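###############################################################################
# A hedged follow-up sketch of what the comparison suggests (this assumes
# `coefs` is indexed by `feature_names`, as the bar plot above implies):
# rescale each coefficient by its feature's standard deviation so all bars
# share a "per one standard deviation" scale and become comparable.

X_train_preprocessed.std(axis=0).plot(kind='barh', figsize=(9, 7))
plt.title('Features std. dev.')
plt.subplots_adjust(left=.3)

coefs_std_scaled = coefs.mul(X_train_preprocessed.std(axis=0), axis=0)
coefs_std_scaled.plot(kind='barh', figsize=(9, 7))
plt.title('Ridge model, coefficients scaled by features std. dev.')
plt.axvline(x=0, color='.5')
plt.subplots_adjust(left=.3)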
# -
# For clarity, let us plot these functions.

# +
# Discrete grid
hh = np.linspace(-70, 70, 100)

# Axis labels in TeX format
plt.xlabel(r'$H$, A/m')
plt.ylabel(r'$B$, T')
plt.grid(True)

# Black vertical and horizontal lines X=0, Y=0 marking the coordinate axes
plt.axvline(x=0.0, color='black')
plt.axhline(y=0.0, color='black')

# Plots with TeX-formatted labels, red and blue lines
plt.plot(hh, B_up(hh), 'r', label=r'$B_\uparrow$')
plt.plot(hh, B_down(hh), 'b', label=r'$B_\downarrow$')
plt.legend(loc='best', fontsize=16)
plt.show()
# -

# For numerical integration we use the `scipy.integrate.quad()` (quadrature)
# method from the SciPy library. It performs adaptive evaluation of the
# definite integral of a function of one variable and, among other things,
# supports infinite limits of integration. The method returns the value of
# the integral and an estimate of the accuracy achieved.

Qup, err1 = scipy.integrate.quad(B_up, -np.inf, +np.inf)
print(Qup, err1)

Qdown, err2 = scipy.integrate.quad(B_down, -np.inf, +np.inf)
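# Hedged, self-contained sketch: with hypothetical branch functions of
# roughly the plotted shape (arctan-like curves shifted by a coercive field
# Hc), the area of the hysteresis loop is the integral of the difference of
# the two branches, which decays at infinity even though each branch alone
# does not. Names and the functional form are assumptions for illustration.
import numpy as np
import scipy.integrate

Hc, Bs, a = 10.0, 1.0, 5.0  # hypothetical coercive field, saturation, slope scale

def B_up_demo(h):
    return Bs * 2 / np.pi * np.arctan((h - Hc) / a)

def B_down_demo(h):
    return Bs * 2 / np.pi * np.arctan((h + Hc) / a)

loop_area, err = scipy.integrate.quad(
    lambda h: B_down_demo(h) - B_up_demo(h), -np.inf, np.inf)
print(loop_area, err)  # analytically 4 * Bs * Hc = 40 for these branches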
# xytext=(50, 30), textcoords='offset points',
# arrowprops=dict(arrowstyle="->")
# )
ax.plot(t, l1, label='Wheel FL')
ax.plot(t, l2, label='Wheel FR')
ax.plot(t, l3, label='Wheel ML')
ax.plot(t, l4, label='Wheel BL')
ax.plot(t, l5, label='Wheel MR')
ax.plot(t, l6, label='Wheel BR')
ax.set_xlim([10, 400])
# the second limit almost certainly targets the y axis: applied to x,
# [0.17, 0.23] would hide the vertical markers at t = 50..300 below
ax.set_ylim([0.17, 0.23])
plt.title(r'$\lambda_N$ for each wheel')
plt.xlabel(r'$T$ [s]')
plt.ylabel(r'$\lambda_N$ $[\frac{N}{s}]$')
plt.legend(loc='lower right')
seq = [7, 4, 3, 4]  # dash pattern: on/off lengths in points
plt.axvline(x=50, color='black', dashes=seq)
plt.axvline(x=100, color='black', dashes=seq)
plt.axvline(x=150, color='black', dashes=seq)
plt.axvline(x=220, color='black', dashes=seq)
plt.axvline(x=300, color='black', dashes=seq)
plt.show()
#x1=range(len(dataCNAc)) x1 = [] y1 = [] for i in dataCNAc: #x1=np.array([float(i[3]), float(i[4])]) #y1=np.array([float(i[13]),float(i[13])]) x1.append(int(i[1])) y = int(i[2]) if y > 100: y = 100 y1.append(y) plt.plot(x1, y1, 'ko', markersize=1) #plt.plot(x1, y1, linewidth=6, color='black') for ch in chr_CNA: plt.axvline(x=ch, color='0.75') plt.xticks(chr_CNA, range(1, len(chr_CNA) + 1)) plt.ylabel('DP') print('----DP has been completed----') #Drawing BAF #ax=fig.add_axes([0.08,0.05,0.9,0.4]) plt.subplot(312) #x_coin=0 x2 = [] y2 = [] for i in dataBafc: alt = float(i[2]) DP = float(i[3]) ref = DP - float(i[2]) if alt == 0 or DP == 0:
color='green', edgecolor='k') p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen', edgecolor='k') # bars for VotingClassifier p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue', edgecolor='k') p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue', edgecolor='k') # plot annotations plt.axvline(2.8, color='k', linestyle='dashed') ax.set_xticks(ind + width) ax.set_xticklabels([ 'LogisticRegression\nweight 1', 'GaussianNB\nweight 1', 'RandomForestClassifier\nweight 5', 'VotingClassifier\n(average probabilities)' ], rotation=40, ha='right') plt.ylim([0, 1]) plt.title('Class probabilities for sample 1 by different classifiers') plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left') plt.tight_layout() #plt.show()
# Uncomment and run to test your function:
train_accuracy = compute_accuracy(X, y_response.values, log_reg)
print(f"Accuracy on the training data: {train_accuracy:.2%}")

# instead of using all the cells in VISam, use all the cells from the
# subnetworks we've identified
lists = sorted(all_trials_window_spikes[300].items())  # (cell, spikes) pairs, sorted by cell
x, y = zip(*lists)  # unzip into cell ids and spike-time lists for plotting
plt.eventplot(y, 'horizontal', color='k')
plt.axvline(x=0.5, color='r', linestyle='--')
plt.axvline(x=dat['response_time'][300], color='b', linestyle='--')
plt.xlabel('Time (s)')
plt.ylabel('Cell number')

adjacency_matrix = all_trials_window_counts.T.corr()
sns.heatmap(adjacency_matrix, fmt='.1g', vmin=-1, vmax=1, center=0,
            cmap='coolwarm', yticklabels=True,
            xticklabels=True).set_title(region)
plt.show()

test_train = all_trials_window_spikes[0][637]
yaml.dump(config, stream=open(yamlfile, 'w'), default_flow_style=False)

ddisc_mjd = t2mjd(tjoin['ddisc'][msk][0])

# make posterior plots
xmin, xmax = 1e9, -1e9
for i, m in enumerate(models):
    msk = (tjoin['name'] == n) & (tjoin['model'] == m)
    t0 = np.percentile(tjoin['posterior_t0'][msk][0], 50.)
    t1 = np.percentile(tjoin['posterior_t0'][msk][0], 2.7e-3 * 100.)
    t2 = np.percentile(tjoin['posterior_t0'][msk][0], 100. * (1. - 2.7e-3))
    # 'Vega20c' was removed from matplotlib; the colormap is now 'tab20c'
    plt.axvline(t2mjd(t0) - ddisc_mjd,
                ls='-',
                color=plt.cm.tab20c(0.2 * i),
                label=m + ', 50% quantile',
                lw=1)
    plt.axvline(t2mjd(t1) - ddisc_mjd,
                ls='--',
                color=plt.cm.tab20c(0.2 * i),
                lw=1,
                label=m + r', $3\sigma$ quantile')
    plt.axvline(t2mjd(t2) - ddisc_mjd,
                ls='--',
                color=plt.cm.tab20c(0.2 * i),
                lw=1)
    if t2mjd(t1) - ddisc_mjd < xmin:
        xmin = t2mjd(t1) - ddisc_mjd
    if t2mjd(t2) - ddisc_mjd > xmax:
# ==============================================================================
# END SETTINGS
# START PREPROCESSING

# load the reference signal and tvl scan data
ref_time, ref_amp = sm.read_reference_data(ref_file)
data = THzData(tvl_file, basedir, gate=thz_gate, follow_gate_on=True)

# adjust ref_time so initial value is 0 ps
ref_time -= ref_time[0]

# plot reference waveform and gate0 before modification so we can see what
# it looks like
plt.figure('Reference Waveform')
plt.plot(ref_time, ref_amp, 'r')
plt.axvline(ref_time[gate0], linestyle='--', color='k')
plt.axvline(ref_time[gate1], linestyle='--', color='k')
plt.title('Reference Waveform')
plt.xlabel('Time (ps)')
plt.ylabel('Amplitude')
plt.grid()

# zero the waveform outside the gates (this removes the front blip) and
# taper the gate edges to half amplitude to soften the discontinuity before
# converting to the frequency domain
ref_freq_amp = copy.deepcopy(ref_amp)
ref_freq_amp[:gate0] = 0
ref_freq_amp[gate1:] = 0
ref_freq_amp[gate0] = ref_freq_amp[gate0 + 1] / 2
ref_freq_amp[gate1] = ref_freq_amp[gate1 - 1] / 2
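# A hedged sketch of the next step implied above (variable names are
# assumptions, not the script's actual ones): convert the gated waveform to
# the frequency domain and build the matching frequency axis from the
# sampling interval. With time in picoseconds, rfftfreq yields THz directly.
import numpy as np

dt = ref_time[1] - ref_time[0]                       # sampling interval (ps)
ref_freq = np.fft.rfftfreq(len(ref_freq_amp), dt)    # frequency axis (THz)
ref_spectrum = np.fft.rfft(ref_freq_amp)
print(ref_freq[np.argmax(np.abs(ref_spectrum))], 'THz peak')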
0.12, r"$a \Delta k = 0$", transform=ax.transAxes, fontsize=16, verticalalignment='top') ax.text(0.715, 0.985, r"$U_c/t = 3.85$", transform=ax.transAxes, fontsize=16, verticalalignment='top', color="#888888", rotation=90) plt.axhline(0, color='#dddddd', zorder=-2, linewidth=0.5) plt.axvline(3.85, linestyle='--', color='#888888', linewidth=1.0) filelist = glob.glob('data_of_q.txt') filelist.sort() cnt_q = 3 for filename in filelist: with open(filename) as f: lines = (line for line in f if not line.startswith('L')) new_del = [] for l in lines: new_del.append(','.join(l.split())) data = np.loadtxt(new_del, delimiter=',', skiprows=0) data_filter = list( filter(
# --------------
# code starts here
df1 = df[df['paid.back.loan'] == 'No']
df1['purpose'].value_counts().plot(kind='bar')
plt.show()
# code ends here

# --------------
# code starts here
# median for installment
inst_median = df['installment'].median()
# mean for installment
inst_mean = df['installment'].mean()

# plot the histogram for installment
df['installment'].plot(kind='hist')
plt.title("Probability Distribution of INSTALLMENT")
plt.axvline(inst_median, color='r', linestyle='dashed', linewidth=2)  # median
plt.axvline(inst_mean, color='g', linestyle='dashed', linewidth=2)    # mean
plt.show()

# histogram for log annual income
mean = df['log.annual.inc'].mean()
median = df['log.annual.inc'].median()
df['log.annual.inc'].plot(kind='hist')
plt.title("Probability Distribution of log.annual.inc")
plt.axvline(median, color='r', linestyle='dashed', linewidth=2)  # median
plt.axvline(mean, color='g', linestyle='dashed', linewidth=2)    # mean
plt.show()
# code ends here
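# Optional sanity check (a small addition, not part of the original
# exercise): when the mean sits to the right of the median, the distribution
# is right-skewed, which pandas can confirm directly.
print('installment skew: %.3f' % df['installment'].skew())
print('log.annual.inc skew: %.3f' % df['log.annual.inc'].skew())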
plt.title('RGB image')
plt.subplot(3, 2, 2), plt.imshow(img_rec_gray)
plt.xticks([]); plt.yticks([])
plt.title('Grayscale image')
plt.subplot(3, 2, 3), plt.imshow(img_s_manual)
plt.xticks([]); plt.yticks([])
plt.title('Manual threshold')
plt.subplot(3, 2, 4), plt.imshow(img_s_otsu)
plt.xticks([]); plt.yticks([])
plt.title('Otsu threshold')
plt.subplot(3, 2, 5), plt.plot(hist_gray)
plt.axvline(x=ret, color="g")
plt.xticks([])
plt.title('Manual histogram')
plt.subplot(3, 2, 6), plt.plot(hist_gray)
plt.axvline(x=ret1, color="b")
plt.xticks([])
plt.title('Otsu histogram')
plt.show()

# f) Show a figure containing the selected image in the RGB, Lab, HSV and
# YCrCb color systems.
imagens = [img_rgb, img_hsv, img_Lab, img_yCrCb]
titulos = ['RGB image', 'HSV image', 'Lab image', 'YCrCb image']
for i in range(4):
Can the feasible region consist of a single point? Yes, it can. For example,
```
f(x) = y = -x
x, y >= 0
```
"""

# %%
plt.xlim(-5, 5)
plt.ylim(-5, 5)
plt.axhline(y=0, color='k', linewidth=0.5)
plt.axvline(x=0, color='k', linewidth=0.5)
plt.plot([5, -5], [-5, 5], label='y = -x')
plt.scatter([0], [0])  # the single feasible point, the origin
plt.legend()
plt.show()

# %%
"""
### Task 5
"""

# %%
X_MAX = 3.5