def plot_hist(data):
    """Plot and/or dump a normalised histogram of the 300-column series.

    Bins are 0.05 wide and span the observed range of ``data[300]``.
    Output targets are selected through the module-level ``config``; when
    text output is enabled the (density, left-bin-edge) pairs are written
    to ``<pname>_distribution<txtext>``.
    """
    if config.get('outputting', 'plot'):
        # density=True replaces normed=True, which was deprecated and then
        # removed in matplotlib >= 3.1; the semantics here are identical.
        pdf, bins, patches = plt.hist(
            data[300],
            arange(data[300].min(), data[300].max(), 0.05),
            density=True, facecolor='green', alpha=0.75)
        #n, bins, patches = plt.hist(x, 50, normed=1, facecolor='green', alpha=0.75)
        # Text dump is nested here because it needs pdf/bins from the hist.
        if config.get('outputting', 'txt'):
            # Pair each density value with its bin's left edge (drop the
            # trailing right edge so both columns have equal length).
            s = numpy.vstack((pdf, bins[0:len(bins) - 1])).transpose()
            # numpy.savetxt, not plt.savetxt: pyplot does not export savetxt.
            numpy.savetxt(pname + '_distribution' + analysisParams['txtext'],
                          s, fmt='%f %f')
def plot_by_parts(na270, na300, na450, na640):
    """Dump and/or plot the four sampling-block series (270/300/450/640).

    Text output writes one two-column file per series; plot output lays the
    four series out on a 2x2 subplot grid and saves a single figure.
    """
    series = (('270', na270), ('300', na300), ('450', na450), ('640', na640))

    if config.get('outputting', 'txt'):
        for label, vec in series:
            plt.savetxt('na' + label + '.' + analysisParams['txtext'],
                        vec, fmt='%0.6f %f')

    if config.get('outputting', 'plot'):
        #plot hardcoded settings
        plt.subplots_adjust(top=0.95, hspace=0.4)
        for position, (label, vec) in zip((221, 222, 223, 224), series):
            plt.subplot(position)
            if position == 224:
                # Only the last panel carries the shared x-axis label.
                plot_vector(plt, label, vec, 'sampling blocks')
            else:
                plot_vector(plt, label, vec)
        plt.savefig(pname + '.' + analysisParams['figext'])
# Overlay the Kitt Peak reference PWV measurements (half the quoted sigma
# as error bars, drawn as wide caps with no marker).
ax.errorbar(24*(kitt[0]-gtime[0]),kitt[1],kitt[2]/2.0,label='Kitt Peak',fmt='o',color='red',ecolor='red',ms=0,elinewidth=4,capsize=6)
plt.xlim(min(hours),max(hours))
plt.ylim(min(pwvsnow1),24)
plt.xlabel('Time from start (hr)')
plt.ylabel('PWVs (mm)')
# Legend drawn for this one night only.
# NOTE(review): assumes only the legend call belongs to this branch; the
# original (pre-collapse) indentation should be confirmed.
if night == 'n20161204':
    leg = ax.legend(loc='best',prop={'size':18})
# +/- 3 sigma band around the running-median PWV.
ax.fill_between(hours,medPWVs + 3*pwvstd_hi,medPWVs-3*pwvstd_lo,facecolor='gray')
plt.subplots_adjust(bottom=0.19)
plt.text(1,20, night)
plt.savefig('paperplots/outputs/CAMALGPS_pwvnow_%s.eps' %night)

# Save PWVs
# NOTE(review): plt.savetxt only resolves when plt is pylab (pyplot has no
# savetxt), and under Python 3 the zip() iterator would need list(...) —
# confirm interpreter/import before relying on this line.
plt.savetxt('pwvs/PWVnow_%s_noA2.txt' %night, zip(gtime,pwvsnow1, pwvsnow2),header='time(JD) PWVSnow1 (mm) PWVSnow2')

# Plot against e/o
#--------------------------------------
# CAMAL VS GPS
# Keep only the Kitt Peak samples that fall inside the CAMAL time span...
ilap = np.where(((kitt[0]-gtime[0]) < (gtime[-1]-gtime[0])) & ((kitt[0]-gtime[0]) > 0))[0]
# ...and evaluate the cubic-spline PWV model at those times.
pwv2 = interpolate.splev(kitt[0],(knots,PWVcoeffs,3))
# Same selection/evaluation for the Amado station.
ilap3 = np.where(((azam[0]-gtime[0]) < (gtime[-1]-gtime[0])) & ((azam[0]-gtime[0]) > 0))[0]
pwv3 = interpolate.splev(azam[0],(knots,PWVcoeffs,3))
plt.figure(100)
plt.plot(pwv2[ilap],kitt[1][ilap],'bo',label='Kitt Peak')
plt.plot(pwv3[ilap3],azam[1][ilap3],'g+',label='Amado')
# 1:1 reference line.
plt.plot([0,20],[0,20],'k--')
#plt.legend()
def extract_percentile(mydir="20160826", myname="PTF16fgz.npy", plot=True,
                       write=True, maxperc=30):
    '''
    Test to extract only the spaxels with higher signal to noise.

    For percentile cuts 0, 10, 20, ... (< maxperc), the A and B nod spaxels
    whose integrated 500-900 nm flux lies above that percentile are
    resampled onto the std-correction wavelength grid, summed (A - B),
    flux-corrected, and optionally written to a text spectrum and/or
    plotted for visual inspection.

    Parameters
    ----------
    mydir : str
        Night directory under /scr2/sedmdrp/redux.
    myname : str
        Extraction file name (".npy"); the sp_/SEDM.txt siblings are
        derived from it.
    plot : bool
        Show the diagnostic plots.
    write : bool
        Write one spectrum per percentile cut under
        /home/nblago/classifications.
    maxperc : int
        Exclusive upper bound of the percentile scan (step 10).
    '''
    def get_header(myfile):
        # Collect the '#'-prefixed header lines of the SEDM text file,
        # stripped of their comment markers, so they can be re-emitted
        # verbatim as the header of the output spectra.
        header = ""
        with open(myfile) as f:
            ls = f.readlines()
            for li in ls:
                if (li.strip().startswith("#")):
                    header += li.replace("#", "")
        return header

    header = get_header("/scr2/sedmdrp/redux/%s/%s"
                        % (mydir, myname.replace(".npy", "_SEDM.txt")))

    #Extraction parameters (chosen spaxels)
    E, meta = np.load("/scr2/sedmdrp/redux/%s/%s" % (mydir, myname))
    #Extraction spectra
    Es = np.load("/scr2/sedmdrp/redux/%s/sp_%s" % (mydir, myname))
    #EsA = np.load("/scr2/sedmdrp/redux/%s/sp_A_%s"%(mydir, myname))
    #EsB = np.load("/scr2/sedmdrp/redux/%s/sp_B_%s"%(mydir, myname))
    std = np.load("/scr2/sedmdrp/redux/%s/std-correction.npy" % mydir)
    dome = np.load("/scr2/sedmdrp/redux/%s/dome.npy" % mydir)

    # Common wavelength grid: wherever the std correction is defined.
    mask = ~np.isnan(std[0]["correction"])
    waves = std[0]["nm"][mask]
    wmask = (waves > 500) * (waves < 900)

    for percent in np.arange(0, maxperc, 10):
        # --- Rank A-nod spaxels by integrated 500-900 nm flux ---
        Aflux = []
        stdA = []
        for spax in Es[0]['object_spaxel_ids_A']:
            try:
                l1, f1 = E[spax].get_flambda()
                lmask = (l1 > 500) * (l1 < 900)
                Aflux.append(np.nansum(f1[lmask]))
                # NOTE(review): indexes f1 with the std-grid mask, not
                # lmask — confirm the two wavelength grids really line up.
                stdA.append(np.std(f1[mask]))
            except Exception:  # narrowed from bare except: keep ^C alive
                pass
        maskA = np.array(Aflux) > np.percentile(Aflux, percent)

        # --- Same ranking for the (negative-flux) B nod ---
        Bflux = []
        stdB = []
        for spax in Es[0]['object_spaxel_ids_B']:
            try:
                l1, f1 = E[spax].get_flambda()
                lmask = (l1 > 500) * (l1 < 900)
                Bflux.append(-1 * np.nansum(f1[lmask]))
                stdB.append(np.std(f1[mask]))
            except Exception:
                pass
        maskB = np.array(Bflux) > np.percentile(Bflux, percent)

        medstdA = np.median(stdA)
        medstdB = np.median(stdB)

        # --- Resample every selected spaxel onto the common grid ---
        Aspec = []
        Bspec = []
        for spax in Es[0]['object_spaxel_ids_A'][maskA]:
            try:
                l1, f1 = E[spax].get_flambda()
                d1, df1 = dome[0][spax].get_flambda()
                mask_outliers = np.repeat(
                    True, len(l1))  #np.abs(f1 - np.median(f1))<8*medstdA
                specInt = interpolate.interp1d(l1[mask_outliers],
                                               f1[mask_outliers],
                                               kind="linear",
                                               bounds_error=False)
                #f1 = f1/df1
                plt.plot(waves, specInt(waves), "r-", alpha=0.05)
                Aspec.append(specInt(waves))
            except IOError:
                pass

        for spax in Es[0]['object_spaxel_ids_B'][maskB]:
            try:
                l2, f2 = E[spax].get_flambda()
                # BUG FIX: the outlier clip previously used f1 (a stale
                # leftover from the A loop); it must clip the B spectrum f2.
                mask_outliers = np.abs(f2 - np.median(f2)) < 4 * medstdB
                specInt = interpolate.interp1d(l2[mask_outliers],
                                               f2[mask_outliers],
                                               kind="linear",
                                               bounds_error=False)
                d2, df2 = dome[0][spax].get_flambda()
                #f1 = f1/df1
                plt.plot(waves, specInt(waves), "b-", alpha=0.05)
                Bspec.append(specInt(waves))
            except Exception:
                pass

        Aspec = np.array(Aspec)
        Bspec = np.array(Bspec)

        # Flux correction sampled on the common grid.
        sp = interpolate.interp1d(waves, std[0]["correction"][mask],
                                  kind="linear", bounds_error=False)
        #spec = spec / np.max(spec) * np.max(np.nanmedian(Aspec, axis=0))
        correction = sp(waves)

        specA = interpolate.interp1d(waves, np.nansum(Aspec, axis=0),
                                     kind="linear", bounds_error=False)
        specB = interpolate.interp1d(waves, np.nansum(Bspec, axis=0),
                                     kind="linear", bounds_error=False)

        # A - B summed spectrum, rescaled to the median-A peak inside the
        # 500-900 nm window.
        specSum = (specA(waves) - specB(waves))
        specSum = specSum / np.max(specSum[wmask]) * np.max(
            np.nanmedian(Aspec, axis=0))
        spec = (specA(waves) - specB(waves)) * (correction
                                                )  #/np.median(correction))
        spec = spec / np.max(spec[wmask]) * np.max(np.nanmedian(Aspec, axis=0))

        writemask = (waves > 400) * (waves < 950)

        if (write):
            # np.savetxt, not plt.savetxt: pyplot has no savetxt.
            # Wavelengths go out in Angstrom, reversed to red-to-blue order.
            np.savetxt("/home/nblago/classifications/%s/%d_%s"
                       % (mydir, percent, myname.replace(".npy", ".txt")),
                       np.array([np.array(waves[writemask][::-1]) * 10,
                                 spec[writemask][::-1]]).T,
                       fmt="%.1f %.4e",
                       header=header)

        if (plot):
            plt.plot(waves, np.nanmedian(Aspec, axis=0), "r-", lw=2,
                     label="Median A")
            plt.plot(waves,
                     np.nansum(Aspec, axis=0) *
                     np.max(np.nanmedian(Aspec, axis=0)) /
                     np.max(np.nansum(Aspec, axis=0)),
                     "m--", lw=2, ls="--", label="Sum A")
            plt.plot(waves, -1 * np.nanmedian(Bspec, axis=0), "g-", lw=2,
                     label="Median B")
            plt.plot(waves,
                     -1 * np.nansum(Bspec, axis=0) *
                     np.max(np.nanmedian(Bspec, axis=0)) /
                     np.max(np.nansum(Bspec, axis=0)),
                     "b--", lw=2, ls="--", label="Sum B")
            plt.plot(waves,
                     np.nansum(Aspec, axis=0) *
                     np.max(np.nanmedian(Aspec, axis=0)) /
                     np.max(np.nansum(Aspec, axis=0)) -
                     np.nanmedian(Aspec, axis=0),
                     lw=1, label="Sum - Median A")
            plt.plot(waves,
                     -1 * np.nansum(Bspec, axis=0) *
                     np.max(np.nanmedian(Bspec, axis=0)) /
                     np.max(np.nansum(Bspec, axis=0)) +
                     np.nanmedian(Bspec, axis=0),
                     lw=1, label="Sum - Median B")
            plt.plot(waves[writemask], spec[writemask], color="orange", lw=2)
            plt.plot(waves[writemask], specSum[writemask], color="cyan", lw=2)
            plt.legend(loc="best", frameon=False)
            plt.show()
# One-off background acquisition for baseline equalization.  The 'if 1:'
# is a hand toggle: flip to 0 to disable background subtraction.
if 1:
    if os.path.isfile(BACKGROUND_FILE):
        print 'reading background.txt'
        background = pylab.loadtxt(BACKGROUND_FILE)
    else:
        print 'getting new background image for equalization'
        # Average a handful of frames for a stable background estimate.
        numBackground = 5
        background = pylab.zeros((768, ))
        for i in range(0, numBackground):
            print i
            data = reader.getData()
            background = background + data
        background = background / numBackground
        # NOTE(review): this prints savetxt's None return value — possibly
        # a bare print and the save were separate lines originally; confirm
        # against the pre-collapse source.
        print pylab.savetxt('background.txt', background)
    # Offset that recenters each frame on the 500-count baseline.
    delta = 500.0 - background
else:
    print 'background subtraction disabled'
    delta = 0

i = 0
# Live acquisition/display loop (body continues past this chunk).
while 1:
    data = reader.getData()
    if data is None:
        continue
    data = data + delta
    if i == 0:
        # First frame: create the plot artists later iterations update.
        pylab.figure(1)
        h_line, = pylab.plot(data, linewidth=2)
        h_level, = pylab.plot([0], [0], 'or')
def plot_by_temp(by_temp):
    # Dump the Rg-vs-temperature series to CSV and render it through the
    # shared plot_vector helper, saving the figure as by_temp.png.
    # NOTE(review): plt.savetxt resolves only if plt is pylab (pyplot has
    # no savetxt) — confirm the module-level import.
    plt.savetxt('by_temp.csv', by_temp, fmt='%f %f')
    plot_vector(plt, 'Rg(Temperature)', by_temp, 'Temperature')
    plt.savefig('by_temp.png')
"ecoli": ("BIOGRID-ORGANISM-Escherichia_coli_K12_W3110-3.5.166.tab2.txt", None), "celegans": ("BIOGRID-ORGANISM-Caenorhabditis_elegans-3.5.166.tab2.txt", 8), "athaliana": ("BIOGRID-ORGANISM-Arabidopsis_thaliana_Columbia-3.5.166.tab2.txt", 8) #"homosapiens": "BIOGRID-ORGANISM-Homo_sapiens-3.5.166.tab2.txt" } for i, f in enumerate(files.keys()): if os.path.isfile(f + "_fuzzy.pdf"): continue graph = load.build_graph_from_ppin_file("huge/" + files[f][0]) p, lb, Nb = TFD.tfd_fuzzy(graph) plt.savetxt(f + "_fuzzy.dat", np.stack((lb, Nb), axis=1)) plt.figure(i) plt.loglog(lb, Nb, 'o') x = np.linspace(min(np.log(lb)), max(np.log(lb)), 100) plt.loglog(np.exp(x), np.exp(x * p[0] + p[1]), label="Slope: {:.3f}".format(p[0])) plt.legend() plt.savefig(f + "_fuzzy.pdf") for i, f in enumerate(files.keys()): lb, Nb = np.loadtxt(f + "_fuzzy.dat", unpack = True) if files[f][1] is not None: end = files[f][1] else: end = len(lb)
("BIOGRID-ORGANISM-Caenorhabditis_elegans-3.5.166.tab2.txt", 8), "athaliana": ("BIOGRID-ORGANISM-Arabidopsis_thaliana_Columbia-3.5.166.tab2.txt", 8) #"homosapiens": "BIOGRID-ORGANISM-Homo_sapiens-3.5.166.tab2.txt" } for i, f in enumerate(files.keys()): if os.path.isfile(f + "_greedy.pdf"): continue graph = load.build_graph_from_ppin_file("huge/" + files[f][0]) p, lb, Nb = TFD.tfd_greedy(graph) plt.savetxt(f + "_greedy.dat", np.stack((lb, Nb), axis=1)) plt.figure(i) plt.loglog(lb, Nb, 'o') x = np.linspace(min(np.log(lb)), max(np.log(lb)), 100) plt.loglog(np.exp(x), np.exp(x * p[0] + p[1]), label="Slope: {:.3f}".format(p[0])) plt.legend() plt.savefig(f + "_greedy.pdf") for i, f in enumerate(files.keys()): lb, Nb = np.loadtxt(f + "_greedy.dat", unpack=True) if files[f][1] is not None: end = files[f][1]