def analytic_smf_evol(source='li-drory-march'):
    """ Plot the redshift evolution of the analytic stellar mass function.

    Parameters
    ----------
    source : str
        Name of the analytic SMF parameterization passed to SMF.analytic.
    """
    prettyplot()
    colors = prettycolors()

    fig = plt.figure(figsize=(10, 10))
    sub = fig.add_subplot(111)

    # one analytic SMF curve per redshift bin
    for iz, zbin in enumerate(np.arange(0.0, 1.5, 0.25)):
        mass, phi = SMF().analytic(zbin, source=source)
        sub.plot(mass, phi, lw=4, ls='--', c=colors[iz],
                 label=r"$ z = " + str(round(zbin, 2)) + "$")

    sub.set_yscale('log')
    sub.set_ylim([10**-5, 10**-1])
    sub.set_xlim([8.0, 12.0])
    sub.legend(loc='upper right')

    fig.savefig(''.join(['figure/' 'analytic_smf_', source, '.png']),
                bbox_inches='tight')
    plt.close()
def analytic_smf_evol(source='li-drory-march'):
    """ Plot the analytic SMF over a grid of redshifts z = 0.0 ... 1.25.

    NOTE(review): this is an exact duplicate of the `analytic_smf_evol`
    defined immediately above; the later definition wins at import time.
    Consider removing one of the two.
    """
    prettyplot()
    palette = prettycolors()

    figure = plt.figure(figsize=(10, 10))
    axes = figure.add_subplot(111)

    redshifts = np.arange(0.0, 1.5, 0.25)
    for idx, redshift in enumerate(redshifts):
        smf_obj = SMF()
        m_arr, phi_arr = smf_obj.analytic(redshift, source=source)
        axes.plot(m_arr, phi_arr, lw=4, ls='--', c=palette[idx],
                  label=r"$ z = " + str(round(redshift, 2)) + "$")

    axes.set_yscale('log')
    axes.set_ylim([10**-5, 10**-1])
    axes.set_xlim([8.0, 12.0])
    axes.legend(loc='upper right')

    figure.savefig(''.join(['figure/' 'analytic_smf_', source, '.png']),
                   bbox_inches='tight')
    plt.close()
def plot_avg_Pkfast_test(**kwargs):
    """ Compare the average P_l(k) computed with Ngrid=960 against the
    Ngrid=720 result, to check that the two are consistent.

    Parameters
    ----------
    **kwargs :
        Optional 'xrange' (two-element sequence) restricting the plotted
        k-range; the output filename then gets an '_xranged' suffix.
    """
    prettyplot()
    pretty_colors = prettycolors()

    for i_l, l_i in enumerate([0, 2, 4]):
        # averaged multipoles at the two grid resolutions
        k_fast_i, plk_fast_i = pk_extrap.average_Pk(l_i, 10, Ngrid=960)
        k_i, plk_i = pk_extrap.average_Pk(l_i, 10, Ngrid=720)

        # NOTE(fix): the original wrapped str(l_i) in a dead
        # try/except TypeError after already assigning the same value;
        # str() on an int cannot raise TypeError, so one assignment suffices.
        l_i_str = str(l_i)

        fig = plt.figure(figsize=(10, 10))
        sub = fig.add_subplot(111)
        sub.scatter(k_fast_i, np.abs(plk_fast_i), c=pretty_colors[i_l*2+1])
        sub.plot(k_i, np.abs(plk_i), label=r"$l = "+l_i_str+"$",
                 c='k', lw=2, ls='--')

        if 'xrange' in kwargs.keys():
            sub.set_xlim(kwargs['xrange'])
            file_str = '_xranged'
        else:
            sub.set_xlim([1e-3, 1.0])
            file_str = ''

        sub.set_ylabel(r"$\mathtt{|P_l(k)|}$", fontsize=40)
        sub.set_xlabel(r"$\mathtt{k}$", fontsize=40)
        sub.set_xscale('log')
        sub.set_yscale('log')
        sub.legend(loc='upper right')

        fig_file = ''.join([
            'qaplot_avgP', str(l_i), 'k_fast_test', file_str, '.png'
            ])
        fig.savefig(fig_file, bbox_inches='tight')
        plt.close()
def test_fllp_k(k, l, rc=0.43):
    """ Plot f_{l,l'}(q r_c, k r_c) as a function of q for l' = 0, 2, ..., 38
    in order to examine how the fiber-collision window term behaves.

    Parameters
    ----------
    k : float
        Wavenumber at which f_{l,l'} is evaluated.
    l : int
        Multipole l.
    rc : float
        Fiber-collision comoving scale (default 0.43).
    """
    q_arr = np.logspace(-3, 3, num=100)
    krc = k * rc
    data_dir = '/mount/riachuelo1/hahn/power/Nseries/Box/'

    # k column of the measured box P(k); only its last element is used,
    # for the vertical marker. NOTE(fix): loaded once here -- the
    # original re-read the same file on every l' iteration. Also dropped
    # the unused locals n_mock, k_fit, k_fixed.
    true_pk_file = ''.join([data_dir, 'power3600z_BoxN1.dat'])
    tr_k = np.loadtxt(true_pk_file, unpack=True, usecols=[0])

    prettyplot()
    pretty_colors = prettycolors()
    fig = plt.figure(1, figsize=(14, 8))
    sub = fig.add_subplot(111)

    maxmax, minmin = 0., 0.
    for i_lp, ellp in enumerate(range(20)):
        lp = 2 * ellp

        fllp = []
        for q in q_arr:
            fllp.append(fourier_corr.f_l_lp(q*rc, krc, l, lp))

        int_label = "$ l' = "+str(lp)+ "$"
        sub.plot(q_arr, np.array(fllp), c=pretty_colors[i_lp+1],
                 lw=4, ls='-', label=int_label)
        # track global extrema for the vertical marker and text placement
        maxmax = np.max([np.max(fllp), maxmax])
        minmin = np.min([np.min(fllp), minmin])

    sub.set_xscale('log')
    sub.set_xlabel(r"$\mathtt{q}$", fontsize=25)
    sub.set_ylabel(r"$\mathtt{f_{l, l'}(q r_c, k r_c)}$", fontsize=25)
    sub.vlines(tr_k[-1], minmin, maxmax, color='k', linestyles='--', linewidth=2)
    sub.text(2.*10**-3, 1.01*maxmax, r"k = "+str(round(k, 2)), fontsize=20)
    sub.legend(loc='upper right')

    fig.savefig(''.join([
        'figure/', 'fllp.l', str(l), '.k', str(round(k, 2)), '.png'
        ]), bbox_inches='tight')
    plt.close()
def plot_avg_P4k_comp(n_mocks, Ngrid=360, yscale='log', **kwargs):
    """ Compare the average P_4(k) against the '4fast' computation.

    Parameters
    ----------
    n_mocks : int
        Number of mocks averaged by pk_extrap.average_Pk.
    Ngrid : int
        FFT grid size (default 360).
    yscale : str
        'log' for a logarithmic y-axis; anything else leaves it linear.
    **kwargs :
        Optional 'xrange'/'yrange' two-element sequences for axis limits.
    """
    prettyplot()
    pretty_colors = prettycolors()

    fig = plt.figure(figsize=(10, 10))
    sub = fig.add_subplot(111)

    for i_l, l_i in enumerate([4, '4fast']):
        k_i, plk_i = pk_extrap.average_Pk(l_i, n_mocks, Ngrid=Ngrid)
        # NOTE(fix): dropped the dead try/except TypeError around
        # str(l_i); str() raises no TypeError for int or str input,
        # and the same value was already assigned just before the try.
        l_i_str = str(l_i)
        sub.plot(k_i, np.abs(plk_i), label=r"$l = "+l_i_str+"$",
                 c=pretty_colors[i_l*2+1], lw=4)

    if 'xrange' in kwargs.keys():
        sub.set_xlim(kwargs['xrange'])
    else:
        sub.set_xlim([1e-3, 1.0])

    if 'yrange' in kwargs.keys():
        sub.set_ylim(kwargs['yrange'])

    sub.set_ylabel(r"$\mathtt{|P_4(k)|}$", fontsize=40)
    sub.set_xlabel(r"$\mathtt{k}$", fontsize=40)
    sub.set_xscale('log')
    if yscale == 'log':
        sub.set_yscale('log')

    sub.legend(loc='upper right')
    fig_file = ''.join([
        'qaplot_avgP4_4fast_k_comparison_', 'Ngrid', str(Ngrid), '.png'
        ])
    fig.savefig(fig_file, bbox_inches='tight')
    # NOTE(fix): close the figure so repeated calls do not accumulate
    # open matplotlib figures (the original never closed it).
    plt.close()
def delPcorr_0_ktrust_comp(ktrust, ell):
    """ Compare Del P^corr |_{q=0}^{q=k_trust} of Nseries (survey geometry)
    versus Nseries Box.

    RESULTS: As expected, there is negligible difference between Nseries
    and Nseries Box Del P^corr |_0^k_trust.

    Parameters
    ----------
    ktrust : float
        Upper integration limit k_trust (encoded in the pickle filenames).
    ell : int
        Multipole of Del P^corr.
    """
    # Nseries (Geometry)
    geo_corrdelPk_pickle_file = ''.join([
        '/mount/riachuelo1/hahn/power/Nseries/',
        'corrdelP', str(ell), 'k',
        '_qmax', str(round(ktrust,2)),'_',
        'AVG_POWER_Q_CutskyN.fidcosmo.dat.grid960.P020000.box3600'
        '.p'
        ])
    # NOTE(fix): open the pickles with a context manager so the file
    # handles are closed; the original leaked them via pickle.load(open(...)).
    with open(geo_corrdelPk_pickle_file, 'rb') as geo_f:
        geo_k, geo_DelPk = pickle.load(geo_f)

    # Nseries Box
    box_corrdelPk_pickle_file = ''.join([
        '/mount/riachuelo1/hahn/power/Nseries/Box/',
        'corrdelP', str(ell), 'k',
        '_qmax', str(round(ktrust,2)),'_',
        'AVG_power3600z_BoxN.dat',
        '.p'
        ])
    with open(box_corrdelPk_pickle_file, 'rb') as box_f:
        box_k, box_DelPk = pickle.load(box_f)

    prettyplot()
    pretty_colors = prettycolors()
    fig = plt.figure(1)
    sub = fig.add_subplot(111)

    sub.plot(geo_k, geo_DelPk, c='k', lw=4, ls='-', label='Geometry')
    sub.plot(box_k, box_DelPk, c=pretty_colors[2], lw=4, ls='--', label='Box')

    sub.set_xscale('log')
    sub.set_xlim([10**-3, 10**0])
    sub.set_xlabel(r"$\mathtt{k}\;\;(\mathtt{Mpc}/h)$", fontsize=30)
    sub.set_ylabel(
        r"$\mathtt{\Delta P_{"+str(ell)+r"}^{corr}(k)\bigg|_{q = 0}^{q=k_{trust}}}$",
        fontsize=30)
    sub.legend(loc='upper left', scatterpoints=1)

    fig.savefig(''.join([
        'figure/', 'qaplot_delP_'+str(ell)+'corr_0_ktrust',
        str(ktrust), '_geoboxcomp.png'
        ]), bbox_inches="tight")
    codif.notif(subject='delPcorr_0_ktrust figure finished')
    plt.close()
def __init__(self, **kwargs):
    """ Class that describes dLOS distribution plots.

    Parameters
    ----------
    **kwargs :
        Plotting options; stored on the instance for later plot methods.
    """
    # keep keyword options around for the plotting methods
    self.kwargs = kwargs

    # pretty figure up
    prettyplot()
    pretty_colors = prettycolors()  # NOTE(review): unused here -- confirm whether it can be removed

    self.fig = plt.figure(1)
    self.sub = self.fig.add_subplot(111)

    # running maximum of plotted histogram values; presumably updated by
    # plotting methods elsewhere in the class -- TODO confirm
    self.hist_max = 0.0
def plot_sfms_wetzel_vs_lee():
    """ Compare the Wetzel parameterized SFMS against the Lee et al. (2015)
    parameterization: scatter points are Lee et al., dashed lines are
    Wetzel, one color per redshift bin.

    NOTE(review): an identical `plot_sfms_wetzel_vs_lee` is defined again
    later in this file; the later definition shadows this one.
    """
    # wetzel
    wetzel_sfr_mstar_z, wetzel_sig_sfr_mstar_z = get_param_sfr_mstar_z()

    # Lee et al. (2015) redshift bins and mass limits
    z_mid = np.array([0.36, 0.55, 0.70, 0.85, 0.99, 1.19])
    logM_lim = np.array([8.50, 9.00, 9.00, 9.30, 9.30, 9.30])
    masses = np.arange(7.5, 12.0, 0.1)

    prettyplot()
    palette = prettycolors()
    fig = plt.figure(figsize=(10, 10))
    sub = fig.add_subplot(111)

    for iz, zz in enumerate(z_mid):
        sub.scatter(masses, lee_et_al_2015(masses, zz), c=palette[iz],
                    label=r"$\mathtt{z = "+str(round(zz, 2))+"}$")
        sub.plot(masses, wetzel_sfr_mstar_z(masses, zz), c=palette[iz],
                 ls='--', lw=3)
        #sub.vlines(logM_lim[i_z], -5.0, 2.0, color='k', linestyle='--', lw=2)

    sub.set_xlim([7.0, 12.0])
    sub.set_ylim([-3.0, 2.0])
    sub.set_xlabel(r'$\mathtt{log\;M_*}$', fontsize=30)
    sub.set_ylabel(r'$\mathtt{log\;SFR}$', fontsize=30)
    sub.legend(loc='lower right', scatterpoints=1)

    fig.savefig(''.join(['figure/', 'qaplot_sfms_wetzel_vs_lee.png']),
                bbox_inches='tight')
    plt.show()
def test_photoz(catalog_name):
    """ Check that the photometric redshifts assigned by the photoz
    correction method reproduce the CMASS photometric redshifts.
    """
    cat_corr = {
        'catalog': {'name': catalog_name, 'n_mock': 20},
        'correction': {'name': 'photoz'}
        }
    dataclass = Data('data', cat_corr)
    dataclass.read()

    # only consider galaxies with photometric redshifts
    hasphotoz = np.where(dataclass.photoz > -999.)
    z_spec = dataclass.z[hasphotoz]
    deltaz_z = (z_spec - dataclass.photoz[hasphotoz]) / (1.0 + z_spec)

    prettyplot()
    pretty_colors = prettycolors()
    plt.figure(figsize=(8, 8))
    bovy.scatterplot(
        z_spec,
        deltaz_z,
        scatter=True,
        levels=[0.68, 0.95, 0.997],
        color=pretty_colors[1],
        s=3,
        xrange=[0.43, 0.7],
        yrange=[-0.3, 0.3],
        xlabel='\mathtt{z_{spec}}',
        ylabel=r'\mathtt{\frac{|z_{spec} - z_{photo}|}{1 + z_{spec}}}'
        )
    plt.show()

    # save figure to file
    fig_dir = '/home/users/hahn/powercode/FiberCollisions/figure/'
    fig_file = ''.join([fig_dir, catalog_name, '_deltaz_zspec_zphoto.png'])
    plt.savefig(fig_file, bbox_inches='tight')
    plt.close()

    return None
def plot_fllp(l, k_value=0.3, rc=0.4): ''' plot f_l,l' ''' prettyplot() pretty_colors = prettycolors() fig = plt.figure(figsize=(10,10)) sub = fig.add_subplot(111) for lp in [0,2,4]: q = np.arange( 0.002, 1.0, 0.005 ) start_time = time.time() fllp_integrated = [tophat.f_l_lp(q_i*rc, k_value*rc, l, lp, first_order=True) for q_i in q] print 'Integrated f_l,lp takes ', time.time() - start_time, ' seconds' start_time = time.time() fllp_estimated = [tophat.f_l_lp_est(q_i*rc, k_value*rc, l, lp) for q_i in q] print 'Estimated f_l,lp takes ', time.time() - start_time, ' seconds' sub.scatter( q, fllp_integrated, c=pretty_colors[lp], s=10, label = r"$\mathtt{l' = "+str(lp)+"}$ First order" ) sub.plot( q, fllp_estimated, c=pretty_colors[lp], lw = 4, ls = '--', label = r"theoretical estimate" ) sub.set_xlim([10**-3,10**0]) sub.set_xscale("log") sub.set_xlabel(r"$\mathtt{q}\;\;(\mathtt{Mpc}/h)$", fontsize=30) sub.set_ylabel(r"$\mathtt{f_{l,l'} (q r_{fc}, "+str(round(k_value,1))+"r_{fc}})$", fontsize=30) sub.legend(loc='upper left', scatterpoints=1) fig_file = ''.join(['qaplot_fl', str(l), 'lp024_k', str(k_value), '.png']) fig.savefig(fig_file, bbox_inches='tight') plt.close()
def plot_corrected_Pk(l, fs=1.0, rc=0.4):
    """ Plot the tophat fiber-collision corrected P_l(k) together with the
    true and upweighted power spectra read from file.
    """
    l_cols = [0, 1] if l == 0 else [0, 2]

    # true P_l(k)
    k, Pk = np.loadtxt(
        'POWER_Q_CutskyN1.fidcosmo.dat.grid360.P020000.box3600',
        unpack=True, usecols=l_cols)
    # Upweighted P_l(k)
    k, Pk_upw = np.loadtxt(
        'POWER_Q_CutskyN1.fidcosmo.fibcoll.dat.grid360.P020000.box3600',
        unpack=True, usecols=l_cols)

    # correction terms: pickled correlated piece + analytic uncorrelated piece
    corrdelP = pickle.load(open('delP'+str(l)+'k_corr.p', 'rb'))
    uncorrdelP = tophat.delP_uncorr(k, l, fs=fs, rc=rc)

    # FC P_l(k)
    fc_Pk = Pk + uncorrdelP + corrdelP

    prettyplot()
    palette = prettycolors()
    fig = plt.figure(figsize=(10, 10))
    sub = fig.add_subplot(111)

    sub.plot(k, fc_Pk, color=palette[3], label='FC TopHat')
    sub.plot(k, Pk_upw, color=palette[5], label='Upweighted (data)')
    sub.plot(k, Pk, color=palette[1], label='True (data)')

    sub.set_xlim([10**-3, 10**0])
    sub.set_xscale("log")
    sub.set_xlabel(r"$\mathtt{k}\;\;(\mathtt{Mpc}/h)$", fontsize=30)
    sub.set_yscale('log')
    sub.set_ylabel(r"$\mathtt{P_{"+str(l)+"}(k)}$", fontsize=30)
    sub.legend(loc='upper left')

    fig.savefig('qaplot_P_'+str(l)+'_tophat.png', bbox_inches="tight")
    plt.close()
def plot_avg_Plk(l, n_mocks, Ngrid=360, yscale='linear', **kwargs):
    """ Plot the average P_l(k) for one or more multipoles.

    Parameters
    ----------
    l : int or list of int
        Multipole(s) to plot.
    n_mocks : int
        Number of mocks averaged by pk_extrap.average_Pk.
    Ngrid : int
        FFT grid size (default 360).
    yscale : str
        'log' for a logarithmic y-axis; anything else leaves it linear.
    **kwargs :
        Optional 'xrange'/'yrange' two-element sequences for axis limits.
    """
    if not isinstance(l, list):
        ls = [l]
    else:
        ls = l

    prettyplot()
    pretty_colors = prettycolors()

    # plot P(k) data
    fig = plt.figure(figsize=(10, 10))
    sub = fig.add_subplot(111)

    for l_i in ls:
        k_i, plk_i = pk_extrap.average_Pk(l_i, n_mocks, Ngrid=Ngrid)
        sub.plot(k_i, plk_i, label=r"$l = "+str(l_i)+"$",
                 c=pretty_colors[l_i+1], lw=4)

    if 'xrange' in kwargs.keys():
        sub.set_xlim(kwargs['xrange'])
    else:
        sub.set_xlim([1e-3, 1.0])

    if 'yrange' in kwargs.keys():
        sub.set_ylim(kwargs['yrange'])

    sub.set_ylabel(r"$\mathtt{P_l(k)}$", fontsize=40)
    sub.set_xlabel(r"$\mathtt{k}$", fontsize=40)
    sub.set_xscale('log')
    if yscale == 'log':
        sub.set_yscale('log')

    sub.legend(loc='upper right')
    # NOTE(fix): build the filename from the normalized list `ls`; the
    # original iterated over `l` directly, which raises TypeError
    # whenever a single int multipole is passed.
    fig_file = ''.join([
        'qaplot_avgP', ''.join([str(l_i) for l_i in ls]), 'k_',
        'Ngrid', str(Ngrid), '.png'
        ])
    fig.savefig(fig_file, bbox_inches='tight')
    # NOTE(fix): close the figure so repeated calls do not accumulate
    # open matplotlib figures (the original never closed it).
    plt.close()
def plot_sfms_wetzel_vs_lee():
    """ Plot the Wetzel parameterized SFMS (dashed lines) and the
    Lee et al. (2015) parameterized SFMS (scatter points) for a set of
    redshift bins.
    """
    # Wetzel parameterization
    wetzel_sfr_mstar_z, wetzel_sig_sfr_mstar_z = get_param_sfr_mstar_z()

    # Lee et al. (2015)
    z_mid = np.array([0.36, 0.55, 0.70, 0.85, 0.99, 1.19])
    logM_lim = np.array([8.50, 9.00, 9.00, 9.30, 9.30, 9.30])
    masses = np.arange(7.5, 12.0, 0.1)

    prettyplot()
    color_list = prettycolors()

    fig = plt.figure(figsize=(10, 10))
    sub = fig.add_subplot(111)

    for bin_index in range(len(z_mid)):
        z_bin = z_mid[bin_index]
        lee_sfr = lee_et_al_2015(masses, z_bin)
        wetzel_sfr = wetzel_sfr_mstar_z(masses, z_bin)
        sub.scatter(masses, lee_sfr, c=color_list[bin_index],
                    label=r"$\mathtt{z = "+str(round(z_bin, 2))+"}$")
        sub.plot(masses, wetzel_sfr, c=color_list[bin_index], ls='--', lw=3)
        #sub.vlines(logM_lim[i_z], -5.0, 2.0, color='k', linestyle='--', lw=2)

    sub.set_xlim([7.0, 12.0])
    sub.set_ylim([-3.0, 2.0])
    sub.set_xlabel(r'$\mathtt{log\;M_*}$', fontsize=30)
    sub.set_ylabel(r'$\mathtt{log\;SFR}$', fontsize=30)
    sub.legend(loc='lower right', scatterpoints=1)

    fig_file = ''.join(['figure/', 'qaplot_sfms_wetzel_vs_lee.png'])
    fig.savefig(fig_file, bbox_inches='tight')
    plt.show()
def plot_2pcf(catalog):
    """ Plot the 2PCF xi(r_p, pi) of an Nseries catalog from a pickled
    (rp_bins, pi_bins, 2pcf) tuple.

    Parameters
    ----------
    catalog : str
        One of 'true', 'fced', or 'nocoll'.

    Raises
    ------
    ValueError
        If catalog is not one of the recognized types.
    """
    prettyplot()
    pretty_colors = prettycolors()

    data_dir = '/mount/riachuelo1/hahn/data/Nseries/'
    if catalog == 'true':
        pickle_name = ''.join([data_dir, '2pcf_CutskyN1.fidcosmo.p'])
    elif catalog == 'fced':
        pickle_name = ''.join([data_dir, '2pcf_CutskyN1.fidcosmo.fced.p'])
    elif catalog == 'nocoll':
        pickle_name = ''.join([data_dir, '2pcf_CutskyN1.fidcosmo.nocoll.p'])
    else:
        # NOTE(fix): the original fell through with pickle_name undefined
        # for unrecognized catalogs and crashed with an opaque NameError.
        raise ValueError('unrecognized catalog: ' + str(catalog))

    pickle_data = pickle.load(open(pickle_name, "rb"))
    rp_bins = pickle_data[0]
    pi_bins = pickle_data[1]
    twopcf = pickle_data[2]
    #print twopcf

    fig = plt.figure(figsize=(10, 10))
    sub = fig.add_subplot(111)

    r_p, pi = np.meshgrid(rp_bins, pi_bins)
    sub.pcolormesh(r_p, pi, twopcf.T, cmap=plt.cm.afmhot)

    sub.set_xscale('log')
    sub.set_xlabel('$\mathtt{r_{p}}$', fontsize=40)
    sub.set_ylabel('$\pi$', fontsize=40)
    sub.set_xlim([0.0, 20.0])
    sub.set_ylim([0.0, 20.0])
    sub.set_title('N-series '+catalog.upper() + ' Catalog')
    plt.show()
    # NOTE(fix): removed a dangling triple-quote left after plt.show() in
    # the original, which opened an unterminated string literal and
    # swallowed the following function definitions at import time.
def plot_avg_Plk_negative(l, n_mocks, Ngrid=360):
    """ Plot only the negative portion of the average P_l(k). The y-axis
    is linear (not logarithmic) so that negative values are visible.
    """
    ells = l if isinstance(l, list) else [l]

    prettyplot()
    palette = prettycolors()

    # plot P(k) data
    fig = plt.figure(figsize=(10, 10))
    sub = fig.add_subplot(111)

    for ell in ells:
        k_ell, plk_ell = pk_extrap.average_Pk(ell, n_mocks, Ngrid=Ngrid)
        neg = np.where(plk_ell < 0.)

        # low multipoles drawn as lines, higher ones as scatter points
        if ell < 4:
            sub.plot(k_ell[neg], plk_ell[neg],
                     label=r"$P_"+str(ell)+"(k)$", c=palette[ell+1], lw=4)
        else:
            sub.scatter(k_ell[neg], plk_ell[neg],
                        label=r"$P_"+str(ell)+"(k)$", c=palette[ell+1])

    sub.set_xlim([1e-3, 1.0])
    sub.set_ylabel(r"$\mathtt{P_l(k)}$", fontsize=40)
    sub.set_xlabel(r"$\mathtt{k}$", fontsize=40)
    sub.set_xscale('log')
    sub.legend(loc='upper right')
    plt.show()
def test_pk_extrap_scatter(ell, n_mocks, Ngrid=960, k_fit=0.25, k_fixed=0.6, **kwargs):
    ''' test the scatter in pk extrapolation

    Fits a power law to each mock's P_ell(k) beyond k_fit, overplots the
    data and the extrapolations, and annotates the mean/stddev of the
    best-fit (alpha, n) parameters across mocks.
    '''
    prettyplot()
    pretty_colors = prettycolors()
    fig = plt.figure()
    sub = fig.add_subplot(111)
    # best-fit power-law parameters accumulated over mocks
    alphas, ns = [], []
    for i_mock in range(1, n_mocks+1):
        # default cat-corr for Nseries
        cat_corr = {
            'catalog': {'name': 'nseriesbox', 'n_mock': i_mock},
            'correction': {'name': 'true'}
            }
        spec_i = CorrSpec('pk', cat_corr, ell=ell, Ngrid=Ngrid)
        print spec_i.file_name
        spec_i.read()
        # the multipole attribute is named 'p<ell>k' on the spectrum object
        spec_i_spec = getattr(spec_i, 'p'+str(ell)+'k')
        # power-law fit beyond k_fit, pivoted at k_fixed
        alpha_i, n_i = pk_extrap.pk_powerlaw_bestfit(
            spec_i.k, spec_i_spec, k_fit=k_fit, k_fixed=k_fixed)
        # only label the first mock so the legend has one entry per style
        if i_mock == 1:
            pk_label = 'Data'
            pk_extrap_label = 'Extrap.'
        else:
            pk_label = None
            pk_extrap_label = None
        sub.plot(spec_i.k, np.abs(spec_i_spec), lw=2, c=pretty_colors[0],
                 label=pk_label)
        sub.plot(
            np.arange(k_fit, 10, 0.01),
            np.abs(pk_extrap.pk_powerlaw(
                np.arange(k_fit, 10, 0.01), [alpha_i, n_i], k_fixed=k_fixed)),
            ls='--', lw=2, c=pretty_colors[1], label=pk_extrap_label
            )
        alphas.append(alpha_i)
        ns.append(n_i)
    alphas = np.array(alphas)
    ns = np.array(ns)
    # x-axis
    sub.set_xlabel(r'$\mathtt{k}$', fontsize=30)
    sub.set_xscale('log')
    sub.set_xlim([0.1, 10.])
    # y-axis
    sub.set_ylabel(r'$\mathtt{P_'+str(ell)+'(k)}$', fontsize=30)
    sub.set_yscale('log')
    sub.set_ylim([10**0, 10**5])
    # summary statistics of the fit parameters, placed on the plot
    sum_stat = ''.join([
        r"$\bar{\alpha}, \sigma_{\alpha} = ",
        str(round(np.mean(alphas), 1)), ",\;",
        str(round(np.std(alphas),1)), "$",
        '\n',
        r"$\bar{n}, \sigma_{n} = ",
        str(round(np.mean(ns),2)), ",\;",
        str(round(np.std(ns),2)), "$"
        ])
    sub.text(1.0, 10**4.25, sum_stat, fontsize=20)
    sub.legend(loc='lower right')
    plt.show()
def cmass_zspec_zphoto():
    """ Compare matched spectroscopic and photometric redshifts of the
    CMASS catalog with two scatter plots: z_photo versus z_spec, and
    delta z / (1 + z) versus z_spec.
    """
    prettyplot()
    palette = prettycolors()

    # matching spectroscopic and photometric redshifts
    z_spec, z_photo = match_zspec_zphoto_cmass()

    # scatter plot of z_spec versus z_photo
    bovy.scatterplot(
        z_spec, z_photo, scatter=True, color=palette[1], s=3,
        xrange=[0.0, 1.0], yrange=[0.0, 1.0],
        xlabel='\mathtt{z_{spec}}', ylabel='\mathtt{z_{photo}}'
        )
    ref_line = np.arange(0.0, 2.0, 0.1)
    plt.plot(ref_line, ref_line, c='k', lw=4)    # y = x

    fig_dir = '/home/users/hahn/powercode/FiberCollisions/figure/'
    plt.savefig(''.join([fig_dir, 'cmass_zspec_zphoto.png']),
                bbox_inches='tight')
    plt.clf()

    # scatter plot of delta z/(1+z) vs z
    delta_z = (z_spec - z_photo) / (1.0 + z_spec)
    bovy.scatterplot(
        z_spec, delta_z, scatter=True, levels=[0.68, 0.95, 0.997],
        color=palette[1], s=3,
        xrange=[0.43, 0.7], yrange=[-0.3, 0.3],
        xlabel='\mathtt{z_{spec}}',
        ylabel=r'\mathtt{\frac{|z_{spec} - z_{photo}|}{1\;+\;z_{spec}}}'
        )
    plt.savefig(''.join([fig_dir, 'cmass_delta_zphoto_zspec.png']),
                bbox_inches='tight')

    return None
def qPqfllp_comp(k, ell, rc=0.43):
    ''' Compare \Sum_l' q P(q) f_l,l'(q*rc, k*rc) of Nseries vs Nseries Mock

    For each catalog (Nseries with survey geometry vs. Nseries periodic
    box) the mock-averaged P_l'(q) is built, interpolated, multiplied by
    q * f_l,l'(q rc, k rc), plotted per l', and summed over l'.
    '''
    q_arr = np.logspace(-3, 3, num=100)
    krc = k * rc
    prettyplot()
    pretty_colors = prettycolors()
    fig = plt.figure(1, figsize=(14,8))
    sub = fig.add_subplot(111)
    # NOTE(review): maxmax/minmin are initialized but never updated or
    # used below -- confirm whether they are leftovers.
    maxmax, minmin = 0., 0.
    for mock in ['nseries', 'nseriesbox']:
        # running sum over l' of q P(q) f_l,l'
        qPqfllp = np.zeros(len(q_arr))
        if mock == 'nseries':
            # Nseries: 20 mocks, multipoles up to l' = 4 (ell_range=3)
            n_mock = 20
            data_dir = '/mount/riachuelo1/hahn/power/Nseries/'
            filename = lambda s: ''.join([
                data_dir, 'POWER_Q_CutskyN', str(s),
                '.fidcosmo.dat.grid960.P020000.box3600'])
            ell_range = 3
            i_col = 1
            lstyle = '-.'
        else:
            # Nseries box: 7 mocks, multipoles up to l' = 10 (ell_range=6)
            n_mock = 7
            data_dir = '/mount/riachuelo1/hahn/power/Nseries/Box/'
            filename = lambda s: ''.join([
                data_dir, 'power3600z_BoxN', str(s), '.dat'])
            ell_range = 6
            i_col = 3
            lstyle = '--'
        for i_lp, ellp in enumerate(range(ell_range)):
            lp = 2 * ellp
            # average P_l'(q) over the mocks
            for i_mock in xrange(1, n_mock+1):
                true_pk_file = filename(i_mock)
                # column holding the l' multipole differs between the two
                # file formats (box files keep the monopole in the last column)
                if lp == 0:
                    if mock == 'nseriesbox':
                        l_index = -1
                    else:
                        l_index = 1
                else:
                    l_index = 1 + int(lp/2)
                tr_k, tr_pk_i = np.loadtxt(
                    true_pk_file, unpack = True, usecols =[0,l_index]
                    )
                if i_mock == 1:
                    tr_pk = tr_pk_i
                else:
                    tr_pk += tr_pk_i
            # box P(k) needs the (2 pi)^3 normalization factor
            if mock == 'nseriesbox':
                tr_specs = (2.0*np.pi)**3 * tr_pk / np.float(n_mock)
            else:
                tr_specs = tr_pk/ np.float(n_mock)
            # interpolation function
            Pk_interp = interp1d(tr_k, tr_specs, kind='cubic')
            qPqfllp_lp = np.zeros(len(q_arr))
            for i_q, q in enumerate(q_arr):
                # P(q) without extrapolation beyond the measured k-range
                Pq = pq_noextrap(q, Pk_interp, k_min=tr_k[0], k_max=tr_k[-1])
                fllp = fourier_corr.f_l_lp(q*rc, krc, ell, lp)
                qPqfllp_lp[i_q] = q * Pq * fllp
            # only label the box curves to keep the legend compact
            if mock == 'nseries':
                sub.plot(q_arr, qPqfllp_lp, c=pretty_colors[i_lp], lw=2,
                         ls=lstyle)
            else:
                sub.plot(q_arr, qPqfllp_lp, c=pretty_colors[i_lp], lw=2,
                         ls=lstyle, label="l'="+str(lp))
            qPqfllp += qPqfllp_lp
        sub.plot(q_arr, qPqfllp, c=pretty_colors[i_col], lw=4, ls='-',
                 label=mock)
    # tr_k here is whatever the last loop iteration left behind -- marks
    # the maximum measured k
    sub.vlines(tr_k[-1], sub.axis()[2], sub.axis()[3], color='k',
               linestyles='--', linewidth=2)
    sub.set_xlim([10.**-3, 10.**2])
    sub.set_xscale('log')
    sub.set_xlabel(r"$\mathtt{q}$", fontsize=25)
    sub.set_ylabel(
        r"$\mathtt{\sum\limits_{l'} q P(q) f_{l, l'}(q r_c, k r_c)}$",
        fontsize=25)
    sub.text(1.5*10**-3, 0.5*(sub.axis()[2]+sub.axis()[3]),
             r"k = "+str(round(k,2)), fontsize=20)
    sub.legend(loc='upper right')
    fig.savefig(''.join([
        'figure/', 'SUMqPqf', str(ell),
        'lp.nseries_nseriesbox.comparison.k', str(round(k,2)), '.png'
        ]), bbox_inches='tight')
    plt.close()
def qaplot_sfms_envcount_fitting(Mrcut=18, sfq_test=True):
    """ Test functions of the SF-MS module that fits the group catalog SFMS.

    If sfq_test is specified, then the SFR(M*,z) cutoff for SF/Q
    classification is plotted on top of the SF-MS plots.

    Parameters
    ----------
    Mrcut : int
        Absolute magnitude cut for the SDSS group catalog best fit.
    sfq_test : bool
        If True, overplot the SF/Q classification cut sfr_cut(M*, z).
    """
    #prettyplot()
    pretty_colors = prettycolors()
    # read SF galaxies from the envcount catalog
    file_dir = 'dat/wetzel_tree/envcount/'
    sdss_envcount_file = ''.join([
        file_dir,
        'envcount_cylr2.5h35_thresh75_sdss_active_z0.05_0.12_primuszerr.fits'])
    sdss_sf_data = mrdfits(sdss_envcount_file)
    primus_envcount_file = ''.join([
        file_dir, 'envcount_cylr2.5h35_thresh75_active_z0.2_1.0_lit.fits'])
    primus_sf_data = mrdfits(primus_envcount_file)
    # one panel per redshift bin; z < 0.2 uses SDSS, the rest use PRIMUS
    for i_z, z_mid in enumerate([0.1, 0.3, 0.5, 0.7, 0.9]):
        if z_mid < 0.2:
            sf_data = sdss_sf_data
            # impose isolation criteria, mass completeness limit
            # (should I ?) and edge cuts
            centrals = np.where(
                (sf_data.envcount == 0.0) &
                (sf_data.mass > sf_data.masslimit) &
                (sf_data.edgecut == 1)
                )
        else:
            sf_data = primus_sf_data
            # impose isolation criteria, mass completeness limit
            # (should I ?) and edge cuts; additionally restrict to the
            # +/- 0.1 redshift window around z_mid
            centrals = np.where(
                (sf_data.redshift >= z_mid - 0.1) &
                (sf_data.redshift < z_mid + 0.1) &
                (sf_data.envcount == 0.0) &
                (sf_data.mass > sf_data.masslimit) &
                (sf_data.edgecut == 1)
                )
        print sf_data.weight[centrals]
        # weighted density contours of the SF central galaxies
        bovy.scatterplot(
            sf_data.mass[centrals],
            sf_data.sfr[centrals],
            scatter = True,
            levels = [0.68, 0.95, 0.997],
            weights = sf_data.weight[centrals],
            s = 3,
            xrange = [9.5, 12.0],
            yrange = [-1.5, 1.5],
            xlabel = 'log \; M_{*}',
            ylabel = 'log \; SFR'
            )
        #color = pretty_colors[1],
        # SDSS group catalog best fit
        gc_zmid, gc_slope, gc_yint = get_bestfit_sfms_groupcat(
            Mrcut=Mrcut, clobber=True)
        mass_bin = np.arange(9.0, 12.0, 0.25)    # mass bin
        # linear fit pivoted at log M* = 10.5
        plt.plot(
            mass_bin,
            gc_slope * (mass_bin-10.5) + gc_yint,
            c='k', lw=6, ls='--'
            )
        # Envcount catalog best fit
        ec_zmid, ec_slope, ec_yint = get_bestfit_sfms_envcount(clobber=True)
        plt.plot(
            mass_bin,
            ec_slope[i_z] * (mass_bin-10.5) + ec_yint[i_z],
            c = pretty_colors[i_z+3],
            lw = 4,
            label = str(ec_zmid[i_z])
            )
        # average SFR in the mass bins at this redshift
        avg_sfrs, sig_sfrs, ngals = get_sfr_mstar_z_envcount(
            mass_bin, [z_mid for i in xrange(len(mass_bin))]
            )
        # only keep bins with enough galaxies (sentinel is <= -999)
        enough_gal = np.where(np.array(avg_sfrs) > -998)
        plt.errorbar(
            mass_bin[enough_gal],
            np.array(avg_sfrs)[enough_gal],
            yerr = np.array(sig_sfrs)[enough_gal],
            lw = 4,
            c = pretty_colors[1],
            label='Average SFR'
            )
        plt.legend(loc='lower right')
        if sfq_test:
            # SF/Q classification cut
            plt.plot(mass_bin, sfr_cut(mass_bin, z_mid), c='k', lw=4, ls='--')
        fig_name = ''.join([
            'figure/', 'qaplot_sfms_fitting_envcount_z', str(z_mid), '.png'])
        plt.savefig(fig_name, bbox_inches='tight')
        plt.close()
    return None
def plot_cute2pcf(n_mocks, n_rp, n_pi, corrections=['true', 'upweighted', 'collrm'], scale='large', **kwargs): ''' Plot xi(r_p, pi) from CUTE 2PCF code ''' prettyplot() pretty_colors = prettycolors() if scale == 'large': contour_range = np.arange(-3.0, 2.5, 0.5) elif scale == 'small': contour_range = np.arange(-0.6, 3.0, 0.15) elif file_flag == 'smaller': contour_range = np.arange(-0.4, 3.2, 0.2) for corr in corrections: # for each correction fig = plt.figure(figsize=(15,10)) sub = fig.add_subplot(111) for i_mock in n_mocks: # for each mocks corr_file = ''.join([ '/mount/riachuelo1/hahn/2pcf/corr/', 'corr_2pcf_CutskyN', str(i_mock), '.', corr, '.cute2pcf.', scale, 'scale.dat' ]) print corr_file tpcf = np.loadtxt(corr_file, unpack=True) if i_mock == n_mocks[0]: rp_bins = tpcf[0].reshape(n_rp, n_pi)[0] pi_bins = tpcf[1].reshape(n_rp, n_pi)[:,0] r_p, pi = np.meshgrid(rp_bins, pi_bins) twop_corr = tpcf[2].reshape(n_rp, n_pi) else: twop_corr += tpcf[2].reshape(n_rp, n_pi) twop_corr /= np.float(len(n_mocks)) # average 2PCF # contour of log(xi(r_p, pi)) cont = sub.contourf(r_p, pi, np.log10(twop_corr.T), contour_range, cmap=plt.cm.afmhot) #sub.contour(r_p, pi, np.log10(twop_corr.T), contour_range, linewidths=4, cmap=plt.cm.afmhot) plt.colorbar(cont) sub.vlines(0.4, 0.0, np.max(r_p), lw=4, linestyle='--', color='red') sub.set_xlabel('$\mathtt{r_{p}}$', fontsize=40) sub.set_ylabel('$\pi$', fontsize=40) sub.set_xlim([np.min(rp_bins), np.max(rp_bins)]) sub.set_ylim([np.min(pi_bins), np.max(pi_bins)]) sub.set_title(r'$\mathtt{'+corr.upper()+r"}\;log\xi(r_{||}, r_\perp)$", fontsize=40) fig_name = ''.join([ '/home/users/hahn/powercode/FiberCollisions/figure/', '2pcf_Nseries_', corr, '_', str(len(n_mocks)), 'mocks.', scale, '.png' ]) fig.savefig(fig_name, bbox_inches="tight") plt.close()
def plot_lpsum_qPqfllp(ell, rc=0.43):
    ''' Compare \Sum_l' q P(q) f_l,l'(q*rc, k*rc) of Nseries vs Nseries Mock

    Same comparison as qPqfllp_comp, but only the l'-summed curve is
    plotted, for a set of k values on one figure.
    '''
    q_arr = np.logspace(-3, 3, num=100)
    prettyplot()
    pretty_colors = prettycolors()
    fig = plt.figure(1, figsize=(10,7))
    sub = fig.add_subplot(111)
    # one summed curve per k value and per catalog
    for i_k, k in enumerate([0.01, 0.05, 0.1, 0.3, 0.5, 0.8]):
        print k
        krc = k * rc
        for mock in ['nseries', 'nseriesbox']:
            if mock == 'nseries':
                # Nseries: 20 mocks, multipoles up to l' = 4 (ell_range=3)
                n_mock = 20
                filename = lambda s: ''.join([
                    '/mount/riachuelo1/hahn/power/Nseries/',
                    'POWER_Q_CutskyN', str(s),
                    '.fidcosmo.dat.grid960.P020000.box3600'])
                ell_range = 3
                lstyle = '--'
            else:
                # Nseries box: 7 mocks, multipoles up to l' = 10 (ell_range=6)
                n_mock = 7
                filename = lambda s: ''.join([
                    '/mount/riachuelo1/hahn/power/Nseries/Box/',
                    'power3600z_BoxN', str(s), '.dat'])
                ell_range = 6
                lstyle = '-'
            # running sum over l' of q P(q) f_l,l'
            qPqfllp = np.zeros(len(q_arr))
            for i_lp, ellp in enumerate(range(ell_range)):
                lp = 2 * ellp
                # average P_l'(q) over the mocks
                for i_mock in xrange(1, n_mock+1):
                    true_pk_file = filename(i_mock)
                    # column holding the l' multipole differs between the
                    # two file formats (box monopole is the last column)
                    if lp == 0:
                        if mock == 'nseriesbox':
                            l_index = -1
                        else:
                            l_index = 1
                    else:
                        l_index = 1 + int(lp/2)
                    tr_k, tr_pk_i = np.loadtxt(
                        true_pk_file, unpack = True, usecols =[0,l_index])
                    if i_mock == 1:
                        tr_pk = tr_pk_i
                    else:
                        tr_pk += tr_pk_i
                # box P(k) needs the (2 pi)^3 normalization factor
                if mock == 'nseriesbox':
                    tr_specs = (2.0*np.pi)**3 * tr_pk / np.float(n_mock)
                else:
                    tr_specs = tr_pk/ np.float(n_mock)
                # interpolation function
                Pk_interp = interp1d(tr_k, tr_specs, kind='cubic')
                qPqfllp_lp = np.zeros(len(q_arr))
                for i_q, q in enumerate(q_arr):
                    # P(q) without extrapolation beyond the measured range
                    Pq = pq_noextrap(q, Pk_interp, k_min=tr_k[0],
                                     k_max=tr_k[-1])
                    fllp = fourier_corr.f_l_lp(q*rc, krc, ell, lp)
                    qPqfllp_lp[i_q] = q * Pq * fllp
                qPqfllp += qPqfllp_lp
            # label only the first k for 'nseries' and every k for the box
            label_str = None
            if mock == 'nseriesbox':
                if i_k == 0:
                    label_str = mock+ '\n k = '+str(round(k, 2))
                else:
                    label_str = 'k = '+str(round(k, 2))
            else:
                if i_k == 0:
                    label_str = mock
            sub.plot(q_arr, qPqfllp, c=pretty_colors[i_k], lw=2, ls=lstyle,
                     label=label_str)
    # tr_k is left over from the last loop iteration -- marks max measured k
    sub.vlines(tr_k[-1], sub.axis()[2], sub.axis()[3], color='k',
               linestyles='--', linewidth=2)
    # x axis
    sub.set_xlim([10.**-3, 10.**2])
    sub.set_xscale('log')
    sub.set_xlabel(r"$\mathtt{q}$", fontsize=26)
    # y axis
    if ell == 0:
        sub.set_ylim([0.0, 2000.0])
    elif ell == 2:
        sub.set_ylim([-2000.0, 1000.0])
    sub.set_ylabel(
        r"$\mathtt{\sum\limits_{l'} q P(q) f_{l, l'}(q r_c, k r_c)}$",
        fontsize=25)
    sub.legend(loc='upper right')
    fig.savefig(''.join([
        'figure/', 'SUMqPqf', str(ell), 'lp.nseries_nseriesbox..png'
        ]), bbox_inches='tight', dpi=150)
    plt.close()
def plot_bao_cmass_cute2pcf(n_rp, n_pi, xiform='asinh', cmap='hot', **kwargs): ''' Plot xi(r_p, pi) from CUTE 2PCF code specifically for CMASS ''' prettyplot() mpl.rcParams['font.size']=24 pretty_colors = prettycolors() contour_range = 10 fig = plt.figure(figsize=(14,10)) sub = fig.add_subplot(111) corr_file = ''.join([ '/mount/riachuelo1/hahn/2pcf/corr/', 'corr_2pcf_cmass-dr12v4-N-Reid.cute2pcf.BAOscale.dat' ]) print corr_file tpcf = np.loadtxt(corr_file, unpack=True) rp_bins = tpcf[0].reshape(n_rp, n_pi)[0] pi_bins = tpcf[1].reshape(n_rp, n_pi)[:,0] twop_corr = tpcf[2].reshape(n_rp, n_pi) # annoying stuff to reflect the quadrants quad_rp_bins = np.hstack([-1.0 * rp_bins[::-1], rp_bins]) quad_pi_bins = np.hstack([-1.0 * pi_bins[::-1], pi_bins]) quad_tpcf_0, quad_tpcf_1, quad_tpcf_2 = [], [], [] for i_pi, pibin in enumerate(quad_pi_bins): for i_rp, rpbin in enumerate(quad_rp_bins): quad_tpcf_0.append(rpbin) quad_tpcf_1.append(pibin) if i_pi < n_pi: ipi = n_pi - i_pi - 1 else: ipi = i_pi % n_pi if i_rp < n_rp: irp = n_rp - i_rp - 1 else: irp = i_rp % n_rp quad_tpcf_2.append(twop_corr[irp, ipi]) #if (np.abs(rpbin) < 10.) 
and (np.abs(pibin) >95.): # print twop_corr[irp, ipi] quad_tpcf_0 = np.array(quad_tpcf_0) quad_tpcf_1 = np.array(quad_tpcf_1) quad_tpcf_2 = np.array(quad_tpcf_2) quad_rp_bins = quad_tpcf_0.reshape(2*n_rp, 2*n_pi)[0] quad_pi_bins = quad_tpcf_1.reshape(2*n_rp, 2*n_pi)[:,0] quad_twop_corr = quad_tpcf_2.reshape(2*n_rp, 2*n_pi) quad_rp, quad_pi = np.meshgrid(quad_rp_bins, quad_pi_bins) if cmap == 'hot': colormap = plt.cm.afmhot else: colormap = plt.cm.Paired # contour of log(xi(r_p, pi)) if xiform == 'log': print np.min(quad_twop_corr), np.max(quad_twop_corr) norm = mpl.colors.SymLogNorm(0.005, vmin=-0.02, vmax=4.0, clip=True) cont = sub.pcolormesh(quad_rp, quad_pi, quad_twop_corr, norm=norm, cmap=colormap) ticker = mpl.ticker.FixedLocator([-0.01, 0.0, 0.5, 0.1, 1.0, 3.0]) elif xiform == 'asinh': contour_range = np.arange(-0.025, 0.105, 0.005) #cont = sub.contourf(quad_rp, quad_pi, np.arcsinh(10. * quad_twop_corr), contour_range, cmap=plt.cm.afmhot) norm = mpl.colors.Normalize(-0.1, 0.1, clip=True) cont = sub.pcolormesh(quad_rp, quad_pi, np.arcsinh(10. 
* quad_twop_corr), norm=norm, cmap=colormap) #cont.set_interpolation('none') elif xiform == 'none': contour_range = np.arange(-0.5, 5.0, 0.05) cont = sub.contourf(quad_rp, quad_pi, quad_twop_corr, contour_range, cmap=colormap) #cont = sub.contourf(r_p, pi, twop_corr.T, contour_range, cmap=plt.cm.afmhot) else: raise ValueError if xiform == 'log': plt.colorbar(cont, ticks=ticker) else: plt.colorbar(cont) sub.set_xlabel('$\mathtt{r_{\perp}} \; (\mathtt{Mpc}/h)$', fontsize=50) sub.set_ylabel('$\mathtt{r_{||}} \; (\mathtt{Mpc}/h)$', fontsize=50) sub.set_xlim([np.min(quad_rp_bins), np.max(quad_rp_bins)]) sub.set_ylim([np.min(quad_pi_bins), np.max(quad_pi_bins)]) if cmap == 'hot': colormap_str = 'hot' else: colormap_str = 'jet' if xiform == 'log': fig_name = ''.join([ '/home/users/hahn/powercode/FiberCollisions/figure/', 'log2pcf_cmass-dr12v4-N-Reid.BAO.', colormap_str ,'.png' ]) elif xiform == 'asinh': sub.set_title(r'$\mathtt{'+corr.upper()+r"}\;arcsinh\:10\times\xi(r_{||}, r_\perp)$", fontsize=40) fig_name = ''.join([ '/home/users/hahn/powercode/FiberCollisions/figure/', 'arcsinh2pcf_cmass-dr12v4-N-Reid.BAO.', colormap_str ,'.png' ]) elif xiform == 'none': sub.set_title(r'$\mathtt{'+corr.upper()+r"}\;\xi(r_{||}, r_\perp)$", fontsize=40) fig_name = ''.join([ '/home/users/hahn/powercode/FiberCollisions/figure/', '2pcf_cmass-dr12v4-N-Reid.BAO.', colormap_str ,'.png' ]) plt.gca().set_aspect('equal', adjustable='box') fig.savefig(fig_name, bbox_inches="tight") plt.close()
def test_qPqfllp(l, lp, rc=0.43):
    ''' Test the polynomial estimates of the f_l,l' integrals by comparing
    q P(q) f_l,l'(q r_c, k r_c) evaluated with the exact integral
    (fourier_corr.f_l_lp) versus the polynomial estimate
    (fourier_corr.f_l_lp_est), overplotted for k = 0.01, 0.05, 0.1, 0.5.

    Parameter
    ---------
    - l : multipole l of f_l,l'
    - lp : multipole l' of f_l,l'
    - rc : fiber-collision comoving scale (default 0.43)

    Notes
    -----
    - Comparison assumes that there is No extrapolation of P(q) beyond the
      measured k-range (pq_noextrap).
    - Saves figure/qPqfllp.l{l}.lp{lp}.noextrap.png and returns nothing.
    '''
    q_arr = np.logspace(-3, 3, num=100)
    n_mock = 7
    k_fit = 4.
    k_fixed = 4.34
    data_dir = '/mount/riachuelo1/hahn/power/Nseries/Box/'
    # average the "true" box P(k) over the 7 Nseries box realizations
    for i_mock in xrange(1, 8):
        true_pk_file = ''.join([data_dir, 'power3600z_BoxN', str(i_mock), '.dat'])
        # column of the requested l' multipole in the power file
        if lp == 0:
            l_index = -1
        else:
            l_index = 1 + int(lp/2)
        tr_k, tr_pk_i = np.loadtxt(
                true_pk_file,
                unpack = True,
                usecols =[0,l_index]
                )
        if i_mock == 1:
            tr_pk = tr_pk_i
        else:
            tr_pk += tr_pk_i
    tr_pk /= 7.
    # NOTE(review): tr_specs is computed but never used here; the sibling
    # test_qPqfllp_k interpolates (2 pi)^3 * P(k) instead of P(k) -- confirm
    # which normalization is intended.
    tr_specs = (2.0*np.pi)**3 * tr_pk

    # interpolation function
    Pk_interp = interp1d(tr_k, tr_pk, kind='cubic')

    prettyplot()
    pretty_colors = prettycolors()
    fig = plt.figure(figsize=(14,8))
    sub = fig.add_subplot(111)

    maxmax, minmin = 0., 0.
    for ik, k in enumerate([0.01, 0.05, 0.1, 0.5]):
        krc = k * rc
        # integrand q P(q) f_l,l' for the exact and estimated kernels
        qPqfllp = []
        qPqfllp_est = []
        for q in q_arr:
            Pq = pq_noextrap(q, Pk_interp, k_min=tr_k[0], k_max=tr_k[-1])
            fllp = fourier_corr.f_l_lp(q*rc, krc, l, lp)
            fllp_est = fourier_corr.f_l_lp_est(q*rc, krc, l, lp)
            qPqfllp.append(q * Pq * fllp)
            qPqfllp_est.append(q * Pq * fllp_est)

        int_label = '$ k = '+str(round(k, 2))+ '$'
        est_label = '$ k = '+str(round(k, 2))+ '$ estimate'

        sub.plot(q_arr, np.array(qPqfllp),
                c=pretty_colors[ik+1], lw=4, ls='-', label=int_label)
        sub.plot(q_arr, np.array(qPqfllp_est),
                c='k', lw=2, ls='--', label=est_label)

        maxmax = np.max([np.max(qPqfllp), maxmax])
        minmin = np.min([np.min(qPqfllp), minmin])

    sub.set_xscale('log')
    sub.set_xlabel(r"$\mathtt{q}$", fontsize=25)
    sub.set_ylabel(r"$\mathtt{q P(q) f_{l, l'}(q r_c, k r_c)}$", fontsize=25)
    # mark the edge of the measured k-range (no extrapolation beyond it)
    if l == 0:
        sub.vlines(tr_k[-1], -20., 100., color='k', linestyles='--', linewidth=2)
        sub.set_ylim([-20, 20])
    elif l == 2:
        sub.vlines(tr_k[-1], -25., 5., color='k', linestyles='--', linewidth=2)
        sub.set_ylim([-5, 5])
    sub.text(2.*10**-3, 1.01*maxmax, r"l = "+str(l)+", l' = "+str(lp), fontsize=20)
    sub.legend(loc='upper right')
    #plt.show()
    fig.savefig(''.join([
        'figure/',
        'qPqfllp.l', str(l), '.lp', str(lp), '.noextrap.png'
        ]), bbox_inches='tight')
    plt.close()
def plot_cute2pcf_residual(n_mocks, n_rp, n_pi, corrections=['true', 'upweighted', 'collrm'], scale='large', **kwargs):
    ''' Plot the residual 1 - (1 + xi^corr)/(1 + xi^true) of xi(r_p, pi)
    from the CUTE 2PCF code, averaged over mocks, for each fiber-collision
    correction scheme.

    Parameters
    ----------
    n_mocks : list of mock realization numbers to average over
    n_rp : number of r_p bins in the 2PCF files
    n_pi : number of pi bins in the 2PCF files
    corrections : correction schemes to compare; must start with 'true'
        since the true xi is stored on the first pass and used as the
        reference for later corrections (NOTE: mutable default is shared
        across calls, but it is only read here)
    scale : 'large'/'small'/'smaller'/'verysmall'; sets the contour levels
        and the file suffix

    Notes
    -----
    Saves one figure per non-'true' correction and returns nothing.
    '''
    prettyplot()
    pretty_colors = prettycolors()
    # contour levels depend on the scale of interest
    if scale == 'large':
        contour_range = np.arange(-0.05, 0.05, 0.005)
    elif scale == 'small':
        contour_range = np.arange(-0.1, 0.11, 0.01)
    elif scale == 'smaller':
        contour_range = np.arange(-0.5, 0.5, 0.05)
    elif scale == 'verysmall':
        contour_range = 20

    for corr in corrections:    # for each correction
        for i_mock in n_mocks:  # for each mocks
            corr_file = ''.join([
                '/mount/riachuelo1/hahn/2pcf/corr/',
                'corr_2pcf_CutskyN', str(i_mock), '.', corr, '.cute2pcf.', scale, 'scale.dat'
                ])
            print corr_file
            tpcf = np.loadtxt(corr_file, unpack=True)
            if i_mock == n_mocks[0]:
                # first mock defines the bin grid
                rp_bins = tpcf[0].reshape(n_rp, n_pi)[0]
                pi_bins = tpcf[1].reshape(n_rp, n_pi)[:,0]
                r_p, pi = np.meshgrid(rp_bins, pi_bins)
                twop_corr = tpcf[2].reshape(n_rp, n_pi)
            else:
                twop_corr += tpcf[2].reshape(n_rp, n_pi)
        twop_corr /= np.float(len(n_mocks))  # average 2PCF

        if corr == 'true':
            # store the reference xi; assumes 'true' comes first in corrections
            true_corr = twop_corr.T
            continue

        print (twop_corr.T).shape
        fig = plt.figure(figsize=(15,10))
        sub = fig.add_subplot(111)

        # contour of 1 - (1 + xi^fc)/(1+xi^true)
        residual_2pcf = 1.0 - (1.0 + twop_corr.T)/(1.0 + true_corr)
        print np.max(residual_2pcf)
        cont = sub.contourf(
                r_p, pi, residual_2pcf, contour_range, cmap=plt.cm.afmhot
                )
        plt.colorbar(cont)
        # mark the fiber-collision scale r_p = 0.4 Mpc/h
        sub.vlines(0.4, 0.0, np.max(r_p), lw=4, linestyle='--', color='red')

        sub.set_xlabel('$\mathtt{r_{p}}$', fontsize=40)
        sub.set_ylabel('$\pi$', fontsize=40)
        sub.set_xlim([np.min(rp_bins), np.max(rp_bins)])
        sub.set_ylim([np.min(pi_bins), np.max(pi_bins)])
        sub.set_title(r"$1 - (1 + \xi^\mathtt{"+corr.upper()+r"})/(1+ \xi^\mathtt{TRUE})$", fontsize=40)

        fig_name = ''.join([
            '/home/users/hahn/powercode/FiberCollisions/figure/',
            '2pcf_Nseries_', corr, '_', str(len(n_mocks)), 'mocks.', scale, '.tophat_comparison.png'
            ])
        fig.savefig(fig_name, bbox_inches="tight")
        plt.close()
def test_quenching_fraction(tau_prop={'name': 'instant'}, n_snap0=13, **kwargs):
    ''' Plot evolution of the quenching population in the CenQue SSFR
    distribution: fig1 shows f_quenching vs stellar mass per snapshot,
    fig2 shows f_quenching vs redshift per mass bin.

    Parameters
    ----------
    tau_prop : dict describing the quenching timescale model; 'name' is one
        of 'instant'/'constant'/'satellite'/'long' or 'line' (the latter
        additionally requires 'fid_mass', 'slope', 'yint')
    n_snap0 : earliest snapshot number; snapshots n_snap0-1 ... 1 are plotted

    Notes
    -----
    * NOTE(review): a second function with this same name is defined later
      in this file and shadows this definition at import time.
    * Mutable default tau_prop is only read, never mutated.
    '''
    # encode the tau model into the file-name suffix
    if tau_prop['name'] in ('instant', 'constant', 'satellite', 'long'):
        tau_str = ''.join(['_', tau_prop['name'], 'tau'])
    elif tau_prop['name'] in ('line'):
        tau_str = ''.join([
            '_', tau_prop['name'], 'tau',
            '_Mfid', str(tau_prop['fid_mass']),
            '_slope', str(round(tau_prop['slope'], 4)),
            '_yint', str(round(tau_prop['yint'], 4))
            ])

    prettyplot()
    pretty_color = prettycolors()
    fig1 = plt.figure(1, figsize=(15, 7))
    sub1 = fig1.add_subplot(111)
    fig2 = plt.figure(2, figsize=(12, 7))
    sub2 = fig2.add_subplot(111)

    # Overplot CenQue of specified Snapshots
    snaps = []
    for i_nsnap in reversed(xrange(1, n_snap0)):
        fqing_file = ''.join([
            '/data1/hahn/f_quenching/',
            'quenching_fraction', tau_str, '_nsnap', str(i_nsnap), '.dat'
            ])
        mass_bin, fqing = np.loadtxt(fqing_file, skiprows=1, unpack=True, usecols=[0, 1])
        #print i_nsnap, 'slope = ', (fqing[-1] - fqing[0])/(mass_bin[-1] - mass_bin[0])
        #print i_nsnap, 'slope = ', (fqing[-2] - fqing[0])/(mass_bin[-2] - mass_bin[0])
        #print i_nsnap, 'slope = ', (fqing[-3] - fqing[0])/(mass_bin[-3] - mass_bin[0])
        #print ((fqing[-1] - fqing[0])/(mass_bin[-1] - mass_bin[0]) + (fqing[-2] - fqing[0])/(mass_bin[-2] - mass_bin[0]) + (fqing[-3] - fqing[0])/(mass_bin[-3] - mass_bin[0]))/3.0
        sub1.plot(mass_bin, fqing, c=pretty_color[i_nsnap + 1], lw='4',
                label='Snapshot' + str(i_nsnap))
        #sub1.plot(mass_bin, (0.03 * (np.array(mass_bin) - 9.5))*(1.8 - get_z_nsnap(i_nsnap))**5.0, ls='--', lw='3', c=pretty_color[i_nsnap+1])

        # on the first (earliest) snapshot, set up one list per mass bin
        if i_nsnap == n_snap0 - 1:
            fqing_m = []
            for i_m, mass in enumerate(mass_bin):
                fqing_m.append([])
        # accumulate f_quenching per mass bin across snapshots
        for i_m, mass in enumerate(mass_bin):
            fqing_m[i_m].append(fqing[i_m])
        snaps.append(get_z_nsnap(i_nsnap))
    #print fqing_m

    for i_m, mass in enumerate(mass_bin):
        #print mass, np.around(fqing_m[i_m],4)
        #print 'slope = ', (fqing_m[i_m][-1] - fqing_m[i_m][0])/(snaps[0] - snaps[-1])
        #print 'slope = ', (fqing_m[i_m][-2] - fqing_m[i_m][0])/(snaps[0] - snaps[-2])
        #print 'slope = ', (np.log10(fqing_m[i_m][-1]) - np.log10(fqing_m[i_m][1]))/(snaps[1] - snaps[-1])
        fqing_massbin = np.array(fqing_m[i_m])
        # replace exact zeros so the log-scale plot stays finite
        fqing_massbin[np.where(fqing_massbin == 0.)] = 10.**-10
        #print snaps, fqing_massbin
        sub2.scatter(snaps, fqing_massbin, c=pretty_color[i_m + 1])
        sub2.plot(snaps, fqing_massbin, lw='3', c=pretty_color[i_m + 1],
                label=r'$\mathtt{M_* =\;}$' + str(mass))
        #sub2.plot(snaps, (0.03 * (mass - 9.5)) * (1.8 - np.array(snaps))**2.0, ls='--', lw='3', c=pretty_color[i_m+1])
    del snaps
    del fqing_m

    sub1.legend(loc='lower right')
    sub1.set_yscale('log')
    sub1.set_ylim([0.0, 1.0])
    sub1.set_xlim([9.5, 14.0])
    sub1.set_xlabel('Stellar Mass ($M_*$)')
    sub1.set_ylabel('Predicted Quenching Fraction')

    sub2.legend(loc='lower right')
    sub2.set_yscale('log')
    # reversed x-axis: high z on the left
    sub2.set_xlim([0.9, -0.3])
    sub2.set_ylim([0.0001, 1.0])
    sub2.set_xlabel('Redshift ($\mathtt{z}$)')
    sub2.set_ylabel('Predicted Quenching Fraction')

    fig1.savefig(''.join([
        '/home/users/hahn/research/pro/tinker/central_quenching/figure/',
        'f_quenching', tau_str, '.png'
        ]), bbox_inches='tight')
    fig2.savefig(''.join([
        '/home/users/hahn/research/pro/tinker/central_quenching/figure/',
        'f_quenching_evol', tau_str, '.png'
        ]), bbox_inches='tight')
    fig1.clear()
    fig2.clear()
    plt.close()
def plot_bao_cute2pcf(n_mocks, n_rp, n_pi, corrections=['true'], f_down=0.2, xiform='asinh', cmap='hot', **kwargs):
    ''' Plot the mock-averaged xi(r_p, pi) from the CUTE 2PCF code at BAO
    scales, mirrored into all four quadrants for display.

    Parameters
    ----------
    n_mocks : list of mock realization numbers to average over
    n_rp, n_pi : number of r_p and pi bins in the 2PCF files
    corrections : list of fiber-collision correction schemes to plot
        (mutable default; only read here)
    f_down : downsampling fraction encoded in the input file names
    xiform : 'log' (SymLogNorm pcolormesh), 'asinh' (arcsinh(10 xi)
        pcolormesh) or 'none' (plain contourf)
    cmap : 'hot' uses afmhot, anything else uses Paired

    Notes
    -----
    Displays the figure via plt.show() (fig_name is assembled but the
    figure is not saved here).
    '''
    prettyplot()
    mpl.rcParams['font.size']=24
    pretty_colors = prettycolors()
    contour_range = 10

    for corr in corrections:    # for each correction
        fig = plt.figure(figsize=(14,10))
        sub = fig.add_subplot(111)
        for i_mock in n_mocks:  # for each mocks
            corr_file = ''.join([
                '/mount/riachuelo1/hahn/2pcf/corr/',
                'corr_2pcf_CutskyN', str(i_mock), '.', corr, '.cute2pcf.',
                str(round(f_down,2)), 'down.BAOscale.dat'
                ])
            print corr_file
            tpcf = np.loadtxt(corr_file, unpack=True)
            if i_mock == n_mocks[0]:
                # first mock defines the bin grid
                rp_bins = tpcf[0].reshape(n_rp, n_pi)[0]
                pi_bins = tpcf[1].reshape(n_rp, n_pi)[:,0]
                twop_corr = tpcf[2].reshape(n_rp, n_pi)
            else:
                twop_corr += tpcf[2].reshape(n_rp, n_pi)
        twop_corr /= np.float(len(n_mocks))  # average 2PCF

        # mirror the measured quadrant into all four quadrants
        quad_rp_bins = np.hstack([-1.0 * rp_bins[::-1], rp_bins])
        quad_pi_bins = np.hstack([-1.0 * pi_bins[::-1], pi_bins])
        quad_tpcf_0, quad_tpcf_1, quad_tpcf_2 = [], [], []
        for i_pi, pibin in enumerate(quad_pi_bins):
            for i_rp, rpbin in enumerate(quad_rp_bins):
                quad_tpcf_0.append(rpbin)
                quad_tpcf_1.append(pibin)
                # map the mirrored bin index back into the measured quadrant
                if i_pi < n_pi:
                    ipi = n_pi - i_pi - 1
                else:
                    ipi = i_pi % n_pi
                if i_rp < n_rp:
                    irp = n_rp - i_rp - 1
                else:
                    irp = i_rp % n_rp
                quad_tpcf_2.append(twop_corr[irp, ipi])
                #if (np.abs(rpbin) < 10.) and (np.abs(pibin) >95.):
                #    print twop_corr[irp, ipi]
        quad_tpcf_0 = np.array(quad_tpcf_0)
        quad_tpcf_1 = np.array(quad_tpcf_1)
        quad_tpcf_2 = np.array(quad_tpcf_2)
        quad_rp_bins = quad_tpcf_0.reshape(2*n_rp, 2*n_pi)[0]
        quad_pi_bins = quad_tpcf_1.reshape(2*n_rp, 2*n_pi)[:,0]
        quad_twop_corr = quad_tpcf_2.reshape(2*n_rp, 2*n_pi)

        quad_rp, quad_pi = np.meshgrid(quad_rp_bins, quad_pi_bins)

        if cmap == 'hot':
            colormap = plt.cm.afmhot
        else:
            colormap = plt.cm.Paired

        # contour of log(xi(r_p, pi))
        if xiform == 'log':
            print np.min(quad_twop_corr), np.max(quad_twop_corr)
            norm = mpl.colors.SymLogNorm(0.001, vmin=-0.01, vmax=5.0, clip=True)
            cont = sub.pcolormesh(quad_rp, quad_pi, quad_twop_corr, norm=norm, cmap=colormap)
            #cont = sub.contourf(quad_rp, quad_pi, np.log10(quad_twop_corr), contour_range, cmap=plt.cm.afmhot)
            ticker = mpl.ticker.FixedLocator([-0.01, 0.0, 0.5, 0.1, 1.0, 3.0])
        elif xiform == 'asinh':
            contour_range = np.arange(-0.025, 0.105, 0.005)
            #cont = sub.contourf(quad_rp, quad_pi, np.arcsinh(10. * quad_twop_corr), contour_range, cmap=plt.cm.afmhot)
            norm = mpl.colors.Normalize(-0.1, 0.1, clip=True)
            cont = sub.pcolormesh(quad_rp, quad_pi, np.arcsinh(10. * quad_twop_corr), norm=norm, cmap=colormap)
            #cont.set_interpolation('none')
        elif xiform == 'none':
            contour_range = np.arange(-0.5, 5.0, 0.05)
            cont = sub.contourf(quad_rp, quad_pi, quad_twop_corr, contour_range, cmap=colormap)
            #cont = sub.contourf(r_p, pi, twop_corr.T, contour_range, cmap=plt.cm.afmhot)
        else:
            raise ValueError
        #sub.contour(r_p, pi, np.log10(twop_corr.T), contour_range, linewidths=4, cmap=plt.cm.afmhot)

        if xiform == 'log':
            plt.colorbar(cont, ticks=ticker)
        else:
            plt.colorbar(cont)

        sub.set_xlabel('$\mathtt{r_{\perp}} \; (\mathtt{Mpc}/h)$', fontsize=50)
        sub.set_ylabel('$\mathtt{r_{||}} \; (\mathtt{Mpc}/h)$', fontsize=50)
        sub.set_xlim([np.min(quad_rp_bins), np.max(quad_rp_bins)])
        sub.set_ylim([np.min(quad_pi_bins), np.max(quad_pi_bins)])

        if cmap == 'hot':
            colormap_str = 'hot'
        else:
            colormap_str = 'jet'

        # NOTE(review): fig_name is assembled but never used -- the figure
        # is shown, not saved (cf. other 2PCF plotters in this file).
        if xiform == 'log':
            #sub.set_title(r'$\mathtt{'+corr.upper()+r"}\;log\xi(r_{||}, r_\perp)$", fontsize=40)
            fig_name = ''.join([
                '/home/users/hahn/powercode/FiberCollisions/figure/',
                'log2pcf_Nseries_', corr, '_', str(len(n_mocks)), 'mocks.',
                str(round(f_down,2)), 'down.BAO.', colormap_str ,'.png'
                ])
        elif xiform == 'asinh':
            sub.set_title(r'$\mathtt{'+corr.upper()+r"}\;arcsinh\:10\times\xi(r_{||}, r_\perp)$", fontsize=40)
            fig_name = ''.join([
                '/home/users/hahn/powercode/FiberCollisions/figure/',
                'arcsinh2pcf_Nseries_', corr, '_', str(len(n_mocks)), 'mocks.',
                str(round(f_down,2)), 'down.BAO.', colormap_str, '.png'
                ])
        elif xiform == 'none':
            sub.set_title(r'$\mathtt{'+corr.upper()+r"}\;\xi(r_{||}, r_\perp)$", fontsize=40)
            fig_name = ''.join([
                '/home/users/hahn/powercode/FiberCollisions/figure/',
                '2pcf_Nseries_', corr, '_', str(len(n_mocks)), 'mocks.',
                str(round(f_down,2)), 'down.BAO.', colormap_str, '.png'
                ])
        plt.gca().set_aspect('equal', adjustable='box')
        plt.show()
def cmass_deltaz_zspec_zphoto_test(overplot=True):
    ''' Spherematch photometric and spectroscopic catalogs to determine
    standard deviation redshift errors of photmetric redshifts wrt
    spectroscopic redshifts, and plot delta_z/(1+z_spec) vs z_spec with
    the binned mean +/- sigma overlaid.

    Parameters
    ----------
    overplot : if True, first draw the bovy density scatterplot of the
        matched (z_spec, delta_z) points; the binned summary statistics
        are plotted on top either way.

    Notes
    -----
    * Using pyspherematch
    * Details on CMASS redshifts in:
      https://trac.sdss3.org/wiki/BOSS/clustering/WGCatalogCode
    * Assuming delta_z/z distribution is gaussian
    * Saves cmass_deltaz_zspec_zphoto.png and returns nothing.
    '''
    prettyplot()    # set up plot
    pretty_colors = prettycolors()

    # matching spectroscopic and photometric redshifts
    z_spec, z_photo = match_zspec_zphoto_cmass()
    # normalized redshift error
    delta_z = (z_spec - z_photo) / (1.0 + z_spec)

    if overplot:
        bovy.scatterplot(
                z_spec,
                delta_z,
                scatter=True,
                levels=[0.68, 0.95, 0.997],
                color=pretty_colors[1],
                s=3,
                xrange=[0.43, 0.7],
                yrange=[-0.3, 0.3],
                xlabel='\mathtt{z_{spec}}',
                ylabel=r'\mathtt{\frac{|z_{spec} - z_{photo}|}{1\;+\;z_{spec}}}'
                )

    # summary statistics (mu and sigma) of delta z / (1+z)
    z_mid, z_low, z_high, mu_deltaz, sigma_deltaz = cmass_deltaz_zspec_zphoto()
    plt.errorbar(z_mid, mu_deltaz, yerr=sigma_deltaz, c=pretty_colors[3])
    plt.scatter(z_mid, mu_deltaz, c=pretty_colors[3])

    fig_dir = '/home/users/hahn/powercode/FiberCollisions/figure/'
    fig_file = ''.join([fig_dir, 'cmass_deltaz_zspec_zphoto.png'])
    plt.savefig(fig_file, bbox_inches='tight')
    plt.close()
def plot_pk_comp(cat_corrs, n_mock, ell=0, type='ratio', **kwargs):
    ''' Plot comparison of average power spectrum monopole or quadrupole
    (avg(P(k))) for multiple a list of catalog and correction specifications.
    Main use is to compare the effects of fiber collisions correction method.
    However, it can be used to compare any power spectra as long as cat_corr
    dictionary is specified.

    --------------------------------------------------------------------------
    Paramters
    --------------------------------------------------------------------------
    cat_corrs : list of catalog correction dictionary
    n_mock : number of mocks (int applied to all cat_corrs, or a list of the
        same length as cat_corrs)
    ell : ell-th component of multipole decomposition
    type : comparison mode -- 'Pk' overlays |P(k)|; 'Pk_err' adds the sample
        variance band; 'Pk_all' overlays every realization; 'kPk' plots
        k^1.5 P(k); 'ratio' plots P/P_first; 'residual'; 'l1_norm' plots
        P - P_first.  (NOTE: parameter shadows the builtin `type`, but the
        name is part of the public interface.)

    --------------------------------------------------------------------------
    Notes
    --------------------------------------------------------------------------
    * Long ass code with a lot of idiosyncracies.
    * Make sure k values agree with each other.
    * The FIRST entry of cat_corrs is the denominator/reference for the
      'ratio' and 'l1_norm' modes.

    --------------------------------------------------------------------------
    Example
    --------------------------------------------------------------------------
    cat_corrs = [
        {'catalog': {'name': 'nseries'}, 'correction': {'name': 'true'}},
        {'catalog': {'name': 'nseries'}, 'correction': {'name': 'upweight'}},
        {'catalog': {'name': 'nseries'}, 'correction': {'name': 'dlospeak', 'fit': 'gauss', 'sigma': 3.9, 'fpeak': 0.68}}
        ]
    plot_pk_comp(cat_corrs, 84, quad=False, type='Pk')
    plot_pk_comp(cat_corrs, 84, quad=False, type='ratio')
    '''
    if 'Ngrid' in kwargs.keys():
        Ngrid = kwargs['Ngrid']
    else:
        Ngrid = 360

    # normalize n_mock to one entry per cat_corr
    if isinstance(n_mock, int):
        n_mock_list = [ n_mock for i in xrange(len(cat_corrs)) ]
    else:
        if len(n_mock) != len(cat_corrs):
            raise ValueError()
        else:
            n_mock_list = n_mock

    corr_str = ''

    prettyplot()    # set up plot
    pretty_colors = prettycolors()

    if 'figsize' in kwargs.keys():
        fig = plt.figure(1, kwargs['figsize'])
    else:
        fig = plt.figure(1, figsize=(7, 8))  # set up figure
    sub = fig.add_subplot(111)

    for i_corr, cat_corr in enumerate(cat_corrs):
        catdict = cat_corr['catalog']
        corrdict = cat_corr['correction']
        specdict = {'P0': 20000, 'Lbox': 3600, 'Ngrid': Ngrid, 'ell': ell}

        cat_corr_i = {
                'catalog': {'name': catdict['name'], 'n_mock': 1},
                'correction': corrdict,
                'spec': specdict
                }

        avg_spec = AvgSpec(n_mock_list[i_corr], 'pk', cat_corr_i)
        avg_spec.read()

        spec_type = 'pk'
        spec_key = ''.join(['p', str(ell), 'k'])

        k_arr = avg_spec.k
        avg_pk = getattr(avg_spec, spec_key)

        if type == 'Pk':
            # Compare P(k) to each other
            sub.plot(
                    k_arr,
                    np.abs(avg_pk),
                    color = pretty_colors[i_corr + 1],
                    label = plot_label(cat_corr),
                    lw = 4
                    )
        elif type == 'Pk_err':
            # Compare P(k) with sample variance error bar
            pk_err = avg_spec.stddev()
            # cap the error so the lower band stays positive for the log plot
            pk_err[np.abs(pk_err) > np.abs(avg_pk)] = avg_pk[np.abs(pk_err) > np.abs(avg_pk)] * 0.999999

            sub.fill_between(k_arr,
                    np.abs(avg_pk - pk_err),
                    np.abs(avg_pk + pk_err),
                    color = pretty_colors[i_corr + 1],
                    alpha=0.9)
            sub.plot(k_arr, np.abs(avg_pk), lw=4,
                    color = pretty_colors[i_corr + 1],
                    label = plot_label(cat_corr))
            #sub.errorbar(
            #        k_arr, avg_pk,
            #        yerr = [pk_err, pk_err],
            #        color = pretty_colors[i_corr + 1],
            #        label = plot_label(cat_corr),
            #        fmt='--o'
            #        )
        elif type == 'Pk_all':
            # overlay each realization in grey under the average
            if isinstance(n_mock_list[i_corr], int):
                n_mock_list_i = range(1, n_mock_list[i_corr]+1)
            else:
                n_mock_list_i = n_mock_list[i_corr]
            for i_mock in n_mock_list_i:
                k_i, spec_i_spec = avg_spec.spec_i(i_mock)
                sub.plot(
                        k_i,
                        spec_i_spec,
                        color = '0.25',
                        lw = 1
                        )
            sub.plot(
                    k_arr,
                    avg_pk,
                    color = pretty_colors[i_corr + 1],
                    label = plot_label(cat_corr),
                    lw = 4
                    )
        elif type == 'kPk':
            # Compare k^1.5 * P(k) with each other. Enhances the
            # BAO signature? (Upon Paco's request).
            kPk = k_arr**1.5 * avg_pk
            sub.scatter(
                    k_arr,
                    kPk,
                    color = pretty_colors[i_corr+1],
                    label = plot_label(cat_corr)
                    )
        elif type == 'ratio':
            # Compare the ratio of the power spectra (P/P_denom)
            if i_corr == 0 :
                avg_pk_denom = avg_pk
                #denom_cat = catdict['name']
                denom_cat = corrdict['name']
            else:
                sub.scatter(
                        k_arr,
                        avg_pk/avg_pk_denom,
                        color = pretty_colors[i_corr+1],
                        label = plot_label(cat_corr)
                        )
                print plot_label(cat_corr)
                largescale = np.where(k_arr < 0.2)
                smallscale = np.where(k_arr > 0.2)
                print np.sum( np.abs((avg_pk/avg_pk_denom) - 1.0 ) )
                print 'Large scale k < 0.2'
                print np.sum( np.abs((avg_pk[largescale]/avg_pk_denom[largescale]) - 1.0 ) )
                print 'Small scale k > 0.2'
                print np.sum( np.abs((avg_pk[smallscale]/avg_pk_denom[smallscale]) - 1.0 ) )

            if corrdict['name'] == 'true':
                # overlay the 1 +/- dP/P band of the reference spectrum
                pk_err = avg_spec.stddev()
                sub.plot(
                        k_arr,
                        1.0 + pk_err/np.abs(avg_pk),
                        color = 'k',
                        lw = 2,
                        ls = '-.',
                        label = r"$\mathtt{1 + \Delta P^{true} (k) / P^{true}}$"
                        )
                sub.plot(
                        k_arr,
                        1.0 + -1.0 * pk_err/np.abs(avg_pk),
                        color = 'k',
                        lw = 2,
                        ls = '-.'
                        )
        elif type == 'l1_norm':
            if i_corr == 0 :
                avg_pk_denom = avg_pk
                denom_cat = catdict['name']
            else:
                sub.scatter(
                        k_arr,
                        avg_pk - avg_pk_denom,
                        color = pretty_colors[i_corr+1],
                        label = plot_label(cat_corr)
                        )
                print plot_label(cat_corr)
                print (avg_pk-avg_pk_denom)[-10:]

        del avg_pk

        # Specify corrections for figure file name
        if 'dlospeak' in corrdict['name']:
            try:
                corr_str += ''.join([
                    catdict['name'], '_',
                    corrdict['name'], '_',
                    corrdict['fit'], '_',
                    '_sigma', str(corrdict['sigma']),
                    'fpeak', str(corrdict['fpeak'])
                    ])
            except KeyError:
                corr_str += ''.join([
                    catdict['name'], '_',
                    corrdict['name'], '_',
                    '_sigma', str(corrdict['sigma'])
                    ])
        elif corrdict['name'] == 'fourier_tophat':
            corr_str += ''.join([
                catdict['name'], '_',
                corrdict['name'],
                '.fs', str(round(corrdict['fs'], 1)),
                '.rc', str(round(corrdict['rc'], 2)),
                '.kfit', str(round(corrdict['k_fit'], 2)),
                '.kfixed', str(round(corrdict['k_fixed'], 2))
                ])
        else:
            corr_str += ''.join([
                catdict['name'], '_',
                corrdict['name']
                ])

    # Dictate the x-range and y-range of the plotting
    # based on type of comparison
    if (type == 'Pk') or (type == 'Pk_err') or (type == 'Pk_all'):
        if 'yrange' in kwargs.keys():
            ylimit = kwargs['yrange']
            yytext = 10**.5 * min(ylimit)
        else:
            ylimit = [10**2,10**5.5]
            yytext = 10**2.5
        if 'ylabel' in kwargs.keys():
            ylabel = kwargs['ylabel']
        else:
            ylabel = r'$\mathtt{|P_'+str(ell)+'(k)|}$'
        if 'xscale' in kwargs.keys():
            sub.set_xscale(kwargs['xscale'])
        else:
            sub.set_xscale('log')
        if 'yscale' in kwargs.keys():
            sub.set_yscale(kwargs['yscale'])
        else:
            sub.set_yscale('log')
        if type == 'Pk':
            resid_str = ''
        elif type == 'Pk_err':
            resid_str = '_err'
        elif type == 'Pk_all':
            resid_str = '_all'
    elif type == 'kPk':
        if 'yrange' in kwargs.keys():
            ylimit = kwargs['yrange']
            yytext = 10**.5*min(ylimit)
        else:
            ylimit = [10**0,10**2.0]
            yytext = 10**0.1
        if 'ylabel' in kwargs.keys():
            ylabel = kwargs['ylabel']
        else:
            ylabel = r'$\mathtt{k^{1.5} P_0(k)}$'
        if 'xscale' in kwargs.keys():
            sub.set_xscale(kwargs['xscale'])
        else:
            sub.set_xscale('log')
        if 'yscale' in kwargs.keys():
            sub.set_yscale(kwargs['yscale'])
        else:
            sub.set_yscale('log')
        resid_str = '_kPk'
    elif type == 'ratio':
        if 'yrange' in kwargs.keys():
            ylimit = kwargs['yrange']
            yytext = 0.05 + min(ylimit)
        else:
            ylimit = [0.5, 1.5]
            yytext = 0.55
        ylabel = ''.join([
            r"$\mathtt{\overline{P_", str(ell), "(k)}/\overline{P_", str(ell),
            r"(k)_{\rm{", denom_cat, "}}}}$"
            ])
        if 'xscale' in kwargs.keys():
            sub.set_xscale(kwargs['xscale'])
        else:
            sub.set_xscale('log')
        if 'yscale' in kwargs.keys():
            sub.set_yscale(kwargs['yscale'])
        else:
            pass
        resid_str = '_ratio'
    elif type == 'residual':
        if 'yrange' in kwargs.keys():
            ylimit = kwargs['yrange']
            yytext = np.mean(ylimit)
        else:
            ylimit = [0.0, 5.0]
            yytext = 2.5
        ylabel = ''.join([
            r"$\mathtt{|\overline{P_", str(ell), "(k)} - \overline{P_", str(ell),
            r"(k)_{\rm{True}}}|/\Delta P_", str(ell), "}$"
            ])
        if 'xscale' in kwargs.keys():
            sub.set_xscale(kwargs['xscale'])
        else:
            sub.set_xscale('log')
        if 'yscale' in kwargs.keys():
            sub.set_yscale(kwargs['yscale'])
            if (kwargs['yscale'] == 'log') and \
                    ('yrange' not in kwargs.keys()):
                ylimit = [10**-3, 10**1]
        else:
            pass
        resid_str = '_residual'
    elif type == 'l1_norm':
        if 'yrange' in kwargs.keys():
            ylimit = kwargs['yrange']
            yytext = 0.05 + min(ylimit)
        else:
            ylimit = None
            yytext = 0.55
        ylabel = ''.join([
            r"$\mathtt{\overline{P_", str(ell), "(k)} - \overline{P_", str(ell),
            r"(k)_{\rm{", denom_cat, "}}}}$"
            ])
        if 'xscale' in kwargs.keys():
            sub.set_xscale(kwargs['xscale'])
        else:
            sub.set_xscale('log')
        if 'yscale' in kwargs.keys():
            sub.set_yscale(kwargs['yscale'])
        else:
            pass
        resid_str = '_l1norm'
    else:
        raise NotImplementedError('asdfasdfasdf')

    if 'xrange' in kwargs.keys():   # specify x-range
        sub.set_xlim(kwargs['xrange'])
        yxtext = 1.5*min(kwargs['xrange'])
    else:
        sub.set_xlim([10**-3,10**0])
        yxtext = 1.5*10**-3

    if type == 'ratio':
        sub.axhline(y = 1.0, lw=2, ls='--', c='k')

    sub.set_ylim(ylimit)
    sub.set_xlabel('k (h/Mpc)', fontsize=20)
    sub.set_ylabel(ylabel, fontsize=20)

    # Display the number of mocks for given catalog so that
    # I know how many mocks the P(k) is averaged over.
    n_mock_text = '\n'.join([
        ' '.join([
            str(n_mock_list[ii]),
            ((cat_corrs[ii])['catalog'])['name'].upper()
            ])
        for ii in xrange(len(n_mock_list))
        ])
    sub.text(yxtext, yytext, n_mock_text)
    sub.legend(scatterpoints=1, loc='upper left', prop={'size':14})

    try:
        n_mock_str = '_'.join([str(nm) for nm in n_mock])
    except TypeError:
        n_mock_str = str(n_mock)

    fig_name = ''.join([
        spec_key, '_',
        n_mock_str, 'mock_fibcoll_',
        corr_str,
        resid_str,
        '_comparison_Ngrid', str(Ngrid), '.png'
        ])
    fig_dir = '/home/users/hahn/powercode/FiberCollisions/figure/'
    print fig_name
    fig.savefig(''.join([fig_dir, fig_name]), bbox_inches="tight")
    #plt.show()
    plt.close()

    return None
def cosmic_variance_kbin(ell):
    ''' Tests the correlation of the cosmic variance by calculating it with
    different k_bins, overplotting the avg(P) +/- stddev bands for the
    original and the rebinned ('double') k binning.

    Parameters
    ----------
    ell : ell-th multipole of the power spectrum (e.g. 0, 2, 4)

    Notes
    -----
    * Bug fix: the standard deviations were previously wrapped in len(),
      so an integer (the number of k bins) was added/subtracted instead of
      the stddev array when building the error bands.
    * Saves figure/P{ell}k.cosmic_variance.kbinning_test.png.
    '''
    specdict = {'P0': 20000, 'Lbox': 3600, 'Ngrid': 960, 'ell': ell}
    true_cat_corr = {
            'catalog': {'name': 'nseries', 'n_mock': 1},
            'correction': {'name': 'true'},
            'spec': specdict
            }
    true_spec = AvgSpec(20, 'pk', true_cat_corr)

    # stddev, k and average P(k) for the original k binning
    orig_stddev = true_spec.stddev()
    orig_k = true_spec.k
    orig_avg_spec = true_spec.avg_spec

    # stddev, k and average P(k) after doubling the k bin width
    # NOTE(review): assumes _stddev_kbin('double') updates true_spec.k and
    # true_spec.avg_spec in place -- confirm against AvgSpec
    rebin_stddev = true_spec._stddev_kbin('double')
    rebin_k = true_spec.k
    rebin_avg_spec = true_spec.avg_spec

    prettyplot()
    pretty_colors = prettycolors()
    fig = plt.figure(figsize=[14,8])
    sub = fig.add_subplot(111)

    # clip negative band edges to 10^-3 so the log-scale plot stays finite
    orig_spec_lower = orig_avg_spec - orig_stddev
    orig_spec_upper = orig_avg_spec + orig_stddev
    if orig_spec_lower.min() < 0.:
        orig_spec_lower[np.where(orig_spec_lower < 0.)] = 10**-3
        orig_spec_upper[np.where(orig_spec_upper < 0.)] = 10**-3

    rebin_spec_lower = rebin_avg_spec - rebin_stddev
    rebin_spec_upper = rebin_avg_spec + rebin_stddev
    if rebin_spec_lower.min() < 0.:
        rebin_spec_lower[np.where(rebin_spec_lower < 0.)] = 10**-3
        rebin_spec_upper[np.where(rebin_spec_upper < 0.)] = 10**-3

    sub.fill_between(orig_k, orig_spec_lower, orig_spec_upper,
            color = pretty_colors[0], label = 'Original k binning')
    sub.fill_between(rebin_k, rebin_spec_lower, rebin_spec_upper,
            color = pretty_colors[2], label = 'Rebinned k binning')
    # labels were previously set but never displayed
    sub.legend(loc='upper right')

    sub.set_xscale('log')
    sub.set_yscale('log')
    sub.set_xlim([10**-3, 10**0])
    sub.set_ylim([10**2, 10**5])
    sub.set_xlabel(r"$\mathtt{k}\;\;(\mathtt{Mpc}/h)$", fontsize=30)
    sub.set_ylabel(r"$\mathtt{P_{"+str(ell)+"}(k)}$", fontsize=30)

    fig.savefig(
            ''.join(['figure/'
                'P', str(ell), 'k.',
                'cosmic_variance.kbinning_test.png']),
            bbox_inches='tight')
    plt.close()
def plot_delpoverp_comp(cat_corrs, n_mock, ell=0, **kwargs):
    ''' Plot comparison of delta P/ avg(P) for multiple lists of catalog and
    correction specifications.

    --------------------------------------------------------------------------
    Paramters
    --------------------------------------------------------------------------
    cat_corrs : list of catalog correction dictionary
    n_mock : number of mocks (int applied to all cat_corrs, or a list of the
        same length as cat_corrs)
    ell : ell-th component of multipole decomposition

    --------------------------------------------------------------------------
    Notes
    --------------------------------------------------------------------------
    * Long ass code with a lot of idiosyncracies.
    * Make sure k values agree with each other.
    * Bug fix: removed a leftover debug plt.figtext(0, 0, "testing ...")
      that was being stamped onto every saved figure.

    --------------------------------------------------------------------------
    Example
    --------------------------------------------------------------------------
    cat_corrs = [
        {'catalog': {'name': 'nseries'}, 'correction': {'name': 'true'}},
        {'catalog': {'name': 'nseries'}, 'correction': {'name': 'upweight'}},
        {'catalog': {'name': 'nseries'}, 'correction': {'name': 'dlospeak', 'fit': 'gauss', 'sigma': 3.9, 'fpeak': 0.68}}
        ]
    '''
    if 'Ngrid' in kwargs.keys():
        Ngrid = kwargs['Ngrid']
    else:
        Ngrid = 360

    # normalize n_mock to one entry per cat_corr
    if isinstance(n_mock, int):
        n_mock_list = [ n_mock for i in xrange(len(cat_corrs)) ]
    else:
        if len(n_mock) != len(cat_corrs):
            raise ValueError()
        else:
            n_mock_list = n_mock

    corr_str = ''

    prettyplot()    # set up plot
    pretty_colors = prettycolors()
    fig = plt.figure(1, figsize=(14, 8))  # set up figure
    sub = fig.add_subplot(111)

    for i_corr, cat_corr in enumerate(cat_corrs):
        catdict = cat_corr['catalog']
        corrdict = cat_corr['correction']
        specdict = {'P0': 20000, 'Lbox': 3600, 'Ngrid': Ngrid, 'ell': ell}

        cat_corr_i = {
                'catalog': {'name': catdict['name'], 'n_mock': 1},
                'correction': corrdict,
                'spec': specdict
                }

        avg_spec = AvgSpec(n_mock_list[i_corr], 'pk', cat_corr_i)
        avg_spec.read()

        spec_type = 'pk'
        spec_key = ''.join(['p', str(ell), 'k'])

        k_arr = avg_spec.k
        avg_pk = getattr(avg_spec, spec_key)
        # sample variance over the |average| power spectrum
        pk_err = avg_spec.stddev()

        sub.plot(
                k_arr,
                pk_err/np.abs(avg_pk),
                color = pretty_colors[i_corr + 1],
                label = plot_label(cat_corr),
                lw = 4
                )

        # Specify corrections for figure file name
        if 'dlospeak' in corrdict['name']:
            try:
                corr_str += ''.join([
                    catdict['name'], '_',
                    corrdict['name'], '_',
                    corrdict['fit'], '_',
                    '_sigma', str(corrdict['sigma']),
                    'fpeak', str(corrdict['fpeak'])
                    ])
            except KeyError:
                corr_str += ''.join([
                    catdict['name'], '_',
                    corrdict['name'], '_',
                    '_sigma', str(corrdict['sigma'])
                    ])
        else:
            corr_str += ''.join([
                catdict['name'], '_',
                corrdict['name']
                ])

    # x-axis
    if 'xscale' in kwargs.keys():
        sub.set_xscale(kwargs['xscale'])
    else:
        sub.set_xscale('log')
    if 'xrange' in kwargs.keys():   # specify x-range
        sub.set_xlim(kwargs['xrange'])
        yxtext = 1.5*min(kwargs['xrange'])
    else:
        sub.set_xlim([10**-3,10**0])
        yxtext = 1.5*10**-3
    sub.set_xlabel('k (h/Mpc)', fontsize=20)

    # y-axis
    if 'ylabel' in kwargs.keys():
        ylabel = kwargs['ylabel']
    else:
        ylabel = r'$\mathtt{\Delta P_'+str(ell)+'(k)/|\overline{P_'+str(ell)+'}|}$'
    if 'yrange' in kwargs.keys():   # specify y-range
        sub.set_ylim(kwargs['yrange'])
    else:
        sub.set_ylim([-1.,1.0])
    sub.set_xlabel('k (h/Mpc)', fontsize=20)
    sub.set_ylabel(ylabel, fontsize=20)

    # Display the number of mocks for given catalog so that
    # I know how many mocks the P(k) is averaged over.
    n_mock_text = '\n'.join([
        ' '.join([
            str(n_mock_list[ii]),
            ((cat_corrs[ii])['catalog'])['name'].upper()
            ])
        for ii in xrange(len(n_mock_list))
        ])
    sub.text(yxtext, 0.05, n_mock_text)
    sub.legend(scatterpoints=1, loc='upper left', prop={'size':14})

    try:
        n_mock_str = '_'.join([str(nm) for nm in n_mock])
    except TypeError:
        n_mock_str = str(n_mock)

    fig_name = ''.join([
        'del', spec_key, 'over', spec_key, '_',
        n_mock_str, 'mock_fibcoll_',
        corr_str,
        '_comparison_Ngrid', str(Ngrid), '.png'
        ])
    fig_dir = '/home/users/hahn/powercode/FiberCollisions/figure/'
    fig.savefig(''.join([fig_dir, fig_name]), bbox_inches="tight")
    #plt.show()
    plt.close()

    return None
def test_delPk_corr_scatter(ell, n_mocks, Ngrid=960, k_fit=0.25, k_fixed=0.6, fs=1.0, rc=0.43, **kwargs): ''' test the scatter in del P(k)^corr from extrapolation of each individual mock catalog realizations ''' prettyplot() pretty_colors = prettycolors() fig = plt.figure(figsize=(14,8)) sub = fig.add_subplot(111) for i_mock in range(1, n_mocks+1): # default cat-corr for Nseries cat_corr = { 'catalog': {'name': 'nseries', 'n_mock': i_mock}, 'correction': {'name': 'true'} } spec_i = Spec('pk', cat_corr, ell=4, Ngrid=Ngrid) spec_i.read() specs_i = [getattr(spec_i, 'p0k'), getattr(spec_i, 'p2k'), getattr(spec_i, 'p4k')] # extrapolation parameters for P0, P2, P4 extrap_pars = [] for i_ell, ellp in enumerate([0,2,4]): extrap_pars.append( pk_extrap.pk_powerlaw_bestfit(spec_i.k, specs_i[i_ell], k_fit=k_fit, k_fixed=k_fixed) ) print extrap_pars # calculate delP corrdelP = fourier_corr.delP_corr(spec_i.k, specs_i, ell, fs=fs, rc=rc, extrap_params=extrap_pars, k_fixed=k_fixed) if i_mock == 1: delpk_label = r"$\mathtt{\Delta P^{corr}}$ (Extrap.)" else: delpk_label = None sub.plot(spec_i.k, corrdelP, lw=2, c=pretty_colors[i_mock % 20], label=delpk_label) # upw P_l(k) - true P_l(k0 true_cat_corr = { 'catalog': {'name': 'nseries', 'n_mock': 1}, 'correction': {'name': 'true'}, } true_avg_spec = AvgSpec(n_mocks, 'pk', true_cat_corr, ell=ell, Ngrid=Ngrid) true_avg_spec.read() upweight_cat_corr = true_cat_corr.copy() upweight_cat_corr['correction']['name'] = 'upweight' upweight_avg_spec = AvgSpec(n_mocks, 'pk', upweight_cat_corr, ell=ell, Ngrid=Ngrid) upweight_avg_spec.read() sub.plot( true_avg_spec.k, getattr(upweight_avg_spec, 'p'+str(ell)+'k') - getattr(true_avg_spec, 'p'+str(ell)+'k'), c = 'k', lw = 4, ls = '--', label = r"$\mathtt{P^{upw}(k) - P^{true}(k)}$" ) # x-axis sub.set_xlabel(r'$\mathtt{k}$', fontsize=30) sub.set_xscale('log') sub.set_xlim([10**-3, 10**0.]) # y-axis sub.set_ylabel(r"$\mathtt{\Delta P_{"+str(ell)+"}(k)}$", fontsize=30) sub.set_ylim([-50., 300.]) 
sub.legend(loc='upper left') fig_file = ''.join([ 'figure/', 'qaplot_delP_corr_extrap_scatter.', str(n_mocks), 'mocks.', 'kfit', str(round(k_fit,2)), '.', 'kfixed', str(round(k_fixed,2)), '.png' ]) fig.savefig(fig_file, bbox_inches="tight") plt.close()
def test_qPqfllp_k(k, l, rc=0.43, noextrap=''):
    ''' Test the polynomial estimates of the f_l_lp integrals: for a fixed k,
    plot the integrand q P(q) f_l,l'(q r_c, k r_c) for l' = 0, 2, ..., 10,
    with or without power-law extrapolation of P(q) beyond the measured
    k-range.

    Parameter
    ---------
    - k : wavenumber at which the kernel is evaluated
    - l : multipole l of f_l,l'
    - rc : fiber-collision comoving scale (default 0.43)
    - noextrap : '' to extrapolate P(q) (pq); any truthy string to use
        pq_noextrap instead (the string is also appended to the file name)

    Notes
    -----
    - Saves figure/qPqfllp{noextrap}.l{l}.k{k}.png and returns nothing.
    '''
    q_arr = np.logspace(-3, 3, num=100)
    n_mock = 7
    krc = k * rc
    k_fit = 4.
    k_fixed = 4.34
    data_dir = '/mount/riachuelo1/hahn/power/Nseries/Box/'

    prettyplot()
    pretty_colors = prettycolors()
    fig = plt.figure(1, figsize=(14,8))
    sub = fig.add_subplot(111)

    maxmax, minmin = 0., 0.
    for i_lp, ellp in enumerate(range(6)):
        lp = 2 * ellp
        # average the "true" box P(k) over the 7 Nseries box realizations
        for i_mock in xrange(1, 8):
            true_pk_file = ''.join([data_dir, 'power3600z_BoxN', str(i_mock), '.dat'])
            # column of the requested l' multipole in the power file
            if lp == 0:
                l_index = -1
            else:
                l_index = 1 + int(lp/2)
            tr_k, tr_pk_i = np.loadtxt(
                    true_pk_file,
                    unpack = True,
                    usecols =[0,l_index]
                    )
            if i_mock == 1:
                tr_pk = tr_pk_i
            else:
                tr_pk += tr_pk_i
        tr_pk /= 7.
        tr_specs = (2.0*np.pi)**3 * tr_pk

        # interpolation function
        Pk_interp = interp1d(tr_k, tr_specs, kind='cubic')
        # extrapolation parameter
        tr_extrap_par = pk_extrap.pk_powerlaw_bestfit(tr_k, tr_specs, k_fit=4., k_fixed=4.34)

        qPqfllp = []
        Pqs = []
        for q in q_arr:
            if not noextrap:
                Pq = pq(q, Pk_interp, tr_extrap_par, k_min=tr_k[0], k_max=tr_k[-1], k_fixed=4.34)
            else:
                Pq = pq_noextrap(q, Pk_interp, k_min=tr_k[0], k_max=tr_k[-1], k_fixed=4.34)
            fllp = fourier_corr.f_l_lp(q*rc, krc, l, lp)
            qPqfllp.append( q * Pq * fllp )
            Pqs.append(Pq)

        int_label = "$ l' = "+str(lp)+ "$"
        sub.plot(q_arr, np.array(qPqfllp),
                c=pretty_colors[i_lp+1], lw=4, ls='-', label=int_label)

        maxmax = np.max([np.max(qPqfllp), maxmax])
        minmin = np.min([np.min(qPqfllp), minmin])

    sub.set_xscale('log')
    sub.set_xlabel(r"$\mathtt{q}$", fontsize=25)
    sub.set_ylabel(r"$\mathtt{q P(q) f_{l, l'}(q r_c, k r_c)}$", fontsize=25)
    # mark the edge of the measured k-range
    sub.vlines(tr_k[-1], minmin, maxmax, color='k', linestyles='--', linewidth=2)
    sub.text(2.*10**-3, 1.01*maxmax, r"k = "+str(round(k,2)), fontsize=20)
    sub.legend(loc='upper right')
    fig.savefig(''.join([
        'figure/',
        'qPqfllp', noextrap, '.l', str(l), '.k', str(round(k,2)), '.png'
        ]), bbox_inches='tight')
    plt.close()
def test_quenching_fraction(tau_prop=None, n_snap0=13, **kwargs):
    ''' Plot the evolution of the quenching population: the quenching
    fraction vs stellar mass per snapshot (fig 1) and vs redshift per
    stellar-mass bin (fig 2).

    Parameters
    ----------
    tau_prop : dict, optional
        Quenching timescale prescription (default {'name': 'instant'}).
        For 'line', the dict must also provide 'fid_mass', 'slope', 'yint'.
    n_snap0 : int, optional
        Initial snapshot number; snapshots n_snap0-1 down to 1 are plotted.

    Raises
    ------
    ValueError
        If tau_prop['name'] is not a recognized prescription.
    '''
    # avoid a mutable default argument
    if tau_prop is None:
        tau_prop = {'name': 'instant'}

    if tau_prop['name'] in ('instant', 'constant', 'satellite', 'long'):
        tau_str = ''.join(['_', tau_prop['name'], 'tau'])
    elif tau_prop['name'] == 'line':
        # bug fix: was `in ('line')` — parenthesized string, i.e. a
        # substring-membership test, not a one-element tuple
        tau_str = ''.join([
            '_', tau_prop['name'], 'tau',
            '_Mfid', str(tau_prop['fid_mass']),
            '_slope', str(round(tau_prop['slope'], 4)),
            '_yint', str(round(tau_prop['yint'], 4))
        ])
    else:
        # previously fell through and raised NameError on tau_str below
        raise ValueError('Unknown tau prescription: ' + str(tau_prop['name']))

    prettyplot()
    pretty_color = prettycolors()

    fig1 = plt.figure(1, figsize=(15, 7))
    sub1 = fig1.add_subplot(111)
    fig2 = plt.figure(2, figsize=(12, 7))
    sub2 = fig2.add_subplot(111)

    # Overplot the quenching fraction of each snapshot
    snaps = []
    for i_nsnap in reversed(xrange(1, n_snap0)):
        fqing_file = ''.join([
            '/data1/hahn/f_quenching/',
            'quenching_fraction', tau_str, '_nsnap', str(i_nsnap), '.dat'
        ])
        mass_bin, fqing = np.loadtxt(
            fqing_file,
            skiprows=1,
            unpack=True,
            usecols=[0, 1]
        )

        # lw was previously the string '4'; numeric is required
        sub1.plot(mass_bin, fqing, c=pretty_color[i_nsnap + 1], lw=4,
                  label='Snapshot' + str(i_nsnap))

        # earliest plotted snapshot: set up one accumulator per mass bin
        if i_nsnap == n_snap0 - 1:
            fqing_m = [[] for _ in mass_bin]
        for i_m in xrange(len(mass_bin)):
            fqing_m[i_m].append(fqing[i_m])
        snaps.append(get_z_nsnap(i_nsnap))

    # quenching fraction as a function of redshift for each mass bin
    for i_m, mass in enumerate(mass_bin):
        fqing_massbin = np.array(fqing_m[i_m])
        # replace exact zeros so the points survive the log-scale y-axis
        fqing_massbin[np.where(fqing_massbin == 0.)] = 10.**-10

        sub2.scatter(snaps, fqing_massbin, c=pretty_color[i_m + 1])
        sub2.plot(snaps, fqing_massbin, lw=3, c=pretty_color[i_m + 1],
                  label=r'$\mathtt{M_* =\;}$' + str(mass))

    del snaps
    del fqing_m

    sub1.legend(loc='lower right')
    sub1.set_yscale('log')
    sub1.set_ylim([0.0, 1.0])
    sub1.set_xlim([9.5, 14.0])
    sub1.set_xlabel('Stellar Mass ($M_*$)')
    sub1.set_ylabel('Predicted Quenching Fraction')

    sub2.legend(loc='lower right')
    sub2.set_yscale('log')
    sub2.set_xlim([0.9, -0.3])      # redshift decreasing toward the right
    sub2.set_ylim([0.0001, 1.0])
    sub2.set_xlabel(r'Redshift ($\mathtt{z}$)')
    sub2.set_ylabel('Predicted Quenching Fraction')

    fig_dir = '/home/users/hahn/research/pro/tinker/central_quenching/figure/'
    fig1.savefig(
        ''.join([fig_dir, 'f_quenching', tau_str, '.png']),
        bbox_inches='tight')
    fig2.savefig(
        ''.join([fig_dir, 'f_quenching_evol', tau_str, '.png']),
        bbox_inches='tight')
    fig1.clear()
    fig2.clear()
    plt.close()
def qaplot_sfms_groupcat_fitting(Mrcut=18, sfq_test=True):
    """ QAplot of the SF-MS fitting functions for star-forming centrals in
    the SDSS group catalog: scatterplot of the galaxies with the group
    catalog and EnvCount best-fit SF-MS relations overplotted as a sanity
    check.

    Parameters
    ----------
    Mrcut : int
        Absolute magnitude cut that specifies the group catalog.
    sfq_test : bool
        If True, overplot the SFR(M*, z) cutoff used for the SF/Q
        classification on top of the SF-MS plot.
    """
    # star-forming central galaxies from the SDSS group catalog
    sf_cen = sf_centrals(Mrcut=Mrcut)
    pretty_colors = prettycolors()

    bovy.scatterplot(
        sf_cen.mass,
        sf_cen.sfr,
        scatter=True,
        levels=[0.68, 0.95, 0.997],
        color=pretty_colors[1],
        s=3,
        xrange=[9.5, 12.0],
        yrange=[-1.5, 1.5],
        # raw strings: \; is a LaTeX spacing command, not a Python escape
        xlabel=r'log \; M_{*}',
        ylabel=r'log \; SFR'
    )

    # SDSS group catalog best fit (clobber=True re-fits every call)
    gc_zmid, gc_slope, gc_yint = get_bestfit_sfms_groupcat(Mrcut=Mrcut, clobber=True)
    mass_bin = np.arange(9.0, 12.0, 0.25)   # stellar mass bins
    plt.plot(
        mass_bin,
        gc_slope * (mass_bin - 10.5) + gc_yint,
        c='k', lw=6, ls='--'
    )

    # EnvCount catalog best fit for each redshift bin
    ec_zmid, ec_slope, ec_yint = get_bestfit_sfms_envcount()
    for i_z in xrange(len(ec_zmid)):
        plt.plot(
            mass_bin,
            ec_slope[i_z] * (mass_bin - 10.5) + ec_yint[i_z],
            c=pretty_colors[i_z + 3],
            lw=4,
            label=str(ec_zmid[i_z])
        )

    # average SFR per mass bin; bins with values <= -998 are excluded
    # (presumably a too-few-galaxies sentinel — TODO confirm upstream)
    avg_sfrs, sig_sfrs, ngals = get_sfr_mstar_z_groupcat(mass_bin, Mrcut=Mrcut)
    enough_gal = np.where(np.array(avg_sfrs) > -998)
    plt.errorbar(
        mass_bin[enough_gal],
        np.array(avg_sfrs)[enough_gal],
        yerr=np.array(sig_sfrs)[enough_gal],
        lw=4,
        c=pretty_colors[1],
        label='Average SFR'
    )

    if sfq_test:
        # SF/Q classification cutoff evaluated at z = 0.05
        plt.plot(mass_bin, sfr_cut(mass_bin, 0.05), c='k', lw=4, ls='--')

    plt.legend(loc='lower right')
    fig_name = ''.join(['figure/', 'qaplot_sfms_fitting_groupcat_', str(Mrcut), '.png'])
    plt.savefig(fig_name, bbox_inches='tight')
    plt.close()
def test_f_l_lp_est(l, lp, rc=0.43):
    ''' Compare the polynomial estimate of the f_l_lp integral against the
    direct numerical integration over a wide range of q, for several k.

    Parameter
    ---------
    - l : multipole l of the f_l_lp term
    - lp : multipole l' of the f_l_lp term
    - rc : fiber collision radius

    Notes
    -----
    - Consistent for l, lp < 6
    - Possible break-down in the estimates for l or lp = 10?
    '''
    q_arr = np.logspace(-3, 3, num=100)
    x_arr = q_arr * rc

    prettyplot()
    colors = prettycolors()
    fig = plt.figure(figsize=(14, 8))
    sub = fig.add_subplot(111)

    y_max, y_min = 0., 0.
    for i_k, k_val in enumerate([0.01, 0.05, 0.1, 0.5]):
        krc = k_val * rc

        # direct numerical integration at every x
        fllp_int = [fourier_corr.f_l_lp(x, krc, l, lp) for x in x_arr]
        # polynomial estimate exists only for lp <= 10
        if lp <= 10:
            fllp_est = [fourier_corr.f_l_lp_est(x, krc, l, lp) for x in x_arr]
        else:
            fllp_est = []

        int_label = 'Integrated $ k = ' + str(round(k_val, 2)) + '$'
        # label the estimate curve only once in the legend
        est_label = 'Polynomial Estimate' if i_k == 0 else None

        sub.plot(q_arr, np.array(fllp_int),
                 c=colors[i_k + 1], lw=4, ls='-', label=int_label)
        if lp <= 10:
            sub.plot(q_arr, np.array(fllp_est),
                     c=colors[0], lw=3, ls='--', label=est_label)
        else:
            # no estimate computed; reuse the integration for the range update
            fllp_est = fllp_int

        y_max = np.max([np.max([np.max(fllp_int), np.max(fllp_est)]), y_max])
        y_min = np.min([np.min([np.min(fllp_int), np.min(fllp_est)]), y_min])

    sub.set_xscale('log')
    sub.set_xlabel(r"$\mathtt{q}$", fontsize=25)
    sub.set_ylabel(r"$\mathtt{f_{l, l'}(q r_c, k r_c)}$", fontsize=25)
    sub.set_ylim([-1.1, 1.1])
    sub.text(2. * 10**-3, 1.01 * y_max,
             r"l = " + str(l) + ", l' = " + str(lp), fontsize=20)
    sub.legend(loc='upper right')

    fig_file = ''.join([
        'figure/',
        'f_l_lp_estimate', '.l', str(l), '.lp', str(lp), '.png'
    ])
    fig.savefig(fig_file, bbox_inches='tight')
    plt.close()
def qaplot_parameterized_sfms():
    """ QAplot comparing the parameterized SFMS redshift evolution at the
    fiducial mass M* = 10.5 against the observed SF-MS redshift evolution
    from the group catalog and the environment-count catalog.
    """
    # observed best-fit SF-MS parameters
    gc_zmid, gc_slope, gc_yint = get_bestfit_sfms_groupcat()
    ec_zmid, ec_slope, ec_yint = get_bestfit_sfms_envcount(fid_mass=10.5)
    # parameterized SFR(M*, z); the scatter sig_sfr_sfms is not plotted here
    avg_sfr_sfms, sig_sfr_sfms = get_param_sfr_mstar_z()

    pretty_colors = prettycolors()

    fig = plt.figure()
    sub = fig.add_subplot(111)

    # NOTE(review): the x-coordinate was previously hard-coded to 0.03 while
    # gc_zmid went unused; plot at gc_zmid (assumed to be the group-catalog
    # redshift, ~0.03 — confirm against get_bestfit_sfms_groupcat)
    sub.scatter(gc_zmid, gc_yint, c='k', s=50, label='Observed GroupCatalog')
    sub.scatter(ec_zmid, ec_yint, c=pretty_colors[3], label='Observed EnvCount')

    # parameterized SFR at the fiducial mass over a redshift grid
    z_grid = np.arange(0.0, 1.0, 0.05)
    sfr_fidmass = np.array([avg_sfr_sfms(10.5, zmid) for zmid in z_grid])
    sub.plot(z_grid, sfr_fidmass, c='k', lw=4, ls='--', label='Parameterized')
    # same curve offset by +0.15 dex for comparison with the EnvCount points
    sub.plot(z_grid, sfr_fidmass + 0.15, c=pretty_colors[3], lw=4, ls='--')

    # SF/Q classification cut at the fiducial mass
    sfr_cutoff = np.array([sfr_cut(10.5, ec_zmid[i_z]) for i_z in xrange(len(ec_zmid))])
    sub.plot(ec_zmid, sfr_cutoff, c='k', lw=3, ls='--', label='SF/Q Classification')

    sub.set_xlim([0.0, 1.0])
    sub.set_ylim([-1.0, 2.0])
    sub.set_ylabel('SFR(M=10.5,z)', fontsize=20)
    sub.set_xlabel('Redshift (z)', fontsize=20)
    sub.legend(scatterpoints=1, loc='upper left')
    plt.show()