def test_fit_tofz():
    z_table, t_table = UT.zt_table()
    cosmo = FlatLambdaCDM(H0=70, Om0=0.274)

    prettyplot()
    fig = plt.figure()
    sub = fig.add_subplot(111)

    for deg in range(2, 10):
        coeff = UT.fit_tofz(deg)
        if deg > 5:
            print('deg = ', deg, coeff)
        tofz = np.poly1d(coeff)

        z_arr = np.arange(0., 2., 0.1)
        t_arr = cosmo.age(z_arr).value
        sub.plot(z_arr, (tofz(z_arr) - t_arr) / t_arr, label='Degree=' + str(deg))

    t_of_z = interpolate.interp1d(z_arr, t_arr, kind='cubic')
    tint = t_of_z(z_table[1:20])  # np.interp(t_table[:20], t_arr, z_arr)
    sub.scatter(z_table[1:20], (t_table[1:20] - tint) / tint, c='k', s=30)

    sub.plot(np.arange(0., 2., 0.1), np.repeat(-0.025, len(np.arange(0., 2., 0.1))), c='k', ls='--', lw=3)
    sub.plot(np.arange(0., 2., 0.1), np.repeat(0.025, len(np.arange(0., 2., 0.1))), c='k', ls='--', lw=3)
    sub.set_ylim([-0.05, 0.05])
    sub.set_xlim([0., 2.])
    sub.legend(loc='upper left')
    plt.show()
def f(data=data4, ra=ra, dec=dec, freq=freq, label='ICs_10'):
    h = 67.77
    cosmo = FlatLambdaCDM(H0=h, Om0=0.307)
    z = 1420.4 / (freq / 10**6) - 1
    dc = np.array(cosmo.comoving_distance(z)) * h / 100  # comoving distance in Mpc/h

    # select a single frequency slice
    N = 50
    data = data[N] * 10**-3
    freq = freq[N]
    dc = dc[N]

    # physical extent of the slice in Mpc/h
    Hx = (dec[1] - dec[0]) / 180. * np.pi * dc
    Hy = -(ra[1] - ra[0]) / 180. * np.pi * dc
    Lx = Hx * len(dec)
    Ly = Hy * len(ra)
    print(np.pi * 2 / Lx)

    freq_x = np.fft.fftfreq(len(dec), 1. / len(dec))
    freq_y = np.fft.fftfreq(len(ra), 1. / len(ra))
    deltak = np.fft.fft2(data)
    Pk = np.abs(deltak)**2 * (Lx * Ly / len(ra)**2 / len(dec)**2)
    k = np.sqrt((2 * np.pi / Lx)**2 * freq_x[None, :]**2 + (2 * np.pi / Ly)**2 * freq_y[:, None]**2)

    # bin the 2D power spectrum in |k|
    bin = 10
    edges = np.linspace(k.min(), k.max(), bin + 1, endpoint=True)
    n = np.histogram(k, edges)[0]
    print(n)
    k_bin = np.histogram(k, edges, weights=k)[0]
    print(data.shape)
    print(k.shape)
    print(Pk.shape)
    pk = np.histogram(k, edges, weights=Pk)[0]
    plt.semilogy(k_bin / n, pk / n, '.--', label=label)
def test_approx_DL():
    for z in np.linspace(0.01, 4, num=10):
        v1 = approx_DL()(z)
        cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=None)
        v2 = cosmo.luminosity_distance(z).value
        assert abs(v1 / v2 - 1) < 0.01
def Grid(self, Nx=64, Ny=64, Nz=256):
    print('gridding', Nx, Ny, Nz)
    h = self.h
    ra = self.ra
    ra = ra - ra.mean()
    dec = self.dec
    dec = dec - dec.mean()
    self.Nx = Nx
    self.Ny = Ny
    self.Nz = Nz

    cosmo = FlatLambdaCDM(H0=h, Om0=0.307)
    redshift = 1420. / (self.freq / 10**6) - 1
    dc = np.array(cosmo.comoving_distance(redshift)) * h / 100  # comoving distance in Mpc/h

    pos_x = dc[:, None, None] * np.cos(np.pi / 180. * dec[None, None, :]) * np.cos(np.pi / 180. * ra[None, :, None])
    pos_y = dc[:, None, None] * np.cos(np.pi / 180. * dec[None, None, :]) * np.sin(np.pi / 180. * ra[None, :, None])
    pos_z = dc[:, None, None] * np.sin(np.pi / 180. * dec[None, None, :]) + np.zeros_like(ra[None, :, None])
    # print pos_x.max(), pos_x.min()
    # print pos_y.max(), pos_y.min()
    # print pos_z.max(), pos_z.min()

    # Put the redshift direction on the z axis. Before this step the axes are
    # dec -> z, ra -> y, redshift -> x; after the transformation ra -> x, dec -> y, redshift -> z.
    self.DPM = np.c_[pos_y.reshape(-1), pos_z.reshape(-1), pos_x.reshape(-1)]
    self.bin_x = np.linspace(pos_y.min() - 10**-5, pos_y.max() + 10**-5, Nx + 1)
    self.bin_y = np.linspace(pos_z.min() - 10**-5, pos_z.max() + 10**-5, Ny + 1)
    self.bin_z = np.linspace(pos_x.min() - 10**-5, pos_x.max() + 10**-5, Nz + 1)
def LogLikelihood(pos, obs, sigmas, par):
    """
    Input:
        Array with the position in parameter space
        Array with observed values
        Array with sigmas
        Dictionary of indexes of parameters in the arrays
    Return:
        Log likelihood
    """
    mb_obs, x1_obs, c_obs, z_obs = obs
    sigma_mb, sigma_x1, sigma_c = sigmas

    cosmo = FlatLambdaCDM(H0=70, Om0=pos[par["Omega_m"]])
    mu = cosmo.distmod(z_obs).value

    x1 = pos[-2 * len(obs[0]): -len(obs[0])]
    c = pos[-len(obs[0]):]
    mb_true = pos[par["MB"]] - pos[par["alpha"]] * x1 + pos[par["beta"]] * c + mu
    # + pos[par['sigma_int']]
    # I'm not sure this is correct. We need to check how to deal with the two
    # scatters (sigma_int and sigma_mb).
    likelihood_m_obs = LogGaussian(mb_obs, mb_true, sigma_mb + pos[par["sigma_int"]])
    likelihood_x1 = LogGaussian(x1_obs, x1, sigma_x1)
    likelihood_c = LogGaussian(c_obs, c, sigma_c)

    return np.sum([likelihood_m_obs, likelihood_x1, likelihood_c])
def absolute_magnitude_lim(z, app_mag_lim, cosmo=None):
    """
    Return the absolute magnitude limit as a function of redshift for a
    flux-limited survey.

    Parameters
    ----------
    z : array_like
        redshift
    app_mag_lim : float
        apparent magnitude limit
    cosmo : cosmology object

    Returns
    -------
    M : np.array
        absolute magnitude in mag + 5*log10(h) units
    """
    if cosmo is None:
        from astropy.cosmology import FlatLambdaCDM
        cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
        print('Warning, no cosmology specified, using default:', cosmo)

    d_L = cosmo.luminosity_distance(z).value
    M = apparent_to_absolute_magnitude(app_mag_lim, d_L)

    return M - 5.0 * np.log10(cosmo.h)
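# --- Usage sketch (illustrative values, not from the original source) ---
# Absolute magnitude limit of an r ~ 17.77 flux-limited survey over a redshift grid,
# letting the function fall back to its default cosmology.
if __name__ == "__main__":
    import numpy as np
    z_grid = np.linspace(0.02, 0.2, 10)
    print(absolute_magnitude_lim(z_grid, app_mag_lim=17.77))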
def overplot_ruler(ax, z, pixsize=0.396, rlength_arcsec=10., nx=64, ny=64):
    """
    Overlay a physical-scale ruler on an image.

    Params
    ------
    ax: matplotlib axes to draw on
    z: redshift (used to convert arcsec to proper kpc)
    pixsize=0.396: pixel size in arcsec
    rlength_arcsec=10.: ruler length in arcsec
    """
    # import
    from astropy.cosmology import FlatLambdaCDM
    cosmo = FlatLambdaCDM(H0=70, Om0=0.3)

    # set up
    xmid = 0.5 * nx + 5  # x ending point of bar
    y = ny - 10.         # y location of bar

    # conversion
    kpc_per_arcsec = 1. / cosmo.arcsec_per_kpc_proper(z)
    rlength_pix = rlength_arcsec / pixsize
    rlength_kpc = (rlength_arcsec * kpc_per_arcsec).value

    # ===== plotting
    ax.plot([xmid - rlength_pix, xmid], [y, y], color='white', lw=2)
    ax.plot([xmid - rlength_pix, xmid - rlength_pix], [y + 1., y - 1.], color='white', lw=2)
    ax.plot([xmid, xmid], [y + 1., y - 1.], color='white', lw=2)
    ax.text(xmid + 4., y + 1, '10" (' + '%.0f' % rlength_kpc + ' kpc)', color='white', fontsize=12)
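# --- Usage sketch (illustrative, not from the original source) ---
# Draw the ruler on top of a random 64x64 "image" at z = 0.2.
if __name__ == "__main__":
    import numpy as np
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.imshow(np.random.rand(64, 64), origin='lower', cmap='gray')
    overplot_ruler(ax, z=0.2)
    plt.show()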
def get_lum(rmid, zfinal_dict):
    flux = get_raw_lum(rmid)
    z = zfinal_dict[int(rmid)]
    cosmo = FlatLambdaCDM(H0=70.0, Om0=0.3)
    dl_MPC = cosmo.luminosity_distance(z)
    dl_cm = float(dl_MPC.value * 3.085677581 * (10.0 ** 24.0))  # Mpc -> cm
    lum = 4.0 * 3.1415926 * dl_cm * dl_cm * flux[0] * 10. ** (0. - 17.) * 5100.0
    return [lum, flux[1] * lum]
def angular_diameter(size, redshift):
    ## size must be in kpc
    cosmo = FlatLambdaCDM(H0=70, Om0=0.3)  ## defining the cosmology; see the documentation for details of the parameters and their values
    lumdist = cosmo.luminosity_distance(redshift)  ## value in Mpc
    zang = size * (1 + redshift)**2 / (1000. * lumdist) * 180. / np.pi * 3600.  ## arcsec
    return zang
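# --- Usage sketch (illustrative values, not from the original source) ---
# Angular size of a 10 kpc object at z = 0.5. Because luminosity_distance returns an
# astropy Quantity, the result carries a residual unit; its numerical value is the
# angular size in arcsec.
if __name__ == "__main__":
    theta = angular_diameter(10.0, 0.5)
    print(theta)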
def simple_flux_from_greybody(lambdavector, Trf = None, b = None, Lrf = None, zin = None, ngal = None):
    '''
    Return flux densities at any wavelength of interest (in the range 1-10000 micron),
    assuming a galaxy (at a given redshift) greybody spectral energy distribution (SED),
    with a power law replacing the Wien part of the spectrum to account for the
    variability of dust temperatures within the galaxy. The two different functional
    forms are stitched together by imposing that the two functions and their first
    derivatives coincide. The code contains the nitty-gritty details explicitly.

    Cosmology assumed: H0=70.5, Omega_M=0.274, Omega_L=0.726 (Hinshaw et al. 2009)

    Inputs:
    alphain = spectral index of the power law replacing the Wien part of the spectrum,
              to account for the variability of dust temperatures within a galaxy
              [default = 2; see Blain 1999 and Blain et al. 2003]
    betain = spectral index of the emissivity law for the greybody
             [default = 2; see Hildebrand 1985]
    Trf = rest-frame temperature [in K; default = 20 K]
    Lrf = rest-frame FIR bolometric luminosity [in L_sun; default = 10^10]
    zin = galaxy redshift [default = 0.001]
    lambdavector = array of wavelengths of interest [in microns;
                   default = (24, 70, 160, 250, 350, 500)]

    AUTHOR: Lorenzo Moncelsi [[email protected]]

    HISTORY:
    20June2012: created in IDL
    November2015: converted to Python
    '''
    nwv = len(lambdavector)
    nuvector = c * 1.e6 / lambdavector  # Hz

    nsed = 1e4
    lambda_mod = loggen(1e3, 8.0, nsed)  # microns
    nu_mod = c * 1.e6 / lambda_mod  # Hz

    # Lorenzo's version had: H0=70.5, Omega_M=0.274, Omega_L=0.726 (Hinshaw et al. 2009)
    cosmo = FlatLambdaCDM(H0 = 70.5 * u.km / u.s / u.Mpc, Om0 = 0.273)

    conversion = 4.0 * np.pi * (1.0E-13 * cosmo.luminosity_distance(zin) * 3.08568025E22)**2.0 / L_sun  # 4 * pi * D_L^2; units are L_sun/(Jy x Hz)

    Lir = Lrf / conversion  # Jy x Hz

    Ain = np.zeros(ngal) + 1.0e-36  # good starting parameter
    betain = np.zeros(ngal) + b
    alphain = np.zeros(ngal) + 2.0

    fit_params = Parameters()
    fit_params.add('Ain', value=Ain)
    #fit_params.add('Tin', value=Trf/(1.+zin), vary=False)
    #fit_params.add('betain', value=b, vary=False)
    #fit_params.add('alphain', value=alphain, vary=False)

    #pdb.set_trace()
    # THE LM FIT IS HERE
    #Pfin = minimize(sedint, fit_params, args=(nu_mod, Lir.value, ngal))
    Pfin = minimize(sedint, fit_params, args=(nu_mod, Lir.value, ngal, Trf/(1.+zin), b, alphain))
    #pdb.set_trace()

    flux_mJy = sed(Pfin.params, nuvector, ngal, Trf/(1.+zin), b, alphain)

    return flux_mJy
def DistanceFraction(H0, Om, z_gals, z_clust):
    Cos = FlatLambdaCDM(H0=H0, Om0=Om)
    Ds = Cos.angular_diameter_distance(z_gals)
    Dl = Cos.angular_diameter_distance(z_clust)
    # Calculate the angular diameter distance between the objects and the cluster
    DM1 = Cos.comoving_distance(z_clust)
    DM2 = Cos.comoving_distance(z_gals)
    Dls = (DM2 - DM1) / (1 + z_gals)
    return NP.array(Ds / (Dls * Dl))
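# --- Usage sketch (illustrative values, not from the original source) ---
# Distance combination for background galaxies at z = 0.8 behind a cluster at z = 0.3.
if __name__ == "__main__":
    print(DistanceFraction(70.0, 0.3, 0.8, 0.3))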
def calculate_rf_lens_magnitudes(lens_redshift, velocity_dispersion, filters_dict):
    """Calculate the reference-frame lens magnitudes in multiple filters

    Parameters
    ----------
    lens_redshift : float
        Redshift of the lens
    velocity_dispersion : float
        Velocity dispersion of the lens
    filters_dict : dict
        Throughputs of the various filters (see the output of `get_sdss_filters` for details)

    Returns
    -------
    dict
        Each key is one of the string characters 'u', 'g', 'r', 'i', 'z' representing the filter.
        Each value is the reference-frame apparent magnitude of the lens in the 'key' filter, of type float.
    """
    from stellarpop import tools
    from lenspop import population_functions, distances
    from astropy.cosmology import FlatLambdaCDM

    # Instantiate Distance
    distance = distances.Distance()  # TODO: necessary?
    # Instantiate LensPopulation
    lenspop = population_functions.LensPopulation_()
    # Instantiate FlatLambdaCDM cosmology with reasonable parameters
    cosmology = FlatLambdaCDM(H0=70.0, Om0=0.3)

    lens_sed = tools.getSED('BC_Z=1.0_age=9.000gyr')
    velocity_dispersion = np.atleast_1d(velocity_dispersion)

    # Absolute --> apparent magnitude conversion in the R-band
    lens_abmag_r = tools.ABFilterMagnitude(filters_dict['r'], lens_sed, lens_redshift)
    distance_modulus = cosmology.distmod(lens_redshift).value
    lens_appmag_r = lens_abmag_r + distance_modulus

    # [Reference frame] Absolute --> apparent magnitude conversion in the R-band
    rf_lens_abmag_r, _ = lenspop.EarlyTypeRelations(velocity_dispersion)
    rf_lens_appmag = {}
    rf_lens_appmag['r'] = rf_lens_abmag_r + distance_modulus

    # Quantity which is added to a magnitude to convert it into a reference-frame magnitude
    offset_rf = rf_lens_abmag_r - lens_abmag_r

    # Converting absolute magnitude to reference-frame apparent magnitude
    for band in 'ugiz':
        rf_lens_appmag[band] = tools.ABFilterMagnitude(filters_dict[band], lens_sed, lens_redshift) + offset_rf + distance_modulus

    return rf_lens_appmag
def L_nu_from_magAB(magAB=None, z=None):
    """Convert an AB magnitude into a luminosity density (erg s^-1 Hz^-1)."""
    import numpy as np
    from astropy.cosmology import FlatLambdaCDM
    cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
    m_zero = 30.
    d = cosmo.comoving_distance(z).value
    factor = 4. * np.pi * d**2.
    L_nu = factor * 10.**((magAB - m_zero) / (-2.5))
    return L_nu
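# --- Usage sketch (illustrative values, not from the original source) ---
# Note that the helper above uses the comoving distance in Mpc and a magnitude zero
# point of 30, so the absolute normalisation follows that convention.
if __name__ == "__main__":
    print(L_nu_from_magAB(magAB=25.0, z=1.0))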
def Pos(self, freq=None, ra=None, dec=None):
    # freq -> x axis; ra -> y axis; dec -> z axis
    self.shape = (freq.shape[0], ra.shape[0], dec.shape[0])
    cosmo = FlatLambdaCDM(H0=self.h, Om0=self.Om0)
    redshift = 1420. / (freq / 10**6) - 1
    dc = np.array(cosmo.comoving_distance(redshift)) * self.h / 100  # comoving distance in Mpc/h

    self.pos_z = dc[:, None, None] * np.sin(dec[None, None, :] / 180. * np.pi) * np.ones(self.shape)
    self.pos_y = dc[:, None, None] * np.sin(ra[None, :, None] / 180. * np.pi) * np.ones(self.shape)
    self.pos_x = dc[:, None, None] * np.ones(self.shape)

    DPM = np.c_[self.pos_x.reshape(-1), self.pos_y.reshape(-1), self.pos_z.reshape(-1)]
    bin_x = np.linspace(self.pos_x.min() - 10**-5, self.pos_x.max() + 10**-5, self.Nx + 1)
    bin_y = np.linspace(self.pos_y.min() - 10**-5, self.pos_y.max() + 10**-5, self.Ny + 1)
    bin_z = np.linspace(self.pos_z.min() - 10**-5, self.pos_z.max() + 10**-5, self.Nz + 1)
    return DPM, bin_x, bin_y, bin_z
def test_fit_cosmology():
    """Test fitting cosmology on simulated data."""
    # Generate some fake data.
    cosmo = FlatLambdaCDM(H0=70., Om0=0.25)
    z = np.random.rand(200)
    mb = -19.3 + cosmo.distmod(z).value
    mberr = 0.2 * np.ones_like(z)

    # fit to fake data
    fitted_cosmo = fitting.fit_cosmology(z, mb, mberr)

    # check that fitted H0 value is same as input
    assert_allclose(cosmo.H0.value, fitted_cosmo.H0.value, rtol=1e-4)
def find_nearest_in_Mpc(sam, ref, cosmo=None):
    if cosmo is None:
        from astropy.cosmology import FlatLambdaCDM
        cosmo = FlatLambdaCDM(70, 0.3)
    ang_diam_dists = cosmo.angular_diameter_distance(ref['z'])
    ref_idx = []
    sep_Mpc = []
    for ra, dec in sam['ra', 'dec']:
        seps = angsep(ref['ra'], ref['dec'], ra, dec, sepunits='radian')
        seps *= ang_diam_dists.value
        sep_Mpc.append(seps.min())
        ref_idx.append(seps.argmin())
    ref_idx = np.array(ref_idx)
    sep_Mpc = np.array(sep_Mpc)
    return ref_idx, sep_Mpc
def ag_mods(model, ti, tf, nui, nuf, j_ang, E_k, n, eps_b, eps_e, p, z, Gamma):
    """
    Create GRB afterglow light curves or spectra.
    """
    # define physical constants in cgs units
    c = 2.998e10
    me = 9.109e-28
    mp = 1.673e-24
    e = 4.803e-10
    sigma_t = 6.652e-25

    # Calculate luminosity distance
    cosmo = FlatLambdaCDM(H0=70, Tcmb0=2.725, Om0=0.3)
    d_l = cosmo.luminosity_distance(z).value * 3.086e+24  # Mpc -> cm

    t_dec, B, P_max, R, Ne = bw_props(Gamma, E_k, n, eps_b, p, mp, me, c, e, sigma_t)

    if model == 'SPECTRUM':
        freq = np.logspace(np.log10(nui), np.log10(nuf), 1000)
        ymod = np.zeros(len(freq))
        for i in range(len(freq)):
            ymod[i] = model_flux(t_dec, B, P_max, R, Ne, d_l, z, mp, me, e, c, sigma_t,
                                 ti, freq[i], Gamma, E_k, n, eps_b, eps_e, p, j_ang)
        plt.figure()
        plt.plot(freq, ymod, color='black', marker=' ')
        plt.xlabel('$\\nu$ (Hz)')
        plt.ylabel('Flux Density (mJy)')
        plt.xscale('log')
        plt.yscale('log')
        #plt.savefig('spectrum_mod.png')
        plt.show()

    if model == 'LC':
        times = np.logspace(np.log10(ti), np.log10(tf), 1000)
        ymod = np.zeros(len(times))
        for i in range(len(times)):
            ymod[i] = model_flux(t_dec, B, P_max, R, Ne, d_l, z, mp, me, e, c, sigma_t,
                                 times[i], nui, Gamma, E_k, n, eps_b, eps_e, p, j_ang)
        plt.figure()
        plt.plot(times, ymod, color='black', marker=' ')
        plt.xlabel('Time since trigger (s)')
        plt.ylabel('Flux Density (mJy)')
        plt.xscale('log')
        plt.yscale('log')
        #plt.savefig('lightcurve_mod.png')
        plt.show()

    return 0
def __init__(self, N_sn, seed, pop2=False, asifIa=False):
    super(Data, self).__init__()
    self.N_sn = N_sn
    self.seed = seed
    self.omega_M = 0.28
    self.cosmo = FlatLambdaCDM(70, self.omega_M)
    self.zmin = 0.1
    self.zmax = 1.4
    self.sigma_snIa = 0.1
    self.sigma_nonIa = 1.
    self.sigma_nonIa_2 = 0.25
    self.alpha_snIa = 2.
    self.alpha_nonIa = self.alpha_snIa * 10**(-2. / 2.5)
    self.alpha_nonIa_2 = self.alpha_snIa * 10**(-0.5 / 2.5)
    self.frac_Ia_0 = .95
    self.frac_Ia_1 = .2
    self.asifIa = asifIa
    if pop2:
        self.frac_nonIa_0 = 1.
        self.frac_nonIa_1 = 0.2
    else:
        self.frac_nonIa_0 = 1.
        self.frac_nonIa_1 = 1.
    numpy.random.seed(seed)
    self.initialize_()
def __init__(self, configfile):
    self.filepath = os.path.abspath(configfile)
    self.configs = yaml.safe_load(open(configfile))
    logger.info("Loaded configurations from file: %s" % configfile)
    for item in [("H0", 71.0), ("Om0", 0.27), ("clobber", False),
                 ("dtype", "float32"), ("unit", "K"),
                 "zmin", "zmax", "dz", "Lside", "Nside",
                 "infiles_pattern", "outfile"]:
        if isinstance(item, tuple):
            option, default = item
            setattr(self, option, self.configs.get(option, default))
        else:
            setattr(self, item, self.configs[item])
    logger.info("Set configurations")
    self.cosmo = FlatLambdaCDM(H0=self.H0, Om0=self.Om0)
def LC(z, obsfilter, time_resolution=0.1, absmag_V=-19.3, magsystem='ab',
       modelphase=0, template='salt2'):
    cosmo = FlatLambdaCDM(H0=69.6, Om0=0.286)

    #z = 0.05
    #obsfilter = 'bessellb'

    model = sncosmo.Model(source=template)
    model.set(z=0)
    magdiff = model.bandmag('bessellv', 'vega', [0]) - absmag_V
    templatescale = 10**(0.4 * magdiff)
    epochs = np.linspace(model.mintime(), model.maxtime(),
                         int((model.maxtime() - model.mintime()) / time_resolution))

    model.set(x0=templatescale, z=z)
    absmag = model.bandmag(obsfilter, magsystem, epochs)
    DM = cosmo.distmod(z)
    obsmag = absmag + DM.value

    return Table([epochs, obsmag], names=('phase', 'mag'))
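# --- Usage sketch (illustrative, not from the original source) ---
# Requires sncosmo with its built-in 'salt2' source and the 'bessellb' bandpass.
if __name__ == "__main__":
    lc = LC(0.05, 'bessellb')
    print(lc[:5])  # astropy Table with 'phase' and 'mag' columns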
def __init__(self):
    ombh2 = 0.022
    omb0 = ombh2 / param_dict['h']**2.
    #param_dict['omega_m'] = 0.3089
    from astropy.cosmology import FlatLambdaCDM
    cosmo = FlatLambdaCDM(H0=param_dict['h'] * 100., Om0=param_dict['omega_m'], Ob0=omb0)

    self.rho_bar = cosmo.critical_density(0.).to('M_sun/Mpc3').value * cosmo.Om0
    self.kmin = 1e-4
    self.kmax = 40.
    self.h = cosmo.h
    self.nk = 1000
    self.ombh2 = 0.022  # cosmo.Ob0 * cosmo.h**2.
    self.omch2 = cosmo.Om0 * cosmo.h**2.
    self.ns = 0.965

    pars = camb.CAMBparams()
    pars.set_cosmology(H0=self.h * 100, ombh2=self.ombh2, omch2=self.omch2)
    pars.set_dark_energy()  # re-set defaults
    pars.InitPower.set_params(ns=self.ns)
    self.cambmatterpower = camb.get_matter_power_interpolator(
        pars, hubble_units=0, kmax=self.kmax, k_hunit=0, nonlinear=0)
def get_sdss_sample():
    from scipy import interpolate

    filepath = cu.get_output_path() + 'processed_data/NYU_VAGC/'
    galaxy_catalogue = 'nyu_lss_mpa_vagc_dr7'
    f = h5py.File(filepath + galaxy_catalogue + '.hdf5', 'r')
    GC = f.get(galaxy_catalogue)  # galaxy catalogue
    GC = np.array(GC)
    #for name in GC.dtype.names: print(name)

    # trim the catalogue a bit
    zmin = 0.01
    zmax = 0.2
    selection = (GC['M'] <= 17.6) & (GC['Z'] < zmax) & (GC['Z'] > zmin) & \
                (GC['ZTYPE'] == 1) & (GC['FGOTMAIN'] > 0)
    GC = GC[selection]

    sm_key = 'sm_MEDIAN'
    GC[sm_key] = GC[sm_key] + np.log10(0.7**2.0)

    # make cuts on data to get a clean and complete sample
    cosmo = FlatLambdaCDM(H0=100, Om0=0.3)
    z = np.linspace(0.001, 1, 1000)
    dL = cosmo.luminosity_distance(z).value

    # make cheater fit to dL(z) function
    dL_z = interpolate.interp1d(z, dL, kind='linear')

    Mstar_lim = (4.852 + 2.246 * np.log10(dL) + 1.123 * np.log10(1 + z) - 1.186 * z) / (1.0 - 0.067 * z)

    #dL = cosmo.luminosity_distance(GC['Z']).value
    dL = dL_z(GC['Z'])
    z = GC['Z']
    LHS = (4.852 + 2.246 * np.log10(dL) + 1.123 * np.log10(1.0 + z) - 1.186 * z) / (1.0 - 0.067 * z)
    keep = (GC[sm_key] > LHS)
    GC = GC[keep]

    return GC
def convert_Fline2Lbol(lineflux, linefluxerr, redshift, verbose=True):
    """
    Convert an observed integrated line flux [erg/s/cm2] to a bolometric luminosity [erg/s]

    --- EXAMPLE OF USE ---
    import NEOGALmodels as nm
    LbolLsun, LbolLsunerr = nm.convert_Fline2Lbol(2000,100,4.1)

    """
    cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
    if verbose:
        print(' - Estimating bolometric luminosity for flat standard cosmology (H0=70, Om0=0.3, OL0=0.7)')
    DL = cosmo.luminosity_distance(redshift).value
    # DLplus = cosmo.luminosity_distance(redshift+redshifterr).value
    # DLminus = cosmo.luminosity_distance(redshift-redshifterr).value

    Mpc2cm = 3.086                           # 10**24 cm/Mpc
    Asphere = 4 * np.pi * (DL * Mpc2cm)**2   # 10**48 cm2
    Lbol = lineflux * Asphere                # 10**28 erg/s ; assuming line fluxes are in 10**-20 erg/s/cm2
    Lbolerr = linefluxerr * Asphere          # 10**28 erg/s ; assuming line fluxes are in 10**-20 erg/s/cm2

    LbolLsun = Lbol / 3.826 * 10**-5         # in units of Lbol_sun = 3.826*10**33 erg/s
    LbolLsunerr = Lbolerr / 3.826 * 10**-5   # in units of Lbol_sun = 3.826*10**33 erg/s

    if verbose:
        print(' - Returning luminosity in units of Lbol_sun = 3.826e33 erg/s')
    if verbose:
        print(' - Result is: ' + str(LbolLsun) + ' +/- ' + str(LbolLsunerr) + ' [3.826e33 erg/s]')

    return LbolLsun, LbolLsunerr
def predict_colour_for_loop(tq, tau, z):
    cosmo = FlatLambdaCDM(H0=71.0, Om0=0.26)
    age = cosmo.age(z)
    print(age)
    #time = N.arange(0, 0.01, 0.003)
    #t = N.arange(0, 13.7, 0.01)
    #time = N.append(time, t[1:])
    time = N.arange(28.0) / 2.0

    dir = '/Users/becky/Projects/Green-Valley-Project/bc03/models/Padova1994/chabrier/ASCII/'
    model = 'extracted_bc2003_lr_m62_chab_ssp.ised_ASCII'
    data = N.loadtxt(dir + model)

    sfr = N.zeros(len(time) * len(tq) * len(tau)).reshape(len(time), len(tq), len(tau))
    nuv_u = N.zeros_like(sfr)
    u_r = N.zeros_like(sfr)
    nuv_u_age = N.zeros(len(age) * len(tq) * len(tau)).reshape(len(age), len(tq), len(tau))
    u_r_age = N.zeros_like(nuv_u_age)

    for m in range(len(tq)):
        for n in range(len(tau)):
            sfr[:, m, n] = expsfh(tau[n], tq[m], time)
            total_flux = assign_fluxes.assign_total_flux(data[0, 1:], data[1:, 0], data[1:, 1:], time * 1E9, sfr[:, m, n])
            nuv_u[:, m, n], u_r[:, m, n] = get_colours(total_flux, data)
            nuv_u_age[:, m, n] = N.interp(age, time, nuv_u[:, m, n])
            u_r_age[:, m, n] = N.interp(age, time, u_r[:, m, n])

    return nuv_u_age, u_r_age
def __init__(self, box_size=50, ra_min=0, ra_max=np.pi / 2, dec_min=0, dec_max=np.pi / 2,
             min_z=0, max_z=1, seed=None, hubble=71.0, omega_m=0.27, omega_d=0.73,
             unique=False, subcone_id=0):
    self.box_size = box_size
    self.ra_min = ra_min * units.rad
    self.ra_max = ra_max * units.rad
    self.dec_min = dec_min * units.rad
    self.dec_max = dec_max * units.rad
    self.min_z = min_z
    self.max_z = max_z
    if seed is None:
        seed = random.randint(0, 1000000)
    self.seed = seed
    self.unique = unique
    # Flat cosmology: omega_lambda = 1 - omega_m is implied, so only H0 and Om0 are
    # passed (a third positional argument would be interpreted as Tcmb0).
    self.cosmology = FlatLambdaCDM(H0=hubble, Om0=omega_m)
    self.subcone_id = subcone_id
def __init__(self, omega_m, omega_l, h, minz, maxz, area=None,
             boxsize=None, one_metric_group=False, parallel=False,
             ministry_name=None):
    """
    Initialize a ministry object

    Arguments
    ---------
    omega_m : float
        Matter density parameter now
    omega_l : float
        Lambda density parameter now
    h : float
        Dimensionless hubble constant
    minz : float
        Minimum redshift
    maxz : float
        Maximum redshift
    area : float, optional
        The area spanned by all catalogs held
    """
    self.ministry_name = ministry_name
    self.omega_m = omega_m
    self.omega_l = omega_l
    self.h = h
    self.cosmo = FlatLambdaCDM(H0=100 * h, Om0=omega_m)
    self.minz = minz
    self.maxz = maxz
    self.one_metric_group = one_metric_group
    self.parallel = parallel
    self.galaxycatalog = None
    self.halocatalog = None

    if area is None:
        self.area = 0.0
    else:
        self.area = area

    if minz != maxz:
        self.lightcone = True
    else:
        self.lightcone = False

    self.boxsize = boxsize
    self.volume = self.calculate_volume(self.area, self.minz, self.maxz)
cosmo = co.Planck13
import astropy.units as uu
from hmf import MassFunction
import matplotlib
matplotlib.use('pdf')
matplotlib.rcParams['font.size'] = 12
import matplotlib.pyplot as p
from scipy.interpolate import interp1d
from scipy.misc import derivative
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u

cosmo = FlatLambdaCDM(H0=67.77 * u.km / u.s / u.Mpc, Om0=0.307115, Ob0=0.048206)
omega = lambda zz: cosmo.Om0 * (1 + zz)**3. / cosmo.efunc(zz)**2
DeltaVir_bn98 = lambda zz: (18. * n.pi**2. + 82. * (omega(zz) - 1) - 39. * (omega(zz) - 1)**2.) / omega(zz)

# Quantity studied
qty = "mvir"
# working directory
dir = join(os.environ['MULTIDARK_LIGHTCONE_DIR'], qty)
# loads summary file
data = fits.open(join(dir, "MD_" + qty + "_summary.fits"))[1].data

NminCount = 100
Npmin = 300
nolim = [0, 1e17]
limits_04 = n.log10([Npmin * 9.63 * 10**7, 5e12])
def pah_6_2(objid,aorkey,detlvl,spectrum,scale,z,filepath): #rest frame wavelenth spectrum = spectrum.dropna() wave = spectrum.wavelength * (1/(z+1.)) shifted_1 = spectrum.flux_jy[spectrum.wavelength >= 14.06935] shifted_2 = spectrum.flux_jy[spectrum.wavelength < 14.06935] * scale flux = shifted_2.append(shifted_1) * (1/(z+1.)) flux_err = spectrum.flux_jy_err * (1/(z+1.)) wave_dup = wave[wave.duplicated(keep=False)] flux_dup = flux[wave.duplicated(keep=False)] flux_dup_err = flux_err[wave.duplicated(keep=False)] if len(wave[wave.duplicated(keep=False)]) > 0.: #cut the first bad ones #wave_cut[wave_cut.dupyerlicated(keep='first')] i=0 while len(wave[wave.duplicated(keep=False)]) > 0.: new_flux = ((flux_dup.iloc[i]/flux_dup_err.iloc[i]**2) + (flux_dup.iloc[i+1]/flux_dup_err.iloc[i+1]**2))/((1/flux_dup_err.iloc[i]**2) + (1/flux_dup_err.iloc[i+1]**2)) #error of weighted mean new_flux_err = np.sqrt(1/ ((1/flux_dup_err.iloc[i]**2) + (1/flux_dup_err.iloc[i+1]**2))) wave_loc_keep = wave_dup.index[i] wave_loc_drop = wave_dup.index[i+1] #drop first wave = wave.drop(wave_loc_drop) flux = flux.drop(wave_loc_drop) flux_err = flux_err.drop(wave_loc_drop) # now keep weighted mean flux flux[wave_loc_keep] = new_flux flux_err[wave_loc_keep] = new_flux_err i=i+2 cosmo = FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.3) dl_mpc = cosmo.luminosity_distance(z) #dl in Mpc gammar_r = 0.030 line = 6.22 temp_wave = np.arange(5.5,7.0,0.01) # determine a fitting window on either side of function #cutout a 3sigma region on either side of feature sub_wave = wave[(wave >= 5.95) & (wave <= 6.55)] sub_flux = flux[(wave >= 5.95) & (wave <= 6.55)] sub_flux_err = flux_err[(wave >= 5.95) & (wave <= 6.55)] if len(sub_wave) > 3: #fit with underlying continuum one = sub_wave*0 + 1 #some centroid noise delta=pd.Series(data=np.arange(-.1,.1,.005)) rms = pd.Series(data=np.zeros(len(delta))) for k in range(len(delta)): center = line + delta[k] drude = gammar_r**2 /((((sub_wave/center) - (center/sub_wave))**2) + gammar_r**2) A = [(a,b,c) for a,b,c in zip(one,sub_wave,drude)] #can't use sklearn BECAUSE THEY DONT GIVE COVARIANCE MATRIX WTH #LR = LinearRegression(fit_intercept=True) #LR.fit(A,sub_flux) #soln_coeffs = LR.coef_ #model = soln_coeffs[0] + soln_coeffs[1]*two + soln_coeffs[2] * gauss ols = sm.OLS(sub_flux, A) ols_result = ols.fit() model = ols_result.params.const + ols_result.params.x1*sub_wave + ols_result.params.x2 * drude rms[k] = np.sqrt(np.sum(((sub_flux-model)**2)/sub_flux_err**2)) min_delta = delta[rms == rms.min()].values[0] center = line + min_delta drude = gammar_r**2 /((((sub_wave/center) - (center/sub_wave))**2) + gammar_r**2) #A = [(b,c) for b,c in zip(sub_wave,drude)] A = [(a,b,c) for a,b,c in zip(one,sub_wave,drude)] ols = sm.OLS(sub_flux, A) ols_result = ols.fit() model = ols_result.params.const + ols_result.params.x1*sub_wave + ols_result.params.x2 * drude temp_model = ols_result.params.const + ols_result.params.x1*temp_wave + ols_result.params.x2 * gammar_r**2 /((((temp_wave/center) - (center/temp_wave))**2) + gammar_r**2) model_cont = ols_result.params.const + ols_result.params.x1 * sub_wave #line luminosity in 1e42 ergs/sec dl = dl_mpc.value * 3.086e24 #Mpc to cm if z == 0: dl = 1. 
line_ew = np.pi*.5 *(ols_result.params.x2) * gammar_r * line /(ols_result.params.const + ols_result.params.x1 * line) #line_ew_err1 = ((ols_result.params.x2)/(ols_result.params.const + ols_result.params.x1 * line)) * np.sqrt(abs(((ols_result.bse.x2))/ols_result.params.x2) + abs((flux_err[pah]/wave[pah]**2)/use_c_no_ice)) a = ols_result.params.x2 b = ols_result.params.const c = ols_result.params.x1 a_err = ols_result.bse.x2 b_err = ols_result.bse.const c_err = ols_result.bse.x1 a_err_sq = ols_result.bse.x2**2 b_err_sq = ols_result.bse.const**2 c_err_sq = ols_result.bse.x1**2 c1 = (np.pi/2.)* (gammar_r*line) c2 = line partial_a_sq = (c1 * (b+(c2*c))**-1)**2 partial_b_sq = (c1 * a * (b+(c2*c))**-2)**2 partial_c_sq = (c1 * c2 *a * (b+(c2*c))**-2)**2 line_ew_err = np.sqrt((a_err_sq*partial_a_sq) + (b_err_sq*partial_b_sq) + (c_err_sq*partial_c_sq)) ''' ew_err = np.pi*.5 *(a) * gammar_r * line /(b + c * line) line_ew_err = unumpy.std_devs(ew_err) ''' line_lum = dl**2*(1/(1+z))* 4*np.pi* (a)* (2.99e14)*gammar_r/((line))* 1e-23* np.pi/2 line_lum_err = dl**2*(1/(1+z))* 4*np.pi* (ols_result.bse.x2)* (2.99e14)*gammar_r/((line))* 1e-23* np.pi/2 snr = ols_result.params.x2 / ols_result.bse.x2 '''HEY LOOK HERE OVERWRITING LINE LUM TO LINE FLUX BECAUSE IM LAZY FOR TEST''' line_flux = (1/(1+z))* (ols_result.params.x2)* (2.99e14)*gammar_r/((line))*(1e-23)* np.pi/2 #W m-2 line_flux_err = (1/(1+z))* (ols_result.bse.x2)* (2.99e14)*gammar_r/((line))*(1e-23)* np.pi/2# W m-2 #line_lum = line_flux #line_lum_err = line_flux_err ''' #DIRECT INTEGRATION #ice x2 = wave[(wave > 5.9) & (wave < 6.0)] y2 = flux[(wave > 5.9) & (wave < 6.0)] if len(x2) > 2: x2 = (x2[y2==min(y2)]).values[0] y2 = (y2[y2==min(y2)]).values[0] x3 = wave[(wave > 6.4) & (wave < 6.56)] y3 = flux[(wave > 6.4) & (wave < 6.56)] x3 = (x3[y3==min(y3)]).values[0] y3 = (y3[y3==min(y3)]).values[0] spx=[x2,x3] spy=[y2,y3] points = zip(spx, spy) # Sort list of tuples by x-value points = sorted(points, key=lambda point: point[0]) # Split list of tuples into two list of x values any y values spx, spy = zip(*points) mx = wave[(wave >= x2) & (wave <= x3)] my = flux[(wave >= x2) & (wave <= x3)] #continuum flux flux_lin = sp.interpolate.interp1d(spx, spy, kind='linear')(mx) df_flxspl=pd.DataFrame() df_flxspl['mx'] = mx df_flxspl['flux_lin'] = flux_lin pah = (wave >= x2) & (wave <= x3) use_flux = (flux[pah] - df_flxspl.flux_lin)/wave[pah]**2 wave_no_ice = mx[(mx < 6.56) & (mx > 5.9)] use_c_no_ice =df_flxspl.flux_lin[(mx < 6.56) & (mx > 5.9)] EW_ice=max(sp.integrate.cumtrapz(wave_no_ice,use_flux/use_c_no_ice)) err_1 = (use_flux/use_c_no_ice) * np.sqrt(abs((flux_err[pah]/wave[pah]**2)/use_flux) + abs((flux_err[pah]/wave[pah]**2)/use_c_no_ice)) pah_err_ice =(wave[1]-wave[0])*np.sqrt(err_1**2).sum() print "EW_ice: " + str(EW_ice) print "err: " + str(pah_err_ice) # no ice 5.5 to 6.8 x2 = wave[(wave > 5.5) & (wave < 6.0)] y2 = flux[(wave > 5.5) & (wave < 6.0)] if len(x2) > 2: x2 = (x2[y2==min(y2)]).values[0] y2 = (y2[y2==min(y2)]).values[0] x3 = wave[(wave > 6.4) & (wave < 6.8)] y3 = flux[(wave > 6.4) & (wave < 6.8)] x3 = (x3[y3==min(y3)]).values[0] y3 = (y3[y3==min(y3)]).values[0] spx=[x2,x3] spy=[y2,y3] points = zip(spx, spy) # Sort list of tuples by x-value points = sorted(points, key=lambda point: point[0]) # Split list of tuples into two list of x values any y values spx, spy = zip(*points) mx = wave[(wave >= x2) & (wave <= x3)] my = flux[(wave >= x2) & (wave <= x3)] #continuum flux flux_lin = sp.interpolate.interp1d(spx, spy, kind='linear')(mx) 
df_flxspl=pd.DataFrame() df_flxspl['mx'] = mx df_flxspl['flux_lin'] = flux_lin pah = (wave >= x2) & (wave <= x3) use_flux = (flux[pah] - df_flxspl.flux_lin)/wave[pah]**2 wave_no_ice = mx[(mx <= x3) & (mx >= x2)] use_c_no_ice =df_flxspl.flux_lin[(mx <= x3) & (mx >= x2)] EW_ice_wide=max(sp.integrate.cumtrapz(wave_no_ice,use_flux/use_c_no_ice)) err_1 = (use_flux/use_c_no_ice) * np.sqrt(abs((flux_err[pah]/wave[pah]**2)/use_flux) + abs((flux_err[pah]/wave[pah]**2)/use_c_no_ice)) pah_err_ice_wide =(wave[1]-wave[0])*np.sqrt(err_1**2).sum() print "EW_ice_wide: " + str(EW_ice_wide) print "err: " + str(pah_err_ice_wide) else: err_1 = np.nan EW_ice = np.nan pah_err_ice = np.nan ''' fig = plt.figure() #plt.ion() ax = fig.add_subplot(111) plt.title(" Aorkey: "+str(aorkey) + ' Line: 6.2) plt.suptitle("Detlvl: " + str(detlvl)) ax.plot(sub_wave,sub_flux,'.') ax.errorbar(sub_wave,sub_flux,yerr=sub_flux_err,fmt=None) ax.plot(sub_wave,model) ax.plot(sub_wave,model_cont) ax.annotate("Line Lum 10^42 ergs/s: " + str(round(line_lum/1e42,3))+ " err: "+ str(round(line_lum_err/1e42,3)),xycoords='axes fraction',textcoords='axes fraction',xytext=(.1,.8),xy=(.1,.8)) ax.annotate("Mean Squared Error: "+str(round(ols_result.mse_model,6)), xytext=(.1,.6),xy=(.1,.6)) ax.annotate("R^2: " + str(round(ols_result.rsquared,3)),xytext=(.1,.4),xy=(.1,.4)) plt.savefig(filepath+'/'+aorkey+'_'+str(line)+'.png') plt.close() ''' fig = plt.figure() #plt.ion() ax = fig.add_subplot(111) plt.title(" Aorkey: "+str(aorkey) + ' Line' + str(line)) plt.suptitle("Detlvl: " + str(detlvl)) ax.plot(sub_wave,sub_flux,'.',label='Points fit via Drude') ax.plot(mx,my,'.',label ='Points used via Direct Int') ax.errorbar(sub_wave,sub_flux,yerr=sub_flux_err,fmt=None) ax.plot(sub_wave,model,label='Drude model') ax.plot(sub_wave,model_cont,label='No Ice Continuum') ax.plot(mx,df_flxspl.flux_lin,label='Ice Continuum') ax.plot(spx,spy,'o',label ='Ice Anchors') ax.annotate("Continuum Treatment Chosen: " + cont_flag,xycoords='axes fraction',textcoords='axes fraction',xytext=(.1,.8),xy=(.1,.8)) plt.legend() fig.set_size_inches(18.5, 10.5) fig.savefig(filepath+'/'+aorkey+'_'+str(line)+'.png') plt.close() ''' else: line_ew = np.nan line_ew_err = np.nan line_lum=np.nan line_lum_err = np.nan snr = np.nan line_flux = np.nan line_flux_err = np.nan return line_lum,line_lum_err,line_ew,line_ew_err,line_flux,line_flux_err
class Source(object): def __init__(self, Zs): self.Zs = Zs self.compute_distances() self.setup_grid(NX=100,NY=100,pixscale=0.1) return # ---------------------------------------------------------------------- def compute_distances(self): self.cosmological = FlatLambdaCDM(H0=71.0, Om0=0.2669) self.Ds = self.cosmological.angular_diameter_distance(self.Zs) # ---------------------------------------------------------------------- def read_source_from(self, fitsfile): ''' Read an image from a fitsfile, and setup its grid. Here we need to properly read in the wcs coordinate information so that the grid is spaced correctly. This means we have to extract the pixel scale from the FITS header. ''' if fitsfile is None: raise Exception("You need to provide an image.\n") hdulist = fits.open(fitsfile) self.hdr = hdulist[0].header self.intensity = hdulist[0].data hdulist.close() if self.hdr['NAXIS'] == 2: if self.intensity.shape == (self.hdr['NAXIS1'],self.hdr['NAXIS2']): self.NX,self.NY = self.intensity.shape elif self.intensity.shape ==(self.hdr['NAXIS2'],self.hdr['NAXIS1']): self.NY,self.NX = self.intensity.shape else: raise Exception("Your image is formatted incorrectly.\n") else: assert len(self.intensity.shape) == 3 if self.intensity.shape == (self.hdr['NAXIS'],self.hdr['NAXIS1'],self.hdr['NAXIS2']): self.Naxes,self.NX,self.NY = self.intensity.shape elif self.intensity.shape ==(self.hdr['NAXIS'],self.hdr['NAXIS2'],self.hdr['NAXIS1']): self.Naxes,self.NY,self.NX = self.intensity.shape else: raise Exception("Your image is formatted incorrectly.\n") self.set_pixscale() # Set up a new pixel grid to go with this new kappa map: self.setup_grid() return # ---------------------------------------------------------------------- def setup_grid(self, NX=None, NY=None, pixscale=None): ''' Make two arrays, x and y, that define the extent of the maps - pixscale is the size of a pixel, in arcsec. - ''' if NX is not None: self.NX = NX if NY is not None: self.NY = NY if pixscale is not None: self.pixscale = pixscale xgrid = np.arange(-self.NX/2.0,(self.NX)/2.0,1.0)*self.pixscale+self.pixscale ygrid = np.arange(-self.NY/2.0,(self.NY)/2.0,1.0)*self.pixscale+self.pixscale self.beta_x, self.beta_y = np.meshgrid(xgrid,ygrid) return # ---------------------------------------------------------------------- def set_pixscale(self): # Modern FITS files: if 'CD1_1' in self.hdr.keys(): determinant = self.hdr['CD1_1']*self.hdr['CD2_2'] \ - self.hdr['CD1_2']*self.hdr['CD2_1'] self.pixscale = 3600.0*np.sqrt(np.abs(determinant)) # Older FITS files: elif 'CDELT1' in self.hdr.keys(): self.pixscale = 3600.0*np.sqrt(np.abs(self.hdr['CDELT1']*self.hdr['CDELT2'])) # Simple FITS files with no WCS information (bad): else: self.pixscale = 1.0 return # ---------------------------------------------------------------------- def build_from_clumps(self,size=2.0,clump_size = 0.1,axis_ratio=1.0, orientation=0.0,center=[0,0], Nclumps=50, n = 1 , error =10**-8,singlesource=False,seeds=[1,2,3],Flux=1.0): #raise Exception("cannot build source from clumps yet. \n") ''' Build source from gaussian clumps centered about specified position. Accepted parameters are as follows: - Size of the source, in kpc (approximately the half light radius) - Size of individual clumps. RMS determines spread in clump size. - Axis ratio, or the ratio of the major axis to the minor axis. - Orientation angle of the source measured in radians from the positive x-axis. - Position of the source center (in arcsec). - The number of clumps making up the source. 
- The average brightness of each clump. - The standard deviation of the brightness for individual clumps. The clump brightness follows a lognormal distribution - Sersic index n ''' self.axis_ratio = axis_ratio self.orientation = orientation self.Nclumps = Nclumps self.center = center self.n = n self.Flux=Flux #compute rms radius in arcsec self.size = np.arctan((size *units.kpc) / self.Ds ).to(units.arcsec).value self.clump_size = np.arctan((clump_size*units.kpc)/self.Ds).to(units.arcsec).value #pick random positions inside of 4r_eff # rlist = np.sqrt(np.random.random(Nclumps))*4.0*self.size # thetalist = np.random.random(Nclumps)*2*np.pi if singlesource ==False: # seed random # generation np.random.seed(seeds[0]) xpos_orig = np.random.exponential(self.size,Nclumps)/np.sqrt(self.axis_ratio)*np.random.choice([-1,1],Nclumps) np.random.seed(seeds[1]) ypos_orig = np.random.exponential(self.size,Nclumps)*np.sqrt(self.axis_ratio)*np.random.choice([-1,1],Nclumps) np.random.seed(seeds[2]) self.xlist = xpos_orig*np.cos(self.orientation)-ypos_orig*np.sin(self.orientation) + self.center[0] self.ylist = xpos_orig*np.sin(self.orientation)+ypos_orig*np.cos(self.orientation) +self.center[1] else: self.xlist = np.array([center[0],100000000]) self.ylist = np.array([center[1],100000000]) #determine constant b_n which allows us to use r as half light radius def fx(n,bn): return(2*sp.gammainc(2*n,bn)-1) #initial guess follows approximation from Capaccioli 1989 x0 = 1.9992*n - 0.3271 x1 = x0 + 0.01 j = 0 epsilon = abs(x1-x0) while epsilon > error and j<100000: fx0 = fx(n,x0) fx1 = fx(n,x1) x0,x1 = x1 , x1 - fx1*(x1-x0) / (fx1-fx0) epsilon = abs(x1-x0) j+=1 if j ==100000: #solution didn't converge, lets just approximate it. self.b_n = 1.9992*n-0.3271 else: self.b_n = x1 #self.Blist = np.exp(-self.b_n*((np.sqrt((np.cos(self.orientation)*(self.xlist-self.center[0])-np.sin(self.orientation)*(self.ylist-self.center[1]))**2*self.axis_ratio+((self.xlist-self.center[0])*np.sin(self.orientation)+(self.ylist-self.center[1])*np.cos(self.orientation))**2/self.axis_ratio)/self.size)**(1/self.n)-1)) np.random.seed(seeds[2]) self.Slist = np.random.exponential(self.clump_size,self.Nclumps) for i in range(self.Nclumps): if i==0: self.intensity = (1.0/np.sqrt(2*np.pi))*np.exp(-0.5*((self.beta_x-self.xlist[i])**2+(self.beta_y-self.ylist[i])**2)/self.Slist[i]**2) else: self.intensity +=(1.0/np.sqrt(2*np.pi))*np.exp(-0.5*((self.beta_x-self.xlist[i])**2+(self.beta_y-self.ylist[i])**2)/self.Slist[i]**2) self.intensity *= np.exp(-self.b_n*((np.sqrt((np.cos(self.orientation)*(self.beta_x-self.center[0])+np.sin(self.orientation)*(self.beta_y-self.center[1]))**2*self.axis_ratio+(-(self.beta_x-self.center[0])*np.sin(self.orientation)+(self.beta_y-self.center[1])*np.cos(self.orientation))**2/self.axis_ratio)/self.size)**(1/self.n)-1)) # Normalize flux of all sources to input total flux self.intensity *= self.Flux/(np.sum(self.intensity)*self.pixscale**2) return # ---------------------------------------------------------------------- def build_sersic_clumps(self,Nnuclei=1,NclumpsPerNucleus=1,\ x0=0,y0=0,q=1.,phi=0.,r_hl=0.1,n=1., seed1 = 0): ''' Build a source that (generally) follows a sersic profile, but with clumps that are broken into nuclei, allowing for an extra level of structure compared to a simple analytic source. 
Takes: - Nnuclei: Number of nuclei - NclumpsPerNucleus: Number of clumps per nucleus - x0,y0,q,phi,r_hl,n: parameters of the sersic profile - seed1-4: random seeds Returns: - void: Updates self.intensity ''' bn = evil.Compute_bn(n) # create nuclei np.random.seed(seed1) xn,yn = self.draw_clump_nuclei_positions(Nnuclei,x0,y0,q,r_hl,phi,n) sn = self.draw_clump_nuclei_sizes(Nnuclei,r_hl) for i in range(len(sn)): if i ==0: xc,yc = self.draw_clump_positions(NclumpsPerNucleus,sn[i],xn[i],yn[i]) sc = self.draw_clump_sizes_powerlaw(NclumpsPerNucleus,0.05*sn[i],sn[i]) else: xctemp,yctemp = self.draw_clump_positions(NclumpsPerNucleus,sn[i],xn[i],yn[i]) xc = np.append(xc,xctemp) yc = np.append(yc,yctemp) sc = np.append(sc, self.draw_clump_sizes_powerlaw(NclumpsPerNucleus,0.01*sn[i],sn[i])) # add clumps to image for i in range(len(sc)): if i ==0: self.intensity = np.exp(-0.5*((self.beta_x-xc[i])**2+(self.beta_y-yc[i])**2)/sc[i]**2) else: self.intensity += np.exp(-0.5*((self.beta_x-xc[i])**2+(self.beta_y-yc[i])**2)/sc[i]**2) return def draw_clump_sizes_powerlaw(self,Nclumps,min_size,max_size,index=-1): ''' draw a list of clump radii (in arcsec) from a power-law distribution with a specified index, and between a minimum and maximum size. Takes: - Nclumps: The number of clumps to draw sizes for - min_size: The minimum size of clumps - max_size: The maximum size of clumps - index: The power-law index - seed: An integer specifying the random state Returns: - sizes: A list of sizes drawn randomly from the power-law distribution ''' # setup interpolation function that will be used for generator x = 10**np.linspace(np.log10(min_size),np.log10(max_size),100000) y = x**index y -= np.min(y) y /= np.max(y) finterp = interp1d(y,x,'linear') draws = np.random.random(Nclumps) sizes = finterp(draws) return sizes def draw_clump_nuclei_positions(self,Nnuclei,x0,y0,q,r_hl,phi,n): ''' Draw a list of x and y coordinates for source nuclei from a sersic distribution. Takes: Nnuclei: Number of nuclei (clump superstructures) x0,y0: center of the source q,phi: the axis ratio and rotation angle of the source r_hl: The half-light radius of the source n: Sersic index seed: a seed to control the random generator Returns: x,y: Randomly drawn coordinates of nuclei''' # first generate a sersic profile r = np.linspace(0,5*r_hl,100000) bn = evil.Compute_bn(n) Ir = evil.Sersic(r*np.cos(0.),r*np.sin(0.),0.,0.,1.,r_hl,0.,n,bn) # Get CDF and normalize Prob = np.flipud(np.cumsum(np.flipud(Ir))) Prob /= np.max(Prob) # Interpolate random numbers to CDF to get sersic random numbers finterp = interp1d(Prob,r,'linear') draws = np.random.random(Nnuclei) radius = finterp(draws) # draw random angles angle = np.random.random(Nnuclei)*2*np.pi # transform radius and angle to x,y, position xp = radius*np.cos(angle)/q yp = radius*np.sin(angle)*q # rotate x = np.cos(phi)*(xp)+np.sin(phi)*(yp) y = -np.sin(phi)*(xp)+np.cos(phi)*(yp) # adjust center x += x0 y += y0 return x,y def draw_clump_nuclei_sizes(self,Nnuclei,src_size): ''' Arbitrarily defined nuclei size are taken to be inversely proportional to source size. randomly generate these sizes. ''' mu = 2*src_size/np.sqrt(float(Nnuclei)) sigma = 0.2*src_size/np.sqrt(float(Nnuclei)) return np.random.normal(mu,sigma,Nnuclei) def draw_clump_positions(self,Nclumps,nuclei_size,x,y): ''' Draw gaussian random positions for clumps. 
''' xc = np.random.normal(x,nuclei_size,Nclumps) yc = np.random.normal(y,nuclei_size,Nclumps) return xc,yc # ---------------------------------------------------------------------- def write_source_to(self,fitsfile,overwrite=False): ''' Write an image of the source to a fits file. ''' hdu = fits.PrimaryHDU(self.intensity) hdu.header['CDELT1'] = self.pixscale / 3600.0 hdu.header['CDELT2'] = self.pixscale / 3600.0 hdu.writeto(fitsfile, clobber=overwrite) return
def pah_8_5(objid, aorkey, detlvl, spectrum, scale, z, fittype, filepath):
    # rest-frame wavelength
    spectrum = spectrum.dropna()
    wave = spectrum.wavelength * (1 / (z + 1.))
    shifted_1 = spectrum.flux_jy[spectrum.wavelength >= 14.06935]
    shifted_2 = spectrum.flux_jy[spectrum.wavelength < 14.06935] * scale
    flux = shifted_2.append(shifted_1)
    flux = spectrum.flux_jy
    flux_err = spectrum.flux_jy_err

    cosmo = FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.3)
    dl_mpc = cosmo.luminosity_distance(z)  # dl in Mpc

    gammar_r = 0.050
    gammar_n = 0.039
    fratio = 2.18
    line = 8.33
    line_n = 8.61
    temp_wave = np.arange(7.8, 9.5, 0.01)

    # determine a fitting window on either side of the feature:
    # cut out a 3-sigma region on either side of the feature
    sub_wave = wave[(wave >= 7.8) & (wave <= 9.5)]
    sub_flux = flux[(wave >= 7.8) & (wave <= 9.5)]
    sub_flux_err = flux_err[(wave >= 7.8) & (wave <= 9.5)]
    sub_wave2 = (sub_wave - line)**2
    sub_wave3 = (sub_wave - line)**3

    if len(sub_wave) > 3:
        # fit with underlying continuum
        one = sub_wave * 0 + 1.0
        # some centroid noise
        delta = pd.Series(data=np.arange(-.03, .03, .005))
        rms = pd.Series(data=np.zeros(len(delta)))
        for k in range(len(delta)):
            center = line + delta[k]
            center_n = line_n + delta[k]
            drude = (gammar_r**2 / ((((sub_wave / center) - (center / sub_wave))**2) + gammar_r**2)) + (fratio * gammar_n**2 / ((((sub_wave / center_n) - (center_n / sub_wave))**2) + gammar_n**2))
            A = [(a, b, c, d, e) for a, b, c, d, e in zip(one, sub_wave, sub_wave2, sub_wave3, drude)]
            # sklearn's LinearRegression is not used here because it does not return the covariance matrix
            ols = sm.OLS(sub_flux, A)
            ols_result = ols.fit()
            model = ols_result.params.const + ols_result.params.x1 * sub_wave + ols_result.params.x2 * sub_wave2 + ols_result.params.x3 * sub_wave3 + ols_result.params.x4 * drude
            rms[k] = np.sqrt(np.sum(((sub_flux - model)**2) / sub_flux_err**2))

        min_delta = delta[rms == rms.min()].values[0]
        center = line + min_delta
        center_n = line_n + min_delta
        drude = (gammar_r**2 / ((((sub_wave / center) - (center / sub_wave))**2) + gammar_r**2)) + (fratio * gammar_n**2 / ((((sub_wave / center_n) - (center_n / sub_wave))**2) + gammar_n**2))
        A = [(a, b, c, d, e) for a, b, c, d, e in zip(one, sub_wave, sub_wave2, sub_wave3, drude)]
        ols = sm.OLS(sub_flux, A)
        ols_result = ols.fit()
        model = ols_result.params.const + ols_result.params.x1 * sub_wave + ols_result.params.x2 * sub_wave2 + ols_result.params.x3 * sub_wave3 + ols_result.params.x4 * drude
        temp_model = ols_result.params.const + ols_result.params.x1 * temp_wave + ols_result.params.x2 * (temp_wave - line)**2 + ols_result.params.x3 * (temp_wave - line)**3 + ols_result.params.x4 * (gammar_r**2) / (((temp_wave / center) - (center / temp_wave))**2 + gammar_r**2) + ols_result.params.x4 * fratio * (gammar_n**2) / (((temp_wave / center_n) - (center_n / temp_wave))**2 + gammar_n**2)
        temp_cont = ols_result.params.const + ols_result.params.x1 * temp_wave + ols_result.params.x2 * (temp_wave - line)**2 + ols_result.params.x3 * (temp_wave - line)**3

        # line luminosity in 1e42 erg/s
        dl = dl_mpc.value * 3.086e24  # Mpc to cm
        line_lum = dl**2 * (1 / (1 + z)) * 4 * np.pi * (ols_result.params.x4) * (2.99e14) * (fratio * gammar_n + gammar_r) / ((line)) * 1e-23 * np.pi / 2
        line_lum_err = dl**2 * (1 / (1 + z)) * 4 * np.pi * (ols_result.bse.x4) * (2.99e14) * (fratio * gammar_n + gammar_r) / ((line)) * 1e-23 * np.pi / 2
        snr = ols_result.params.x4 / ols_result.bse.x4

        fig = plt.figure()
        #plt.ion()
        ax = fig.add_subplot(111)
        plt.title(" Aorkey: " + str(aorkey) + ' PAH' + str(line))
        plt.suptitle("Detlvl: " + str(detlvl))
        ax.plot(sub_wave, sub_flux, '.')
        ax.errorbar(sub_wave, sub_flux, yerr=sub_flux_err, fmt='none')
        ax.plot(temp_wave, temp_model)
        ax.plot(temp_wave, temp_cont)
        ax.annotate("Line Lum 10^42 ergs/s: " + str(round(line_lum / 1e42, 3)) + " err: " + str(round(line_lum_err / 1e42, 3)), xycoords='axes fraction', textcoords='axes fraction', xytext=(.1, .8), xy=(.1, .8))
        ax.annotate("Mean Squared Error: " + str(round(ols_result.mse_model, 6)), xycoords='axes fraction', textcoords='axes fraction', xytext=(.1, .6), xy=(.1, .6))
        ax.annotate("R^2: " + str(round(ols_result.rsquared, 3)), xycoords='axes fraction', textcoords='axes fraction', xytext=(.1, .4), xy=(.1, .4))
        plt.savefig(filepath + '/' + aorkey + '_' + str(line) + '.png')
        plt.close()
    else:
        line_lum = np.nan
        line_lum_err = np.nan
        snr = np.nan

    return line_lum, line_lum_err, snr
def pah_11_3(objid,aorkey,detlvl,spectrum,scale,z,filepath): #rest frame wavelenth spectrum = spectrum.dropna() wave = spectrum.wavelength * (1/(z+1.)) shifted_1 = spectrum.flux_jy[spectrum.wavelength >= 14.06935] shifted_2 = spectrum.flux_jy[spectrum.wavelength < 14.06935] * scale flux = shifted_2.append(shifted_1) * (1/(z+1.)) flux_err = spectrum.flux_jy_err * (1/(z+1.)) wave_dup = wave[wave.duplicated(keep=False)] flux_dup = flux[wave.duplicated(keep=False)] flux_dup_err = flux_err[wave.duplicated(keep=False)] if len(wave[wave.duplicated(keep=False)]) > 0.: #cut the first bad ones #wave_cut[wave_cut.dupyerlicated(keep='first')] i=0 while len(wave[wave.duplicated(keep=False)]) > 0.: print i new_flux = ((flux_dup.iloc[i]/flux_dup_err.iloc[i]**2) + (flux_dup.iloc[i+1]/flux_dup_err.iloc[i+1]**2))/((1/flux_dup_err.iloc[i]**2) + (1/flux_dup_err.iloc[i+1]**2)) #error of weighted mean new_flux_err = np.sqrt(1/ ((1/flux_dup_err.iloc[i]**2) + (1/flux_dup_err.iloc[i+1]**2))) wave_loc_keep = wave_dup.index[i] wave_loc_drop = wave_dup.index[i+1] #drop first wave = wave.drop(wave_loc_drop) flux = flux.drop(wave_loc_drop) flux_err = flux_err.drop(wave_loc_drop) # now keep weighted mean flux flux[wave_loc_keep] = new_flux flux_err[wave_loc_keep] = new_flux_err i=i+2 cosmo = FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.3) dl_mpc = cosmo.luminosity_distance(z) #dl in Mpc gammar_r = 0.032 gammar_n = 0.012 fratio = 1.25 line = 11.33 line_n = 11.23 temp_wave = np.arange(10.0,12.5,0.01) # determine a fitting window on either side of function #cutout a 3sigma region on either side of feature sub_wave = wave[((wave >= 10.0) & (wave <= 10.4)) | ((wave >= 10.6) & (wave <= 12.5))] sub_flux = flux[((wave >= 10.0) & (wave <= 10.4)) | ((wave >= 10.6) & (wave <= 12.5))] sub_flux_err = flux_err[((wave >= 10.0) & (wave <= 10.4)) | ((wave >= 10.6) & (wave <= 12.5))] sub_wave2= (sub_wave-line)**2 sub_wave3 = (sub_wave - line)**3 if len(sub_wave) > 3: #fine scale x #fit with underlying continuum one = sub_wave*0 + 1.0 #some centroid noise delta=pd.Series(data=np.arange(-.03,.03,.005)) rms = pd.Series(data=np.zeros(len(delta))) for k in range(len(delta)): center = line + delta[k] center_n = line_n + delta[k] drude = (gammar_r**2 /((((sub_wave/center) - (center/sub_wave))**2) + gammar_r**2)) + (fratio * gammar_n**2 /((((sub_wave/center_n) - (center_n/sub_wave))**2) + gammar_n**2)) A = [(a,b,c,d,e) for a,b,c,d,e in zip(one,sub_wave,sub_wave2,sub_wave3,drude)] #can't use sklearn BECAUSE THEY DONT GIVE COVARIANCE MATRIX WTH #LR = LinearRegression(fit_intercept=True) #LR.fit(A,sub_flux) #soln_coeffs = LR.coef_ #model = soln_coeffs[0] + soln_coeffs[1]*two + soln_coeffs[2] * gauss ols = sm.OLS(sub_flux, A) ols_result = ols.fit() model = ols_result.params.const + ols_result.params.x1*sub_wave + ols_result.params.x2 * sub_wave2 + ols_result.params.x3 * sub_wave3 + ols_result.params.x4 * drude rms[k] = np.sqrt(np.sum(((sub_flux-model)**2)/sub_flux_err**2)) min_delta = delta[rms == rms.min()].values[0] center = line + min_delta center_n = line_n + min_delta drude = (gammar_r**2 /((((sub_wave/center) - (center/sub_wave))**2) + gammar_r**2)) + (fratio * gammar_n**2 /((((sub_wave/center_n) - (center_n/sub_wave))**2) + gammar_n**2)) A = [(a,b,c,d,e) for a,b,c,d,e in zip(one,sub_wave,sub_wave2,sub_wave3,drude)] ols = sm.OLS(sub_flux, A) ols_result = ols.fit() model = ols_result.params.const + ols_result.params.x1*sub_wave + ols_result.params.x2 * sub_wave2 + ols_result.params.x3 * sub_wave3 + ols_result.params.x4 * drude temp_model = 
ols_result.params.const + ols_result.params.x1 * temp_wave +ols_result.params.x2 * (temp_wave - line)**2 +ols_result.params.x3 * (temp_wave - line)**3 + ols_result.params.x4 *(gammar_r**2)/(((temp_wave/center) - (center/temp_wave))**2 + gammar_r**2)+ols_result.params.x4 *fratio*(gammar_n**2)/ (((temp_wave/center_n) - (center_n/temp_wave))**2 + gammar_n**2) temp_cont = ols_result.params.const + ols_result.params.x1 * temp_wave +ols_result.params.x2 * (temp_wave - line)**2 +ols_result.params.x3 * (temp_wave - line)**3 #line luminosity in 1e42 ergs/sec import uncertainties.unumpy as unumpy dl = dl_mpc.value * 3.086e24 #Mpc to cm #line_ew_err1 = ((ols_result.params.x2)/(ols_result.params.const + ols_result.params.x1 * line)) * np.sqrt(abs(((ols_result.bse.x2))/ols_result.params.x2) + abs((flux_err[pah]/wave[pah]**2)/use_c_no_ice)) a = ols_result.params.x4 #drude b = ols_result.params.const #baseline c = ols_result.params.x1 #0th order coeff d = ols_result.params.x2 #2nd order coeff e = ols_result.params.x3 # 3rd order coeff a_err = ols_result.bse.x4 b_err = ols_result.bse.const c_err = ols_result.bse.x1 d_err = ols_result.bse.x2 e_err = ols_result.bse.x3 a = unumpy.uarray(( a,a_err )) b = unumpy.uarray(( b,b_err )) c = unumpy.uarray(( c,c_err )) d = unumpy.uarray(( d,d_err )) e = unumpy.uarray(( e,e_err )) line_ew = np.pi*.5 *(a) * (gammar_r*line) /(b + (c * line) + d*(line - line_n)**2 + e*(line - line_n)**3) line_ew_err = unumpy.std_devs(line_ew) line_ew = unumpy.nominal_values(a) ''' a_err_sq = ols_result.bse.x4**2 b_err_sq = ols_result.bse.const**2 c_err_sq = ols_result.bse.x1**2 d_err_sq = ols_result.bse.x2**2 e_err_sq = ols_result.bse.x3**2 c1 = (np.pi/2.) * (gammar_r*line + gammar_n*fratio*line_n) c2 = line c3 = line - line_n c4 = line - line_n c_parens = c1 * (b + c2*c + c3*d**2 + c4*e**3)**-1 c_parens_sq = c1 * (b + c2*c + c3*d**2 + c4*e**3)**-1 partial_a_sq = (c_parens)**2 partial_b_sq = (a * c_parens_sq)**2 partial_c_sq = (a*c2 * c_parens_sq)**2 partial_d_sq = (2*a*c3*d * c_parens_sq)**2 partial_e_sq = (a*c4*3*(e**2) * c_parens_sq)**2 ew_err = np.sqrt((a_err_sq*partial_a_sq) + (b_err_sq*partial_b_sq) + (c_err_sq*partial_c_sq) + (d_err_sq*partial_d_sq) + (e_err_sq*partial_e_sq)) ''' #ew_err = ((np.pi/2.)/(ols_result.params.const + line_1*ols_result.params.x1)**2)*((ols_result.bse.x2**2) + (ols_result.params.x2**2)*((ols_result.params.const)**2 + (line_1*ols_result.params.x1)**2)) if z == 0: dl = 1. 
line_lum = dl**2*(1/(1+z))* 4*np.pi* (a)* (2.99e14)*(gammar_r + gammar_n*fratio)/((line))* 1e-23* np.pi/2 line_lum_err = unumpy.std_devs(line_lum) line_lum = unumpy.nominal_values(line_lum) ''' line_lum = dl**2*(1/(1+z))* 4*np.pi* (ols_result.params.x4)* (2.99e14)*(gammar_r + gammar_n*fratio)/((line))* 1e-23* np.pi/2 line_lum_err = dl**2*(1/(1+z))* 4*np.pi* (ols_result.bse.x4)* (2.99e14)*(gammar_r + gammar_n*fratio)/((line))* 1e-23* np.pi/2 line_flux = (1/(1+z))* (ols_result.params.x2)* (2.99e14)*gammar_r/((line))*(1e-23)* np.pi/2 #W m-2 line_flux_err = (1/(1+z))* (ols_result.bse.x2)* (2.99e14)*gammar_r/((line))*(1e-23)* np.pi/2# W m-2 ''' snr = ols_result.params.x2 / ols_result.bse.x2 line_flux = (1/(1+z))* (ols_result.params.x2)* (2.99e14)*gammar_r/((line))*(1e-23)* np.pi/2 #W m-2 line_flux_err = (1/(1+z))* (ols_result.bse.x2)* (2.99e14)*gammar_r/((line))*(1e-23)* np.pi/2# W m-2 #line_lum = line_flux #line_lum_err = line_flux_err fig = plt.figure() #plt.ion() ax = fig.add_subplot(111) plt.title(" Aorkey: "+str(aorkey) + ' Line: 11.3') plt.suptitle("Detlvl: " + str(detlvl)) ax.plot(sub_wave,sub_flux,'.') ax.errorbar(sub_wave,sub_flux,yerr=sub_flux_err,fmt=None) ax.plot(temp_wave,temp_model) ax.plot(temp_wave,temp_cont) ax.annotate("Line Lum 10^42 ergs/s: " + str(round(line_lum/1e42,3))+ " err: "+ str(round(line_lum_err/1e42,3)),xycoords='axes fraction',textcoords='axes fraction',xytext=(.1,.8),xy=(.1,.8)) ax.annotate("Mean Squared Error: "+str(round(ols_result.mse_model,6)),xycoords='axes fraction',textcoords='axes fraction', xytext=(.1,.6),xy=(.1,.6)) ax.annotate("R^2: " + str(round(ols_result.rsquared,3)),xycoords='axes fraction',textcoords='axes fraction',xytext=(.1,.4),xy=(.1,.4)) fig.set_size_inches(18.5, 10.5) fig.savefig(filepath+'/'+aorkey+'_'+str(line)+'.png') plt.close() else: line_ew = np.nan line_ew_err = np.nan line_lum=np.nan line_lum_err = np.nan snr = np.nan return line_lum,line_lum_err,line_ew,line_ew_err
def sl_sys_analysis(): # Get command line arguments args = {} if comm_rank == 0: print(":Registered %d processes" % comm_size) args["infile"] = sys.argv[1] args["nimgs"] = sys.argv[2] args["los"] = sys.argv[3] args["version"] = sys.argv[4] args["dt_sigma"] = float(sys.argv[5]) args["image_amps_sigma"] = float(sys.argv[6]) args["flux_ratio_errors"] = float(sys.argv[7]) args["astrometry_sigma"] = float(sys.argv[8]) args = comm.bcast(args) # Organize devision of strong lensing systems with open(args["infile"], "r") as myfile: limg_data = myfile.read() systems = json.loads(limg_data) sys_nr_per_proc = int(len(systems) / comm_size) print("comm_rank", comm_rank) start_sys = sys_nr_per_proc * comm_rank end_sys = sys_nr_per_proc * (comm_rank + 1) print(start_sys, end_sys) with open("../lens_catalogs_sie_only.json", "r") as myfile: limg_data = myfile.read() systems_prior = json.loads(limg_data) if comm_rank == 0: print("Each process will have %d systems" % sys_nr_per_proc) print("That should take app. %f min." % (sys_nr_per_proc * 20)) source_size_pc = 10.0 window_size = 0.1 # units of arcseconds grid_number = 100 # supersampled window (per axis) z_source = 2.0 cosmo = FlatLambdaCDM(H0=71, Om0=0.3089, Ob0=0.0) results = {"gamma": [], "phi_ext": [], "gamma_ext": [], "theta_E": [], "D_dt": []} for ii in range(len(systems))[(start_sys + 2) : end_sys]: system = systems[ii] system_prior = systems_prior[ii] print("Analysing system ID: %d" % ii) # the data set is z_lens = system_prior["zl"] lensCosmo = LensCosmo(cosmo=cosmo, z_lens=z_lens, z_source=z_source) # convert units of pc into arcseconds D_s = lensCosmo.D_s source_size_arcsec = source_size_pc / 10 ** 6 / D_s / constants.arcsec print("The source size in arcsec init = %.4f" % source_size_arcsec) #0.0012 # multiple images properties ximg = np.zeros(system["nimgs"]) yimg = np.zeros(system["nimgs"]) t_days = np.zeros(system["nimgs"]) image_amps = np.zeros(system["nimgs"]) for jj in range(system["nimgs"]): ximg[jj] = system["ximg"][jj] # [arcsec] yimg[jj] = system["yimg"][jj] # [arcsec] t_days[jj] = system["delay"][jj] # [days] image_amps[jj] = system["mags"][jj] # [linear units or magnitudes] # sort by arrival time index_sort = np.argsort(t_days) ximg = ximg[index_sort] # relative RA (arc seconds) yimg = yimg[index_sort] # relative DEC (arc seconds) image_amps = np.abs(image_amps[index_sort]) t_days = t_days[index_sort] d_dt = t_days[1:] - t_days[0] # measurement uncertainties astrometry_sigma = args["astrometry_sigma"] ximg_measured = ximg + np.random.normal(0, astrometry_sigma, system["nimgs"]) yimg_measured = yimg + np.random.normal(0, astrometry_sigma, system["nimgs"]) image_amps_sigma = np.ones(system["nimgs"]) * args["image_amps_sigma"] flux_ratios = image_amps[1:] - image_amps[0] flux_ratio_errors = np.ones(system["nimgs"] - 1) * args["flux_ratio_errors"] flux_ratios_measured = flux_ratios + np.random.normal(0, flux_ratio_errors) d_dt_sigma = np.ones(system["nimgs"] - 1) * args["dt_sigma"] d_dt_measured = d_dt + np.random.normal(0, d_dt_sigma) kwargs_data_joint = { "time_delays_measured": d_dt_measured, "time_delays_uncertainties": d_dt_sigma, "flux_ratios": flux_ratios_measured, "flux_ratio_errors": flux_ratio_errors, "ra_image_list": [ximg_measured], "dec_image_list": [yimg_measured], } # lens model choices lens_model_list = ["SPEMD", "SHEAR_GAMMA_PSI"] # 1. 
layer: primary SPEP fixed_lens = [] kwargs_lens_init = [] kwargs_lens_sigma = [] kwargs_lower_lens = [] kwargs_upper_lens = [] fixed_lens.append({}) kwargs_lens_init.append( { "theta_E": 1.0, "gamma": 2, "center_x": 0, "center_y": 0, "e1": 0, "e2": 0.0, } ) # error kwargs_lens_sigma.append( { "theta_E": 0.2, "e1": 0.1, "e2": 0.1, "gamma": 0.1, "center_x": 0.1, "center_y": 0.1, } ) # lower limit kwargs_lower_lens.append( { "theta_E": 0.01, "e1": -0.5, "e2": -0.5, "gamma": 1.5, "center_x": -10, "center_y": -10, } ) # upper limit kwargs_upper_lens.append( { "theta_E": 10, "e1": 0.5, "e2": 0.5, "gamma": 2.5, "center_x": 10, "center_y": 10, } ) # 2nd layer: external SHEAR fixed_lens.append({"ra_0": 0, "dec_0": 0}) kwargs_lens_init.append({"gamma_ext": 0.05, "psi_ext": 0.0}) kwargs_lens_sigma.append({"gamma_ext": 0.05, "psi_ext": np.pi}) kwargs_lower_lens.append({"gamma_ext": 0, "psi_ext": -np.pi}) kwargs_upper_lens.append({"gamma_ext": 0.3, "psi_ext": np.pi}) # 3rd layer: external CONVERGENCE kwargs_lens_init.append({'kappa_ext': 0.12}) kwargs_lens_sigma.append({'kappa_ext': 0.06}) kwargs_lower_lens.append({'kappa_ext': 0.0}) kwargs_upper_lens.append({'kappa_ext': 0.3}) # combined lens model lens_params = [ kwargs_lens_init, kwargs_lens_sigma, fixed_lens, kwargs_lower_lens, kwargs_upper_lens, ] # image position parameters point_source_list = ["LENSED_POSITION"] # we fix the image position coordinates fixed_ps = [{}] # the initial guess for the appearing image positions is: # at the image position. kwargs_ps_init = [{"ra_image": ximg, "dec_image": yimg}] # let some freedome in how well the actual image positions are # matching those given by the data (indicated as 'ra_image', 'dec_image' # and held fixed while fitting) kwargs_ps_sigma = [ { "ra_image": 0.01 * np.ones(len(ximg)), "dec_image": 0.01 * np.ones(len(ximg)), } ] kwargs_lower_ps = [ { "ra_image": -10 * np.ones(len(ximg)), "dec_image": -10 * np.ones(len(ximg)), } ] kwargs_upper_ps = [ {"ra_image": 10 * np.ones(len(ximg)), "dec_image": 10 * np.ones(len(ximg))} ] ps_params = [ kwargs_ps_init, kwargs_ps_sigma, fixed_ps, kwargs_lower_ps, kwargs_upper_ps, ] # quasar source size fixed_special = {} kwargs_special_init = {} kwargs_special_sigma = {} kwargs_lower_special = {} kwargs_upper_special = {} fixed_special["source_size"] = source_size_arcsec kwargs_special_init["source_size"] = source_size_arcsec kwargs_special_sigma["source_size"] = source_size_arcsec kwargs_lower_special["source_size"] = 0.0001 kwargs_upper_special["source_size"] = 1 # Time-delay distance kwargs_special_init["D_dt"] = 4300 # corresponds to H0 ~ 70 kwargs_special_sigma["D_dt"] = 3000 kwargs_lower_special["D_dt"] = 2500 # corresponds to H0 ~ 120 kwargs_upper_special["D_dt"] = 14000 # corresponds to H0 ~ 20 special_params = [ kwargs_special_init, kwargs_special_sigma, fixed_special, kwargs_lower_special, kwargs_upper_special, ] # combined parameter settings kwargs_params = { "lens_model": lens_params, "point_source_model": ps_params, "special": special_params, } # our model choices kwargs_model = { "lens_model_list": lens_model_list, "point_source_model_list": point_source_list, } lensModel = LensModel(kwargs_model["lens_model_list"]) lensModelExtensions = LensModelExtensions(lensModel=lensModel) lensEquationSolver = LensEquationSolver(lensModel=lensModel) # setup options for likelihood and parameter sampling time_delay_likelihood = True flux_ratio_likelihood = True image_position_likelihood = True kwargs_flux_compute = { "source_type": "INF", "window_size": window_size, 
"grid_number": grid_number, } kwargs_constraints = { "num_point_source_list": [int(args["nimgs"])], # any proposed lens model must satisfy the image positions # appearing at the position of the point sources being sampeld # "solver_type": "PROFILE_SHEAR", "Ddt_sampling": time_delay_likelihood, # sampling of the time-delay distance # explicit modelling of the astrometric imperfection of # the point source positions "point_source_offset": True, } # explicit sampling of finite source size parameter # (only use when source_type='GAUSSIAN' or 'TORUS') if ( kwargs_flux_compute["source_type"] in ["GAUSSIAN", "TORUS"] and flux_ratio_likelihood is True ): kwargs_constraints["source_size"] = True # e.g. power-law mass slope of the main deflector # [[index_model, 'param_name', mean, 1-sigma error], [...], ...] prior_lens = [[0, "gamma", 2, 0.1]] prior_special = [] kwargs_likelihood = { "position_uncertainty": args["astrometry_sigma"], "source_position_likelihood": True, "image_position_likelihood": True, "time_delay_likelihood": True, "flux_ratio_likelihood": True, "kwargs_flux_compute": kwargs_flux_compute, "prior_lens": prior_lens, "prior_special": prior_special, "check_solver": True, "solver_tolerance": 0.001, "check_bounds": True, } fitting_seq = FittingSequence( kwargs_data_joint, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params, ) fitting_kwargs_list = [ ["PSO", {"sigma_scale": 1.0, "n_particles": 200, "n_iterations": 500}] ] chain_list_pso = fitting_seq.fit_sequence(fitting_kwargs_list) kwargs_result = fitting_seq.best_fit() kwargs_result = fitting_seq.best_fit(bijective=True) args_result = fitting_seq.param_class.kwargs2args(**kwargs_result) logL, _ = fitting_seq.likelihoodModule.logL(args_result, verbose=True) # and now we run the MCMC fitting_kwargs_list = [ [ "MCMC", {"n_burn": 400, "n_run": 600, "walkerRatio": 10, "sigma_scale": 0.1}, ] ] chain_list_mcmc = fitting_seq.fit_sequence(fitting_kwargs_list) kwargs_result = fitting_seq.best_fit() # print("number of non-linear parameters in the MCMC process: ", len(param_mcmc)) # print("parameters in order: ", param_mcmc) print("number of evaluations in the MCMC process: ", np.shape(samples_mcmc)[0]) param = Param( kwargs_model, fixed_lens, kwargs_fixed_ps=fixed_ps, kwargs_fixed_special=fixed_special, kwargs_lens_init=kwargs_result["kwargs_lens"], **kwargs_constraints, ) # the number of non-linear parameters and their names # num_param, param_list = param.num_param() for i in range(len(samples_mcmc)): kwargs_out = param.args2kwargs(samples_mcmc[i]) kwargs_lens_out, kwargs_special_out, kwargs_ps_out = ( kwargs_out["kwargs_lens"], kwargs_out["kwargs_special"], kwargs_out["kwargs_ps"], ) # compute 'real' image position adding potential astrometric shifts x_pos = kwargs_ps_out[0]["ra_image"] y_pos = kwargs_ps_out[0]["dec_image"] # extract quantities of the main deflector theta_E = kwargs_lens_out[0]["theta_E"] gamma = kwargs_lens_out[0]["gamma"] e1, e2 = kwargs_lens_out[0]["e1"], kwargs_lens_out[0]["e2"] phi, q = param_util.ellipticity2phi_q(e1, e2) phi_ext, gamma_ext = ( kwargs_lens_out[1]["psi_ext"] % np.pi, kwargs_lens_out[1]["gamma_ext"], ) if flux_ratio_likelihood is True: mag = lensModel.magnification(x_pos, y_pos, kwargs_lens_out) flux_ratio_fit = mag[1:] / mag[0] if ( kwargs_constraints.get("source_size", False) is True and "source_size" not in fixed_special ): source_size = kwargs_special_out["source_size"] if time_delay_likelihood is True: D_dt = kwargs_special_out["D_dt"] # and here the predicted angular diameter distance 
# ... from a default cosmology (attention for experimenter bias!)
        gamma = np.median(gamma)
        phi_ext = np.median(phi_ext)
        gamma_ext = np.median(gamma_ext)
        theta_E = np.median(theta_E)
        D_dt = np.median(D_dt)
        results["gamma"].append(gamma)
        results["phi_ext"].append(phi_ext)
        results["gamma_ext"].append(gamma_ext)
        results["theta_E"].append(theta_E)
        # store the sampled time-delay distance under the key defined in the
        # results dict above; H0 is proportional to 1/D_dt and can be derived afterwards
        results["D_dt"].append(D_dt)
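# --- Added sketch (not part of the original pipeline) ---
# For a fixed lens/source geometry the time-delay distance D_dt scales as 1/H0,
# so H0 can be recovered by rescaling the D_dt predicted by a fiducial cosmology.
# The helper below only uses astropy and the z_lens / z_source values defined
# above; the function name and fiducial parameters are illustrative.
def h0_from_ddt(ddt_mpc, z_lens, z_source, H0_fid=71.0, Om0_fid=0.3089):
    fid = FlatLambdaCDM(H0=H0_fid, Om0=Om0_fid)
    D_d = fid.angular_diameter_distance(z_lens).value
    D_s = fid.angular_diameter_distance(z_source).value
    D_ds = fid.angular_diameter_distance_z1z2(z_lens, z_source).value
    ddt_fid = (1.0 + z_lens) * D_d * D_s / D_ds  # fiducial D_dt in Mpc
    return H0_fid * ddt_fid / ddt_mpc

# e.g. H0_samples = [h0_from_ddt(d, z_lens, z_source) for d in results["D_dt"]]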
def _subclass_init(self, filename, **kwargs): #pylint: disable=W0221 assert os.path.isfile(filename), 'Catalog file {} does not exist'.format(filename) self._file = filename if kwargs.get('md5'): assert md5(self._file) == kwargs['md5'], 'md5 sum does not match!' else: warnings.warn('No md5 sum specified in the config file') self.lightcone = kwargs.get('lightcone') with h5py.File(self._file, 'r') as fh: # pylint: disable=no-member # get version catalog_version = list() for version_label in ('Major', 'Minor', 'MinorMinor'): try: catalog_version.append(fh['/metaData/version' + version_label].value) except KeyError: break catalog_version = StrictVersion('.'.join(map(str, catalog_version or (2, 0)))) # get cosmology self.cosmology = FlatLambdaCDM( H0=fh['metaData/simulationParameters/H_0'].value, Om0=fh['metaData/simulationParameters/Omega_matter'].value, Ob0=fh['metaData/simulationParameters/Omega_b'].value, ) # get sky area if catalog_version >= StrictVersion("2.1.1"): self.sky_area = float(fh['metaData/skyArea'].value) else: self.sky_area = 25.0 #If the sky area isn't specified use the default value of the sky area. # get native quantities self._native_quantities = set() def _collect_native_quantities(name, obj): if isinstance(obj, h5py.Dataset): self._native_quantities.add(name) fh['galaxyProperties'].visititems(_collect_native_quantities) # check versions self.version = kwargs.get('version', '0.0.0') config_version = StrictVersion(self.version) if config_version != catalog_version: raise ValueError('Catalog file version {} does not match config version {}'.format(catalog_version, config_version)) if StrictVersion(__version__) < config_version: raise ValueError('Reader version {} is less than config version {}'.format(__version__, catalog_version)) # specify quantity modifiers self._quantity_modifiers = { 'galaxy_id' : (_gen_galaxy_id, 'galaxyID'), 'ra': 'ra', 'dec': 'dec', 'ra_true': 'ra_true', 'dec_true': 'dec_true', 'redshift': 'redshift', 'redshift_true': 'redshiftHubble', 'shear_1': 'shear1', 'shear_2': 'shear2', 'convergence': ( _calc_conv, 'magnification', 'shear1', 'shear2', ), 'magnification': (lambda mag: np.where(mag < 0.2, 1.0, mag), 'magnification'), 'halo_id': 'hostHaloTag', 'halo_mass': 'hostHaloMass', 'is_central': (lambda x: x.astype(np.bool), 'isCentral'), 'stellar_mass': 'totalMassStellar', 'stellar_mass_disk': 'diskMassStellar', 'stellar_mass_bulge': 'spheroidMassStellar', 'size_disk_true': 'morphology/diskMajorAxisArcsec', 'size_bulge_true': 'morphology/spheroidMajorAxisArcsec', 'size_minor_disk_true': 'morphology/diskMinorAxisArcsec', 'size_minor_bulge_true': 'morphology/spheroidMinorAxisArcsec', 'position_angle_true': (_gen_position_angle, 'morphology/positionAngle'), 'sersic_disk': 'morphology/diskSersicIndex', 'sersic_bulge': 'morphology/spheroidSersicIndex', 'ellipticity_true': 'morphology/totalEllipticity', 'ellipticity_1_true': (_calc_ellipticity_1, 'morphology/totalEllipticity'), 'ellipticity_2_true': (_calc_ellipticity_2, 'morphology/totalEllipticity'), 'ellipticity_disk_true': 'morphology/diskEllipticity', 'ellipticity_1_disk_true': (_calc_ellipticity_1, 'morphology/diskEllipticity'), 'ellipticity_2_disk_true': (_calc_ellipticity_2, 'morphology/diskEllipticity'), 'ellipticity_bulge_true': 'morphology/spheroidEllipticity', 'ellipticity_1_bulge_true': (_calc_ellipticity_1, 'morphology/spheroidEllipticity'), 'ellipticity_2_bulge_true': (_calc_ellipticity_2, 'morphology/spheroidEllipticity'), 'size_true': ( _calc_weighted_size, 'morphology/diskMajorAxisArcsec', 
'morphology/spheroidMajorAxisArcsec', 'LSST_filters/diskLuminositiesStellar:LSST_r:rest', 'LSST_filters/spheroidLuminositiesStellar:LSST_r:rest', ), 'size_minor_true': ( _calc_weighted_size_minor, 'morphology/diskMajorAxisArcsec', 'morphology/spheroidMajorAxisArcsec', 'LSST_filters/diskLuminositiesStellar:LSST_r:rest', 'LSST_filters/spheroidLuminositiesStellar:LSST_r:rest', 'morphology/totalEllipticity', ), 'bulge_to_total_ratio_i': ( lambda x, y: x/(x+y), 'SDSS_filters/spheroidLuminositiesStellar:SDSS_i:observed', 'SDSS_filters/diskLuminositiesStellar:SDSS_i:observed', ), 'A_v': ( _calc_Av, 'otherLuminosities/totalLuminositiesStellar:V:rest', 'otherLuminosities/totalLuminositiesStellar:V:rest:dustAtlas', ), 'A_v_disk': ( _calc_Av, 'otherLuminosities/diskLuminositiesStellar:V:rest', 'otherLuminosities/diskLuminositiesStellar:V:rest:dustAtlas', ), 'A_v_bulge': ( _calc_Av, 'otherLuminosities/spheroidLuminositiesStellar:V:rest', 'otherLuminosities/spheroidLuminositiesStellar:V:rest:dustAtlas', ), 'R_v': ( _calc_Rv, 'otherLuminosities/totalLuminositiesStellar:V:rest', 'otherLuminosities/totalLuminositiesStellar:V:rest:dustAtlas', 'otherLuminosities/totalLuminositiesStellar:B:rest', 'otherLuminosities/totalLuminositiesStellar:B:rest:dustAtlas', ), 'R_v_disk': ( _calc_Rv, 'otherLuminosities/diskLuminositiesStellar:V:rest', 'otherLuminosities/diskLuminositiesStellar:V:rest:dustAtlas', 'otherLuminosities/diskLuminositiesStellar:B:rest', 'otherLuminosities/diskLuminositiesStellar:B:rest:dustAtlas', ), 'R_v_bulge': ( _calc_Rv, 'otherLuminosities/spheroidLuminositiesStellar:V:rest', 'otherLuminosities/spheroidLuminositiesStellar:V:rest:dustAtlas', 'otherLuminosities/spheroidLuminositiesStellar:B:rest', 'otherLuminosities/spheroidLuminositiesStellar:B:rest:dustAtlas', ), 'position_x': 'x', 'position_y': 'y', 'position_z': 'z', 'velocity_x': 'vx', 'velocity_y': 'vy', 'velocity_z': 'vz', } # add magnitudes for band in 'ugrizY': if band != 'Y': self._quantity_modifiers['mag_true_{}_sdss'.format(band)] = 'SDSS_filters/magnitude:SDSS_{}:observed:dustAtlas'.format(band) self._quantity_modifiers['Mag_true_{}_sdss_z0'.format(band)] = 'SDSS_filters/magnitude:SDSS_{}:rest:dustAtlas'.format(band) self._quantity_modifiers['mag_true_{}_lsst'.format(band)] = 'LSST_filters/magnitude:LSST_{}:observed:dustAtlas'.format(band.lower()) self._quantity_modifiers['Mag_true_{}_lsst_z0'.format(band)] = 'LSST_filters/magnitude:LSST_{}:rest:dustAtlas'.format(band.lower()) # add SEDs translate_component_name = {'total': '', 'disk': '_disk', 'spheroid': '_bulge'} sed_re = re.compile(r'^SEDs/([a-z]+)LuminositiesStellar:SED_(\d+)_(\d+):rest((?::dustAtlas)?)$') for quantity in self._native_quantities: m = sed_re.match(quantity) if m is None: continue component, start, width, dust = m.groups() key = 'sed_{}_{}{}{}'.format(start, width, translate_component_name[component], '' if dust else '_no_host_extinction') self._quantity_modifiers[key] = quantity # make quantity modifiers work in older versions if catalog_version < StrictVersion('3.0'): self._quantity_modifiers.update({ 'galaxy_id' : 'galaxyID', 'host_id': 'hostIndex', 'position_angle_true': 'morphology/positionAngle', 'ellipticity_1_true': 'morphology/totalEllipticity1', 'ellipticity_2_true': 'morphology/totalEllipticity2', 'ellipticity_1_disk_true': 'morphology/diskEllipticity1', 'ellipticity_2_disk_true': 'morphology/diskEllipticity2', 'ellipticity_1_bulge_true': 'morphology/spheroidEllipticity1', 'ellipticity_2_bulge_true': 'morphology/spheroidEllipticity2', }) if 
catalog_version < StrictVersion('2.1.2'): self._quantity_modifiers.update({ 'position_angle_true': (lambda pos_angle: np.rad2deg(np.rad2deg(pos_angle)), 'morphology/positionAngle'), #I converted the units the wrong way, so a double conversion is required. }) if catalog_version < StrictVersion('2.1.1'): self._quantity_modifiers.update({ 'disk_sersic_index': 'diskSersicIndex', 'bulge_sersic_index': 'spheroidSersicIndex', }) if catalog_version == StrictVersion('2.0'): # to be backward compatible self._quantity_modifiers.update({ 'ra': (lambda x: x/3600, 'ra'), 'ra_true': (lambda x: x/3600, 'ra_true'), 'dec': (lambda x: x/3600, 'dec'), 'dec_true': (lambda x: x/3600, 'dec_true'), })
def limit_mu(self, cosmology=FlatLambdaCDM(H0=70, Om0=0.3)):
    app_mag = self.app_mag_pre_mu(cosmology)
    app_mag_lim = 27 * u.mag  # General "average" limit for HFF images and is OK with the filters used here.
    mu_lim = 10**((app_mag - app_mag_lim).value / 2.5)
    return mu_lim
def app_mag_post_mu(self, mu=10**4, cosmology=FlatLambdaCDM(H0=70, Om0=0.3)):
    app_mag_post_mu = self.app_mag_pre_mu(cosmology) - 2.5 * np.log10(mu) * u.mag
    return app_mag_post_mu
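# --- Added usage sketch ---
# limit_mu and app_mag_post_mu appear to be methods of the Source class used in
# basic_stats / adv_stats further below; this sketch assumes a Source() instance
# whose app_mag_pre_mu(cosmology) works.
cosmology = FlatLambdaCDM(H0=70, Om0=0.3)
src = Source()
mu_needed = src.limit_mu(cosmology)  # magnification required to reach the 27 mag limit
m_lensed = src.app_mag_post_mu(mu=mu_needed, cosmology=cosmology)
# m_lensed should now sit at (approximately) the 27 mag HFF detection limit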
# imager.py # ALS 2017/10/05 import os import astropy.units as u from astropy.io import fits import numpy as np from astropy.cosmology import FlatLambdaCDM cosmo = FlatLambdaCDM(H0=70, Om0=0.3) from .operator import Operator from .. import standards from ..filters import surveysetup from ..external_links import file_humvi_compose from .. import visualtools class Imager(Operator): def __init__(self, **kwargs): """ Imager, parent class for all obj operator for images Params ------ Operator params: /either obj (object of class obsobj): with attributes ra, dec, dir_obj /or ra (float) dec (float)
def setup(self):
    self.z_L = 0.8
    self.z_S = 3.0
    from astropy.cosmology import FlatLambdaCDM
    cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05)
    self.bkg = Background(cosmo=cosmo)
    for k, v in hdu[ext].header.items():
        if k[0] == 'C':
            try:
                i = int(k[1:]) - 1
            except ValueError:
                continue
            channel_dict[v] = i
    return channel_dict


# minimum and maximum emission-line fluxes for plot ranges
fmin = 1e-19
fmax = 1e-16

# define a standard cosmology
cosmo = FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.3)

mpl7_dir = os.environ['MANGADIR_MPL7']  # Be aware that this directory syntax might need revision
drp = fits.open(mpl7_dir + 'drpall-v2_4_3.fits')
drpdata = drp[1].data
ba = drpdata.field('nsa_sersic_ba')
filename = 'good_galaxies.pdf'
plateifu = drpdata.field('plateifu')

# read in information from NSA catalog
nsa = fits.open(mpl7_dir + '1-nsa_v1_0_1.fits')
nsa_data = nsa[1].data

# check on a galaxy of interest
omega_l = 0.6911
omega_m = 0.3089


def get_lumdist(z):
    return cosmo.luminosity_distance(z)  # in Mpc


def get_age(z):
    return cosmo.age(z).value  # in Gyr


# get cosmology of TNG
# TODO: is this what Sandro uses?
cosmo = FlatLambdaCDM(H0=h * 100. * u.km / u.s / u.Mpc, Tcmb0=2.7255 * u.K, Om0=omega_m)

# parameter choices
tng = 'tng300'
z_choice = 1.41131  # 0.81947#1.41131#0.81947#1.41131#1.11358#0.#0.81947#1.
dust_reddening = ''  # '_red'#'_red'#''
skip_lowmass = 0
low_mass = 10.
want_random = ''  # ''#'_scatter'
sfh_ap = '_30kpc'  # '_30kpc'#'_3rad'#'_30kpc'#''
cam_filt = 'sdss_desi'  # should be desi; use des with fsps filters

# No implementation error
if want_random == '_scatter':
    print("For now we are not incorporating redshift uncertainties")
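# --- Added quick check of the helpers above (illustrative only) ---
d_L = get_lumdist(z_choice)  # astropy Quantity, Mpc
t_age = get_age(z_choice)    # float, Gyr
print(d_L, t_age)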
import pymangle
import time
t0 = time.time()
import numpy as np                      # needed below (np.arange); may already be imported elsewhere in the full script
from scipy.interpolate import interp1d  # needed below; may already be imported elsewhere in the full script
from astropy.coordinates import SkyCoord
from astropy_healpix import healpy
import healpy as hp
import astropy.units as u
import astropy.cosmology as cc
from astropy.cosmology import FlatLambdaCDM

# cosmo = cc.Planck13
cosmoMD = FlatLambdaCDM(H0=67.77 * u.km / u.s / u.Mpc, Om0=0.307115)  # , Ob0=0.048206)
h = 0.6777
L_box = 1000.0 / h
cosmo = cosmoMD

print("interpolates z d_comoving and D_L")
z_array = np.arange(0, 7.5, 0.001)
dc_to_z = interp1d(cosmo.comoving_distance(z_array), z_array)
d_L = cosmo.luminosity_distance(z_array)
dl_cm = (d_L.to(u.cm)).value
dL_interpolation = interp1d(z_array, dl_cm)

# from sklearn.neighbors import BallTree, DistanceMetric
from astropy.table import Table, unique, Column
from math import radians, cos, sin, asin, sqrt, pi
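# --- Added round-trip check of the interpolators built above (illustrative) ---
z_test = 0.5
d_c = cosmo.comoving_distance(z_test).value  # comoving distance in Mpc
print(dc_to_z(d_c))                          # ~0.5: redshift recovered from d_c
print(dL_interpolation(z_test))              # luminosity distance in cm at z_test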
### Import required libraries ###
import matplotlib.pyplot as plt  # for plotting
from astropy.io import fits  # for handling fits
from astropy.table import Table  # for handling tables
import numpy as np  # for handling arrays
# import math
# from astropy.stats import median_absolute_deviation
import vari_funcs  # my module to help run code neatly
from astropy.cosmology import FlatLambdaCDM
from astropy import units as u
plt.close('all')  # close any open plots
# from numpy.lib.recfunctions import append_fields

### Define cosmology ###
cosmo = FlatLambdaCDM(H0=70, Om0=0.3)

#%% Open the fits files and get data ###
chandata = fits.open('variable_tables/no06_variables_chi30_chandata_DR11data_restframe.fits')[1].data
tbdata = fits.open('variable_tables/no06_variables_chi30_DR11data_restframe.fits')[1].data
fullxray = fits.open('mag_flux_tables/chanDR11data_restframe_mag_flux_table_best_extra_clean_no06.fits')[1].data
sigtb = Table.read('quad_epoch_sigma_table_extra_clean_no06_067arcsec.fits')

### Limit to Chandra region for simplicity ###
tbdata = vari_funcs.chandra_only(tbdata)
fullxray = vari_funcs.chandra_only(fullxray)
def basic_stats(self, source=Source(), simul=Simulation(), cosmology=FlatLambdaCDM(H0=70, Om0=0.3)): print( "Basic statistics for a source of mass {} and redshift {}.".format( source.get_mass(), source.get_z())) ratio = distances_ratio(self._z, source.get_z(), cosmology) # Need to multiply kappa and gamma by this ratio to get the values for a specific redshift of the source # (and not z_s=inf as we defined for the Cluster instance variables) kappa = self._kappa * ratio gamma = self._gamma * ratio kappa_st = self.compute_kappa_star(source, cosmology) one = np.ones(kappa.shape) mu = np.absolute(1 / ((one - kappa)**2 - gamma**2)) mu_thr = 10**(int(np.log10(source.limit_mu(cosmology))) - 1 ) # exponent is (order_of_magnitude-1) n_above_thr = 0 n_above_lim = 0 nx, ny = kappa.shape pix_for_adv_stats = [] pix_above_lim = np.zeros((nx, ny)) for i in range(nx): for j in range(ny): if mu[i, j] > mu_thr: n_above_thr = n_above_thr + 1 path = "./Maps/k" + str(kappa[i, j]) + "_g" + str( gamma[i, j]) + "_kst" + str(kappa_st[i, j]) + "/" create_maps(kappa[i, j], gamma[i, j], kappa_st[i, j], simul, path) if mu_above_limit(path + "0/map.fits", source, cosmology): n_above_lim = n_above_lim + 1 pix_above_lim[i, j] = 1 pix_for_adv_stats.append((i, j)) percent_above_thr = 100 * float(n_above_thr) / (nx * ny) percent_above_lim = 100 * float(n_above_lim) / (nx * ny) print('Number of pixels above \mu_thr=' + str(mu_thr) + ' (for z_s=' + str(source.get_z()) + '): ' + str(n_above_thr)) print('Percentage of pixels above \mu_thr=' + str(mu_thr) + ' (for z_s=' + str(source.get_z()) + '): ' + str(percent_above_thr) + '%') print('Number of pixels above \mu_lim=' + str(source.limit_mu(cosmology)) + ' (for z_s=' + str(source.get_z()) + '): ' + str(n_above_lim)) print('Percentage of pixels above \mu_lim=' + str(source.limit_mu(cosmology)) + ' (for z_s=' + str(source.get_z()) + '): ' + str(percent_above_lim) + '%') d = cosmology.angular_diameter_distance(self._z) # Trigonometry using small angle approximation dx = np.absolute(self._theta_x) * d dy = np.absolute(self._theta_y) * d x = np.linspace(0, dx * nx, num=nx, endpoint=False) y = np.linspace(0, dy * ny, num=ny, endpoint=False) x_mesh, y_mesh = np.meshgrid(x, y) plt.pcolormesh(x_mesh.value, y_mesh.value, pix_above_lim, cmap=plt.cm.get_cmap('Blues', 2)) plt.colorbar(ticks=[0, 1]) plt.clim(0, 1) plt.title('Pixels above $\mu$ limit') plt.xlabel('x [{}]'.format(x_mesh.unit)) plt.ylabel('y [{}]'.format(y_mesh.unit)) plt.savefig("./Plots/M" + str(source.get_mass().value) + "_z" + str(source.get_z()) + "/above_mu_map.png") plt.clf() return pix_for_adv_stats
def fetch_great_wall(data_home=None, download_if_missing=True, xlim=(-375, -175), ylim=(-300, 200), cosmo=None): """Get the 2D SDSS "Great Wall" distribution, following Cowan et al 2008 Parameters ---------- data_home : optional, default=None Specify another download and cache folder for the datasets. By default all astroML data is stored in '~/astroML_data'. download_if_missing : optional, default=True If False, raise a IOError if the data is not locally available instead of trying to download the data from the source site. xlim, ylim : tuples or None the limits in Mpc of the data: default values are the same as that used for the plots in Cowan 2008. If set to None, no cuts will be performed. cosmo : `astropy.cosmology` instance specifying cosmology to use when generating the sample. If not provided, a Flat Lambda CDM model with H0=73.2, Om0=0.27, Tcmb0=0 is used. Returns ------- data : ndarray, shape = (Ngals, 2) grid of projected (x, y) locations of galaxies in Mpc """ # local imports so we don't need dependencies for loading module from scipy.interpolate import interp1d # We need some cosmological information to compute the r-band # absolute magnitudes. if cosmo is None: cosmo = FlatLambdaCDM(H0=73.2, Om0=0.27, Tcmb0=0) data = fetch_sdss_specgals(data_home, download_if_missing) # cut to the part of the sky with the "great wall" data = data[(data['dec'] > -7) & (data['dec'] < 7)] data = data[(data['ra'] > 80) & (data['ra'] < 280)] # do a redshift cut, following Cowan et al 2008 z = data['z'] data = data[(z > 0.01) & (z < 0.12)] # first sample the distance modulus on a grid zgrid = np.linspace(min(data['z']), max(data['z']), 100) mugrid = cosmo.distmod(zgrid).value f = interp1d(zgrid, mugrid) mu = f(data['z']) # do an absolute magnitude cut at -20 Mr = data['petroMag_r'] + data['extinction_r'] - mu data = data[Mr < -21] # compute distances in the equatorial plane # first sample comoving distance Dcgrid = cosmo.comoving_distance(zgrid).value f = interp1d(zgrid, Dcgrid) dist = f(data['z']) locs = np.vstack([ dist * np.cos(data['ra'] * np.pi / 180.), dist * np.sin(data['ra'] * np.pi / 180.) ]).T # cut on x and y limits if specified if xlim is not None: locs = locs[(locs[:, 0] > xlim[0]) & (locs[:, 0] < xlim[1])] if ylim is not None: locs = locs[(locs[:, 1] > ylim[0]) & (locs[:, 1] < ylim[1])] return locs
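# --- Added usage sketch for fetch_great_wall (illustrative) ---
# Requires network access or a pre-populated astroML data directory, since
# fetch_sdss_specgals may download the SDSS spectroscopic sample on first call.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    xy = fetch_great_wall()
    plt.scatter(xy[:, 1], xy[:, 0], s=1, lw=0, c='k')
    plt.xlabel('y (Mpc)')
    plt.ylabel('x (Mpc)')
    plt.show()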
def adv_stats(self, pix_for_adv_stats, n_pts=15, source=Source(), simul=Simulation(), cosmology=FlatLambdaCDM(H0=70, Om0=0.3)): print("Advanced statistics.") ratio = distances_ratio(self._z, source.get_z()) kappa = self._kappa * ratio gamma = self._gamma * ratio kappa_st = self.compute_kappa_star(source, cosmology) good_light_curves = np.zeros(self._kappa.shape) light_curve_example = False for n in range(len(pix_for_adv_stats)): i, j = pix_for_adv_stats[n] dir_map = "./Maps/k" + str(kappa[i, j]) + "_g" + str( gamma[i, j]) + "_kst" + str(kappa_st[i, j]) + "/0/map.fits" hdul_mu = fits.open(dir_map) mu_data = np.absolute(hdul_mu[0].data) for m in range(100): xs, ys = source.trajectory(simul, n_pts, lenses_moving=False) mu = np.ones(len(xs)) mag = np.ones(len(xs)) for i in range(n_pts): mu[i] = mu_data[xs[i], ys[i]] mag[i] = source.app_mag_post_mu(mu[i], cosmology).value # TO CHECK: what are best conditions for good light curve statistics ? mu_3rd_percentile = np.percentile(mu, 75) mag_diff = np.amax(mag) - np.amin(mag) if mu_3rd_percentile > source.limit_mu( cosmology) and mag_diff > 1: good_light_curves[i, j] = good_light_curves[i, j] + 1 if light_curve_example is False: light_curve_example = True angle = np.sqrt( np.power(xs - xs[0] * np.ones(len(xs)), 2) + np.power(ys - ys[0] * np.ones(len(ys)), 2)) ds = cosmology.angular_diameter_distance( source.get_z()) # TO CHANGE !!! Once know about Gerlumph distances gerlumph_coeff = 1 # Trigonometry using small angle approximation distance = ds * angle * gerlumph_coeff plt.plot(distance, mag) plt.title('Good light curve example') # TO CHANGE !!! Once know about Gerlumph distances # plt.xlabel('Distance [{}]'.format(distance.unit)) plt.xlabel('Distance [?]') plt.ylabel('Magnitude [mag]') plt.savefig("./Plots/M" + str(source.get_mass().value) + "_z" + str(source.get_z()) + "/light_curve_example.png") plt.clf() nx, ny = kappa.shape d = cosmology.angular_diameter_distance(self._z) # Trigonometry using small angle approximation dx = np.absolute(self._theta_x) * d dy = np.absolute(self._theta_y) * d x = np.linspace(0, dx * nx, num=nx, endpoint=False) y = np.linspace(0, dy * ny, num=ny, endpoint=False) x_mesh, y_mesh = np.meshgrid(x, y) plt.pcolormesh(x_mesh.value, y_mesh.value, good_light_curves, cmap=plt.cm.get_cmap('Blues', 5)) plt.colorbar(ticks=[0, 20, 40, 60, 80, 100]) plt.clim(0, 100) plt.title('Good light curves percentage') plt.xlabel('x [{}]'.format(x_mesh.unit)) plt.ylabel('y [{}]'.format(y_mesh.unit)) plt.savefig("./Plots/M" + str(source.get_mass().value) + "_z" + str(source.get_z()) + "/good_light_curves_percentage.png") plt.clf() return
import warnings
import time
import numpy  # needed below (numpy.float32, numpy.pi); may already be imported elsewhere in the full script
from sklearn.cluster import KMeans
from astropy.cosmology import FlatLambdaCDM
from astropy.coordinates import SkyCoord
from astropy import units
import hk_tool_box  # project-local helper module, assumed importable
warnings.filterwarnings('error')

# parameters
# cosmological parameters
omega_m0 = 0.31
H0 = 67.5
cosmos = FlatLambdaCDM(H0, omega_m0)

# separation bin, comoving or angular diameter distance in unit of Mpc/h
sep_bin_num = 13
bin_st, bin_ed = 0.05, 20
separation_bin = hk_tool_box.set_bin_log(bin_st, bin_ed, sep_bin_num + 1).astype(numpy.float32)

# bin number for ra & dec of each exposure
deg2arcmin = 60
deg2rad = numpy.pi / 180

# chi guess bin for PDF_SYM
delta_sigma_guess_num = 100
num_m = 50
num_p = delta_sigma_guess_num - num_m
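# --- Added stand-in (assumption) ---
# hk_tool_box.set_bin_log is a project-local helper; judging from its arguments
# it returns log-spaced bin edges. A plain-numpy stand-in would be:
separation_bin_np = numpy.logspace(numpy.log10(bin_st), numpy.log10(bin_ed),
                                   sep_bin_num + 1).astype(numpy.float32)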
def distances_ratio(z_l, z_s, cosmology=FlatLambdaCDM(H0=70, Om0=0.3)):
    d_ls = cosmology.angular_diameter_distance_z1z2(z_l, z_s)
    d_s = cosmology.angular_diameter_distance(z_s)
    ratio = d_ls / d_s
    return ratio.value
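# --- Added usage sketch (illustrative redshifts) ---
# D_ls / D_s is the ratio that rescales convergence/shear maps defined for one
# source plane to another source redshift.
r = distances_ratio(z_l=0.3, z_s=2.0)
print(r)  # dimensionless; between 0 and 1 for z_s > z_l in a flat cosmology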
data_red = data[flag_red] data_blue = data[flag_blue] # truncate sample (optional: speeds up computation) #data_red = data_red[::10] #data_blue = data_blue[::10] print(data_red.size, "red galaxies") print(data_blue.size, "blue galaxies") #------------------------------------------------------------ # Distance Modulus calculation: # We need functions approximating mu(z) and z(mu) # where z is redshift and mu is distance modulus. # We'll accomplish this using the cosmology class and # scipy's cubic spline interpolation. cosmo = FlatLambdaCDM(H0=71, Om0=0.27, Tcmb0=0) z_sample = np.linspace(0.01, 1.5, 100) mu_sample = cosmo.distmod(z_sample).value mu_z = interpolate.interp1d(z_sample, mu_sample) z_mu = interpolate.interp1d(mu_sample, z_sample) data = [data_red, data_blue] titles = ['$u-r > 2.22$', '$u-r < 2.22$'] markers = ['o', '^'] archive_files = ['lumfunc_red.npz', 'lumfunc_blue.npz'] def compute_luminosity_function(z, m, M, m_max, archive_file): """Compute the luminosity function and archive in the given file. If the file exists, then the saved results are returned.
import astropy.io.fits as pf
import numpy as np
from astropy import units as u
from astropy import cosmology
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
import sys
import treecorr
import healpy as hp

# setting up config file and correlator for treecorr
config_file = 'default.params'
config = treecorr.config.read_config(config_file)

zmin = float(sys.argv[1])
zmax = float(sys.argv[2])
Nz = int(sys.argv[3])
Maglim1 = float(sys.argv[4])
Maglim2 = float(sys.argv[5])
lambmin = float(sys.argv[6])
lambmax = float(sys.argv[7])
clusters = pf.open(sys.argv[8])[1].data
randoms = pf.open(sys.argv[9])[1].data
galaxies = pf.open(sys.argv[10])[1].data
galaxy_randoms = pf.open(sys.argv[11])[1].data
jkid = int(sys.argv[12])
tot_area = float(sys.argv[13])
outfile = sys.argv[14]
def pah_7_7(objid,aorkey,detlvl,spectrum,scale,z,filepath): #rest frame wavelenth spectrum = spectrum.dropna() wave = spectrum.wavelength * (1/(z+1.)) shifted_1 = spectrum.flux_jy[spectrum.wavelength >= 14.06935] shifted_2 = spectrum.flux_jy[spectrum.wavelength < 14.06935] * scale flux = shifted_2.append(shifted_1) * (1/(z+1.)) flux_err = spectrum.flux_jy_err * (1/(z+1.)) wave_dup = wave[wave.duplicated(keep=False)] flux_dup = flux[wave.duplicated(keep=False)] flux_dup_err = flux_err[wave.duplicated(keep=False)] if len(wave[wave.duplicated(keep=False)]) > 0.: #cut the first bad ones #wave_cut[wave_cut.dupyerlicated(keep='first')] i=0 while len(wave[wave.duplicated(keep=False)]) > 0.: print i new_flux = ((flux_dup.iloc[i]/flux_dup_err.iloc[i]**2) + (flux_dup.iloc[i+1]/flux_dup_err.iloc[i+1]**2))/((1/flux_dup_err.iloc[i]**2) + (1/flux_dup_err.iloc[i+1]**2)) #error of weighted mean new_flux_err = np.sqrt(1/ ((1/flux_dup_err.iloc[i]**2) + (1/flux_dup_err.iloc[i+1]**2))) wave_loc_keep = wave_dup.index[i] wave_loc_drop = wave_dup.index[i+1] #drop first wave = wave.drop(wave_loc_drop) flux = flux.drop(wave_loc_drop) flux_err = flux_err.drop(wave_loc_drop) # now keep weighted mean flux flux[wave_loc_keep] = new_flux flux_err[wave_loc_keep] = new_flux_err i=i+2 cosmo = FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.3) dl_mpc = cosmo.luminosity_distance(z) #dl in Mpc gammar_1 = 0.126 gammar_2 = 0.044 gammar_3 = 0.053 fratio1 = 2.50 line_1 = 7.417 line_2 = 7.598 line_3 = 7.850 fratio2 = 2.36 temp_wave = np.arange(6.5,10,0.01) sub_wave = wave[(wave >= 7.0) & (wave <= 8.2)] sub_flux = flux[(wave >= 7.0) & (wave <= 8.2)] sub_flux_err = flux_err[(wave >= 7.0) & (wave <= 8.2)] # determine a fitting window on either side of function #cutout a 3sigma region on either side of feature sub_wave2 = (sub_wave - line_1)**2 sub_wave3 = (sub_wave - line_1)**3 if len(sub_wave) > 3: #fine scale x #fit with underlying continuum one = sub_wave*0 + 1.0 #some centroid noise delta=pd.Series(data=np.arange(-.03,.03,.005)) rms = pd.Series(data=np.zeros(len(delta))) for k in range(len(delta)): center_1 = line_1 + delta[k] center_2 = line_2 + delta[k] center_3 = line_3 + delta[k] drude = (gammar_1**2 /((((sub_wave/center_1) - (center_1/sub_wave))**2) + gammar_1**2)) drude = drude + (gammar_2**2 /((((sub_wave/center_2) - (center_2/sub_wave))**2) + gammar_2**2)) drude = drude + (gammar_3**2 /((((sub_wave/center_3) - (center_3/sub_wave))**2) + gammar_3**2)) A = [(a,b,c) for a,b,c in zip(one,sub_wave,drude)] ols = sm.OLS(sub_flux, A) ols_result = ols.fit() model = ols_result.params.const + ols_result.params.x1*sub_wave+ ols_result.params.x2*drude rms[k] = np.sqrt(np.sum(((sub_flux-model)**2)/sub_flux_err**2)) min_delta = delta[rms == rms.min()].values[0] center_1 = line_1 + min_delta center_2 = line_2 + min_delta center_3 = line_3 + min_delta drude = (gammar_1**2 /((((sub_wave/center_1) - (center_1/sub_wave))**2) + gammar_1**2)) drude = drude + (gammar_2**2 /((((sub_wave/center_2) - (center_2/sub_wave))**2) + gammar_2**2)) drude = drude + (gammar_3**2 /((((sub_wave/center_3) - (center_3/sub_wave))**2) + gammar_3**2)) A = [(a,b,c) for a,b,c in zip(one,sub_wave,drude)] ols = sm.OLS(sub_flux, A) ols_result = ols.fit() model = ols_result.params.const + ols_result.params.x1*sub_wave+ ols_result.params.x2*drude # temp_model = ols_result.params.const +ols_result.params.x1*temp_wave + ols_result.params.x2 * ((gammar_1**2 /((((temp_wave/center_1) - (center_1/temp_wave))**2) + gammar_1**2)) + ((gammar_2**2)/((((temp_wave/center_2) - 
(center_2/temp_wave))**2) + gammar_2**2)) + ((gammar_3**2)/((((temp_wave/center_3) - (center_3/temp_wave))**2) + gammar_3**2))) # temp_cont = ols_result.params.const + temp_wave*ols_result.params.x1 #line luminosity in 1e42 ergs/sec dl = dl_mpc.value * 3.086e24 #Mpc to cm # line_lum = dl**2*(1/(1+z))* 4*np.pi* (ols_result.params.x2)* (2.99e14)*(fratio2*gammar_2+fratio1*gammar_1+ gammar_r)/((line_1))* 1e-23* np.pi/2 # line_lum_err = dl**2*(1/(1+z))* 4*np.pi* (ols_result.bse.x2)* (2.99e14)*(fratio2*gammar_2+fratio1*gammar_1+ gammar_r)/((line_1))* 1e-23* np.pi/2 if z == 0: dl = 1. line_lum = dl**2*(1/(1+z))* 4*np.pi* (ols_result.params.x2)* (2.99e14)*(gammar_1)/((line_1))* 1e-23* np.pi/2 line_lum_err = dl**2*(1/(1+z))* 4*np.pi* (ols_result.bse.x2)* (2.99e14)*(gammar_1)/((line_1))* 1e-23* np.pi/2 #use_c_no_ice =temp_cont/wave**2 EW= (np.pi/2.) * ((ols_result.params.x2)*(gammar_1*line_1))/(ols_result.params.const + line_1*ols_result.params.x1) ''' a = ols_result.params.x2 b = ols_result.params.const c = ols_result.params.x1 a_err_sq = ols_result.bse.x2**2 b_err_sq = ols_result.bse.const**2 c_err_sq = ols_result.bse.x1**2 c1 = (np.pi/2.) *(fratio2*gammar_3*line_3+fratio1*gammar_2*line_2+ gammar_1*line_1) c2 = line_1 partial_a_sq = (c1 * (b+(c2*c))**-1)**2 partial_b_sq = (c1 * a * (b+(c2*c))**-2)**2 partial_c_sq = (c1 * c2 *a * (b+(c2*c))**-2)**2 ew_err = np.sqrt((a_err_sq*partial_a_sq) + (b_err_sq*partial_b_sq) + (c_err_sq*partial_c_sq)) ''' ew_err = ((np.pi/2.)/(ols_result.params.const + line_1*ols_result.params.x1)**2)*((ols_result.bse.x2**2) + (ols_result.params.x2**2)*((ols_result.params.const)**2 + (line_1*ols_result.params.x1)**2)) fig = plt.figure() #plt.ion() ax = fig.add_subplot(111) plt.title(" Aorkey: "+str(aorkey) + ' PAH 7_7' ) plt.suptitle("Detlvl: " + str(detlvl)) ax.errorbar(sub_wave,sub_flux,yerr=sub_flux_err,fmt='.') ax.errorbar(wave[(wave >= 6.5) & (wave <= 9.0)],flux[(wave >= 6.5) & (wave <= 9.0)],yerr=flux_err[(wave >= 6.5) & (wave <= 9.0)],fmt='.') ax.plot(sub_wave,model) #ax.plot(temp_wave,temp_cont) ax.annotate("Line Lum 10^42 ergs/s: " + str(round(line_lum/1e42,3))+ " err: "+ str(round(line_lum_err/1e42,3)),xycoords='axes fraction',textcoords='axes fraction',xytext=(.1,.8),xy=(.1,.8)) ax.annotate("Mean Squared Error: "+str(round(ols_result.mse_model,6)),xycoords='axes fraction',textcoords='axes fraction', xytext=(.1,.6),xy=(.1,.6)) ax.annotate("R^2: " + str(round(ols_result.rsquared,3)),xycoords='axes fraction',textcoords='axes fraction',xytext=(.1,.4),xy=(.1,.4)) plt.savefig(filepath+'/'+str(aorkey)+'_7_7'+'.png') plt.close() else: line_lum=np.nan line_lum_err = np.nan EW = np.nan ew_err = np.nan return line_lum,line_lum_err,EW,ew_err
def Flux2L(flux, z):
    """Convert flux to luminosity assuming a flat Universe"""
    cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
    DL = cosmo.luminosity_distance(z).value * 10**6 * 3.08 * 10**18  # unit cm
    L = flux * 1.e-17 * 4. * np.pi * DL**2  # erg/s/A
    return L
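# --- Added usage sketch (illustrative numbers) ---
# flux is expected in units of 1e-17 erg/s/cm^2/A, as implied by the 1e-17 factor.
L_line = Flux2L(1.0, 0.5)
print(L_line)  # erg/s/A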
def linfit(objid,aorkey,detlvl,spectrum,scale,line,z,filepath): cosmo = FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.3) dl_mpc = cosmo.luminosity_distance(z) #dl in Mpc spectrum = spectrum.dropna() wave = spectrum.wavelength * (1+z)**-1 flux = spectrum.flux_jy * (1+z)**-1 flux_err = spectrum.flux_jy_err * (1+z)**-1 line_lums = [] line_lums_err = [] for i in range(len(line)): #account for different line resolution at sl vs ll if line[i] * (1+z) < 14.2: sig = .1/2.35/(1+z) elif (line[i] * (1+z) >= 14.2) and (line[i] * (1+z) <= 20.6) : sig = .14/2.35/(1+z) elif (line[i] * (1+z) > 20.6): sig = .34/2.35/(1+z) # determine a fitting window on either side of function #cutout a 3sigma region on either side of feature sub_wave = wave[(wave >= line[i]-3.5*sig) & (wave <= line[i]+3.5*sig)] sub_flux = flux[(wave >= line[i]-3.5*sig) & (wave <= line[i]+3.5*sig)] sub_flux_err = flux_err[(wave >= line[i]-3.5*sig) & (wave <= line[i]+3.5*sig)] if (len(sub_wave) > 3) and (len(sub_flux[np.isfinite(sub_flux)]) == len(sub_flux)): #fine scale x x= pd.Series(data=np.arange(line[i] - 3*sig,line[i] + 3*sig,.01)) #fit with underlying continuum one = sub_wave*0 + 1.0 two = sub_wave - line[i] #assume some centroid noise - find the actual line center delta=pd.Series(data=np.arange(-.03,.03,.005)) rms = pd.Series(data=np.zeros(len(delta))) for k in range(len(delta)): j = delta[k] line_center = line[i] + j gauss = np.exp(-((sub_wave-line_center)**2) /(2*sig**2)) A = [(a,b,c) for a,b,c in zip(one,two,gauss)] ols = sm.OLS(sub_flux, A) ols_result = ols.fit() model = ols_result.params.const + ols_result.params.x1*two + ols_result.params.x2 * gauss rms[k] = np.sqrt(np.sum(((sub_flux-model)**2)/sub_flux_err**2)) min_delta = delta[rms == rms.min()].values[0] line_center = line[i] + min_delta gauss = np.exp(-((sub_wave-line_center)**2) /(2*sig**2)) A = [(a,b,c) for a,b,c in zip(one,two,gauss)] ols = sm.OLS(sub_flux, A) ols_result = ols.fit() model = ols_result.params.const + ols_result.params.x1*(x-line[i]) + ols_result.params.x2 * np.exp(-((x-line_center)**2) /(2*sig**2)) model_cont = ols_result.params.const + ols_result.params.x1*(x - line[i]) #conver Fnu (Jy) to Flamb(ergs/s/cm**2/Hz) #Fnu * c/lambda**2 * 1e-23 #line luminosity in 1e42 ergs/sec dl = dl_mpc.value * 3.086e24 #Mpc to cm if z == 0: dl = 1. line_flux_nu = (ols_result.params.x2)*np.sqrt(2*np.pi)*sig line_flux_nu_cgs = 1e-23 * line_flux_nu line_flux_lam = line_flux_nu_cgs *2.99e14/(line_center)**2 line_lum = dl**2* 4*np.pi* line_flux_lam line_lum_err = dl**2* 4*np.pi* ols_result.bse.x2* 2.99e14*sig/((line_center**2))* 1e-23 *np.sqrt(2*np.pi) fig = plt.figure() ax = fig.add_subplot(111) plt.title(" Aorkey: "+str(aorkey[:5]) + ' Line ' + str(line[i])) plt.suptitle("Detlvl: " + str(detlvl)) ax.plot(sub_wave,sub_flux,'.') ax.fill_between(sub_wave,sub_flux+sub_flux_err,sub_flux-sub_flux_err,alpha=0.4) ax.plot(x,model) ax.plot(x,model_cont) ax.annotate("Line Lum 10^42 ergs/s: " + str(round(line_lum/1e42,3))+ " err: "+ str(round(line_lum_err/1e42,3)),xycoords='axes fraction',textcoords='axes fraction',xytext=(.1,.8),xy=(.1,.8)) ax.annotate("Mean Squared Error: "+str(round(ols_result.mse_model,6)), xytext=(.1,.6),xy=(.1,.6)) ax.annotate("R^2: " + str(round(ols_result.rsquared,3)),xytext=(.1,.4),xy=(.1,.4)) plt.savefig(filepath+'/'+aorkey[:5]+'_'+str(line[i])+'.png') plt.close() line_lums.append(line_lum) line_lums_err.append(line_lum_err) else: print "Not enough points in region" line_lums.append(np.nan) line_lums_err.append(np.nan) return line_lums,line_lums_err
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 12 08:41:10 2019

@author: yuhanyao
"""
# import sys
# import os
from helper import phys
import numpy as np
import extinction
from astropy.time import Time
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0=70., Om0=0.275)


def deredden_df(tb, ebv):
    """
    perform extinction correction
    """
    if 'mag' in tb.columns:
        tb['mag0'] = tb['mag'] - extinction.ccm89(
            tb['wave'].values, 3.1 * ebv, 3.1)  # extinction in magnitude
    if "limmag" in tb.columns:
        tb['limmag0'] = tb["limmag"] - extinction.ccm89(
            tb['wave'].values, 3.1 * ebv, 3.1)  # extinction in magnitude
    return tb
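# --- Added usage sketch for deredden_df (toy values) ---
# 'wave' must be in Angstroms, as required by extinction.ccm89.
import pandas as pd

tb_demo = pd.DataFrame({'wave': [4800.0, 6300.0],  # two example wavelengths (Angstrom)
                        'mag': [19.2, 18.9]})
tb_demo = deredden_df(tb_demo, ebv=0.05)
print(tb_demo['mag0'])  # extinction-corrected magnitudes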
from __future__ import print_function from builtins import range from cosmosis.datablock import names, option_section import sys import numpy as np import scipy.interpolate as spi import scipy.integrate as sint import mcfit from mcfit import P2xi from scipy.interpolate import interp1d from scipy.interpolate import interp2d from scipy.special import eval_legendre as legendre from scipy import integrate from astropy.cosmology import FlatLambdaCDM y3fid_cosmology = FlatLambdaCDM(H0=69., Om0=0.30, Ob0=0.048) def setup(options): sample_a = options.get_string(option_section, "sample_a", default="lens lens").split() sample_b = options.get_string(option_section, "sample_b", default="lens source").split() rmin = options.get_double(option_section, "rpmin", default=0.01) rmax = options.get_double(option_section, "rpmax", default=500.) nr = options.get_int(option_section, "nr", default=1024) nk = options.get_int(option_section, "nk", default=200) rp = np.logspace(np.log10(rmin), np.log10(rmax), nr)
OMEGA_0 = OMEGA_M DELTATFACTOR = 0.5 cc_data_dir = '/home/isultan/data/LastJourney/CoreCatalog_004_Reduced/' cc_output_dir = '/home/isultan/projects/halomassloss/core_catalog_mevolved/output_LastJourney_localhost_dtfactor_0.5_run2/' AFID = 1.1 ZETAFID = 0.1 import numpy as np import os from itk import periodic_bcs, many_to_one from astropy.cosmology import FlatLambdaCDM cosmoFLCDM = FlatLambdaCDM(H0=LITTLEH * 100, Om0=OMEGA_M, Tcmb0=0, Neff=3.04, m_nu=None, Ob0=None) import re steps = sorted([ int(re.split('\-|#', i)[1]) for i in os.listdir(cc_data_dir) if '#35' in i ]) zarr = [ 10.04, 9.81, 9.56, 9.36, 9.15, 8.76, 8.57, 8.39, 8.05, 7.89, 7.74, 7.45, 7.31, 7.04, 6.91, 6.67, 6.56, 6.34, 6.13, 6.03, 5.84, 5.66, 5.48, 5.32, 5.24, 5.09, 4.95, 4.74, 4.61, 4.49, 4.37, 4.26, 4.10, 4.00, 3.86, 3.76, 3.63, 3.55, 3.43, 3.31, 3.21, 3.10, 3.04, 2.94, 2.85, 2.74, 2.65, 2.58, 2.48, 2.41, 2.32, 2.25, 2.17, 2.09, 2.02, 1.95, 1.88, 1.80, 1.74, 1.68, 1.61, 1.54, 1.49, 1.43, 1.38, 1.32, 1.26, 1.21, 1.15, 1.11, 1.06, 1.01, 0.96, 0.91, 0.86, 0.82, 0.78, 0.74, 0.69, 0.66, 0.62, 0.58, 0.54, 0.50,
def compute_distances(self):
    self.cosmological = FlatLambdaCDM(H0=71.0, Om0=0.2669)
    self.Ds = self.cosmological.angular_diameter_distance(self.Zs)
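# --- Added reference call (illustrative) ---
# compute_distances stores the source angular diameter distance; the equivalent
# direct astropy call for an example Zs = 2.0 is:
_cosmo_ref = FlatLambdaCDM(H0=71.0, Om0=0.2669)
print(_cosmo_ref.angular_diameter_distance(2.0))  # Mpc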
import numpy as np from astropy.io import fits from astropy.wcs import WCS from astropy.table import Table, Column from glob import glob import time import multiprocessing as mp import argparse from astropy.cosmology import FlatLambdaCDM #you can also use pre-defined parameters, e.g.: #from astropy.cosmology import WMAP7 import astropy.units as u #define the cosmology (if you import WMAP7, you don't need this line) cosmo = FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.3) def clip(vec,sigclip,cthresh): ''' ''' old=vec.std() ii=(np.abs(vec-vec.mean()) <= sigclip*old) clipvec=vec[ii] #sigma=[]#dblarr(1) sigma=clipvec.std() veci=clipvec while np.abs(sigma-old)/old > cthresh: old = sigma ii=(np.abs(vec-veci.mean()) <= sigclip*veci.std()) veci=vec[ii] sigma=veci.std()
redchilimit = 40 if y[2] < redshiftlimit and y[0] < redchilimit: sntype = types[sn] if sntype[4] =='b': ib+=1 redchib.append(y[0]) else: redchic.append(y[0]) ic+=1 # redchi.append(y[2]) #if y[0] > redchilimit: # pass #print sn, 'redchi: ', y[2] model.set(z=y[1][0],t0=y[1][1], amplitude=y[1][2]) cosmo=FlatLambdaCDM(H0=70,Om0=0.3) dispc = cosmo.luminosity_distance(y[2])*(10**(6)) #input redshift, will return in Mpc timearray = np.arange(y[1][1],y[1][1]+60,1) fluxlc = model.bandflux('ptf48r',timearray, zp = 27.0, zpsys='ab') obsmag =[(-21.49 - 2.5*np.log10(x)) for x in fluxlc] absmag =[(np.min(point) - 5*(np.log10(dispc.value) - 1)) for point in obsmag] fluxdatapoints = np.array(y[3][:,4], dtype = float) fluxdatapointsall = np.array([(float(i)*10**((-27-21.49)/2.5)) for i in hml[:,1]], dtype = float) fluxyerr = np.array(y[3][:,5], dtype = float) fluxyerrall = np.array([(float(i)*10**((-27-21.49)/2.5)) for i in hml[:,2]], dtype = float)
class PowerTemplate: def __init__(self, z=0.5, ombhsq=0.02222, omhsq=0.14212, sigma8=0.830, h=0.6726, ns=0.9652, Tcmb0=2.725): self.z = z self.h = h self.ombhsq = ombhsq # Omega matter baryon self.omhsq = omhsq # Omega matter (baryon + CDM) self.om = omhsq / np.square(h) self.omb = ombhsq / np.square(h) self.cosmology = FlatLambdaCDM(H0=100 * h, Om0=self.om, Tcmb0=Tcmb0, Ob0=self.omb) self.ol = 1 - self.omhsq / np.square(h) # ignring radiation density self.sigma8 = sigma8 self.ns = ns self.Tcmb0 = 2.725 self.Rscale = 8 self.P0smooth = 1 # initial value for the coefficient self.P0smooth = np.square(sigma8 / self.siglogsmooth()) # correction def T0(self, k): ''' Zero-baryon transfer function shape from E&H98, input k in h/Mpc ''' k = k * self.h # convert unit of k from h/Mpc to 1/Mpc # Eqn 26, approximate sound horizon in Mpc s = 44.5 * np.log( 9.83 / self.omhsq) / np.sqrt(1 + 10 * np.power(self.ombhsq, 3 / 4)) # Eqn 28, both dimensionless Theta = self.Tcmb0 / 2.7 # Eqn 31, alpha_gamma, dimensionless agam = 1 - 0.328 * np.log(431*self.omhsq) * self.ombhsq/self.omhsq \ + 0.38*np.log(22.3*self.omhsq) * np.square(self.ombhsq/self.omhsq) # Eqn 30, in unit of h gamma_eff = self.omhsq / self.h * ( agam + (1 - agam) / np.power(1 + (0.43 * k * s), 4)) q = k / self.h * np.square(Theta) / gamma_eff C0 = 14.2 + 731 / (1 + (62.5 * q)) # Eqn 29 L0 = np.log(2 * np.e + 1.8 * q) T0 = L0 / (L0 + C0 * np.square(q)) return T0 def siglogsmooth(self, tol=1e-10): '''rms mass fluctuations (integrated in logspace) for sig8 etc. NOTES: formalism is all real-space distance in Mpc/h, all k in h/Mpc ''' return np.sqrt(romberg(self.intefunclogsmooth, -10, 10, tol=tol)) def intefunclogsmooth(self, logk): """ Function to integrate to get rms mass fluctuations (logspace) base 10 NOTES: Because the k's here are all expressed in h/Mpc the value of P0 that arises using this integration is in (Mpc**3)/(h**3.) and therefore values of P(k) derived from this integration are in (Mpc**3)/(h**3.), i.e. h^-3Mpc^3 and need divided by h^3 to get the 'absolute' P(k) in Mpc^3. v1.0 Adam D. 
Myers Jan 2007 """ k = np.power(10.0, logk) # base 10 kR = k * self.Rscale integrand = np.log(10) / (2*np.square(np.pi)) * np.power(k, 3) \ * self.Psmooth(k) * np.square(W(kR)) return integrand def Psmooth(self, k): """ Baryonic Linear power spectrum SHAPE, pass k in h/Mpc """ om = self.om ol = self.ol omz = self.cosmology.Om(self.z) # Omega matter at z olz = ol / np.square(self.cosmology.efunc(self.z)) # MBW Eqn 3.77 g0 = 5 / 2 * om / (np.power(om, 4 / 7) - ol + ((1 + om / 2) * (1 + ol / 70))) # Eqn 4.76 gz = 5 / 2 * omz / (np.power(omz, 4 / 7) - olz + ((1 + omz / 2) * (1 + olz / 70))) Dlin_ratio = gz / (1 + self.z) / g0 Psmooth = self.P0smooth * np.square(self.T0(k)) * \ np.power(k, self.ns) * np.square(Dlin_ratio) return Psmooth def P(self, k_lin, P_lin, n_mu_bins, beta=0, Sigma_s=4, Sigma_r=15, Sigma_perp=0, Sigma_para=0): """ This is the final power template from P_lin and P_nw including the C and exponential damping terms input k, P are 1D arrays from CAMB, n_mu_bins is an integer default beta = 0.4, dimensionless """ self.k = k_lin # 1D array self.P_lin = P_lin # 1D array self.P_nw = self.Psmooth(k_lin) # 1D array self.mu_bins = np.linspace(0, 1, num=n_mu_bins + 1) # 1D bin edges self.mu = (self.mu_bins[1:] + self.mu_bins[:-1]) / 2 # 1D bin centres mu, k = np.meshgrid(self.mu, k_lin) # 2D meshgrid P_lin = np.tile(P_lin, (n_mu_bins, 1)).T # meshgrid P_nw = self.Psmooth(k) # follows meshgrid shape of k_lin try: assert P_lin.size == P_nw.size except AssertionError: print('P shapes are', P_lin.shape, P_nw.shape) sigmavsq = (1 - np.square(mu))*np.square(Sigma_perp)/2 \ + np.square(mu*Sigma_para)/2 # BAO peak smoothing due to nonlinear S = np.exp(-np.square(k * Sigma_r) / 2) C = (1 + np.square(mu) * beta * (1-S)) / \ (1 + np.square(k*mu*Sigma_s)/2) self.P_k_mu = np.square(C) * ( (P_lin - P_nw) * np.exp(-np.square(k) * sigmavsq) + P_nw) return self.P_k_mu def p_multipole(self, ell): Ln = legendre(ell) P_ell = (2 * ell + 1) / 2 * np.sum( self.P_k_mu * (Ln(-self.mu) + Ln(self.mu)) * np.diff(self.mu_bins), axis=1) return P_ell def xi_multipole(self, r_input, ell, a=0.34, r_damp=True): ''' integrate in logk space using trapozoidal rule a is the exponential damping parameter to suppress high-k oscillations usually 0.3 to 1 input r may be 2D of dim (n_r_bins, n_mu_bins) ''' P_ell = self.p_multipole(ell) P_ell = (P_ell[1:] + P_ell[:-1]) / 2 # interpolate midpoint dk = np.diff(self.k) lnk_edges = np.log(self.k) lnk = (lnk_edges[1:] + lnk_edges[:-1]) / 2 # lnk value at midpoint k = np.exp(lnk) # k value at midpoint # turn into 2D grids k = np.tile(k, (r_input.size, 1)) P_ell = np.tile(P_ell, (r_input.size, 1)) dk = np.tile(dk, (r_input.size, 1)) r = np.tile(r_input.flatten(), (k.shape[1], 1)).T assert k.shape == P_ell.shape == dk.shape == r.shape if r_damp: damp = np.exp(-r * np.square(k * a)) else: damp = np.exp(-np.square(k * a)) xi_ell = np.power(1j, ell) / (2 * np.square(np.pi)) * np.sum( np.square(k) * P_ell * spherical_jn(ell, k * r) * damp * dk, axis=1) return np.real(xi_ell.reshape(r_input.shape))
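# --- Added usage sketch for PowerTemplate (assumptions flagged) ---
# Assumes the surrounding module provides the imports PowerTemplate relies on
# (numpy as np, scipy.integrate.romberg, scipy.special.spherical_jn, the
# Legendre helper `legendre`, and the top-hat window W used in siglogsmooth),
# and that a linear P(k) from e.g. CAMB is available. The P_lin below is only a
# placeholder shape so the call sequence can be seen end to end.
k_lin = np.logspace(-3, 1, 500)                     # h/Mpc
P_lin = 2e4 * k_lin / (1.0 + (k_lin / 0.02)**2)**2  # placeholder linear spectrum
template = PowerTemplate(z=0.5)
P_k_mu = template.P(k_lin, P_lin, n_mu_bins=100,
                    beta=0.4, Sigma_perp=2.5, Sigma_para=5.0)
r = np.linspace(30, 180, 76)                        # Mpc/h
xi0 = template.xi_multipole(r, 0)                   # monopole of the damped template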
RSD = True

# redshift binning
# finalCatZShell = [(0.9,1.0),(1.0,1.1),(1.1,1.2),(1.2,1.3),(1.3,1.4),(1.4,1.5),(1.5,1.6),(1.6,1.7),(1.7,1.8)]
# finalCatZShell = [(1.0,1.1)]
finalCatZShell = [(0.9, 1.2)]  # ,(1.2,1.5),(1.5,1.8)]

# plotting and showing
PLOT = True
SHOW = True
# rotation applied to mollweide view plots - not a healpy rotator
rotmoll = [40, 55, -90]

# COSMOLOGY SECTION
LCDMmodel = FlatLambdaCDM(Om0=0.319, H0=67.0)
LCDMmodelLF = FlatLambdaCDM(Om0=0.300, H0=70.0)
Mpc_h = u.def_unit('Mpc_h', u.Mpc / LCDMmodel.h)
Mpc_h_LF = u.def_unit('Mpc_h_LF', u.Mpc / LCDMmodel.h)
cm_LF = u.def_unit('cm_LF', u.cm)
l_unit = Mpc_h
params = {'flat': True, 'H0': 67.0, 'Om0': 0.319, 'Ob0': 0.049,
          'sigma8': 0.83, 'ns': 0.96, 'relspecies': False}
cosmology.addCosmology('FS', params)
cosmo = cosmology.setCosmology('FS')
cmrelation = "diemer19"

# LE3 CATALOGS SECTION
cat4le3_format = "fits"
WriteLE3Random = True
max_PKs_in_script = 50
max_2PCFs_in_script = 50
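# --- Added usage note for the custom Mpc/h unit (illustrative) ---
# Mpc_h was defined as u.Mpc / LCDMmodel.h, so astropy converts to it directly:
d_example = LCDMmodel.comoving_distance(1.0)  # Quantity in Mpc
print(d_example.to(Mpc_h))                    # same distance expressed in Mpc/h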
import numpy as np
import astropy
from astropy.io import fits, ascii
from astropy.table import Table, Column
import matplotlib.pyplot as plt
from scipy import optimize, stats
import pandas as pd
import re
from astropy.coordinates import ICRS, Distance, Angle, SkyCoord
from astropy import units as u
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0=71, Om0=0.27)


def correct_rgc(coord, glx_ctr=SkyCoord(0.0, 0.0, unit='arcsec'),
                glx_PA=Angle('37d42m54s'), glx_incl=Angle('77.5d')):
    # distance from coord to glx centre
    sky_radius = glx_ctr.separation(coord)
    avg_dec = 0.5 * (glx_ctr.dec + coord.dec).radian
    x = (glx_ctr.ra - coord.ra) * np.cos(avg_dec)
    y = glx_ctr.dec - coord.dec
    # azimuthal angle from coord to glx -- not completely happy with this
    phi = glx_PA - Angle('90d') + Angle(np.arctan(y.arcsec / x.arcsec), unit=u.rad)
    # convert to coordinates in rotated frame, where y-axis is galaxy major ax;
    # have to convert to arcmin b/c can't do sqrt(x^2+y^2) when x and y are angles
    xp = (sky_radius * np.cos(phi.radian)).arcsec
    yp = (sky_radius * np.sin(phi.radian)).arcsec