def make_cosmic_sfr(CSP_dir): SFHs_fnames = glob.glob(os.path.join(CSP_dir, 'SFHs_*.fits')) nsubpersfh = fits.getval(SFHs_fnames[0], ext=0, keyword='NSUBPER') SFHs = np.row_stack( [fits.getdata(fn_, 'allsfhs') / fits.getdata(fn_, 'mformed')[::nsubpersfh, None] for fn_ in SFHs_fnames]) ts = fits.getdata(SFHs_fnames[0], 'allts') zs, zmasks = zip(*[masked_z_at_value(WMAP9.age, t_ * u.Gyr) for t_ in ts]) zs = np.ma.array(zs, mask=zmasks) fig = plt.figure() ax = fig.add_subplot(1, 1, 1) SFRDmean = SFHs.mean(axis=0) / WMAP9.scale_factor(zs)**3. ax.plot(1. / WMAP9.scale_factor(zs), SFRDmean / SFRDmean.max()) ax.set_xlim([1., 1. / WMAP9.scale_factor(10.)]) ax.set_yscale('log') ax.set_ylim([.009, 1.05]) ax.set_xlabel(r'$\frac{1}{a}$', size='x-small') ax.set_ylabel(r'$\log{\psi}$', size='x-small') ax.tick_params(labelsize='x-small', which='both') ax_ = ax.twiny() ax_.set_xlim(ax.get_xlim()) zticks = np.linspace(0., 10., 11) inv_sf_ticks = 1. / WMAP9.scale_factor(zticks) ax_.set_xticks(inv_sf_ticks, minor=False) ax_.set_xticklabels(zticks) ax_.tick_params(labelsize='x-small') ax_.set_xlabel(r'$z$', size='x-small') fig.suptitle('``Cosmic" SFR', size='small') savefig(fig, 'CosmicSFR.png', CSP_dir, close=True)
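# A hedged sketch of the masked_z_at_value helper assumed above: it is not
# shown in this snippet, so this is a minimal stand-in built on astropy's
# z_at_value, returning a (redshift, mask) pair where mask=True flags cosmic
# times that cannot be inverted (e.g. older than the WMAP9 age of the Universe).
import astropy.units as u
from astropy.cosmology import WMAP9, z_at_value

def masked_z_at_value_sketch(func, fval, **kwargs):
    """Return (z, mask); mask=True when func cannot be inverted at fval."""
    try:
        return float(z_at_value(func, fval, **kwargs)), False
    except Exception:
        return 0.0, True

# Example: 5 Gyr inverts cleanly; 20 Gyr exceeds the age of the Universe and is masked.
pairs = [masked_z_at_value_sketch(WMAP9.age, t * u.Gyr) for t in (5.0, 20.0)]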
def plot_cosmology(zs, mu_mcmc, mu_minuit, std_mcmc, std_minuit, n): import matplotlib.pyplot as plt from matplotlib import gridspec from matplotlib.ticker import MaxNLocator fig = plt.figure(figsize=(4.5, 5.5)) gs = gridspec.GridSpec(2, 1, height_ratios=[2, 1], hspace=0.0, wspace=0.0) ax0 = fig.add_subplot(gs[0]) ax1 = fig.add_subplot(gs[1], sharex=ax0) axes = [ax0, ax1] zsort = sorted(zs) distmod = WMAP9.distmod(zsort).value - 19.3 distmod2 = WMAP9.distmod(zs).value - 19.3 ms = 2 alpha = 0.4 axes[0].errorbar(zs, mu_minuit, yerr=np.sqrt(std_minuit*std_minuit + 0.1*0.1), ms=ms, fmt='o', label=r"Max. Likelihood", color="r", alpha=alpha) axes[0].errorbar(zs, mu_mcmc, yerr=np.sqrt(std_mcmc*std_mcmc + 0.1*0.1), fmt='o', ms=ms, label=r"MCMC", color="b", alpha=alpha) axes[1].errorbar(zs, mu_minuit - distmod2, yerr=np.sqrt(std_minuit*std_minuit + 0.1*0.1), ms=ms, fmt='o', label=r"Max. Likelihood", color="r", alpha=alpha) axes[1].errorbar(zs, mu_mcmc - distmod2, yerr=np.sqrt(std_mcmc*std_mcmc + 0.1*0.1), fmt='o', ms=ms, label=r"MCMC", color="b", alpha=alpha) axes[0].plot(zsort, distmod, 'k') axes[1].axhline(0, color='k') axes[1].set_xlabel("$z$") axes[0].set_ylabel(r"$\mu$") axes[1].set_ylabel(r"$\mu_{\rm obs} - \mu(\mathcal{C})$") axes[0].legend(loc=2, frameon=False) plt.setp(ax0.get_xticklabels(), visible=False) ax0.yaxis.set_major_locator(MaxNLocator(7, prune="lower")) ax1.yaxis.set_major_locator(MaxNLocator(3)) fig.savefig("output/obs_cosmology_%s.png" % n, bbox_inches="tight", dpi=300, transparent=True) fig.savefig("output/obs_cosmology_%s.pdf" % n, bbox_inches="tight", dpi=300, transparent=True)
def update(val): zL,zS = slzL.val,slzS.val xL,yL = slxL.val, slyL.val ML,eL,PAL = slML.val,sleL.val,slPAL.val sh,sha = slss.val,slsa.val xs,ys = slxs.val, slys.val Fs,ws = slFs.val, slws.val ns,ars,pas = slns.val,slars.val,slPAs.val newDd = cosmo.angular_diameter_distance(zL).value newDs = cosmo.angular_diameter_distance(zS).value newDds= cosmo.angular_diameter_distance_z1z2(zL,zS).value newLens = vl.SIELens(zLens,xL,yL,10**ML,eL,PAL) newShear = vl.ExternalShear(sh,sha) newSource = vl.SersicSource(zS,True,xs,ys,Fs,ws,ns,ars,pas) xs,ys = vl.LensRayTrace(xim,yim,[newLens,newShear],newDd,newDs,newDds) imbg = vl.SourceProfile(xim,yim,newSource,[newLens,newShear]) imlensed = vl.SourceProfile(xs,ys,newSource,[newLens,newShear]) caustics = vl.CausticsSIE(newLens,newDd,newDs,newDds,newShear) ax.cla() ax.imshow(imbg,cmap=cmbg,extent=[xim.min(),xim.max(),yim.min(),yim.max()],origin='lower') ax.imshow(imlensed,cmap=cmlens,extent=[xim.min(),xim.max(),yim.min(),yim.max()],origin='lower') mu = imlensed.sum()*(xim[0,1]-xim[0,0])**2 / newSource.flux['value'] ax.text(0.9,1.05,'$\\mu$ = {0:.2f}'.format(mu),transform=ax.transAxes) for i in range(caustics.shape[0]): ax.plot(caustics[i,0,:],caustics[i,1,:],'k-') f.canvas.draw_idle()
def create_modelimage(lens,source,xmap,ymap,xemit,yemit,indices, Dd=None,Ds=None,Dds=None,sourcedatamap=None): """ Creates a model lensed image given the objects and map coordinates specified. Supposed to be common for both image fitting and visibility fitting, so we don't need any data here. Returns: immap A 2D array representing the field evaluated at xmap,ymap with all sources included. mus: A numpy array of length N_sources containing the magnifications of each source (1 if unlensed). """ lens = list(np.array([lens]).flatten()) # Ensure lens(es) are a list source = list(np.array([source]).flatten()) # Ensure source(s) are a list mus = np.zeros(len(source)) immap, imsrc = np.zeros(xmap.shape), np.zeros(xemit.shape) # If we didn't get pre-calculated distances, figure them here assuming WMAP9 if np.any((Dd is None,Ds is None, Dds is None)): from astropy.cosmology import WMAP9 as cosmo Dd = cosmo.angular_diameter_distance(lens[0].z).value Ds = cosmo.angular_diameter_distance(source[0].z).value Dds= cosmo.angular_diameter_distance_z1z2(lens[0].z,source[0].z).value # Do the raytracing for this set of lens & shear params xsrc,ysrc = LensRayTrace(xemit,yemit,lens,Dd,Ds,Dds) if sourcedatamap is not None: # ... then particular source(s) are specified for this map for jsrc in sourcedatamap: if source[jsrc].lensed: ims = SourceProfile(xsrc,ysrc,source[jsrc],lens) imsrc += ims mus[jsrc] = ims.sum()*(xemit[0,1]-xemit[0,0])**2./source[jsrc].flux['value'] else: immap += SourceProfile(xmap,ymap,source[jsrc],lens); mus[jsrc] = 1. else: # Assume we put all sources in this map/field for j,src in enumerate(source): if src.lensed: ims = SourceProfile(xsrc,ysrc,src,lens) imsrc += ims mus[j] = ims.sum()*(xemit[0,1]-xemit[0,0])**2./src.flux['value'] else: immap += SourceProfile(xmap,ymap,src,lens); mus[j] = 1. # Try to reproduce matlab's antialiasing thing; this uses a 3lobe lanczos low-pass filter imsrc = Image.fromarray(imsrc) resize = np.array(imsrc.resize((int(indices[1]-indices[0]),int(indices[3]-indices[2])),Image.ANTIALIAS)) immap[indices[2]:indices[3],indices[0]:indices[1]] += resize # Flip image to match sky coords (so coordinate convention is +y = N, +x = W, angle is deg E of N) immap = immap[::-1,:] # last, correct for pixel size. immap *= (xmap[0,1]-xmap[0,0])**2. return immap,mus
def get_size_flag(self):
    self.DA = np.zeros(len(self.s.SERSIC_TH50))
    #self.DA[self.membflag] = cosmo.angular_diameter_distance(self.s.CLUSTER_REDSHIFT[self.membflag]).value*Mpcrad_kpcarcsec
    for i in range(len(self.DA)):
        if self.membflag[i]:
            self.DA[i] = cosmo.angular_diameter_distance(self.s.CLUSTER_REDSHIFT[i]).value * Mpcrad_kpcarcsec
        else:
            self.DA[i] = cosmo.angular_diameter_distance(self.s.ZDIST[i]).value * Mpcrad_kpcarcsec
    self.sizeflag = (self.s.SERSIC_TH50 * self.DA > minsize_kpc)  #& (self.s.SERSIC_TH50 < 20.)
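# The commented-out line in get_size_flag hints at a vectorized form. A minimal
# hedged sketch, assuming the same attributes (membflag, s.CLUSTER_REDSHIFT,
# s.ZDIST, s.SERSIC_TH50) and module-level constants (Mpcrad_kpcarcsec,
# minsize_kpc) as the class above; angular_diameter_distance accepts arrays.
import numpy as np
from astropy.cosmology import WMAP9 as cosmo

def get_size_flag_vectorized(self):
    z_use = np.where(self.membflag, self.s.CLUSTER_REDSHIFT, self.s.ZDIST)
    self.DA = cosmo.angular_diameter_distance(z_use).value * Mpcrad_kpcarcsec
    self.sizeflag = (self.s.SERSIC_TH50 * self.DA > minsize_kpc)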
def build_light_cone(fileglob, zs=np.array([6, 6.5, 7]), boxtype='delta_T'):
    zs = np.array(zs)
    files = glob(fileglob)
    zind = {'delta_T': 5, 'Ts_z': 1, 'xH_': 2, 'deltax': 3}
    zsim = []
    dims = []
    lengths = []
    files_keep = []
    for f in files:
        if not os.path.basename(f).startswith(boxtype):
            if not 'updated_smoothed_' + boxtype in os.path.basename(f):
                continue
        files_keep.append(f)
        parms = os.path.basename(f).split('_')
        zsim.append(float(parms[zind[boxtype]][1:]))  # redshifts
        dims.append(int(parms[-2]))  # dim of box
        lengths.append(float(parms[-1][0:-3]))  # length in Mpc
    files = files_keep
    if (np.max(np.diff(dims)) > 0) or (np.max(np.diff(lengths)) > 0):
        raise ValueError('Boxes are not all the same size')
    if (np.max(zs) > np.max(zsim)) or (np.min(zs) < np.min(zsim)):
        raise ValueError('Requested redshifts outside range of sim.')
    order = np.argsort(zsim)
    files = [files[i] for i in order]
    zsim = np.array([zsim[i] for i in order])
    zsim0 = zsim[0]
    dim = dims[0]
    length = lengths[0]
    dx = length / dim
    lightcube = np.zeros((dim, dim, len(zs)), dtype=np.float32)
    Ds = cosmo.comoving_distance(zs).value - cosmo.comoving_distance(zsim0).value
    pix1 = list(map(int, np.floor(Ds / dx) % dim))
    wp1 = Ds / dx - np.floor(Ds / dx)
    pix2 = list(map(int, np.ceil(Ds / dx) % dim))
    wp2 = 1 - wp1
    box1 = [np.argmax(zsim[zsim <= z]) for z in zs]
    box2 = [i + 1 for i in box1]
    wb2 = (zs - zsim[box1]) / (zsim[box2] - zsim[box1])
    wb1 = (zsim[box2] - zs) / (zsim[box2] - zsim[box1])
    for i, z in enumerate(zs):
        data = np.fromfile(files[box1[i]], dtype=np.float32)
        data = data.reshape((dim, dim, dim))
        slice11 = data[:, :, pix1[i]]
        slice12 = data[:, :, pix2[i]]
        data = np.fromfile(files[box2[i]], dtype=np.float32)
        data = data.reshape((dim, dim, dim))
        slice21 = data[:, :, pix1[i]]
        slice22 = data[:, :, pix2[i]]
        # bilinear combination over the (snapshot, pixel-slice) weights
        lightcube[:, :, i] = (slice11 * wb1[i] * wp1[i] + slice12 * wb1[i] * wp2[i] +
                              slice21 * wb2[i] * wp1[i] + slice22 * wb2[i] * wp2[i])
    return lightcube
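# The light-cone fill above combines two bracketing snapshots and two
# neighbouring pixel slices with bilinear weights. A toy sketch of just that
# weighting step (hypothetical small arrays, not the simulation boxes):
import numpy as np

rng = np.random.default_rng(0)
slice11, slice12, slice21, slice22 = rng.random((4, 8, 8))
wb1, wb2 = 0.3, 0.7   # snapshot (redshift) weights, wb1 + wb2 == 1
wp1, wp2 = 0.6, 0.4   # pixel (distance) weights,    wp1 + wp2 == 1
plane = (slice11 * wb1 * wp1 + slice12 * wb1 * wp2 +
         slice21 * wb2 * wp1 + slice22 * wb2 * wp2)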
def cal_beta(z_los):
    beta = np.zeros(len(z_los))
    for i in range(len(z_los)):
        zi = np.array([zl, z_los[i]])
        z1, z2 = np.min(zi), np.max(zi)
        D1, D2 = cosmo.angular_diameter_distance(z1), cosmo.angular_diameter_distance(z2)
        beta[i] = (D2 - D1) * Ds / D2 / (Ds - D1)
    return beta
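# Hedged alternative, not the snippet's method: cal_beta approximates the
# inter-plane distances as differences of angular diameter distances. astropy
# also provides the exact pairwise distance; a sketch using it, assuming the
# lens redshift zl and a source redshift zs are available (zs is hypothetical
# here; the snippet above keeps only the source distance Ds as a global).
import numpy as np
from astropy.cosmology import WMAP9 as cosmo

def cal_beta_z1z2(z_los, zl, zs):
    """beta = D_12 * D_s / (D_2 * D_1s) with proper inter-plane distances."""
    z_los = np.atleast_1d(z_los)
    z1 = np.minimum(zl, z_los)
    z2 = np.maximum(zl, z_los)
    Ds = cosmo.angular_diameter_distance(zs)
    D2 = cosmo.angular_diameter_distance(z2)
    D12 = cosmo.angular_diameter_distance_z1z2(z1, z2)
    D1s = cosmo.angular_diameter_distance_z1z2(z1, zs)
    return (D12 * Ds / (D2 * D1s)).decompose().value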
def me(theta_Es, e_theta_Es, zlenses, zsources):
    ntarg = theta_Es.size
    M_E = numpy.zeros(ntarg)
    e_M_E = numpy.zeros(ntarg)
    vdisp = numpy.zeros(ntarg)
    e_vdisp = numpy.zeros(ntarg)
    for targ in numpy.arange(ntarg):
        zsource = zsources[targ]
        zlens = zlenses[targ]
        theta_E = theta_Es[targ]
        e_theta_E = e_theta_Es[targ]
        # luminosity distances
        d_LS = cosmo.luminosity_distance(zsource).value * 3.08e24
        d_LL = cosmo.luminosity_distance(zlens).value * 3.08e24
        # comoving distances
        d_MS = d_LS / (1 + zsource)
        d_ML = d_LL / (1 + zlens)
        # angular diameter distances
        d_ALS = 1 / (1 + zsource) * (d_MS - d_ML)
        d_AL = d_LL / (1 + zlens)**2
        d_AS = d_LS / (1 + zsource)**2
        # einstein radius in cm (7.1 kpc/" at z=0.7)
        theta_E_cm = theta_E / 206265. * d_AL
        e_theta_E_cm = e_theta_E / 206265. * d_AL
        # get a distribution of Einstein radii
        niters = int(1e3)
        theta_E_iters = numpy.random.normal(loc=theta_E_cm,
                                            scale=e_theta_E_cm, size=niters)
        # compute the mass enclosed within the Einstein radius
        c = 3e10
        G = 6.67e-8
        sigma_crit = c**2 / 4 / pi / G * d_AS / d_AL / d_ALS
        M_E_iters = pi * sigma_crit * theta_E_iters**2 / 2e33
        M_E[targ] = numpy.mean(M_E_iters)
        e_M_E[targ] = numpy.std(M_E_iters)
        vdisp2 = theta_E_iters / d_AL / 4 / pi * c**2 * d_AS / d_ALS
        vdisp[targ] = numpy.mean(numpy.sqrt(vdisp2) / 1e5)
        e_vdisp[targ] = numpy.std(numpy.sqrt(vdisp2) / 1e5)
    return M_E, e_M_E, vdisp, e_vdisp
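# The same critical-surface-density arithmetic, written with astropy constants
# and units instead of hand-entered cgs numbers; a hedged cross-check for a
# single lens/source pair (the Monte Carlo over theta_E errors is left out).
import numpy as np
import astropy.units as u
import astropy.constants as const
from astropy.cosmology import WMAP9 as cosmo

def einstein_mass(theta_E_arcsec, zlens, zsource):
    """M(<theta_E) = pi * Sigma_crit * (theta_E * D_L)**2, in solar masses."""
    d_L = cosmo.angular_diameter_distance(zlens)
    d_S = cosmo.angular_diameter_distance(zsource)
    d_LS = cosmo.angular_diameter_distance_z1z2(zlens, zsource)
    sigma_crit = const.c**2 / (4 * np.pi * const.G) * d_S / (d_L * d_LS)
    r_E = (theta_E_arcsec * u.arcsec).to(u.rad).value * d_L
    return (np.pi * sigma_crit * r_E**2).to(u.Msun)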
def load_model(nbins_sfh=6,sigma=0.3,datfile=None,objname=None, **extras): # we'll need this to access specific model parameters n = [p['name'] for p in model_params] # create SFH bins with open(datfile,'r') as f: data = json.load(f) zred = float(data[objname]['redshift']) model_params[n.index('zred')]['init'] = zred tuniv = WMAP9.age(zred).value*1e9 # now construct the nonparametric SFH # set number of components # set logsfr_ratio prior # propagate to agebins model_params[n.index('agebins')]['N'] = nbins_sfh model_params[n.index('mass')]['N'] = nbins_sfh model_params[n.index('logsfr_ratios')]['N'] = nbins_sfh-1 model_params[n.index('logsfr_ratios')]['init'] = np.full(nbins_sfh-1,0.0) # constant SFH model_params[n.index('logsfr_ratios')]['prior'] = SFR_Ratio(mean=np.full(nbins_sfh-1,0.0),sigma=np.full(nbins_sfh-1,sigma)) model_params.append({'name': 'tuniv', 'N': 1, 'isfree': False, 'init': tuniv}) # set mass-metallicity prior model_params[n.index('massmet')]['prior'] = MassMet(z_mini=-1.98, z_maxi=0.19, mass_mini=7, mass_maxi=12.5) return sedmodel.SedModel(model_params)
def Counts(gal_id, gal_field, z, R = 10**np.linspace(1.2,3.6,13), delta_z = 0.1, min_mass = 9.415): #making a list of galaxies in within a redshift range of given z, in the selected field, and above the mass limit# lst_gal = [] data_tmp = data[data['field'] == gal_field] #separating the potential satellites into star-forming(b) and quiescent(r) bins# mask = ((np.abs(data_tmp['z'] - z) <= delta_z) & (data_tmp['id'] != gal_id) & (data_tmp['lmass'] >= min_mass)) lst_gal = data_tmp[mask] lst_galr = lst_gal[(((lst_gal['vj'] < 0.92) & (lst_gal['uv'] > 1.3)) | ((lst_gal['vj'] > 0.8) & (lst_gal['vj'] < 1.6) & (lst_gal['uv'] > (0.88*lst_gal['vj'] +0.49))))] lst_galb = lst_gal[(((lst_gal['vj'] < 0.92) & (lst_gal['uv'] < 1.3)) | ((lst_gal['vj'] > 0.8) & (lst_gal['vj'] < 1.6) & (lst_gal['uv'] < (0.88*lst_gal['vj'] +0.49))) | (lst_gal['vj']>1.5))] #finding the various aperture radii in arcminutes based on given z# kpc_per_arcmin = cosmo.kpc_proper_per_arcmin(z) arcmin_per_kpc = kpc_per_arcmin**(-1) arcmin = arcmin_per_kpc*(R*u.kpc) #retrieving RA and DEC data of given galaxy# p1 = data_tmp[(data_tmp['id'] == gal_id)] #calculating distance in special ANGLE measure to each galaxy in lst_gal# sc0 = SkyCoord(p1['ra']*u.deg, p1['dec']*u.deg) sc1 = SkyCoord(lst_galr['ra']*u.deg, lst_galr['dec']*u.deg) sc2 = SkyCoord(lst_galb['ra']*u.deg, lst_galb['dec']*u.deg) sep1 = sc0.separation(sc1).to(u.arcmin) sep2 = sc0.separation(sc2).to(u.arcmin) #finding number of "sep's" within the list 'arcmin' already created# nnr = np.empty(len(R)) nnb = np.empty(len(R)) for ii,r in enumerate(arcmin): nnr[ii] = np.sum(sep1 <= r) nnb[ii] = np.sum(sep2 <= r) return [nnr, nnb]
def load_model(nbins_sfh=5,sigma=0.3,df=2, **extras): # we'll need this to access specific model parameters n = [p['name'] for p in model_params] # create SFH bins zred = model_params[n.index('zred')]['init'] tuniv = WMAP9.age(zred).value*1e9 # now construct the nonparametric SFH # set number of components # set logsfr_ratio prior # propagate to agebins model_params[n.index('agebins')]['N'] = nbins_sfh model_params[n.index('mass')]['N'] = nbins_sfh model_params[n.index('logsfr_ratios')]['N'] = nbins_sfh-1 model_params[n.index('logsfr_ratios')]['init'] = np.full(nbins_sfh-1,0.0) # constant SFH model_params[n.index('logsfr_ratios')]['prior'] = priors.StudentT(mean=np.full(nbins_sfh-1,0.0), scale=np.full(nbins_sfh-1,sigma), df=np.full(nbins_sfh-1,df)) model_params[n.index('logsfr_ratio30')]['prior'] = priors.StudentT(mean=0.0, scale=sigma, df=df) model_params[n.index('logsfr_ratiomax')]['prior'] = priors.StudentT(mean=0.0, scale=sigma, df=df) model_params.append({'name': 'tuniv', 'N': 1, 'isfree': False, 'init': tuniv}) return sedmodel.SedModel(model_params)
def photometry(): filters=['I','B','V','R'] #filter names for f in filters: All_data=Table(names=('Count','Error','Time')) file=open('photometry_'+str(sn_name)+str(f)+'.txt','w') super_lotis_path='/Users/zeynepyaseminkalender/Documents/MyFiles_Pyhton/SuperLOTIS_final' date_search_path=os.path.join(super_lotis_path, '13*') search_str=os.path.join(date_search_path,str(sn_name)+str(f)+'.fits') for name in glob(search_str): date=extract_date_from_fullpath(name) final_date=convert_time(date) with fits.open(name) as analysis: z = .086 r = 1 * u.kpc / cosmo.kpc_comoving_per_arcmin(z) cordinate = SkyCoord('01:48:08.66 +37:33:29.22', unit = (u.hourangle, u.deg)) aperture = SkyCircularAperture(cordinate, r) exp_time= analysis[0].header['EXPTIME'] # error calculation data_error = np.sqrt(analysis[0].data*exp_time) / exp_time tbl=Table(aperture_photometry(analysis[0],aperture,error=data_error),names=('Count','Count_Error','x_center','y_center','center_input')) tbl.keep_columns(['Count','Count_Error']) count=tbl[0]['Count'] error=tbl[0]['Count_Error'] All_data.add_row((count,error,final_date)) print >> file , All_data file.close() plot(filters)
def load_model(objname=None, datdir=None, nbins_sfh=7, sigma=0.3, df=2, agelims=[], zred=None, runname=None, **extras): # we'll need this to access specific model parameters n = [p['name'] for p in model_params] # first calculate redshift and corresponding t_universe # if no redshift is specified, read from file if zred is None: datname = datdir + objname.split('_')[0] + '_' + runname + '.dat' dat = ascii.read(datname) idx = dat['phot_id'] == int(objname.split('_')[-1]) zred = float(dat['z_best'][idx]) tuniv = WMAP9.age(zred).value*1e9 model_params[n.index('zred')]['init'] = zred # now construct the nonparametric SFH # current scheme: last bin is 15% age of the Universe, first two are 0-30, 30-100 # remaining N-3 bins spaced equally in logarithmic space tbinmax = (tuniv*0.85) agelims = agelims[:2] + np.linspace(agelims[2],np.log10(tbinmax),nbins_sfh-2).tolist() + [np.log10(tuniv)] agebins = np.array([agelims[:-1], agelims[1:]]) # load nvariables and agebins model_params[n.index('agebins')]['N'] = nbins_sfh model_params[n.index('agebins')]['init'] = agebins.T model_params[n.index('mass')]['N'] = nbins_sfh model_params[n.index('logsfr_ratios')]['N'] = nbins_sfh-1 model_params[n.index('logsfr_ratios')]['init'] = np.full(nbins_sfh-1,0.0) # constant SFH model_params[n.index('logsfr_ratios')]['prior'] = priors.StudentT(mean=np.full(nbins_sfh-1,0.0), scale=np.full(nbins_sfh-1,sigma), df=np.full(nbins_sfh-1,df)) return sedmodel.SedModel(model_params)
def load_model(nbins_sfh=7, sigma=0.3, df=2., agelims=None, objname=None, **extras):
    # we'll need this to access specific model parameters
    n = [p['name'] for p in model_params]

    # replace nbins_sfh (integer division so the bin count stays an integer)
    nbins_sfh = 4 + (int(objname) - 1) // 9

    # create SFH bins
    zred = model_params[n.index('zred')]['init']
    tuniv = WMAP9.age(zred).value

    # now construct the nonparametric SFH
    # current scheme: six bins, four spaced equally in logarithmic space
    # last bin is 15% age of the Universe, first two are 0-30, 30-100
    tbinmax = (tuniv * 0.85) * 1e9
    agelims = agelims[:2] + np.linspace(agelims[2], np.log10(tbinmax), nbins_sfh - 2).tolist() + [np.log10(tuniv * 1e9)]
    agebins = np.array([agelims[:-1], agelims[1:]])

    # load nvariables and agebins
    model_params[n.index('agebins')]['N'] = nbins_sfh
    model_params[n.index('agebins')]['init'] = agebins.T
    model_params[n.index('mass')]['N'] = nbins_sfh
    model_params[n.index('logsfr_ratios')]['N'] = nbins_sfh - 1
    model_params[n.index('logsfr_ratios')]['init'] = np.full(nbins_sfh - 1, 0.0)  # constant SFH
    model_params[n.index('logsfr_ratios')]['prior'] = priors.StudentT(mean=np.full(nbins_sfh - 1, 0.0),
                                                                      scale=np.full(nbins_sfh - 1, sigma),
                                                                      df=np.full(nbins_sfh - 1, df))

    return sedmodel.SedModel(model_params)
def load_model(datname='', objname='', **extras): ###### REDSHIFT ###### hdulist = fits.open(datname) idx = hdulist[1].data['Name'] == objname zred = hdulist[1].data['cz'][idx][0] / 3e5 hdulist.close() #### TUNIV ##### tuniv = WMAP9.age(zred).value #### TAGE ##### tage_init = 1.1 tage_mini = 0.11 # FSPS standard tage_maxi = tuniv #### INSERT MAXIMUM AGE AND REDSHIFT INTO MODEL PARAMETER DICTIONARY #### pnames = [m['name'] for m in model_params] zind = pnames.index('zred') model_params[zind]['init'] = zred tind = pnames.index('tage') model_params[tind]['prior_args']['maxi'] = tuniv model = BurstyModel(model_params) return model
def cosmoComVol(redshift, WMAP9=False, H0=70.0, Om0=0.30, Planck15=False, Gpc=False):
    """
    Get the comoving volume at redshift=z.

    This is simply a wrapper of astropy.cosmology.
    The input redshift can be an array.
    """
    if WMAP9:
        from astropy.cosmology import WMAP9 as cosmo
    elif Planck15:
        from astropy.cosmology import Planck15 as cosmo
    else:
        from astropy.cosmology import FlatLambdaCDM
        cosmo = FlatLambdaCDM(H0=H0, Om0=Om0)
    v = cosmo.comoving_volume(redshift)
    if not Gpc:
        return v.value
    else:
        return v.to(u.Gpc**3).value  # comoving_volume carries Mpc^3 units
def cosmoDA(redshift, WMAP9=False, H0=70.0, Om0=0.30, Planck15=False, kpc=False):
    """
    Get the angular diameter distance at redshift=z.

    This is simply a wrapper of astropy.cosmology.
    The input redshift can be an array.
    """
    if WMAP9:
        from astropy.cosmology import WMAP9 as cosmo
    elif Planck15:
        from astropy.cosmology import Planck15 as cosmo
    else:
        from astropy.cosmology import FlatLambdaCDM
        cosmo = FlatLambdaCDM(H0=H0, Om0=Om0)
    da = cosmo.angular_diameter_distance(redshift)
    if not kpc:
        return da.value
    else:
        return da.to(u.kpc).value
def cosmoAge(redshift, WMAP9=False, H0=70.0, Om0=0.30, Planck15=False, Myr=False):
    """
    Get the age of the Universe at redshift=z.

    This is simply a wrapper of astropy.cosmology.
    The input redshift can be an array.
    """
    if WMAP9:
        from astropy.cosmology import WMAP9 as cosmo
    elif Planck15:
        from astropy.cosmology import Planck15 as cosmo
    else:
        from astropy.cosmology import FlatLambdaCDM
        cosmo = FlatLambdaCDM(H0=H0, Om0=Om0)
    age = cosmo.age(redshift)
    if not Myr:
        return age.value
    else:
        return age.to(u.Myr).value
def cosmoLookBack(redshift, WMAP9=False, H0=70.0, Om0=0.30, Planck15=False, Myr=False):
    """
    Get the look-back time at redshift=z.

    This is simply a wrapper of astropy.cosmology.
    The input redshift can be an array.
    """
    if WMAP9:
        from astropy.cosmology import WMAP9 as cosmo
    elif Planck15:
        from astropy.cosmology import Planck15 as cosmo
    else:
        from astropy.cosmology import FlatLambdaCDM
        cosmo = FlatLambdaCDM(H0=H0, Om0=Om0)
    lbt = cosmo.lookback_time(redshift)
    if not Myr:
        return lbt.value
    else:
        return lbt.to(u.Myr).value
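# Usage of the four wrappers above is interchangeable with calling the astropy
# cosmology object directly; a quick hedged illustration at a single redshift.
from astropy.cosmology import WMAP9 as cosmo

z = 0.5
da_kpc = cosmoDA(z, WMAP9=True, kpc=True)                            # wrapper, kpc
da_kpc_direct = cosmo.angular_diameter_distance(z).to('kpc').value   # same number
age_myr = cosmoAge(z, WMAP9=True, Myr=True)                          # wrapper, Myr
lbt_gyr = cosmoLookBack(z, WMAP9=True)                               # wrapper, Gyr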
def Counts(gal_id, gal_field, z, R = 10**np.linspace(1.2,3.6,13), delta_z = 0.1, min_mass = 9.415): from astropy.coordinates.sky_coordinate import SkyCoord from astropy import units as u #making a list of galaxies in within a redshift range of given z, in the selected field, and above the mass limit# lst_gal = [] data_tmp = data_flagged[data_flagged['field'] == gal_field] mask = ((np.abs(data_tmp['z_peak'] - z) <= delta_z) & (data_tmp['id'] != gal_id) & (data_tmp['lmass'] >= min_mass)) lst_gal = data_tmp[mask] #finding the various aperture radii in arcminutes based on given z# kpc_per_arcmin = cosmo.kpc_proper_per_arcmin(z) arcmin_per_kpc = kpc_per_arcmin**(-1) arcmin = arcmin_per_kpc*(R*u.kpc) #retrieving RA and DEC data of given galaxy# p1 = data_tmp[(data_tmp['id'] == gal_id)] #calculating distance in special ANGLE measure to each galaxy in lst_gal# sc0 = SkyCoord(p1['ra']*u.deg, p1['dec']*u.deg) sc = SkyCoord(lst_gal['ra']*u.deg, lst_gal['dec']*u.deg) sep = sc0.separation(sc) sep = sep.to(u.arcmin) #finding number of "sep's" within the list 'arcmin' already created# nn = np.empty(len(R)) for ii,r in enumerate(arcmin): nn[ii] = np.sum(sep <= r) return nn
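# The per-aperture loop in both Counts() variants can be collapsed into a
# single sorted search. A hedged sketch of the equivalent counting step,
# assuming the separations and aperture radii are plain float arrays in arcmin:
import numpy as np

def cumulative_counts(sep_arcmin, aperture_arcmin):
    """Number of neighbours with separation <= r, for each aperture radius r."""
    sep_sorted = np.sort(np.atleast_1d(sep_arcmin))
    return np.searchsorted(sep_sorted, np.atleast_1d(aperture_arcmin), side='right')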
def load_model(alpha_sfh=0.2,agelims=None, **extras): # we'll need this to access specific model parameters n = [p['name'] for p in model_params] # create SFH bins zred = model_params[n.index('zred')]['init'] tuniv = WMAP9.age(zred).value # now construct the nonparametric SFH # current scheme: six bins, four spaced equally in logarithmic # last bin is 15% age of the Universe, first two are 0-30, 30-100 tbinmax = (tuniv*0.85)*1e9 agelims = agelims[:2] + np.linspace(agelims[2],np.log10(tbinmax),len(agelims)-3).tolist() + [np.log10(tuniv*1e9)] agebins = np.array([agelims[:-1], agelims[1:]]) ncomp = len(agelims) - 1 # load nvariables and agebins model_params[n.index('agebins')]['N'] = ncomp model_params[n.index('agebins')]['init'] = agebins.T model_params[n.index('mass')]['N'] = ncomp model_params[n.index('mass')]['init'] = np.full(ncomp,1e6) model_params[n.index('mass')]['prior'] = priors.TopHat(mini=np.full(ncomp,1e5), maxi=np.full(ncomp,1e12)) return sedmodel.SedModel(model_params)
def load_model(agelims=[], nbins_sfh=7, sigma=0.3, df=2, **extras): # we'll need this to access specific model parameters n = [p['name'] for p in model_params] # first calculate t_universe at z=1 tuniv = WMAP9.age(1.0).value*1e9 # now construct the nonparametric SFH # current scheme: last bin is 15% age of the Universe, first two are 0-30, 30-100 # remaining N-3 bins spaced equally in logarithmic space tbinmax = (tuniv*0.85) agelims = agelims[:2] + np.linspace(agelims[2],np.log10(tbinmax),nbins_sfh-2).tolist() + [np.log10(tuniv)] agebins = np.array([agelims[:-1], agelims[1:]]) # load nvariables and agebins model_params[n.index('agebins')]['N'] = nbins_sfh model_params[n.index('agebins')]['init'] = agebins.T model_params[n.index('mass')]['N'] = nbins_sfh model_params[n.index('logsfr_ratios')]['N'] = nbins_sfh-1 model_params[n.index('logsfr_ratios')]['init'] = np.full(nbins_sfh-1,0.0) # constant SFH model_params[n.index('logsfr_ratios')]['prior'] = priors.StudentT(mean=np.full(nbins_sfh-1,0.0), scale=np.full(nbins_sfh-1,sigma), df=np.full(nbins_sfh-1,df)) # insert redshift into model dictionary model_params[n.index('zred')]['init'] = 0.0 return sedmodel.SedModel(model_params)
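# The agebin scheme shared by these load_model variants (two fixed young bins,
# log-spaced middle bins, last bin covering the final 15% of the age of the
# Universe) can be checked in isolation; a hedged sketch with illustrative
# agelims in log10(yr), mirroring the 0-30 / 30-100 Myr convention above.
import numpy as np
from astropy.cosmology import WMAP9

nbins_sfh = 7
agelims = [0.0, 8.0, 8.7]
tuniv = WMAP9.age(1.0).value * 1e9          # yr, evaluated at z = 1 as above
tbinmax = tuniv * 0.85
agelims = (agelims[:2] +
           np.linspace(agelims[2], np.log10(tbinmax), nbins_sfh - 2).tolist() +
           [np.log10(tuniv)])
agebins = np.array([agelims[:-1], agelims[1:]]).T   # shape (nbins_sfh, 2)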
def load_model(objname=None, datdir=None, runname=None, agelims=[], zred=None, alpha_sfh=0.3, **extras): # we'll need this to access specific model parameters n = [p['name'] for p in model_params] # first calculate redshift and corresponding t_universe # if no redshift is specified, read from file hdu = fits.open(APPS+'/prospector_alpha/data/3dhst/shivaei_sample.fits') fields = np.array([f.replace('-','') for f in hdu[1].data['FIELD']]) ids = hdu[1].data['V4ID'].astype(str) idx_obj = (fields == objname.split('_')[0]) & (ids == objname.split('_')[1]) zred = float(hdu[1].data['Z_MOSFIRE'][idx_obj][0]) tuniv = WMAP9.age(zred).value # now construct the nonparametric SFH # current scheme: six bins, four spaced equally in logarithmic # last bin is 15% age of the Universe, first two are 0-30, 30-100 tbinmax = (tuniv*0.85)*1e9 agelims = agelims[:2] + np.linspace(agelims[2],np.log10(tbinmax),len(agelims)-3).tolist() + [np.log10(tuniv*1e9)] agebins = np.array([agelims[:-1], agelims[1:]]) ncomp = len(agelims) - 1 # load into `agebins` in the model_params dictionary model_params[n.index('agebins')]['N'] = ncomp model_params[n.index('agebins')]['init'] = agebins.T # now we do the computational z-fraction setup # number of zfrac variables = (number of SFH bins - 1) # set initial with a constant SFH # if alpha_SFH is a vector, use this as the alpha array # else assume all alphas are the same model_params[n.index('mass')]['N'] = ncomp model_params[n.index('z_fraction')]['N'] = ncomp-1 if type(alpha_sfh) != type(np.array([])): alpha = np.repeat(alpha_sfh,ncomp-1) else: alpha = alpha_sfh tilde_alpha = np.array([alpha[i-1:].sum() for i in xrange(1,ncomp)]) model_params[n.index('z_fraction')]['prior'] = priors.Beta(alpha=tilde_alpha, beta=alpha, mini=0.0, maxi=1.0) model_params[n.index('z_fraction')]['init'] = np.array([(i-1)/float(i) for i in range(ncomp,1,-1)]) model_params[n.index('z_fraction')]['init_disp'] = 0.02 # set mass-metallicity prior # insert redshift into model dictionary model_params[n.index('massmet')]['prior'] = MassMet(z_mini=-1.98, z_maxi=0.19, mass_mini=7, mass_maxi=12.5) model_params[n.index('zred')]['init'] = zred # set gas-phase metallicity prior # log(Z/Zsun) = -3.07 for model mean = hdu[1].data['m_12LOGOH'][idx_obj][0] if (mean > -100): gas_logz_mean = np.clip((mean - 12) + 3.06, -2, 0.5) sigma = (hdu[1].data['U68_12LOGOH'] - hdu[1].data['L68_12LOGOH'])[idx_obj][0] / 2. model_params[n.index('gas_logz')]['prior'] = priors.ClippedNormal(mean=gas_logz_mean,sigma=sigma,mini=-2,maxi=0.5) return sedmodel.SedModel(model_params)
def load_model(**extras):
    # set tage_max, fix redshift
    n = [p['name'] for p in model_params]
    zred = 0.0001
    tuniv = WMAP9.age(zred).value
    model_params[n.index('tage')]['prior'].update(maxi=tuniv)
    return sedmodel.SedModel(model_params)
def volume_limit(sample, zmax, magcol, appmag):
    # Volume-limit the sample based on the detection limit and redshift
    appmag_lim = appmag * u.mag
    distmod = WMAP9.distmod(zmax)
    absmag_lim = appmag_lim - distmod
    absmags = sample[magcol] - WMAP9.distmod(sample['Z_BEST']).value
    #print sample['imaging'][0]
    #print 'absmag_lim',absmag_lim
    #print 'appmag_lim',appmag_lim
    #print 'distmod',distmod
    vl_ind = (absmags < absmag_lim.value) & (sample['Z_BEST'] > 0) & (sample['Z_BEST'] < zmax)
    return sample[vl_ind]
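# The volume limit above is a plain distance-modulus cut. A hedged, self-contained
# sketch on a synthetic catalogue (random redshifts/magnitudes stand in for the
# survey table; 'MAG_I' and the limits are illustrative, not survey values):
import numpy as np
from astropy.table import Table
from astropy.cosmology import WMAP9

rng = np.random.default_rng(1)
sample = Table({'Z_BEST': rng.uniform(0.01, 1.5, 1000),
                'MAG_I': rng.uniform(16.0, 24.0, 1000)})
zmax, appmag_lim = 1.0, 22.5
absmag = sample['MAG_I'] - WMAP9.distmod(sample['Z_BEST']).value
absmag_lim = appmag_lim - WMAP9.distmod(zmax).value
vl_sample = sample[(absmag < absmag_lim) & (sample['Z_BEST'] > 0) & (sample['Z_BEST'] < zmax)]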
def lum_dist_error(self, val, err):
    '''
    Use a black box error method to find the uncertainty in
    astropy.cosmology.WMAP9.luminosity_distance().

    Args:
        val (float): A redshift value
        err (float): Error in the redshift

    Returns:
        error (float): Error in the luminosity distance
    '''
    diff = (cosmo.luminosity_distance(val + err).cgs -
            cosmo.luminosity_distance(val - err).cgs)
    error = abs(.5 * diff.value)
    return error
def conv_error(self, val, err):
    '''
    Use a black box error method to find the uncertainty in
    astropy.cosmology.WMAP9.kpc_comoving_per_arcmin().

    Args:
        val (float): A redshift value
        err (float): Error in the redshift

    Returns:
        error (float): Error in the conversion factor
    '''
    diff = (cosmo.kpc_comoving_per_arcmin(val + err)**2 -
            cosmo.kpc_comoving_per_arcmin(val - err)**2)
    error = abs(.5 * diff.value)
    return error
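# Both error helpers above use the same symmetric finite-difference ("black box")
# propagation. A hedged generic version of that pattern, usable with any
# cosmology method that returns a Quantity:
from astropy.cosmology import WMAP9 as cosmo

def blackbox_error(func, val, err):
    """|f(z + dz) - f(z - dz)| / 2 as the propagated uncertainty."""
    diff = func(val + err) - func(val - err)
    return abs(0.5 * diff.value)

# e.g. uncertainty on the comoving kpc/arcmin scale from a redshift error
scale_err = blackbox_error(cosmo.kpc_comoving_per_arcmin, 0.3, 0.01)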
def SNcosmo_template(): # Pick cosmology from astropy.cosmology import WMAP9 as cosmo #from astropy.cosmology import FlatLambdaCDM #cosmo = FlatLambdaCDM(H0=69.6, Om0=0.286) from astropy import units as u import astropy.constants as const # Parameters obsfilter = inst.filter # This is the name of the sample band we use # Other defined things absmag_V = -19.3 # We use this to normalize (needed?) magsystem = 'ab' # The magnitude system (both ab and vega should work) modelphase = 0 # The phase of the SN template = I.Source # The name of the SN template (e.g. salt2 or hsiao) # When using new bands, assuming these are in newband directory # e.g. from WFC3IR from http://www.stsci.edu/~WFC3/UVIS/SystemThroughput/ bandinfo = {} for filterfile in os.listdir(I.Inst): if filterfile == '.DS_Store': pass else: words = re.split('\.',filterfile) filterdata = np.genfromtxt(I.Inst+'/'+filterfile ) # Remove regions with zero throughput #iNonzero = np.where( filterdata[:,1]>0 )[0] band = sncosmo.Bandpass(i_wave,np.ones(len(i_wave)),name=words[0]) #band = sncosmo.Bandpass(filterdata[:,0],filterdata[:,1],name=words[0]) sncosmo.registry.register(band) # Scale template to chosen absolute V (Vega) model = sncosmo.Model(source=template) model.set(z=0) magdiff = model.bandmag('bessellv','vega',[0])-absmag_V templatescale = 10**(0.4*magdiff) print 'Get scale %s to match absolute mag.'%(templatescale) # Get distance modulus DM = cosmo.distmod(z) print 'Get Distance Modulus %s'%(DM) # Create a model assuming both of these scales model = sncosmo.Model(source=template) print model model.set(x0=templatescale*10**(-0.4*DM.value),z=z) #this only works for salt2 model. need amplitude for hsiao model # Derive the observed magnitude and flux in chosen filter obsmag = model.bandmag(obsfilter,magsystem,[modelphase]) bandflux = model.bandflux(obsfilter, modelphase ) # Flux in photons/s/cm^2 print 'We get mag %s (%s) and flux %s photons/s/cm^2'%(obsmag,magsystem,bandflux) return model.flux(modelphase,i_wave) #wave is in observer frame
def vl_plot(): # Plot the volume-limited samples split by survey tasca = fits.getdata('{0}/comparisons/COSMOS/cosmos_tasca_gzh.fits'.format(gzh_path),1) cassata = fits.getdata('{0}/comparisons/COSMOS/cosmos_cassata_gzh.fits'.format(gzh_path),1) zest = fits.getdata('{0}/comparisons/COSMOS/cosmos_zest_gzh.fits'.format(gzh_path),1) aegis = fits.getdata('{0}/comparisons/AEGIS/aegis_gzh.fits'.format(gzh_path),1) gems = fits.getdata('{0}/comparisons/GEMS/gems_gzh.fits'.format(gzh_path),1) goods = fits.getdata('{0}/comparisons/GOODS/goods_gzh.fits'.format(gzh_path),1) dlist = get_dlist(aegis,gems,goods,cassata,tasca,zest) fig,axarr = plt.subplots(3,2,sharex=True,sharey=True,figsize=(12,12)) for s,ax in zip(dlist,axarr.ravel()): d = s['data'] newdata = volume_limit(d,1.0,s['magcol'],s['maglim']) olddata = d[d['Z_BEST'] > 0.] absmag_old = olddata[s['magcol']] - WMAP9.distmod(olddata['Z_BEST']).value absmag_new = newdata[s['magcol']] - WMAP9.distmod(newdata['Z_BEST']).value appmag_old = olddata[s['magcol']] appmag_new = newdata[s['magcol']] s['data'] = newdata ax.scatter(olddata['Z_BEST'],absmag_old,c='gray') ax.scatter(newdata['Z_BEST'],absmag_new,c='red') ax.set_xlim(-0.5,4) ax.set_ylim(-15,-25) ax.set_xlabel(r'$z$',fontsize=20) ax.set_ylabel(r'$M$',fontsize=20) ax.set_title("{0} ".format(s['survey'])+r'$m_\mathrm{I|i|z}<$'+'{0:.1f}'.format(s['maglim']),fontsize=20) fig.tight_layout() plt.show() plt.close() return None
def get_spectrum(self, outwave=None, filters=None, peraa=False, **params): """Given a theta vector, generate spectroscopy, photometry and any extras (e.g. stellar mass). :param theta: ndarray of parameter values. :param sps: A python-fsps StellarPopulation object to be used for generating the SED. :returns spec: The restframe spectrum in units of maggies. :returns phot: The apparent (redshifted) observed frame maggies in each of the filters. :returns extras: A list of the ratio of existing stellar mass to total mass formed for each component, length ncomp. """ self.params.update(**params) # Pass the model parameters through to the sps object ncomp = len(self.params['mass']) for ic in range(ncomp): s, p, x = self.one_sed(component_index=ic, filterlist=filters) try: spec += s maggies += p extra += [x] except(NameError): spec, maggies, extra = s, p, [x] # `spec` is now in Lsun/Hz if outwave is not None: w = self.csp.wavelengths spec = np.interp(outwave, w, spec) # Distance dimming and unit conversion if self.params['zred'] == 0: # Use 10pc for the luminosity distance (or a number # provided in the lumdist key in units of Mpc) dfactor = (self.params.get('lumdist', 1e-5) * 1e5)**2 else: lumdist = cosmo.luminosity_distance(self.params['zred']).value dfactor = (lumdist * 1e5)**2 / (1 + self.params['zred']) if peraa: # spectrum will be in erg/s/cm^2/AA spec *= to_cgs / dfactor * lightspeed / outwave**2 else: # Spectrum will be in maggies spec *= to_cgs / dfactor / 1e3 / (3631*jansky_mks) return spec, maggies / dfactor, extra
def generate_calculation_values(output_files_path, info_table): """Return the average star formation rates per SN type per age, an array of 23 ages, present solar mass values per SN environment, and processed solar masses per SN environment This function uses the STARLIGHT output files for 244 local SN environments to generate the information used to plot the star formation history per SN type Arguments: output_files_path (str) : the path indicating the STARLIGHT output files location info_table (str) : the data table specifying SN names, types, and redshifts Returns: type_(II, IIn, Ic, IIb, Ia, Ibc, Ib)_values (lists) : lists containing the average star formation rates per SN type for 23 ages sorted_ages (array) : an array containing the 23 ages for which star formation rate is calculated """ all_output_files = glob.glob(output_files_path) # full paths of .BN files type_II_values = [] type_IIn_values = [] type_Ic_values = [] type_IIb_values = [] type_Ia_values = [] type_Ibc_values = [] type_Ib_values = [] present_masses = [] processed_masses = [] for path in all_output_files: # Changing from absolute to relative file paths will make # splitting by '/' no longer necessary. supernova_name = path.split('/')[6].split('.')[0] with open(path) as ofile: lines = ofile.readlines() # Check to make sure that the output file contains data # If it does not, skip that file if len(lines) < 5: continue for num, line in enumerate(lines): line_list = line.split() if 'Mini_j(%)' in line_list: table_start_value = num + 1 elif '[N_base]' in line_list: n_base_value = int(line_list[0]) elif '[Mini_tot' in line_list: mini_tot = float(line_list[0]) elif '[Mcor_tot' in line_list: mcor_tot = float(line_list[0]) # Calculate the total mass of the SN local environment redshift = np.asarray(info_table['z'])[np.where( np.asarray(info_table['SN']) == supernova_name)] type = np.asarray(info_table['Type'])[np.where( np.asarray(info_table['SN']) == supernova_name)] solar_luminosity = 3.826e33 # Unit: erg / sec luminosity_distance = cosmo.luminosity_distance( redshift) * 3.0857e24 # Unit: cm # mini_tot unit: cm^2 * sec * solar mass / erg mass_local_env = mcor_tot * 4 * np.pi * ( luminosity_distance**2) / solar_luminosity # Unit: Solar Mass processed_mass_local_env = mini_tot * 4 * np.pi * ( luminosity_distance**2) / solar_luminosity # Unit: Solar Mass present_masses.append(mass_local_env.value) processed_masses.append(processed_mass_local_env.value) # Define arrays for mass percentages and ages lines = lines[table_start_value:table_start_value + n_base_value + 1] data = ascii.read(lines) t = Table(data) t.keep_columns(['col3', 'col5']) percent_mass_t = (np.array(t['col3'])) ages = np.array(t['col5']) # Remove repeated ages so there are only 23 sorted_ages = [] for age in ages: if age not in sorted_ages: sorted_ages.append(age) sorted_ages.sort(reverse=True) sorted_ages = np.array(sorted_ages) # Define array for sorted mass percentages (total percent per age) sorted_percent_mass_t = [] for a in sorted_ages: sorted_percent_mass_t.append( np.sum(percent_mass_t[np.where(ages == a)])) sorted_percent_mass_t = np.array(sorted_percent_mass_t) # Calculate the age bin widths age_widths = [] for i, age in enumerate(sorted_ages): if i == 0 and (i + 1) <= 22: age_widths.append(((age + sorted_ages[i + 1]) / 2)) if (i >= 1) and ((i + 1) <= 22): age_widths.append( abs(((age + sorted_ages[i + 1]) / 2) - (((sorted_ages[i - 1] + age) / 2)))) age_widths = np.array(age_widths) age_widths = np.append(age_widths, 2080000) ## Calculate the Star 
        # Formation Rate at all 23 ages
        SFR_t = (sorted_percent_mass_t / 100) * processed_mass_local_env / age_widths * 10000

        # Calculate the (non)normalized cumulative distribution of SFR
        cum_sum = np.cumsum(SFR_t)
        norm_cum_sum = cum_sum / np.sum(SFR_t).value

        # Split based on SN Type
        if (type == 'II') or (type == 'IIP') or (type == 'IIL'):
            #type_II_values.append(norm_cum_sum)
            type_II_values.append(cum_sum)
        if (type == 'Ia') or (type == 'Ia-pec') or (type == 'Ia-91bg') or (
                type == 'Ia-91T') or (type == 'Ia-02cx') or (type == 'I'):
            #type_Ia_values.append(norm_cum_sum)
            type_Ia_values.append(cum_sum)
        if (type == 'Ibc') or (type == 'Ibc-pec') or (type == 'Ic-BL') or (
                type == 'IIb') or (type == 'Ib') or (type == 'Ic'):
            #type_Ibc_values.append(norm_cum_sum)
            type_Ibc_values.append(cum_sum)
        if type == 'Ib':
            #type_Ib_values.append(norm_cum_sum)
            type_Ib_values.append(cum_sum)
        if type == 'Ic':
            #type_Ic_values.append(norm_cum_sum)
            type_Ic_values.append(cum_sum)
        if type == 'IIn':
            #type_IIn_values.append(norm_cum_sum)
            type_IIn_values.append(cum_sum)
        if type == 'IIb':
            #type_IIb_values.append(norm_cum_sum)
            type_IIb_values.append(cum_sum)

    return type_II_values, type_IIn_values, type_Ic_values, type_IIb_values, \
        type_Ia_values, type_Ibc_values, type_Ib_values, sorted_ages
def load_model(objname, field, agelims=[], **extras): # REDSHIFT # open file, load data photname, zname, filtername, filts = get_names(field) with open(photname, 'r') as f: hdr = f.readline().split() dtype = np.dtype([(hdr[1], 'S20')] + [(n, np.float) for n in hdr[2:]]) dat = np.loadtxt(photname, comments='#', delimiter=' ', dtype=dtype) with open(zname, 'r') as fz: hdr_z = fz.readline().split() dtype_z = np.dtype([(hdr_z[1], 'S20')] + [(n, np.float) for n in hdr_z[2:]]) zout = np.loadtxt(zname, comments='#', delimiter=' ', dtype=dtype_z) idx = dat[ 'id'] == objname # creates array of True/False: True when dat[id] = objname zred = zout['z_spec'][idx][0] # use z_spec if zred == -99: # if z_spec doesn't exist zred = zout['z_peak'][idx][0] # use z_phot print(zred, 'zred') # CALCULATE AGE OF THE UNIVERSE (TUNIV) AT REDSHIFT ZRED tuniv = WMAP9.age(zred).value print(tuniv, 'tuniv') n = [p['name'] for p in model_params] # model_params[n.index('tage')]['prior_args']['maxi'] = tuniv # NONPARAMETRIC SFH # NEW agelims = [ 0.0, 8.0, 8.7, 9.0, (9.0 + (np.log10(tuniv * 1e9) - 9.0) / 3), (9.0 + 2 * (np.log10(tuniv * 1e9) - 9.0) / 3), np.log10(tuniv * 1e9) ] ncomp = len(agelims) - 1 agebins = np.array([agelims[:-1], agelims[1:] ]) # why agelims[1:] instead of agelims[0:]? # INSERT REDSHIFT INTO MODEL PARAMETER DICTIONARY zind = n.index('zred') model_params[zind]['init'] = zred # SET UP AGEBINS model_params[n.index('agebins')]['N'] = ncomp model_params[n.index('agebins')]['init'] = agebins.T # FRACTIONAL MASS INITIALIZATION # NEW # N-1 bins, last is set by x = 1 - np.sum(sfr_fraction) model_params[n.index('sfr_fraction')]['N'] = ncomp - 1 model_params[n.index('sfr_fraction')]['prior_args'] = { 'maxi': np.full(ncomp - 1, 1.0), 'mini': np.full(ncomp - 1, 0.0), # NOTE: ncomp instead of ncomp-1 makes the prior take into # account the implicit Nth variable too } model_params[n.index('sfr_fraction')]['init'] = np.zeros(ncomp - 1) + 1. / ncomp model_params[n.index('sfr_fraction')]['init_disp'] = 0.02 # CREATE MODEL model = BurstyModel(model_params) return model
def PS(self, z, region_size=1., radius_in=0., radius_out=1.): kpcp = cosmo.kpc_proper_per_arcmin(z).value Mpcpix = 1000. / kpcp / self.data.pixsize # 1 Mpc in pixel regsizepix = region_size * Mpcpix ###################### # Set the scales ###################### minscale = 2 # minimum scale of 2 pixels maxscale = regsizepix / 2. scale = np.logspace(np.log10(minscale), np.log10(maxscale), 10) # 10 scale logarithmically spaced sckpc = scale * self.data.pixsize * kpcp kr = 1. / np.sqrt(2. * np.pi**2) * np.divide( 1., scale) # Eq. A5 of Arevalo et al. 2012 ###################### # Define the region where the power spectrum will be extracted ###################### fmask = fits.open('mask.fits') mask = fmask[0].data data_size = mask.shape fmask.close() y, x = np.indices(data_size) rads = np.hypot(y - data_size[0] / 2., x - data_size[1] / 2.) region = np.where( np.logical_and( np.logical_and(rads > radius_in * Mpcpix, rads <= radius_out * Mpcpix), mask > 0.0)) ###################### # Extract the PS from the various images ###################### nsc = len(scale) ps, psnoise, amp, eamp = np.empty(nsc), np.empty(nsc), np.empty( nsc), np.empty(nsc) vals = [] nreg = 20 # Number of subregions for bootstrap calculation for i in range(nsc): # Read images fco = fits.open('conv_scale_%d_kpc.fits' % (int(np.round(sckpc[i])))) convimg = fco[0].data.astype(float) fco.close() fmod = fits.open('conv_model_%d_kpc.fits' % (int(np.round(sckpc[i])))) convmod = fmod[0].data.astype(float) fmod.close() print('Computing the power at scale', sckpc[i], 'kpc') ps[i], psnoise[i], vps = calc_ps(region, convimg, convmod, kr[i], nreg) vals.append(vps) # Bootstrap the data and compute covariance matrix print('Computing the covariance matrix...') nboot = int(1e4) # number of bootstrap resamplings cov = do_bootstrap(vals, nboot) # compute eigenvalues of covariance matrix to verify that the matrix is positive definite la, v = np.linalg.eig(cov) print('Eigenvalues: ', la) eps = np.empty(nsc) for i in range(nsc): eps[i] = np.sqrt(cov[i, i]) amp = np.sqrt(np.abs(ps) * 2. * np.pi * kr**2 / cf) eamp = 1. / 2. * np.power(np.abs(ps) * 2. * np.pi * kr**2 / cf, -0.5) * 2. * np.pi * kr**2 / cf * eps self.kpix = kr self.k = 1. / np.sqrt(2. * np.pi**2) * np.divide(1., sckpc) self.ps = ps self.eps = eps self.psnoise = psnoise self.amp = amp self.eamp = eamp self.cov = cov
def MexicanHat(self, modimg_file, z, region_size=1., factshift=1.5): # Function to compute Mexican Hat convolution # Inputs: # z: redshift # region_size: size of the region of interest in Mpc # factshift: size of the border around the region imgo = self.data.img expo = self.data.exposure bkg = self.data.bkg pixsize = self.data.pixsize # Read model image fmod = fits.open(modimg_file) modimg = fmod[0].data.astype(float) # Define the mask nonz = np.where(expo > 0.0) masko = np.copy(expo) masko[nonz] = 1.0 imgt = np.copy(imgo) noexp = np.where(expo == 0.0) imgt[noexp] = 0.0 # Set the region of interest x_c = self.profile.cra # Center coordinates y_c = self.profile.cdec kpcp = cosmo.kpc_proper_per_arcmin(z).value Mpcpix = 1000. / kpcp / pixsize # 1 Mpc in pixel regsizepix = region_size * Mpcpix self.regsize = regsizepix minx = int(np.round(x_c - factshift * regsizepix)) maxx = int(np.round(x_c + factshift * regsizepix + 1)) miny = int(np.round(y_c - factshift * regsizepix)) maxy = int(np.round(y_c + factshift * regsizepix + 1)) if minx < 0: minx = 0 if miny < 0: miny = 0 if maxx > self.data.axes[1]: maxx = self.data.axes[1] if maxy > self.data.axes[0]: maxy = self.data.axes[0] img = np.nan_to_num( np.divide(imgt[miny:maxy, minx:maxx], modimg[miny:maxy, minx:maxx])) mask = masko[miny:maxy, minx:maxx] self.size = img.shape self.mask = mask fmod[0].data = mask fmod.writeto('mask.fits', overwrite=True) # Simulate perfect model with Poisson noise randmod = np.random.poisson(modimg[miny:maxy, minx:maxx]) simmod = np.nan_to_num(np.divide(randmod, modimg[miny:maxy, minx:maxx])) # Set the scales minscale = 2 # minimum scale of 2 pixels maxscale = regsizepix / 2. # at least 4 resolution elements on a side scale = np.logspace(np.log10(minscale), np.log10(maxscale), 10) # 10 scale logarithmically spaced sckpc = scale * pixsize * kpcp # Convolve images for i in range(len(scale)): sc = scale[i] print('Convolving with scale', sc) convimg, convmod = calc_mexicanhat(sc, img, mask, simmod) # Save image fmod[0].data = convimg fmod.writeto('conv_scale_%d_kpc.fits' % (int(np.round(sckpc[i]))), overwrite=True) fmod[0].data = convmod fmod.writeto('conv_model_%d_kpc.fits' % (int(np.round(sckpc[i]))), overwrite=True) fmod.close()
# coding: utf-8
# %load survey_tess/python/RaDec2cartesian.py
# %load python/RaDec2cartesian.py
import numpy as np
from astropy.cosmology import WMAP9 as cosmo

dir1 = '/home/jarmijo/CHUVIS/'
gal_pos = np.loadtxt(dir1 + 'subsample.dat')
edges = np.loadtxt(dir1 + 'edges_z0.1.dat')
cap = np.loadtxt(dir1 + 'cap_z0.1.dat')
# cap2 = np.loadtxt(dir1+'mock_cap_b.txt')
# c = 299792.458; H0 = 68
dc_z = cosmo.comoving_distance(gal_pos[:, 2]).to_value()
ra = gal_pos[:, 0]
dec = gal_pos[:, 1]
# the same can be done with healpy routines
############# galaxies ############
xp = dc_z * np.cos(ra) * np.sin(dec)
yp = dc_z * np.sin(ra) * np.sin(dec)
zp = dc_z * np.cos(dec)
########### edges (and holes) ############
xe = cosmo.comoving_distance(edges[:, 2]).to_value() * np.cos(edges[:, 0]) * np.sin(edges[:, 1])
ye = cosmo.comoving_distance(edges[:, 2]).to_value() * np.sin(edges[:, 0]) * np.sin(edges[:, 1])
ze = cosmo.comoving_distance(edges[:, 2]).to_value() * np.cos(edges[:, 1])
########### cap ################
xc1 = cosmo.comoving_distance(cap[:, 2]).to_value() * np.cos(cap[:, 0]) * np.sin(cap[:, 1])
yc1 = cosmo.comoving_distance(cap[:, 2]).to_value() * np.sin(cap[:, 0]) * np.sin(cap[:, 1])
zc1 = cosmo.comoving_distance(cap[:, 2]).to_value() * np.cos(cap[:, 1])
#
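# If the catalogue angles are RA/Dec in degrees, astropy's SkyCoord performs the
# same spherical-to-Cartesian conversion while handling the degree/radian and
# latitude/colatitude bookkeeping; a hedged sketch of that alternative (the raw
# cos/sin lines above implicitly assume radians and a polar angle).
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.cosmology import WMAP9 as cosmo

ra_deg = np.array([10.0, 150.0])      # illustrative values
dec_deg = np.array([-1.0, 2.0])
z = np.array([0.05, 0.09])
coords = SkyCoord(ra=ra_deg * u.deg, dec=dec_deg * u.deg,
                  distance=cosmo.comoving_distance(z))
x, y, z_cart = coords.cartesian.x, coords.cartesian.y, coords.cartesian.z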
sizepix[m][w][0] = 0 else: modeltype[m] = 'sersic' sizepix[m][w][0] = np.float(content[48][4:8]) #color vs size for sersic fits size_color = np.zeros(12) size_four_one = np.zeros(12) size_eight_one = np.zeros(12) size_four_two = np.zeros(12) size_eight_two = np.zeros(12) kpcrad = np.zeros([2, 12, 2]) for m in range(0, len(model)): for w in range(0, 12): arcsecperkpc = cosmo.arcsec_per_kpc_proper(redshifts[w]) for i in range(0, 2): kpcrad[m][w][i] = (0.025 * sizepix[m][w][i]) / arcsecperkpc.value for w in range(0, 12): size_four_one[w] = kpcrad[0][w][0] size_four_two[w] = kpcrad[1][w][0] size_color[w] = mags[1][w][0] - mags[1][w][1] size_eight_one[w] = kpcrad[0][w][1] size_eight_two[w] = kpcrad[1][w][1] x3 = minmax([size_four_one, size_eight_one]) x3 = minmax([size_four_two, size_eight_two]) y3 = minmax([size_color]) #if modeltype[0] == 'sersic' or modeltype[1] == 'sersic': # name_cs = 'color_v_size_'+one+'_'+two+'.pdf'
if 'RA' in prihdr.comments['CD1_1']: raDegColPix = prihdr['CD1_1'] raDegRowPix = prihdr['CD1_2'] decDegColPix = prihdr['CD2_1'] decDegRowPix = prihdr['CD2_2'] else: decDegColPix = prihdr['CD1_1'] decDegRowPix = prihdr['CD1_2'] raDegColPix = prihdr['CD2_1'] raDegRowPix = prihdr['CD2_2'] tot_ra = raDegRowPix * 42 tot_dec = decDegColPix * 42 scale = cosmo.kpc_proper_per_arcmin(0.37) * 60. x_kpc = scale * tot_ra y_kpc = scale * tot_dec print(lowest_redshift) refdirs = os.listdir( '/data/marvels/billzhu/Reference PSF Subtract/0.37 - 0.55/') # For each quasar, both absorption and reference, calculate the distance, kpc across the image, and scale for f in refdirs: index = int(f.split('_')[0]) if index == lindex: continue
import numpy as np
import matplotlib.pyplot as plt
from astropy.cosmology import WMAP9 as cosmo
import astropy.units as u
import astropy.constants as const

c = const.c.cgs  # speed of light in cgs
H0 = cosmo.H(0).cgs  # Hubble constant in cgs
Sigma_T = 6.65e-25 * u.cm**2  # Thomson cross section of the electron [cm^2]
O_m = 0.308  # energy density parameter for matter
O_lambda = 0.692  # energy density parameter for lambda


def tau_e(z):
    """
    Returns the optical depth for the ionized intergalactic medium
    as a function of redshift z.

    Parameters:
    -----------
    z: redshift at which the optical depth is to be calculated
    """
    _z = np.linspace(0, z, 100)  # redshift array defining the integration limits
    numerator = Sigma_T * 1.9e-7 * u.cm**-3 * (1 + _z)**2
    denominator = H0 * np.sqrt(O_m * (1 + _z)**3 + O_lambda)
    integrand = c * numerator / denominator
    res = np.trapz(integrand, _z)
    return res
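# Hedged usage sketch: evaluate the optical depth on a grid of assumed
# reionization redshifts; tau_e returns a dimensionless Quantity built from the
# cgs constants defined above.
import numpy as np

for z_reion in np.linspace(6.0, 10.0, 5):
    print(z_reion, tau_e(z_reion))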
# stats total_mass_sat_rand = np.mean( mass_sat_rand) # mean value of total mass in an aperture total_mass_sat_sf_rand = np.mean(mass_sat_sf_rand) return mass_neighbors_rand, counts_gals_rand / float( num_p), total_mass_sat_rand, total_mass_sat_sf_rand # ################### MAIN FUNCTION ################### # total_mass_sat_log = open( 'total_mass_sat', 'w') # record total mass of satellites for redshift evolution for z in np.arange(3, 20, 1) / 10.: print('=============' + str(z) + '================') dis = WMAP9.angular_diameter_distance( z).value # angular diameter distance at redshift z dis_l = WMAP9.comoving_distance( z - 0.1).value # comoving distance at redshift z-0.1 dis_h = WMAP9.comoving_distance( z + 0.1).value # comoving distance at redshift z+0.1 total_v = 4. / 3 * np.pi * (dis_h**3 - dis_l**3) # Mpc^3 survey_v = total_v * (4 / 41253.05) # Mpc^3 density = 0.00003 # desired constant (cumulative) volume number density (Mpc^-3) num = int(density * survey_v) # slice catalog in redshift bin cat_massive_z_slice = cat_massive_gal[ abs(cat_massive_gal['ZPHOT'] - z) < 0.1] # massive galaxies in this z slice cat_massive_z_slice.sort('MASS_BEST') cat_massive_z_slice.reverse()
fit, ax = plt.subplots() hdulist = fits.open('2175SUBComb22-g.fit') scidata = hdulist[0].data.astype(float) mean, median, stddev = sigma_clipped_stats(scidata, sigma=3.0, iters=5) print(median) #scidata -= median spline = interpolate.interp2d(np.arange(len(scidata)), np.arange(len(scidata)), scidata) scidata = spline(np.arange(0, len(scidata), 0.1), np.arange(0, len(scidata), 0.1)) #scidata *= 1.15 SBarray = [] outter = [] scale = cosmo.kpc_proper_per_arcmin( 1.44) * u.arcmin / u.kiloparsec * 0.396 / 60 print(scale) #print(cosmo.kpc_proper_per_arcmin(1.03)) #print(cosmo.kpc_proper_per_arcmin(5)) #print("%s" % ('%.4E' % Decimal(photoncount(scidata, 250, 200)))) #print(-2.5 * math.log10(photoncount(scidata, 250, 200)) + 2.5 * math.log10(scale**2)) #print(-2.5 * math.log10(2/(10**8 * 2000)) + 2.5 * np.log10(scale**2)) #print(scale) for j in range(1, 12): #print(5 * j / scale) f = photoncount(scidata, 60 * (25 / 3)**((1.0 / 8) * j) / scale, 60 * (25 / 3)**((1.0 / 8) * (j - 1)) / scale)
def correct_for_distance(self, flux, fluxerr):
    dlmu = cosmo.distmod(self.redshift).value
    flux, fluxerr = helpers.calc_luminosity(flux, fluxerr, dlmu)
    return flux, fluxerr
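# helpers.calc_luminosity is not shown in this snippet; a hedged, hypothetical
# stand-in for the usual distance-modulus rescaling it presumably performs
# (not the project's actual helper):
from astropy.cosmology import WMAP9 as cosmo

def calc_luminosity_sketch(flux, fluxerr, dlmu):
    """Rescale flux (and its error) by 10**(0.4 * mu) to a common distance scale."""
    factor = 10 ** (0.4 * dlmu)
    return flux * factor, fluxerr * factor

dlmu = cosmo.distmod(0.3).value       # illustrative redshift
lum, lumerr = calc_luminosity_sketch(1.2e-6, 3.0e-8, dlmu)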
# Planck13.comoving_distance(ar_z)) # # plt.plot(ar_z, Planck13.comoving_distance(ar_z) / Planck13.comoving_distance(ar_z)) # plt.plot(ar_z, Planck13.comoving_distance(ar_z + ar_delta_z_planck13 - ar_delta_z_wmap7) / # Planck13.comoving_distance(ar_z)) # plt.show() # print(scipy.misc.derivative(func=Planck13.comoving_distance, x0=2, dx=0.1)) # ar_dcmv_dz_planck13 = np.array([scipy.misc.derivative( # func=lambda x: Planck13.comoving_distance(x).value, x0=z, dx=0.01) for z in ar_z]) # ar_dcmv_dz_wmap7 = np.array([scipy.misc.derivative( # func=lambda x: WMAP7.comoving_distance(x).value, x0=z, dx=0.01) for z in ar_z]) # plt.plot(ar_z, -(ar_dcmv_dz_planck13 - ar_dcmv_dz_wmap7) * ar_delta_z_planck13) # plt.show() del scipy.misc ar_base_cmvd_planck13 = Planck13.comoving_distance(ar_z) ar_true_planck13_cmvd = Planck13.comoving_distance(ar_z + ar_delta_z_planck13) ar_base_cmvd_wmap5 = WMAP5.comoving_distance(ar_z) ar_wmap5_apparent_cmvd = WMAP5.comoving_distance(ar_z + ar_delta_z_planck13) ar_base_cmvd_wmap7 = WMAP7.comoving_distance(ar_z) ar_wmap7_apparent_cmvd = WMAP7.comoving_distance(ar_z + ar_delta_z_planck13) ar_base_cmvd_wmap9 = WMAP9.comoving_distance(ar_z) ar_wmap9_apparent_cmvd = WMAP9.comoving_distance(ar_z + ar_delta_z_planck13) plt.plot(ar_z, ar_true_planck13_cmvd - ar_base_cmvd_planck13) plt.plot(ar_z, ar_wmap5_apparent_cmvd - ar_base_cmvd_wmap5) plt.plot(ar_z, ar_wmap7_apparent_cmvd - ar_base_cmvd_wmap7) plt.plot(ar_z, ar_wmap9_apparent_cmvd - ar_base_cmvd_wmap9) # plt.plot(ar_z, ar_wmap7_apparent_cmvd - ar_true_planck13_cmvd) plt.show()
# Plot the CMD as a function of GZ1 morphology for the RGZ 75% counterparts data = ascii.read( '/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis/rgz_wise_75_sdss_magerr.csv' ) fname_spec = 'rgz_75_sdss_mags_spec.vot' fname_nospec = 'rgz_75_sdss_mags_nospec.vot' zoospec = votable_read('%s/%s' % (rgzdir, fname_spec)) zoonospec = votable_read('%s/%s' % (rgzdir, fname_nospec)) na10 = na10_read() gr_na10 = na10['g-r'] distmod_na10 = WMAP9.distmod(na10['z']) Mr_na10 = na10['r'] - distmod_na10.value Mr_na10 = na10['M_g'] - gr_na10 # Generate contours of the NA10 sample magbins = np.linspace(-24, -20, 20) colorbins = np.linspace(0.3, 1.0, 20) hc, xc, yc = np.histogram2d(Mr_na10, gr_na10, bins=(magbins, colorbins)) levels = np.linspace(0, 200, 10) # Make 2x2 figure fig, axarr = plt.subplots(2, 2, figsize=(10, 10)) from scipy.ndimage.filters import gaussian_filter
# Remove NANs, INFs, z <= 0 and r <= 0 print "Original data shape: ", hdu_in.data.shape data_aux = np.asarray(hdu_in.data.tolist()) rm_crit = np.isnan (data_aux).any(axis=1) | np.isinf (data_aux).any(axis=1) | (hdu_in.data.field(field_z) <= 0) | (hdu_in.data.field(field_r) <= 0) del data_aux hdu_in.data = hdu_in.data[~rm_crit] #tbdata = tbdata[~np.isnan(np.asarray(tbdata.tolist())).any(axis=1)] tbdata = hdu_in.data print "Removed NANs and INFs: ", tbdata.shape r = tbdata.field(field_r) m = tbdata.field(field_m) z = tbdata.field(field_z) psf = tbdata.field(field_psf) d = cosmo.angular_diameter_distance(z).to(ap.units.kpc) # distance in kpc scale = d*ap.units.arcsec.to(ap.units.radian) # scale [kpc/arcsec] R = r * scale M = m - 5*np.log10(4.28E8*z) r_psf = 2.*np.sqrt(2*np.log(2)) * r/psf print hdu_in.data.field(field_z).shape cols = [pf.Column (name = field_r, array = r, format = "D"), pf.Column (name = field_m, array = m, format = "D"), pf.Column (name = field_z, array = z, format = "D"), pf.Column (name = field_psf, array = psf, format = "D"), pf.Column (name = "absPetroMag_r", array = M, format = "D"), pf.Column (name = "petroRad_r_psf", array = r_psf, format = "D"), pf.Column (name = "petroRad_r_kpc", array = R, format = "D")] #t = hdu_in.columns + pf.ColDefs(cols)
def get_spectrum(self, outwave=None, filters=None, peraa=False, **params): """Get a spectrum and SED for the given params. :param outwave: Desired *vacuum* wavelengths. Defaults to the values in sps.ssp.wavelength. :param peraa: (default: False) If `True`, return the spectrum in erg/s/cm^2/AA instead of AB maggies. :returns spec: Observed frame spectrum in AB maggies, unless `peraa=True` in which case the units are erg/s/cm^2/AA. :returns phot: Observed frame photometry in AB maggies. :returns mass_frac: The ratio of the surviving stellar mass to the total mass formed. """ # Spectrum in Lsun/Hz per solar mass formed, restframe wave, spectrum, mfrac = self.get_galaxy_spectrum(**params) # Redshifting + Wavelength solution # We do it ourselves. a = 1 + self.params.get('zred', 0) af = a b = 0.0 if 'wavecal_coeffs' in self.params: x = wave - wave.min() x = 2.0 * (x / x.max()) - 1.0 c = np.insert(self.params['wavecal_coeffs'], 0, 0) # assume coeeficients give shifts in km/s b = chebval(x, c) / (lightspeed * 1e-13) wa, sa = wave * (a + b), spectrum * af # Observed Frame if outwave is None: outwave = wa # Observed frame photometry, as absolute maggies if filters is not None: mags = getSED(wa, lightspeed / wa**2 * sa * to_cgs, filters) phot = np.atleast_1d(10**(-0.4 * mags)) else: phot = 0.0 # Spectral smoothing. do_smooth = (('sigma_smooth' in self.params) and ('sigma_smooth' in self.reserved_params)) if do_smooth: # We do it ourselves. smspec = self.smoothspec(wa, sa, self.params['sigma_smooth'], outwave=outwave, **self.params) elif outwave is not wa: # Just interpolate smspec = np.interp(outwave, wa, sa, left=0, right=0) else: # no interpolation necessary smspec = sa # Distance dimming and unit conversion zred = self.params.get('zred', 0.0) if (zred == 0) or ('lumdist' in self.params): # Use 10pc for the luminosity distance (or a number # provided in the dist key in units of Mpc) dfactor = (self.params.get('lumdist', 1e-5) * 1e5)**2 else: lumdist = cosmo.luminosity_distance(zred).value dfactor = (lumdist * 1e5)**2 if peraa: print "computing spectrum in erg/s/cm^2/AA" # spectrum will be in erg/s/cm^2/AA smspec *= to_cgs / dfactor * lightspeed / outwave**2 else: print "computing spectrum in maggies" # Spectrum will be in maggies smspec *= to_cgs / dfactor / 1e3 / (3631 * jansky_mks) # Convert from absolute maggies to apparent maggies phot /= dfactor # Mass normalization mass = np.sum(self.params.get('mass', 1.0)) if np.all(self.params.get('mass_units', 'mstar') == 'mstar'): # Convert from current stellar mass to mass formed mass /= mfrac return smspec * mass, phot * mass, mfrac
def processSelection (hdu, i_ini, i_end, field_r, field_m, field_z, field_psf, p_class1, biases, out_dir, class_fields = False, no_zeros = False): out_fn = out_dir + "sim_bias_" + str(i_ini) + "_" + str(i_end) + ".fits" if os.path.exists (out_fn): print out_fn, "exists" return print "selecting data" tbdata = hdu.data[i_ini:i_end] print "selected" data_aux = tbdata.tolist() print "tbdata.tolist() done" data_aux = np.asarray(tbdata.tolist()) print "creating rm_crit" print np.isnan (data_aux).shape rm_crit = np.isnan (data_aux).any(axis=1) | np.isinf (data_aux).any(axis=1) | (tbdata.field(field_z) <= 0) | (tbdata.field(field_r) <= 0) print "is_nan = ", np.isnan (data_aux).any(axis=1).sum() print "is_inf = ", np.isinf (data_aux).any(axis=1).sum() print field_z, " <= 0 = ", (tbdata.field(field_z) <= 0).sum() print field_r, " <= 0 = ", (tbdata.field(field_r) <= 0).sum(), tbdata.field(field_r).max() print "rm_crit.sum = ", rm_crit.sum() del data_aux print tbdata.shape tbdata = tbdata[~rm_crit] print tbdata.shape if np.isscalar (class_fields): y = (np.random.random(tbdata.shape[0]) < p_class1) else: y = createLabels (tbdata, class_fields, np.zeros (len (class_fields)) + p_class1) data_aux = tbdata.tolist() data_aux = np.asarray(tbdata.tolist()) if no_zeros: print "NULL = ", pd.isnull(data_aux).any(axis = 1).sum() #crit_zeros = (y != 0) & ~np.isnan(data_aux).any(axis = 1) crit_zeros = (y != 0) & ~pd.isnull(data_aux).any(axis = 1) y = y - 1 else: #crit_zeros = ~np.isnan(data_aux).any(axis = 1) crit_zeros = ~pd.isnull(data_aux).any(axis = 1) del data_aux y = y[crit_zeros] tbdata = tbdata[crit_zeros] r = tbdata.field(field_r) m = tbdata.field(field_m) # Uniform distributions to test # r = np.random.random (tbdata.shape[0]) # m = np.random.random (tbdata.shape[0])*30 z = tbdata.field(field_z) psf = tbdata.field(field_psf) d = cosmo.angular_diameter_distance(z).to(ap.units.kpc) # distance in kpc scale = d*ap.units.arcsec.to(ap.units.radian) # scale [kpc/arcsec] R = r * scale M = m - 5*np.log10(4.28E8*z) r_psf = 2.*np.sqrt(2*np.log(2)) * r/psf cols = [pf.Column (name = field_r, array = r, format = "D"), pf.Column (name = field_m, array = m, format = "D"), pf.Column (name = field_z, array = z, format = "D"), pf.Column (name = field_psf, array = psf, format = "D"), pf.Column (name = "absPetroMag_r", array = M, format = "D"), pf.Column (name = "petroRad_r_psf", array = r_psf, format = "D"), pf.Column (name = "petroRad_r_kpc", array = R, format = "D"), pf.Column (name = "class", array = y, format = "B")] # Create biased labels r_m = np.median(r_psf) m_m = np.median(m)-17.8 crit1 = (y == 1) r1 = r_psf[crit1] m1 = m[crit1]-17.8 factor = np.exp(-(r1*r1/(2*r_m*r_m) + m1*m1/(2*m_m*m_m))) factor [m1 >= 0] = np.exp(-(r1[m1 >= 0]**2/(2*r_m*r_m))) uniform = np.random.random(crit1.sum()) for bias in biases: print "bias = ", bias p_b = factor**(1./bias**2) y_b = y.copy() y_b[crit1] = y_b[crit1] * (uniform >= p_b) cols = cols + [pf.Column (name = "class_bias" + str(bias), array = y_b, format = "B")] hdu = pf.BinTableHDU.from_columns(cols) hdu.writeto (out_fn, clobber = True)
def color_mag_ratio(mgs, s82, decals, savefig=False):
    # Plot the spiral to elliptical ratio as a function of optical color.
    redshifts = (0.12, 0.08, 0.05)
    linestyles = ('solid', 'dashed', 'dashdot')
    datasets = ({'data': mgs, 'title': 'MGS', 'appmag': 17.0,
                 'sp': 't01_smooth_or_features_a02_features_or_disk_weighted_fraction',
                 'el': 't01_smooth_or_features_a01_smooth_weighted_fraction',
                 'umag': 'PETROMAG_U', 'rmag': 'PETROMAG_R',
                 'absr': 'PETROMAG_MR', 'redshift': 'REDSHIFT'},
                {'data': s82, 'title': 'Stripe 82', 'appmag': 17.77,
                 'sp': 't01_smooth_or_features_a02_features_or_disk_weighted_fraction',
                 'el': 't01_smooth_or_features_a01_smooth_weighted_fraction',
                 'umag': 'PETROMAG_U', 'rmag': 'PETROMAG_R',
                 'absr': 'PETROMAG_MR', 'redshift': 'REDSHIFT'},
                {'data': decals, 'title': 'DECaLS', 'appmag': 17.77,
                 'sp': 't00_smooth_or_features_a1_features_frac',
                 'el': 't00_smooth_or_features_a0_smooth_frac',
                 'umag': 'metadata.mag.u', 'rmag': 'metadata.mag.r',
                 'absr': 'metadata.mag.abs_r', 'redshift': 'metadata.redshift'})

    # Work out the magnitude limit from cosmology
    fig, axarr = plt.subplots(num=2, nrows=1, ncols=3, figsize=(12, 5))
    for ax, d in zip(axarr.ravel(), datasets):
        for z, ls in zip(redshifts, linestyles):
            absmag_lim = d['appmag'] - WMAP9.distmod(z).value
            maglim = (d['data'][d['absr']] < absmag_lim) & (d['data'][d['redshift']] <= z)
            spiral = d['data'][d['sp']] >= 0.8
            elliptical = d['data'][d['el']] >= 0.8
            n_sp, bins_sp = np.histogram(d['data'][maglim & spiral][d['umag']] - d['data'][maglim & spiral][d['rmag']], range=(0, 4), bins=25)
            n_el, bins_el = np.histogram(d['data'][maglim & elliptical][d['umag']] - d['data'][maglim & elliptical][d['rmag']], range=(0, 4), bins=25)
            plotval = np.log10(n_sp * 1. / n_el)
            ax.plot(bins_sp[1:], plotval, linestyle=ls, label=r'$M_r<{0:.2f}, z<{1:.2f}$'.format(absmag_lim, z))
        ax.set_xlabel(r'$(u-r)$', fontsize=16)
        ax.set_ylabel(r'$\log(n_{sp}/n_{el})$', fontsize=16)
        ax.set_ylim(-1.5, 1.5)
        ax.set_title(d['title'], fontsize=16)
        if ax == axarr.ravel()[0]:
            ax.legend(loc='upper left', fontsize=8)
    fig.tight_layout()
    if savefig:
        plt.savefig('{0}/feature_ratio.pdf'.format(plot_path))
    else:
        plt.show()

    return None
def make_phys_gif_plots():
    natural_base = '_nref11_RD0020_'
    refined_base = '_nref11n_nref10f_refine200kpc_z4to2_RD0020_'
    box_size = ds.arr(rb_width, 'code_length').in_units('kpc')
    box_size = np.ceil(box_size / 2.)
    res_list = [0.2, 0.5, 1, 5, 10]
    fontrc = {'fontname': 'Helvetica', 'fontsize': 20}
    mpl.rc('text', usetex=True)
    make_imshow = True
    properties = ['hden', 'temp']
    lims = {'hden': (-6, -1), 'temp': (4.5, 6.5)}
    for line in lines:
        field = 'Emission_' + line
        for index in 'xyz':
            for res in res_list:
                for prop in properties:
                    if res == res_list[0]:
                        fileinNAT = 'frbs/frb' + index + natural_base + prop + '_' + field + '_forcedres.cpkl'
                        fileinREF = 'frbs/frb' + index + refined_base + prop + '_' + field + '_forcedres.cpkl'
                        pixsize = round(cosmo.arcsec_per_kpc_proper(redshift).value * 0.182959, 2)
                    else:
                        fileinNAT = 'frbs/frb' + index + natural_base + prop + '_' + field + '_' + str(res) + 'kpc.cpkl'
                        fileinREF = 'frbs/frb' + index + refined_base + prop + '_' + field + '_' + str(res) + 'kpc.cpkl'
                        pixsize = round(cosmo.arcsec_per_kpc_proper(redshift).value * res, 2)

                    frbNAT = cPickle.load(open(fileinNAT, 'rb'))
                    frbREF = cPickle.load(open(fileinREF, 'rb'))
                    frbNAT = np.log10(frbNAT / (1. + redshift)**4)
                    frbREF = np.log10(frbREF / (1. + redshift)**4)

                    bsL, bsR = -1 * box_size.value, box_size.value
                    fig, ax = plt.subplots(1, 2)
                    fig.set_size_inches(10, 6)
                    # both panels share the same (vmin, vmax) color limits
                    im = ax[0].imshow(frbNAT, extent=(bsL, bsR, bsR, bsL),
                                      vmin=lims[prop][0], vmax=lims[prop][1],
                                      interpolation=None, cmap='viridis', origin='lower')
                    im1 = ax[1].imshow(frbREF, extent=(bsL, bsR, bsR, bsL),
                                       vmin=lims[prop][0], vmax=lims[prop][1],
                                       interpolation=None, cmap='viridis', origin='lower')
                    axins = inset_axes(ax[1],
                                       width="5%",     # width = 5% of parent_bbox width
                                       height="100%",  # full height of the right panel
                                       loc=3,
                                       bbox_to_anchor=(1.07, 0.0, 1, 1),
                                       bbox_transform=ax[1].transAxes,
                                       borderpad=0)
                    ax[0].set_title('Natural', **fontrc)
                    ax[1].set_title('Forced Refine', **fontrc)
                    cb = fig.colorbar(im1, cax=axins,
                                      label=r'log( photons s$^{-1}$ cm$^{-2}$ sr$^{-1}$)')
                    if line == 'CIII_977':
                        lineout = 'CIII 977'
                    else:
                        lineout = line
                    fig.suptitle('z=2, ' + lineout + ', ' + str(res) + 'kpc' + ', ' + str(pixsize) + '"', **fontrc)
                    plt.savefig('z2_' + index + '_' + prop + '_' + field + '_' + str(res) + 'kpc.pdf')
                    plt.close()
    return
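# Illustrative sketch (added; not from make_phys_gif_plots): the arcsec pixel
# size quoted in the panel titles comes from the proper angular scale at the
# snapshot redshift. The redshift and resolution below are assumed examples.
from astropy.cosmology import WMAP9 as cosmo

redshift_example, res_kpc = 2.0, 1.0
pixsize = round(cosmo.arcsec_per_kpc_proper(redshift_example).value * res_kpc, 2)
print(pixsize, "arcsec per pixel")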
def add_6df(simplifiedmastercat, sixdf, tol=1 * u.arcmin): """ Adds entries in the catalog for the 6dF survey, or updates v when missing """ from astropy import table from astropy.coordinates import SkyCoord from astropy.constants import c ckps = c.to(u.km / u.s).value catcoo = SkyCoord(simplifiedmastercat['RA'].view(np.ndarray) * u.deg, simplifiedmastercat['Dec'].view(np.ndarray) * u.deg) sixdfcoo = SkyCoord(sixdf['obsra'].view(np.ndarray) * u.deg, sixdf['obsdec'].view(np.ndarray) * u.deg) idx, dd, d3d = sixdfcoo.match_to_catalog_sky(catcoo) msk = dd < tol sixdfnomatch = sixdf[~msk] t = table.Table() t.add_column(table.MaskedColumn(name='RA', data=sixdfnomatch['obsra'])) t.add_column(table.MaskedColumn(name='Dec', data=sixdfnomatch['obsdec'])) t.add_column( table.MaskedColumn(name='PGC#', data=-np.ones(len(sixdfnomatch), dtype=int), mask=np.ones(len(sixdfnomatch), dtype=bool))) t.add_column( table.MaskedColumn(name='NSAID', data=-np.ones(len(sixdfnomatch), dtype=int), mask=np.ones(len(sixdfnomatch), dtype=bool))) t.add_column( table.MaskedColumn(name='othername', data=sixdfnomatch['targetname'])) t.add_column( table.MaskedColumn(name='vhelio', data=sixdfnomatch['z_helio'] * ckps)) #t.add_column(table.MaskedColumn(name='vhelio_err', data=sixdfnomatch['zfinalerr']*ckps)) t.add_column( table.MaskedColumn(name='distance', data=WMAP9.luminosity_distance( sixdfnomatch['z_helio']).value)) #fill in anything else needed with -999 and masked for nm in simplifiedmastercat.colnames: if nm not in t.colnames: t.add_column( table.MaskedColumn( name=nm, data=-999 * np.ones(len(sixdfnomatch), dtype=int), mask=np.ones(len(sixdfnomatch), dtype=bool))) t = table.vstack([simplifiedmastercat, t], join_type='exact') #now update anything that *did* match but doesn't have another velocity tcoo = SkyCoord(t['RA'].view(np.ndarray) * u.deg, t['Dec'].view(np.ndarray) * u.deg) idx, dd, d3d = sixdfcoo.match_to_catalog_sky(tcoo) msk = dd < tol catmatch = t[idx[msk]] sixdfmatch = sixdf[msk] msk2 = t['vhelio'][idx[msk]].mask t['vhelio'][idx[msk & msk2]] = sixdf['z_helio'][msk2] * ckps return t
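# Illustrative sketch (added; not part of add_6df): the cross-match-with-
# tolerance pattern used above, plus the redshift -> luminosity-distance
# conversion applied to unmatched 6dF entries. Coordinates and redshifts
# below are made-up example values.
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.cosmology import WMAP9

catcoo = SkyCoord([10.0, 11.0] * u.deg, [0.0, 1.0] * u.deg)      # existing catalog
newcoo = SkyCoord([10.0004, 50.0] * u.deg, [0.0, -5.0] * u.deg)  # new survey entries
idx, sep2d, _ = newcoo.match_to_catalog_sky(catcoo)
matched = sep2d < 1 * u.arcmin  # True where a counterpart already exists
z_helio = np.array([0.01, 0.02])
dist_mpc = WMAP9.luminosity_distance(z_helio).to(u.Mpc).value
print(matched, dist_mpc)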
def get_catalog(params, map_struct): if not os.path.isdir(params["catalogDir"]): os.makedirs(params["catalogDir"]) catalogFile = os.path.join(params["catalogDir"], "%s.hdf5" % params["galaxy_catalog"]) """AB Magnitude zero point.""" MAB0 = -2.5 * np.log10(3631.e-23) pc_cm = 3.08568025e18 const = 4. * np.pi * (10. * pc_cm)**2. if params["galaxy_catalog"] == "2MRS": if not os.path.isfile(catalogFile): import astropy.constants as c cat, = Vizier.get_catalogs('J/ApJS/199/26/table3') ra, dec = cat["RAJ2000"], cat["DEJ2000"] cz = cat["cz"] magk = cat["Ktmag"] z = (u.Quantity(cat['cz']) / c.c).to(u.dimensionless_unscaled) completeness = 0.5 alpha = -1.0 MK_star = -23.55 MK_max = MK_star + 2.5 * np.log10(gammaincinv(alpha + 2, completeness)) MK = magk - cosmo.distmod(z) idx = (z > 0) & (MK < MK_max) ra, dec = ra[idx], dec[idx] z = z[idx] magk = magk[idx] distmpc = cosmo.luminosity_distance(z).to('Mpc').value with h5py.File(catalogFile, 'w') as f: f.create_dataset('ra', data=ra) f.create_dataset('dec', data=dec) f.create_dataset('z', data=z) f.create_dataset('magk', data=magk) f.create_dataset('distmpc', data=distmpc) else: with h5py.File(catalogFile, 'r') as f: ra, dec = f['ra'][:], f['dec'][:] z = f['z'][:] magk = f['magk'][:] distmpc = f['distmpc'][:] r = distmpc * 1.0 mag = magk * 1.0 elif params["galaxy_catalog"] == "GLADE": if not os.path.isfile(catalogFile): cat, = Vizier.get_catalogs('VII/281/glade2') ra, dec = cat["RAJ2000"], cat["DEJ2000"] distmpc, z = cat["Dist"], cat["z"] magb, magk = cat["Bmag"], cat["Kmag"] # Keep track of galaxy identifier GWGC, PGC, HyperLEDA = cat["GWGC"], cat["PGC"], cat["HyperLEDA"] _2MASS, SDSS = cat["_2MASS"], cat["SDSS-DR12"] idx = np.where(distmpc >= 0)[0] ra, dec = ra[idx], dec[idx] distmpc, z = distmpc[idx], z[idx] magb, magk = magb[idx], magk[idx] GWGC, PGC, HyperLEDA = GWGC[idx], PGC[idx], HyperLEDA[idx] _2MASS, SDSS = _2MASS[idx], SDSS[idx] with h5py.File(catalogFile, 'w') as f: f.create_dataset('ra', data=ra) f.create_dataset('dec', data=dec) f.create_dataset('distmpc', data=distmpc) f.create_dataset('magb', data=magb) f.create_dataset('magk', data=magk) f.create_dataset('z', data=z) # Add galaxy identifier f.create_dataset('GWGC', data=GWGC) f.create_dataset('PGC', data=PGC) f.create_dataset('HyperLEDA', data=HyperLEDA) f.create_dataset('2MASS', data=_2MASS) f.create_dataset('SDSS', data=SDSS) else: with h5py.File(catalogFile, 'r') as f: ra, dec = f['ra'][:], f['dec'][:] distmpc, z = f['distmpc'][:], f['z'][:] magb, magk = f['magb'][:], f['magk'][:] GWGC, PGC, _2MASS = f['GWGC'][:], f['PGC'][:], f['2MASS'][:] HyperLEDA, SDSS = f['HyperLEDA'][:], f['SDSS'][:] # Convert bytestring to unicode GWGC = GWGC.astype('U') PGC = PGC.astype('U') HyperLEDA = HyperLEDA.astype('U') _2MASS = _2MASS.astype('U') SDSS = SDSS.astype('U') # Keep only galaxies with finite B mag when using it in the grade if params["galaxy_grade"] == "S": idx = np.where(~np.isnan(magb))[0] ra, dec, distmpc = ra[idx], dec[idx], distmpc[idx] magb, magk = magb[idx], magk[idx] GWGC, PGC, HyperLEDA = GWGC[idx], PGC[idx], HyperLEDA[idx] _2MASS, SDSS = _2MASS[idx], SDSS[idx] r = distmpc * 1.0 mag = magb * 1.0 elif params["galaxy_catalog"] == "CLU": if not os.path.isfile(catalogFile): raise ValueError("Please add %s." 
% catalogFile) cat = Table.read(catalogFile) name = cat['name'] ra, dec = cat['ra'], cat['dec'] sfr_fuv, mstar = cat['sfr_fuv'], cat['mstar'] distmpc, magb = cat['distmpc'], cat['magb'] a, b2a, pa = cat['a'], cat['b2a'], cat['pa'] btc = cat['btc'] idx = np.where(distmpc >= 0)[0] ra, dec = ra[idx], dec[idx] sfr_fuv, mstar = sfr_fuv[idx], mstar[idx] distmpc, magb = distmpc[idx], magb[idx] a, b2a, pa = a[idx], b2a[idx], pa[idx] btc = btc[idx] idx = np.where(~np.isnan(magb))[0] ra, dec = ra[idx], dec[idx] sfr_fuv, mstar = sfr_fuv[idx], mstar[idx] distmpc, magb = distmpc[idx], magb[idx] a, b2a, pa = a[idx], b2a[idx], pa[idx] btc = btc[idx] z = -1*np.ones(distmpc.shape) r = distmpc * 1.0 mag = magb * 1.0 elif params["galaxy_catalog"] == "mangrove": catalogFile = os.path.join(params["catalogDir"], "%s.hdf5" % params["galaxy_catalog"]) if not os.path.isfile(catalogFile): print("mangrove catalog not found localy, start the automatic download") url = 'https://mangrove.lal.in2p3.fr/data/mangrove.hdf5' os.system("wget -O {}/mangrove.hdf5 {}".format(params["catalogDir"], url)) cat = Table.read(catalogFile) ra, dec = cat["RA"], cat["dec"] distmpc, z = cat["dist"], cat["z"] magb, magk = cat["B_mag"], cat["K_mag"] magW1, magW2, magW3, magW4 = cat["w1mpro"], cat["w2mpro"], cat["w3mpro"], cat["w4mpro"] stellarmass = cat['stellarmass'] # Keep track of galaxy identifier GWGC, PGC, HyperLEDA = cat["GWGC_name"], cat["PGC"], cat["HyperLEDA_name"] _2MASS, SDSS = cat["2MASS_name"], cat["SDSS-DR12_name"] idx = np.where(distmpc >= 0)[0] ra, dec = ra[idx], dec[idx] distmpc, z = distmpc[idx], z[idx] magb, magk = magb[idx], magk[idx] magW1, magW2, magW3, magW4 = magW1[idx], magW2[idx], magW3[idx], magW4[idx] stellarmass = stellarmass[idx] GWGC, PGC, HyperLEDA = GWGC[idx], PGC[idx], HyperLEDA[idx] _2MASS, SDSS = _2MASS[idx], SDSS[idx] # Convert bytestring to unicode GWGC = GWGC.astype('U') PGC = PGC.astype('U') HyperLEDA = HyperLEDA.astype('U') _2MASS = _2MASS.astype('U') SDSS = SDSS.astype('U') r = distmpc * 1.0 mag = magb * 1.0 n, cl = params["powerlaw_n"], params["powerlaw_cl"] dist_exp = params["powerlaw_dist_exp"] prob_scaled = copy.deepcopy(map_struct["prob"]) prob_sorted = np.sort(prob_scaled)[::-1] prob_indexes = np.argsort(prob_scaled)[::-1] prob_cumsum = np.cumsum(prob_sorted) index = np.argmin(np.abs(prob_cumsum - cl)) + 1 prob_scaled[prob_indexes[index:]] = 0.0 prob_scaled = prob_scaled**n theta = 0.5 * np.pi - dec * 2 * np.pi / 360.0 phi = ra * 2 * np.pi / 360.0 ipix = hp.ang2pix(map_struct["nside"], ra, dec, lonlat=True) if "distnorm" in map_struct: if map_struct["distnorm"] is not None: #creat an mask to cut at 3 sigma in distance mask = np.zeros(len(r)) #calculate the moments from distmu, distsigma and distnorm mom_mean, mom_std, mom_norm = distance.parameters_to_moments(map_struct["distmu"],map_struct["distsigma"]) condition_indexer = np.where( (r < (mom_mean[ipix] + (3*mom_std[ipix]))) & (r > (mom_mean[ipix] - (3*mom_std[ipix])) )) mask[condition_indexer] = 1 Sloc = prob_scaled[ipix] * (map_struct["distnorm"][ipix] * norm(map_struct["distmu"][ipix], map_struct["distsigma"][ipix]).pdf(r))**params["powerlaw_dist_exp"] / map_struct["pixarea"] #multiplie the Sloc by 1 or 0 according to the 3 sigma condistion Sloc = Sloc*mask idx = np.where(condition_indexer)[0] else: Sloc = copy.copy(prob_scaled[ipix]) idx = np.arange(len(r)).astype(int) else: Sloc = copy.copy(prob_scaled[ipix]) idx = np.arange(len(r)).astype(int) # this happens when we are using a tiny catalog... 
if np.all(Sloc == 0.0): Sloc[:] = 1.0 #new version of the Slum calcul (from HOGWARTs) Lsun = 3.828e26 Msun = 4.83 Mknmin = -19 Mknmax = -12 Lblist = [] for i in range(0, len(r)): Mb = mag[i] - 5 * np.log10((r[i] * 10 ** 6)) + 5 #L = Lsun * 2.512 ** (Msun - Mb) Lb = Lsun * 2.512 ** (Msun - Mb) Lblist.append(Lb) #set 0 when Sloc is 0 (keep compatible galaxies for normalization) Lblist = np.array(Lblist) Lblist[Sloc == 0] = 0 Slum = Lblist / np.nansum(np.array(Lblist)) """ L_nu = const * 10.**((mag + MAB0)/(-2.5)) L_nu = L_nu / np.nanmax(L_nu[idx]) L_nu = L_nu**params["catalog_n"] L_nu[L_nu < 0.001] = 0.001 L_nu[L_nu > 1.0] = 1.0 Slum = L_nu / np.sum(L_nu) """ mlim, M_KNmin, M_KNmax = 22, -17, -12 L_KNmin = const * 10.**((M_KNmin + MAB0)/(-2.5)) L_KNmax = const * 10.**((M_KNmax + MAB0)/(-2.5)) Llim = 4. * np.pi * (r * 1e6 * pc_cm)**2. * 10.**((mlim + MAB0)/(-2.5)) Sdet = (L_KNmax-Llim)/(L_KNmax-L_KNmin) Sdet[Sdet < 0.01] = 0.01 Sdet[Sdet > 1.0] = 1.0 # Set nan values to zero Sloc[np.isnan(Sloc)] = 0 Slum[np.isnan(Slum)] = 0 if params["galaxy_grade"] == "Smass": if params["galaxy_catalog"] != "mangrove": raise ValueError("You are trying to use the stellar mass information (Smass), please select the mangrove catalog for such use.") #set Smass Smass = np.array(stellarmass) #put null values to nan Smass[np.where(Smass == 0)] = np.nan #go back to linear scaling and not log Smass = 10**Smass # Keep only galaxies with finite stellarmass when using it in the grade #set nan values to 0 Smass[~np.isfinite(Smass)] = 0 #set Smass Smass = Smass / np.sum(Smass) #alpha is defined only with non null mass galaxies, we set a mask for that ind_without_mass = np.where(Smass == 0) Sloc_temp = copy.deepcopy(Sloc) Sloc_temp[ind_without_mass] = 0 #alpha_mass parameter is defined in such way that in mean Sloc count in as much as Sloc*alpha*Smass alpha_mass = (np.sum(Sloc_temp) / np.sum( Sloc_temp*Smass ) ) print("You chose to use the grade using stellar mass, the parameters values are:") print("alpha_mass =", alpha_mass) #beta_mass is a parameter allowing to change the importance of Sloc according to Sloc*alpha*Smass #beta_mass should be fitted in the futur on real GW event for which we have the host galaxy #fixed to one at the moment beta_mass = 1 print("beta_mass =", beta_mass) Smass = Sloc*(1+ (alpha_mass*beta_mass*Smass) ) #Smass = Sloc*Smass S = Sloc*Slum*Sdet prob = np.zeros(map_struct["prob"].shape) if params["galaxy_grade"] == "Sloc": for j in range(len(ipix)): prob[ipix[j]] += Sloc[j] grade = Sloc elif params["galaxy_grade"] == "S": for j in range(len(ipix)): prob[ipix[j]] += S[j] grade = S elif params["galaxy_grade"] == "Smass": for j in range(len(ipix)): prob[ipix[j]] += Smass[j] grade = Smass prob = prob / np.sum(prob) map_struct['prob_catalog'] = prob if params["doUseCatalog"]: map_struct['prob'] = prob Lblist = np.array(Lblist) idx = np.where(~np.isnan(grade))[0] grade = grade[idx] ra, dec, Sloc, S = ra[idx], dec[idx], Sloc[idx], S[idx] distmpc, z = distmpc[idx], z[idx] if params["galaxy_catalog"] == "GLADE": GWGC, PGC, HyperLEDA = GWGC[idx], PGC[idx], HyperLEDA[idx] _2MASS, SDSS = _2MASS[idx], SDSS[idx] Lblist = Lblist[idx] magb = magb[idx] if params["galaxy_catalog"] == "mangrove": GWGC, PGC, HyperLEDA = GWGC[idx], PGC[idx], HyperLEDA[idx] _2MASS, SDSS = _2MASS[idx], SDSS[idx] stellarmass, magb = stellarmass[idx], magb[idx] if params["galaxy_grade"] == "Smass": Smass = Smass[idx] """ Sthresh = np.max(grade)*0.01 idx = np.where(grade >= Sthresh)[0] grade = grade[idx] ra, dec, Sloc, S = ra[idx], 
dec[idx], Sloc[idx], S[idx] distmpc, z = distmpc[idx], z[idx] if params["galaxy_catalog"] == "GLADE": GWGC, PGC, HyperLEDA = GWGC[idx], PGC[idx], HyperLEDA[idx] _2MASS, SDSS = _2MASS[idx], SDSS[idx] """ idx = np.argsort(grade)[::-1] grade = grade[idx] ra, dec, Sloc, S = ra[idx], dec[idx], Sloc[idx], S[idx] distmpc, z = distmpc[idx], z[idx] if params["galaxy_catalog"] == "GLADE": GWGC, PGC, HyperLEDA = GWGC[idx], PGC[idx], HyperLEDA[idx] _2MASS, SDSS = _2MASS[idx], SDSS[idx] Lblist = Lblist[idx] magb = magb[idx] if params["galaxy_catalog"] == "mangrove": GWGC, PGC, HyperLEDA = GWGC[idx], PGC[idx], HyperLEDA[idx] _2MASS, SDSS = _2MASS[idx], SDSS[idx] stellarmass, magb = stellarmass[idx], magb[idx] if params["galaxy_grade"] == "Smass": Smass = Smass[idx] # Keep only galaxies within 3sigma in distance if params["galaxy_catalog"] != "mangrove": mask = Sloc > 0 ra, dec, Sloc, S = ra[mask], dec[mask], Sloc[mask], S[mask], distmpc, z = distmpc[mask], z[mask] if params["galaxy_catalog"] == "GLADE": GWGC, PGC, HyperLEDA = GWGC[mask], PGC[mask], HyperLEDA[mask] _2MASS, SDSS = _2MASS[mask], SDSS[mask] Lblist = Lblist[mask] if params["galaxy_catalog"] == "mangrove": # Keep only galaxies within 3sigma in distance mask = Sloc > 0 ra, dec, Sloc, S = ra[mask], dec[mask], Sloc[mask], S[mask] distmpc, z = distmpc[mask], z[mask] GWGC, PGC, HyperLEDA = GWGC[mask], PGC[mask], HyperLEDA[mask] _2MASS, SDSS = _2MASS[mask], SDSS[mask] stellarmass, magb = stellarmass[mask], magb[mask] if params["galaxy_grade"] == "Smass": Smass = Smass[mask] if len(ra) > 2000: print('Cutting catalog to top 2000 galaxies...') idx = np.arange(2000).astype(int) ra, dec, Sloc, S = ra[idx], dec[idx], Sloc[idx], S[idx] distmpc, z = distmpc[idx], z[idx] if params["galaxy_catalog"] == "GLADE": GWGC, PGC, HyperLEDA = GWGC[idx], PGC[idx], HyperLEDA[idx] _2MASS, SDSS = _2MASS[idx], SDSS[idx] Lblist = Lblist[idx] magb = magb[idx] elif params["galaxy_catalog"] == "mangrove": GWGC, PGC, HyperLEDA = GWGC[idx], PGC[idx], HyperLEDA[idx] _2MASS, SDSS = _2MASS[idx], SDSS[idx] stellarmass, magb = stellarmass[idx], magb[idx] if params["galaxy_grade"] == "Smass": Smass = Smass[idx] # now normalize the distributions S = S / np.sum(S) Sloc = Sloc / np.sum(Sloc) if params["galaxy_grade"] == "Smass": Smass = Smass/np.sum(Smass) else: Smass = np.ones(Sloc.shape) Smass = Smass/np.sum(Smass) catalog_struct = {} catalog_struct["ra"] = ra catalog_struct["dec"] = dec catalog_struct["Sloc"] = Sloc catalog_struct["S"] = S catalog_struct["Smass"] = Smass if params["writeCatalog"]: catalogfile = os.path.join(params["outputDir"], 'catalog.csv') fid = open(catalogfile, 'w') cnt = 1 if params["galaxy_catalog"] == "GLADE": if params["galaxy_grade"] == "S": fid.write("id, RAJ2000, DEJ2000, Sloc, S, Dist, z,GWGC, PGC, HyperLEDA, 2MASS, SDSS, BLum\n") for a, b, c, d, e, f, g, h, i, j, k, l in zip(ra, dec, Sloc, S, distmpc, z, GWGC, PGC, HyperLEDA, _2MASS, SDSS, Lblist): fid.write("%d, %.5f, %.5f, %.5e, %.5e, %.4f, %.4f, %s, %s, %s, %s, %s, %.2E\n" % (cnt, a, b, c, d, e, f, g, h, i, j, k, l)) cnt = cnt + 1 else: fid.write("id, RAJ2000, DEJ2000, Sloc, S, Dist, z, GWGC, PGC, HyperLEDA, 2MASS, SDSS\n") for a, b, c, d, e, f, g, h, i, j, k in zip(ra, dec, Sloc, S, distmpc, z, GWGC, PGC, HyperLEDA, _2MASS, SDSS): fid.write("%d, %.5f, %.5f, %.5e, %.5e, %.4f, %.4f, %s, %s, %s, %s, %s\n" % (cnt, a, b, c, d, e, f, g, h, i, j, k)) cnt = cnt + 1 elif params["galaxy_catalog"] == "mangrove": if params["galaxy_grade"] == "Smass": fid.write("id, RAJ2000, DEJ2000, Smass, S, Sloc, Dist, z, 
GWGC, PGC, HyperLEDA, 2MASS, SDSS, B_mag, stellarmass\n") for a, b, c, d, e, f, g, h, i, j, k, l, m, n in zip(ra, dec, Smass, S, Sloc, distmpc, z, GWGC, PGC, HyperLEDA, _2MASS, SDSS, magb, stellarmass): fid.write("%d, %.5f, %.5f, %.5e, %.5e, %.5e, %.4f, %.4f, %s, %s, %s, %s, %s, %s, %s\n" % (cnt, a, b, c, d, e, f, g, h, i, j, k, l, m, n)) cnt = cnt + 1 else: fid.write("id, RAJ2000, DEJ2000, Sloc, S, Dist, z, GWGC, PGC, HyperLEDA, 2MASS, SDSS\n") for a, b, c, d, e, f, g, h, i, j, k in zip(ra, dec, Sloc, S, distmpc, z, GWGC, PGC, HyperLEDA, _2MASS, SDSS): fid.write("%d, %.5f, %.5f, %.5e, %.5e, %.4f, %.4f, %s, %s, %s, %s, %s\n" % (cnt, a, b, c, d, e, f, g, h, i, j, k)) cnt = cnt + 1 else: fid.write("id, RAJ2000, DEJ2000, Sloc, S, Dist, z\n") for a, b, c, d in zip(ra, dec, Sloc, S, distmpc, z): fid.write("%d, %.5f, %.5f, %.5e, %.5e, %.4f, %.4f\n" % (cnt, a, b, c, d, e, f)) cnt = cnt + 1 fid.close() return map_struct, catalog_struct
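# Illustrative sketch (added; not from get_catalog): the B-band luminosity
# weighting above combines the distance modulus M = m - 5*log10(d/pc) + 5 with
# L = L_sun * 10**(0.4*(M_sun - M)) (written as 2.512**(M_sun - M) above).
# The magnitude and distance below are assumed example values.
import numpy as np

L_sun, M_sun = 3.828e26, 4.83   # solar luminosity [W] and absolute magnitude, as above
m_app, d_mpc = 15.0, 40.0       # assumed apparent magnitude and distance [Mpc]
M_abs = m_app - 5.0 * np.log10(d_mpc * 1e6) + 5.0
L = L_sun * 10.0 ** (0.4 * (M_sun - M_abs))
print(M_abs, L)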
def simplify_catalog(mastercat, quickld=True): """ Removes most of the unnecessary columns from the master catalog and joins fields where relevant Parameters ---------- mastercat : astropy.table.Table The table from initial_catalog quickld : bool If True, means do the "quick" version of the luminosity distance calculation (takes <1 sec as opposed to a min or so, but is only good to a few kpc) """ from astropy import table from astropy.constants import c ckps = c.to(u.km / u.s).value tab = table.Table() #RADEC: # use NSA unless it's missing, in which case use LEDA ras = mastercat['al2000'] * 15 ras[~mastercat['RA'].mask] = mastercat['RA'][~mastercat['RA'].mask] decs = mastercat['de2000'] decs[~mastercat['DEC'].mask] = mastercat['DEC'][~mastercat['DEC'].mask] tab.add_column(table.MaskedColumn(name='RA', data=ras, unit=u.deg)) tab.add_column(table.MaskedColumn(name='Dec', data=decs, unit=u.deg)) #Names/IDs: pgc = mastercat['pgc'].copy() pgc.mask = mastercat['pgc'] < 0 tab.add_column(table.MaskedColumn(name='PGC#', data=pgc)) tab.add_column(table.MaskedColumn(name='NSAID', data=mastercat['NSAID'])) #do these in order of how 'preferred' the object name is. nameorder = ('Objname', 'Name_eddkk', 'objname', 'Name_2mass' ) # this is: EDD, KK, LEDA, 2MASS #need to figure out which has the *largest* name strings, because we have a fixed number of characters largestdt = np.dtype('S1') for nm in nameorder: if mastercat.dtype[nm] > largestdt: largestdt = mastercat.dtype[nm] largestdtnm = nm names = mastercat[largestdtnm].copy( ) # these will all be overwritten - just use it for shape for nm in nameorder: msk = ~mastercat[nm].mask names[msk] = mastercat[nm][msk] tab.add_column(table.MaskedColumn(name='othername', data=names)) #After this, everything should have either an NSAID, a PGC#, or a name (or more than one) #VELOCITIES/redshifts #start with LEDA vs = mastercat['v'].astype(float) v_errs = mastercat['e_v'].astype(float) #Now add vhelio from the the EDD eddvhel = mastercat['Vhel_eddkk'] vs[~eddvhel.mask] = eddvhel[~eddvhel.mask] #EDD has no v-errors, so mask them v_errs[~eddvhel.mask] = 0 v_errs.mask[~eddvhel.mask] = True #then the NSA *observed* velocity, if available (NOT the same as distance) vs[~mastercat['Z'].mask] = mastercat['Z'][~mastercat['Z'].mask] * ckps v_errs.mask[~mastercat['Z'].mask] = True #v_errs[~mastercat['Z_ERR'].mask] = mastercat['Z_ERR'][~mastercat['Z_ERR'].mask] * ckps #finally, KK when present if its not available from one of the above kkvh = mastercat['Vh'] vs[~kkvh.mask] = kkvh[~kkvh.mask] #KK has no v-errors, so mask them v_errs[~kkvh.mask] = 0 v_errs.mask[~kkvh.mask] = True #DISTANCES #start with all inf, and all masked dist = np.ones_like(mastercat['Dist_edd']) * np.inf dist.mask[:] = True #first populate those that are in EDD with CMD-based distance msk = mastercat['So_eddkk'] == 1 dist[msk] = mastercat['Dist_edd'][msk] #now populate from the NSA if not in the above msk = (dist.mask) & (~mastercat['ZDIST'].mask) dist[msk] = mastercat['ZDIST'][msk] * ckps / WMAP9.H(0).value #finally, add in anything in the KK that's not elsewhere msk = (dist.mask) & (~mastercat['Dist_kk'].mask) dist[msk] = mastercat['Dist_kk'][msk] # #for those *without* EDD or KK, use the redshift's luminosity distance # premsk = dist.mask.copy() # zs = vs[premsk]/ckps # if quickld: # ldx = np.linspace(zs.min(), zs.max(), 1000) # ldy = WMAP9.luminosity_distance(ldx).to(u.Mpc).value # ld = np.interp(zs, ldx, ldy) # else: # ld = WMAP9.luminosity_distance(zs).to(u.Mpc).value # dist[premsk] = ld # 
dist.mask[premsk] = vs.mask[premsk] distmod = 5 * np.log10(dist) + 25 # used in phot section tab.add_column(table.MaskedColumn(name='vhelio', data=vs)) #decided to remove v-errors #tab.add_column(table.MaskedColumn(name='vhelio_err', data=v_errs)) tab.add_column(table.MaskedColumn(name='distance', data=dist, unit=u.Mpc)) #PHOTOMETRY tab.add_column( table.MaskedColumn(name='r', data=mastercat['ABSMAG_r'] + distmod)) tab.add_column( table.MaskedColumn(name='i', data=mastercat['ABSMAG_i'] + distmod)) tab.add_column( table.MaskedColumn(name='z', data=mastercat['ABSMAG_z'] + distmod)) tab.add_column(table.MaskedColumn(name='I', data=mastercat['it'])) tab.add_column(table.MaskedColumn(name='K', data=mastercat['K_tc'])) tab.add_column(table.MaskedColumn(name='K_err', data=mastercat['e_K'])) #Stellar mass/SFR tab.add_column( table.MaskedColumn(name='M_star', data=mastercat['MASS'] * (WMAP9.H(0).value / 100)**-2)) tab.add_column(table.MaskedColumn(name='SFR_B300', data=mastercat['B300'])) tab.add_column( table.MaskedColumn(name='SFR_B1000', data=mastercat['B1000'])) return tab
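# Illustrative sketch (added; not from simplify_catalog): the NSA ZDIST branch
# above converts a redshift-like quantity to Mpc with the low-z Hubble law
# d ~ c*z / H0; the full luminosity distance differs at higher redshift.
# The redshift below is an assumed example value.
import astropy.units as u
from astropy.constants import c
from astropy.cosmology import WMAP9

z_example = 0.02
ckps = c.to(u.km / u.s).value
d_hubble = z_example * ckps / WMAP9.H(0).value                # Mpc, linear Hubble law
d_lum = WMAP9.luminosity_distance(z_example).to(u.Mpc).value
print(d_hubble, d_lum)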
def load_model(objname='', datname='', zname='', agelims=[], **extras): ###### REDSHIFT ###### ### open file, load data with open(datname, 'r') as f: hdr = f.readline().split() dtype = np.dtype([(hdr[1], 'S20')] + [(n, np.float) for n in hdr[2:]]) dat = np.loadtxt(datname, comments='#', delimiter=' ', dtype=dtype) with open(zname, 'r') as fz: hdr_z = fz.readline().split() dtype_z = np.dtype([(hdr_z[1], 'S20')] + [(n, np.float) for n in hdr_z[2:]]) zout = np.loadtxt(zname, comments='#', delimiter=' ', dtype=dtype_z) idx = dat['id'] == objname zred = zout['z_spec'][idx][0] # BUCKET FIX THE INDEX HERE if zred == -99: zred = zout['z_peak'][idx][0] print(zred, 'zred') #### CALCULATE TUNIV ##### tuniv = WMAP9.age(zred).value print(tuniv, 'tuniv') n = [p['name'] for p in model_params] model_params[n.index('tage')]['prior_args']['maxi'] = tuniv #### NONPARAMETRIC SFH ###### # NEW # agelims[-1] = np.log10(tuniv*1e9) ncomp = len(agelims) - 1 agelims = [ 0.0, 7.0, 8.0, (8.0 + (np.log10(tuniv * 1e9) - 8.0) / 4), (8.0 + 2 * (np.log10(tuniv * 1e9) - 8.0) / 4), (8.0 + 3 * (np.log10(tuniv * 1e9) - 8.0) / 4), np.log10(tuniv * 1e9) ] agebins = np.array([agelims[:-1], agelims[1:]]) # calculate the somethings: [0, a, b, b + (f-b)/4, b + 2*(f-b)/4, b + 3*(f-b)/4, b + 4*(f-b)/4 = f] #### INSERT REDSHIFT INTO MODEL PARAMETER DICTIONARY #### zind = n.index('zred') model_params[zind]['init'] = zred #### SET UP AGEBINS model_params[n.index('agebins')]['N'] = ncomp model_params[n.index('agebins')]['init'] = agebins.T #### FRACTIONAL MASS INITIALIZATION # NEW # N-1 bins, last is set by x = 1 - np.sum(sfr_fraction) model_params[n.index('sfr_fraction')]['N'] = ncomp - 1 model_params[n.index('sfr_fraction')]['prior_args'] = { 'maxi': np.full(ncomp - 1, 1.0), 'mini': np.full(ncomp - 1, 0.0), # NOTE: ncomp instead of ncomp-1 makes the prior take into # account the implicit Nth variable too } model_params[n.index('sfr_fraction')]['init'] = np.zeros(ncomp - 1) + 1. / ncomp model_params[n.index('sfr_fraction')]['init_disp'] = 0.02 #### CREATE MODEL model = BurstyModel(model_params) return model
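# Illustrative sketch (added; not from load_model): the non-parametric SFH grid
# above is capped at the age of the Universe at the source redshift, with the
# last bins spaced evenly in log10(age/yr). The redshift is an assumed example.
import numpy as np
from astropy.cosmology import WMAP9

zred_example = 1.0
tuniv = WMAP9.age(zred_example).value              # Gyr
log_tuniv = np.log10(tuniv * 1e9)
agelims = [0.0, 7.0, 8.0] + list(np.linspace(8.0, log_tuniv, 5)[1:])
agebins = np.array([agelims[:-1], agelims[1:]]).T  # one (lower, upper) pair per bin
print(agebins)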
from astropy.cosmology import WMAP9 as cosmo
from astropy import units as u
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

small_radius = 7 * u.kpc
large_radius = 50 * u.kpc
z_range = np.linspace(0.02, 1.50, 75)
small_angles = []
large_angles = []
for z in z_range:
    scale = cosmo.arcsec_per_kpc_proper(z)
    small_angles.append((small_radius * scale).value)
    large_angles.append((large_radius * scale).value)

plt.figure()
plt.plot(z_range, small_angles, label="Typical Small Galaxy (Radius 7 kpc)")
plt.plot(z_range, large_angles, label="Typical Large Galaxy (Radius 50 kpc)")
plt.axhline(y=10, color='r', linestyle='-', label="ASASSN cut")
plt.legend()
path = "plots/galaxy_radius.pdf"
print("Saving to", path)
plt.savefig(path)
def begin(index): #i = int(index) i = int(index.split('-')[0]) mgi = int(index.split('-')[1]) color = index.split('-')[2] #try: print(index) #filename = 'Test Data Extract/' + str(i) + '.fit' #filename = str(i) + '-g.fit' #filename = '/data/marvels/billzhu/2175 Dataset/' + color + '/' + str(index) + '.fit' #filename = '/data/marvels/billzhu/2175 Reference Dataset/' + color + '/' + str(index) + '.fit' filename = '/data/marvels/billzhu/Reference Dataset/0.37 - 0.55/' + color + '/' + str(index) + '.fit' #filename = '/data/marvels/billzhu/MG II Dataset/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '.fit' hdulist = fits.open(filename) #qlist = fits.open('MG II Test Cut/' + str(i) + '_MG.fit') #qlist = fits.open('/data/marvels/billzhu/2175 Quasar Cut/' + color + '/' + str(index) + '_DUST.fit') #qlist = fits.open('/data/marvels/billzhu/2175 Reference Quasar Cut/' + color + '/' + str(index) + '_REF.fit') qlist = fits.open('/data/marvels/billzhu/Reference Quasar Cut/0.37 - 0.55/' + color + '/' + str(index) + '_REF.fit') #qlist = fits.open('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_MG.fit') qx = qlist[0].header['XCOORD'] qy = qlist[0].header['YCOORD'] obj_id = qlist[0].header['ID'] mean1 = qlist[0].header['MEAN_BKG'] median1 = qlist[0].header['MED_BKG'] std1 = qlist[0].header['STD_BKG'] redshift = qlist[0].header['ZABS'] #print("%f, %f" % (x, y)) qlist.close() #except: # print("No coordinates") # return # Save some frickin time scidata = hdulist[0].data.astype(float) #print(sigma_clipped_stats(scidata, sigma=3.0, maxiters=5)) pointer = 0 if color == 'g': pointer = 1 if color == 'r': pointer = 2 if color == 'i': pointer = 3 if color == 'z': pointer = 4 if color == 'u': pointer = 0 #bkg_sigma = mad_std(scidata) try: #print('/data/marvels/billzhu/MG II Obj/0.37 - 0.55/' + str(i) + '.fit') #obj_table = Table.read('/data/marvels/billzhu/2175 Obj/' + str(i) + '-' + str(mgi) + '.fit', hdu=1) #obj_table = Table.read('/data/marvels/billzhu/2175 Reference Obj/' + str(i) + '-' + str(mgi) + '.fit', hdu=1) #obj_table = Table.read('/data/marvels/billzhu/MG II Obj/0.37 - 0.55/' + str(i) + '_Obj.fit', hdu=1) obj_table = Table.read('/data/marvels/billzhu/Reference Obj/0.37 - 0.55/' + str(i) + '_Obj.fit', hdu=1) except: print(str(i) + ' No Table') return #line_data = linecache.getline('Full Data.txt', i).split() #line_data = linecache.getline('DR12 QSO.txt', i).split() #print(len(line_data)) #obj_id = int(line_data[52]) quasar = obj_table[obj_id - 1] try: print("%d, %f" % (obj_id, obj_table['M_rr_cc'][obj_id - 1][pointer])) except: print("can't print table") return # If no quasar is found, the field image is deemed corrupt and not used if quasar == 0: print(str(i) + ' No quasar') return # Calculate the 18 magnitude threshold mag18 = 0 header = hdulist[0].header pstable = Table.read('/data/marvels/billzhu/Reference psField/0.37 - 0.55/' + str(i) + '_psField.fit', hdu=7) mag20 = pstable['flux20'] if color == 'g': mag18 = mag20[1] * 10 **(8. - 18/2.5) if color == 'r': mag18 = mag20[2] * 10 **(8. - 18/2.5) if color == 'i': mag18 = mag20[3] * 10 **(8. - 18/2.5) if color == 'z': mag18 = mag20[4] * 10 **(8. 
- 18/2.5) #qsocut = fits.open('/data/marvels/billzhu/2175 Quasar Cut/' + color + '/' + str(index) + '_DUST.fit') #qsocut = fits.open('/data/marvels/billzhu/2175 Reference Quasar Cut/' + color + '/' + str(index) + '_REF.fit') qsocut = fits.open('/data/marvels/billzhu/Reference Quasar Cut/0.37 - 0.55/' + color + '/' + str(index) + '_REF.fit') #qsocut = fits.open('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_MG.fit') qsodata = qsocut[0].data.astype(float) if qsodata[50, 50] < 5: return print('reached') largearr = [] stars = [] chunk_size = 50 diff_fwhm = 1000000 counter = 0 scale = cosmo.kpc_proper_per_arcmin(redshift) * u.arcmin / u.kiloparsec * 0.396 / 60 #filedir = '/data/marvels/billzhu/background/' + color + '/' filedir = '/data/marvels/billzhu/Reference Background/' + color + '/' for j in range(len(obj_table)): sx = obj_table['colc'][j][pointer] sy = obj_table['rowc'][j][pointer] flags1 = detflags(obj_table['objc_flags'][j]) flags2 = detflags(obj_table['objc_flags2'][j]) try: if obj_table['objc_type'][j] == 6 and flags1[12] == False and flags1[17] == False and flags1[18] == False and flags2[27] == False and distance(sx, sy, qx, qy) > 5 and inbounds(sx + chunk_size + 6, sy + chunk_size + 6) and inbounds(sx - chunk_size - 5, sy - chunk_size - 5) and obj_table['psfCounts'][j][pointer] > mag18 and obj_table['M_rr_cc'][j][pointer] > 0 and abs(obj_table['M_rr_cc'][j][pointer] - obj_table['M_rr_cc'][obj_id - 1][pointer]) < 0.1 * obj_table['M_rr_cc'][obj_id - 1][pointer]: #try: """ preshift = scidata[int(sy - 10) : int(sy + 11), int(sx - 10) : int(sx + 11)] xc, yc = centroid_2dg(preshift, mask = None) xc += quasar['colc'][pointer] - 10 yc += quasar['rowc'][pointer] - 10 """ xc = obj_table['colc'][j][pointer] yc = obj_table['rowc'][j][pointer] #preshift = scidata[int(yc - chunk_size - 5) : int(yc + chunk_size + 6), int(xc - chunk_size - 5) : int(xc + chunk_size + 6)] #print("%f, %f" % (xc, yc)) xu = xc + 350.0 xl = xc - 350.0 yu = yc + 350.0 yl = yc - 350.0 xc1 = xc yc1 = yc if xu >= 2048: xu = 2047 if xl < 0: xl = 0 else: xc1 = xc - int(xc) + 350.0 if yu >= 1489: yu = 1488 if yl < 0: yl = 0 else: yc1 = yc - int(yc) + 350.0 #print("%f, %f, %f, %f, %f, %f" % (xc1, yc1, xl, yl, xu, yu)) scidata2 = np.array(scidata[int(yl) : int(yu), int(xl) : int(xu)]) visited = np.zeros((len(scidata2), len(scidata2[0])), dtype=bool) scidata2 = checkInner(scidata2, obj_table, xc, yc, xc1, yc1, mean1, std1, visited, pointer) bkg_stats = calc_background_stats(scidata2, xc1, yc1, int(400 / scale), int(500 / scale)) print(bkg_stats) if scidata2[int(yc1), int(xc1)] - bkg_stats[0] > 20: bkg_stats_arr.append([bkg_stats]) #print("%f, %f" % (xc1, yc1)) coldefs = calc_arcsec_stats(scidata2, xc1, yc1, counter, color) bkg_col = fits.Column(name=str(round(float(400/scale * 0.396), 3)) + '-' + str(round(float(500/scale * 0.396), 3)) + ' arcsec bkg stats', format='D', unit='counts', array=bkg_stats) coldefs.add_col(bkg_col) hdu = fits.BinTableHDU.from_columns(coldefs) hdu.writeto(filedir + index + '_' + str(counter) + '.fits', overwrite=True) #print(counter) counter += 1 else: print("ERASED") except: print('EXCEPTION') continue
def lce_norm(ns):
    # E(z)^2 for a flat LCDM cosmology at the redshift of simulation step ns
    E = Omega_m * (1 + step2z[ns])**3 + (1 - Omega_m)
    # linear growth-rate approximation f ~ Omega_m(z)**0.6
    f = (Omega_m * (1 + step2z[ns])**3 / E)**0.6
    H = cosmo.H(step2z[ns]).value     # Hubble rate in km/s/Mpc
    k_unit = 2 * np.pi / boxlen       # fundamental wavenumber of the box
    return f * H / k_unit / (1 + step2z[ns])
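# Illustrative sketch (added; not from lce_norm): the factor
# (Omega_m*(1+z)**3 / E)**0.6 above is the usual growth-rate approximation
# f ~ Omega_m(z)**0.6, which astropy exposes directly via Om(z).
# The redshift below is an assumed example value.
from astropy.cosmology import WMAP9 as cosmo

z_example = 0.5
f_approx = cosmo.Om(z_example) ** 0.6   # Om(z) = Om0*(1+z)^3 / E(z)^2
H = cosmo.H(z_example).value            # km/s/Mpc
print(f_approx, H)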
def Flux_and_lumin_rest_frame(results, GRB_name, z, Epeak_rest): #epeak is log value, so we need to recalculate it back Ep = np.power(10, Epeak_rest) Fluxes = [] Flux_errs = [] Lumins = [] Lumin_errs = [] for timebin in results: ep_index = results.index(timebin) e1 = 0.5 * Ep[ep_index] e2 = Ep[ep_index] variate = Variates(timebin) loop_len = variate.length if variate.name == 'Band_grbm': flux = [] for id in range(loop_len): param = Params(id, variate) params = param.K, param.alpha, param.xc, param.beta temp_flux = integrate.quad(my_band, e1, e2, args=params) temp_flux = temp_flux[ 0] * 1.60218e-9 # from keV to ergs! (/cm2/s) flux.append(temp_flux) if variate.name == 'Cutoff_powerlaw': flux = [] for id in range(loop_len): param = Params(id, variate) params = param.K, param.index, param.xc temp_flux = integrate.quad(my_cutoffpl, e1, e2, args=params) temp_flux = temp_flux[ 0] * 1.60218e-9 # from keV to ergs! (/cm2/s) flux.append(temp_flux) flux = RandomVariates(flux) log_flux = np.log10(flux) hpd = log_flux.highest_posterior_density_interval() err_temp = hpd[1] - log_flux.median, log_flux.median - hpd[0] log_flux_err = (np.array(err_temp)).max() timebin_index = results.index(timebin) + 1 #plot! fig, ax = plt.subplots() fig.suptitle('log_Flux distribution - %s_timebin_%d' % (GRB_name, timebin_index), fontsize=12) ax.hist(log_flux, 50, alpha=0.5) ax.scatter(hpd, [1, 1], label='hpd', marker='v', s=100) ax.scatter(log_flux.median, 1, label='median', marker='v', s=100) plt.legend() fig.savefig( 'physical_properties_distribution_plots/%s_timebin_%d_distribution_logFlux.png' % (GRB_name, timebin_index), bbox_inches="tight", frameon=True, overwrite=True) plt.close(fig) # now calculate luminosities: dL = cosmo.luminosity_distance(z) dL = dL.to(u.cm) dL = dL.value lumin = flux * 4 * math.pi * dL * dL log_lumin = np.log10(lumin) hpd = log_lumin.highest_posterior_density_interval() err_temp = hpd[1] - log_lumin.median, log_lumin.median - hpd[0] log_lumin_err = (np.array(err_temp)).max() #plot! fig, ax = plt.subplots() fig.suptitle('log_Luminosity distribution - %s_timebin_%d' % (GRB_name, timebin_index), fontsize=12) ax.hist(log_lumin, 50, alpha=0.5) ax.scatter(hpd, [1, 1], label='hpd', marker='v', s=100) ax.scatter(log_lumin.median, 1, label='median', marker='v', s=100) plt.legend() fig.savefig( 'physical_properties_distribution_plots/%s_timebin_%d_distribution_logLumin.png' % (GRB_name, timebin_index), bbox_inches="tight", frameon=True, overwrite=True) plt.close(fig) Fluxes.append(log_flux.median) Flux_errs.append(log_flux_err) Lumins.append(log_lumin.median) Lumin_errs.append(log_lumin_err) Fluxes = np.array(Fluxes) Flux_errs = np.array(Flux_errs) Lumins = np.array(Lumins) Lumin_errs = np.array(Lumin_errs) F_and_L_w_errs = Fluxes, Flux_errs, Lumins, Lumin_errs return F_and_L_w_errs
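# Illustrative sketch (added; not from the function above): the luminosity step
# is L = 4*pi*d_L**2 * F with d_L in cm, so an erg/cm^2/s flux gives erg/s.
# The redshift and flux below are assumed example values.
import math
import astropy.units as u
from astropy.cosmology import WMAP9 as cosmo

z_example, flux = 1.0, 1e-7                    # flux in erg/cm^2/s
dL_cm = cosmo.luminosity_distance(z_example).to(u.cm).value
lumin = flux * 4.0 * math.pi * dL_cm ** 2      # erg/s
print(lumin)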
def rand_counts(gal_field, z, R=10**np.linspace(1.2, 3.6, 13), delta_z=0.1, min_mass=9.415): #picking random location for galaxy number density# if gal_field == 'AEGIS': ra1 = random.uniform(3.746000, 3.756821) dec1 = random.uniform(0.920312, 0.925897) elif gal_field == 'COSMOS': ra1 = random.uniform(2.619737, 2.620718) dec1 = random.uniform(0.038741, 0.043811) elif gal_field == 'GOODS-N': ra1 = random.uniform(3.298072, 3.307597) dec1 = random.uniform(1.084787, 1.087936) elif gal_field == 'GOODS-S': ra1 = random.uniform(0.925775, 0.929397) dec1 = random.uniform(-0.487098, -0.483591) elif gal_field == 'UDS': ra1 = random.uniform(0.59815, 0.602889) dec1 = random.uniform(-0.091376, -0.090305) from astropy.coordinates.sky_coordinate import SkyCoord from astropy import units as u #switching ra and dec to degrees# ra1 = ra1 * (180.0 / math.pi) dec1 = dec1 * (180.0 / math.pi) #making a list of galaxies in within a redshift range of given z, in the selected field, and above the mass limit# lst_gal = [] data_tmp = data_flagged[data_flagged['field'] == gal_field] #binning the satellites based on mass# mask = ((np.abs(data_tmp['z_peak'] - z) <= delta_z) & (data_tmp['lmass'] >= min_mass)) lst_gal = data_tmp[mask] lst_gal1 = lst_gal[(lst_gal['lmass'] < 9.8)] lst_gal2 = lst_gal[((lst_gal['lmass'] < 10.3) & (lst_gal['lmass'] > 9.8))] lst_gal3 = lst_gal[((lst_gal['lmass'] < 10.8) & (lst_gal['lmass'] > 10.3))] lst_gal4 = lst_gal[((lst_gal['lmass'] < 11.8) & (lst_gal['lmass'] > 10.8))] #finding the various aperture radii in arcminutes based on given z# kpc_per_arcmin = cosmo.kpc_proper_per_arcmin(z) arcmin_per_kpc = kpc_per_arcmin**(-1) arcmin = arcmin_per_kpc * (R * u.kpc) #calculating distance in special ANGLE measure to each galaxy in lst_gal# sc0 = SkyCoord(ra1 * u.deg, dec1 * u.deg) sc1 = SkyCoord(lst_gal1['ra'] * u.deg, lst_gal1['dec'] * u.deg) sc2 = SkyCoord(lst_gal2['ra'] * u.deg, lst_gal2['dec'] * u.deg) sc3 = SkyCoord(lst_gal3['ra'] * u.deg, lst_gal3['dec'] * u.deg) sc4 = SkyCoord(lst_gal4['ra'] * u.deg, lst_gal4['dec'] * u.deg) sep1 = sc0.separation(sc1).to(u.arcmin) sep2 = sc0.separation(sc2).to(u.arcmin) sep3 = sc0.separation(sc3).to(u.arcmin) sep4 = sc0.separation(sc4).to(u.arcmin) #finding number of "sep's" within the list 'arcmin' already created# nn1 = np.empty(len(R)) nn2 = np.empty(len(R)) nn3 = np.empty(len(R)) nn4 = np.empty(len(R)) for ii, r in enumerate(arcmin): nn1[ii] = np.sum(sep1 <= r) nn2[ii] = np.sum(sep2 <= r) nn3[ii] = np.sum(sep3 <= r) nn4[ii] = np.sum(sep4 <= r) #returning four lists of counts per radius with low end number for low mass bin# return [nn1, nn2, nn3, nn4]
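# Illustrative sketch (added; not from rand_counts): turning the fixed proper
# aperture radii into on-sky angles with kpc_proper_per_arcmin, as done above.
# The redshift is an assumed example value.
import numpy as np
import astropy.units as u
from astropy.cosmology import WMAP9 as cosmo

z_example = 0.8
R = 10 ** np.linspace(1.2, 3.6, 13) * u.kpc                 # proper radii, as above
arcmin = (R / cosmo.kpc_proper_per_arcmin(z_example)).to(u.arcmin)
print(arcmin)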
def load_model(objname, field, agelims=[], **extras): # REDSHIFT # open file, load data photname, zname, fname, filtername, filts = get_names(field) with open(photname, 'r') as f: hdr = f.readline().split() dtype = np.dtype([(hdr[1], 'S20')] + [(n, np.float) for n in hdr[2:]]) dat = np.loadtxt(photname, comments='#', delimiter=' ', dtype=dtype) with open(zname, 'r') as fz: hdr_z = fz.readline().split() dtype_z = np.dtype([(hdr_z[1], 'S20')] + [(n, np.float) for n in hdr_z[2:]]) zout = np.loadtxt(zname, comments='#', delimiter=' ', dtype=dtype_z) idx = dat['id'] == objname # creates T/F array: True when dat[id] = objname zred = zout['z_spec'][idx][0] # use z_spec if zred == -99: # if z_spec doesn't exist zred = zout['z_peak'][idx][0] # use z_phot print(zred, 'zred') # NEW FOUT (MASS-METALLICITY) with open(fname, 'r') as ff: hdr_f = ff.readline().split() dtype_f = np.dtype([(hdr_f[1], 'S20')] + [(n, np.float) for n in hdr_f[2:]]) fout = np.loadtxt(fname, comments='#', delimiter=' ', dtype=dtype_f) idx_f = fout[ 'id'] == objname # creates T/F array: True when fout[id] = objname lmass = fout['lmass'][idx_f][0] print(lmass, 'lmass') # met = [-1.5, 0.5, -0.3] if lmass <= 9.7: met = [-1.0, 0.0, -0.6] elif 9.7 < lmass <= 10.0: met = [-0.9, 0.1, -0.4] elif 10.0 < lmass <= 10.3: met = [-0.8, 0.15, -0.2] elif lmass > 10.3: met = [-0.2, 0.3, 0.0] # CALCULATE AGE OF THE UNIVERSE (TUNIV) AT REDSHIFT ZRED tuniv = WMAP9.age(zred).value print(tuniv, 'tuniv') n = [p['name'] for p in model_params] # model_params[n.index('tage')]['prior_args']['maxi'] = tuniv # NEW FOUT MASS-METALLICITY model_params[n.index('logzsol')]['prior_args'] = { 'mini': met[0], 'maxi': met[1] } # {'mini': -0.8, 'maxi': 0.15} model_params[n.index('logzsol')]['init'] = met[2] # -0.25 # 4942_cosmos_noelgmet was run with met[2] = -0.4 # 4942_cosmos_noelg25 was run with met[2] = -0.25 # NONPARAMETRIC SFH agelims = [ 0.0, 8.0, 8.7, 9.0, (9.0 + (np.log10(tuniv * 1e9) - 9.0) / 3), (9.0 + 2 * (np.log10(tuniv * 1e9) - 9.0) / 3), np.log10(tuniv * 1e9) ] ncomp = len(agelims) - 1 agebins = np.array([agelims[:-1], agelims[1:] ]) # why agelims[1:] instead of agelims[0:]? # INSERT REDSHIFT INTO MODEL PARAMETER DICTIONARY zind = n.index('zred') model_params[zind]['init'] = zred # SET UP AGEBINS model_params[n.index('agebins')]['N'] = ncomp model_params[n.index('agebins')]['init'] = agebins.T # FRACTIONAL MASS INITIALIZATION # NEW # N-1 bins, last is set by x = 1 - np.sum(sfr_fraction) model_params[n.index('sfr_fraction')]['N'] = ncomp - 1 model_params[n.index('sfr_fraction')]['prior_args'] = { 'maxi': np.full(ncomp - 1, 1.0), 'mini': np.full(ncomp - 1, 0.0), # NOTE: ncomp instead of ncomp-1 makes the prior take into # account the implicit Nth variable too } model_params[n.index('sfr_fraction')]['init'] = np.zeros(ncomp - 1) + 1. / ncomp model_params[n.index('sfr_fraction')]['init_disp'] = 0.02 # CREATE MODEL model = BurstyModel(model_params) return model
def color_mag_plots(mgs,s82,decals,savefig=False): # Make paneled histograms of the color distribution for several magnitude bins of Galaxy Zoo data. """ SDSS main sample (GZ2) Stripe 82 coadded data (GZ2) DECaLS """ redshifts = (0.12,0.08,0.05) appmag_lim = 17.0 # Work out the magnitude limit from cosmology fig,axarr = plt.subplots(num=1,nrows=3,ncols=3,figsize=(12,10)) for z,ax in zip(redshifts,axarr.ravel()): absmag_lim = appmag_lim - WMAP9.distmod(z).value maglim = (mgs['PETROMAG_MR'] < absmag_lim) & (mgs['REDSHIFT'] <= z) spiral = mgs['t01_smooth_or_features_a02_features_or_disk_weighted_fraction'] >= 0.8 elliptical = mgs['t01_smooth_or_features_a01_smooth_weighted_fraction'] >= 0.8 ax.hist(mgs[maglim & spiral]['PETROMAG_U'] - mgs[maglim & spiral]['PETROMAG_R'],range=(0,4),bins=25,color='blue',histtype='step',label='spiral') ax.hist(mgs[maglim & elliptical]['PETROMAG_U'] - mgs[maglim & elliptical]['PETROMAG_R'],range=(0,4),bins=25,color='red',histtype='step',label='elliptical') ax.set_xlabel(r'$(u-r)$',fontsize=16) ax.set_title(r'$M_r<{0:.2f}, z<{1:.2f}$'.format(absmag_lim,z),fontsize=16) ax.text(0.95,0.95,'MGS',ha='right',va='top',transform=ax.transAxes) if ax == axarr.ravel()[0]: ax.legend(loc='upper left',fontsize=10) s82_lim = 17.77 for z,ax in zip(redshifts,axarr.ravel()[3:6]): absmag_lim = s82_lim - WMAP9.distmod(z).value maglim = (s82['PETROMAG_MR'] < absmag_lim) & (s82['REDSHIFT'] <= z) spiral = s82['t01_smooth_or_features_a02_features_or_disk_weighted_fraction'] >= 0.8 elliptical = s82['t01_smooth_or_features_a01_smooth_weighted_fraction'] >= 0.8 ax.hist(s82[maglim & spiral]['PETROMAG_U'] - s82[maglim & spiral]['PETROMAG_R'],range=(0,4),bins=25,color='blue',histtype='step',label='spiral') ax.hist(s82[maglim & elliptical]['PETROMAG_U'] - s82[maglim & elliptical]['PETROMAG_R'],range=(0,4),bins=25,color='red',histtype='step',label='elliptical') ax.set_xlabel(r'$(u-r)$',fontsize=16) ax.set_title(r'$M_r<{0:.2f}, z<{1:.2f}$'.format(absmag_lim,z),fontsize=16) ax.text(0.95,0.95,'Stripe 82',ha='right',va='top',transform=ax.transAxes) decals_lim = 17.77 for z,ax in zip(redshifts,axarr.ravel()[6:]): absmag_lim = decals_lim - WMAP9.distmod(z).value maglim = (decals['metadata.mag.abs_r'] < absmag_lim) & (decals['metadata.redshift'] <= z) spiral = decals['t00_smooth_or_features_a1_features_frac'] >= 0.8 elliptical = decals['t00_smooth_or_features_a0_smooth_frac'] >= 0.8 ax.hist(decals[maglim & spiral]['metadata.mag.u'] - decals[maglim & spiral]['metadata.mag.r'],range=(0,4),bins=25,color='blue',histtype='step',label='spiral') ax.hist(decals[maglim & elliptical]['metadata.mag.u'] - decals[maglim & elliptical]['metadata.mag.r'],range=(0,4),bins=25,color='red',histtype='step',label='elliptical') ax.set_xlabel(r'$(u-r)$',fontsize=16) ax.set_title(r'$M_r<{0:.2f}, z<{1:.2f}$'.format(absmag_lim,z),fontsize=16) ax.text(0.95,0.95,'DECaLS',ha='right',va='top',transform=ax.transAxes) fig.tight_layout() if savefig: plt.savefig('{0}/color_hist.pdf'.format(plot_path)) else: plt.show() return None
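# Illustrative sketch (added; not from color_mag_plots): the volume-limited cuts
# above turn an apparent-magnitude survey limit into an absolute-magnitude limit
# using the distance modulus at the chosen redshift.
from astropy.cosmology import WMAP9

appmag_lim, z_example = 17.0, 0.05   # MGS limit and one of the redshifts used above
absmag_lim = appmag_lim - WMAP9.distmod(z_example).value
print(absmag_lim)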
cat_massive_z_slice = cat_massive_gal[abs(cat_massive_gal['zKDEPeak'] - z) < eval(z_bin_size)] coord_massive_gal = SkyCoord(cat_massive_z_slice['RA'] * u.deg, cat_massive_z_slice['DEC'] * u.deg) cat_all_z_slice = cat_gal[abs(cat_gal[z_keyname] - z) < 0.5] massive_counts = len(cat_massive_z_slice) massive_count = 0 massive_counts_cq = 0 massive_counts_csf = 0 for gal in cat_massive_z_slice: massive_count += 1 print('Progress:' + str(massive_count) + '/' + str(len(cat_massive_z_slice)), end='\r') dis = WMAP9.angular_diameter_distance(gal[z_keyname]).value coord_gal = SkyCoord(gal['RA'] * u.deg, gal['DEC'] * u.deg) cat_neighbors_z_slice = cat_all_z_slice[ abs(cat_all_z_slice[z_keyname] - gal[z_keyname]) < 1.5 * 0.044 * (1 + z)] cat_neighbors = cat_neighbors_z_slice[abs(cat_neighbors_z_slice['RA'] - gal['RA']) < 0.7 / dis / np.pi * 180] cat_neighbors = cat_neighbors[ abs(cat_neighbors['DEC'] - gal['DEC']) < 0.7 / dis / np.pi * 180] if len(cat_neighbors) == 0: # central gals which has no companion continue else: ind = KDTree(np.array(cat_neighbors['RA', 'DEC']).tolist()).query_radius( [(gal['RA'], gal['DEC'])],