def Counts(gal_id, gal_field, z, R=10**np.linspace(1.2, 3.6, 13), delta_z=0.1, min_mass=9.415):
    # making a list of galaxies within a redshift range of the given z, in the selected field, and above the mass limit
    data_tmp = data[data['field'] == gal_field]
    mask = ((np.abs(data_tmp['z'] - z) <= delta_z) &
            (data_tmp['id'] != gal_id) &
            (data_tmp['lmass'] >= min_mass))
    lst_gal = data_tmp[mask]
    # separating the potential satellites into star-forming (b) and quiescent (r) bins
    lst_galr = lst_gal[(((lst_gal['vj'] < 0.92) & (lst_gal['uv'] > 1.3)) |
                        ((lst_gal['vj'] > 0.8) & (lst_gal['vj'] < 1.6) &
                         (lst_gal['uv'] > (0.88 * lst_gal['vj'] + 0.49))))]
    lst_galb = lst_gal[(((lst_gal['vj'] < 0.92) & (lst_gal['uv'] < 1.3)) |
                        ((lst_gal['vj'] > 0.8) & (lst_gal['vj'] < 1.6) &
                         (lst_gal['uv'] < (0.88 * lst_gal['vj'] + 0.49))) |
                        (lst_gal['vj'] > 1.5))]
    # finding the various aperture radii in arcminutes based on the given z
    kpc_per_arcmin = cosmo.kpc_proper_per_arcmin(z)
    arcmin_per_kpc = kpc_per_arcmin**(-1)
    arcmin = arcmin_per_kpc * (R * u.kpc)
    # retrieving RA and DEC of the given galaxy
    p1 = data_tmp[(data_tmp['id'] == gal_id)]
    # calculating the angular separation to each galaxy in the two bins
    sc0 = SkyCoord(p1['ra'] * u.deg, p1['dec'] * u.deg)
    sc1 = SkyCoord(lst_galr['ra'] * u.deg, lst_galr['dec'] * u.deg)
    sc2 = SkyCoord(lst_galb['ra'] * u.deg, lst_galb['dec'] * u.deg)
    sep1 = sc0.separation(sc1).to(u.arcmin)
    sep2 = sc0.separation(sc2).to(u.arcmin)
    # counting how many separations fall within each aperture radius
    nnr = np.empty(len(R))
    nnb = np.empty(len(R))
    for ii, r in enumerate(arcmin):
        nnr[ii] = np.sum(sep1 <= r)
        nnb[ii] = np.sum(sep2 <= r)
    return [nnr, nnb]
def Counts(gal_id, gal_field, z, R=10**np.linspace(1.2, 3.6, 13), delta_z=0.1, min_mass=9.415):
    from astropy.coordinates.sky_coordinate import SkyCoord
    from astropy import units as u
    # making a list of galaxies within a redshift range of the given z, in the selected field, and above the mass limit
    data_tmp = data_flagged[data_flagged['field'] == gal_field]
    mask = ((np.abs(data_tmp['z_peak'] - z) <= delta_z) &
            (data_tmp['id'] != gal_id) &
            (data_tmp['lmass'] >= min_mass))
    lst_gal = data_tmp[mask]
    # finding the various aperture radii in arcminutes based on the given z
    kpc_per_arcmin = cosmo.kpc_proper_per_arcmin(z)
    arcmin_per_kpc = kpc_per_arcmin**(-1)
    arcmin = arcmin_per_kpc * (R * u.kpc)
    # retrieving RA and DEC of the given galaxy
    p1 = data_tmp[(data_tmp['id'] == gal_id)]
    # calculating the angular separation to each galaxy in lst_gal
    sc0 = SkyCoord(p1['ra'] * u.deg, p1['dec'] * u.deg)
    sc = SkyCoord(lst_gal['ra'] * u.deg, lst_gal['dec'] * u.deg)
    sep = sc0.separation(sc).to(u.arcmin)
    # counting how many separations fall within each aperture radius
    nn = np.empty(len(R))
    for ii, r in enumerate(arcmin):
        nn[ii] = np.sum(sep <= r)
    return nn
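# A minimal usage sketch of Counts(); the id and field below are placeholder
# values, and `data_flagged` is assumed to be a pre-loaded table with the
# 'field', 'id', 'z_peak', 'lmass', 'ra' and 'dec' columns used above.
R_demo = 10**np.linspace(1.2, 3.6, 13)
nn_demo = Counts(gal_id=12345, gal_field='COSMOS', z=1.2)
for r_demo, n_demo in zip(R_demo, nn_demo):
    print('%8.1f kpc aperture: %d neighbors' % (r_demo, int(n_demo)))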
def rand_counts(gal_field, z, R=10**np.linspace(1.2, 3.6, 13), delta_z=0.1, min_mass=9.415):
    # picking a random location within the field for the galaxy number density (coordinates in radians)
    if gal_field == 'AEGIS':
        ra1 = random.uniform(3.746000, 3.756821)
        dec1 = random.uniform(0.920312, 0.925897)
    elif gal_field == 'COSMOS':
        ra1 = random.uniform(2.619737, 2.620718)
        dec1 = random.uniform(0.038741, 0.043811)
    elif gal_field == 'GOODS-N':
        ra1 = random.uniform(3.298072, 3.307597)
        dec1 = random.uniform(1.084787, 1.087936)
    elif gal_field == 'GOODS-S':
        ra1 = random.uniform(0.925775, 0.929397)
        dec1 = random.uniform(-0.487098, -0.483591)
    elif gal_field == 'UDS':
        ra1 = random.uniform(0.59815, 0.602889)
        dec1 = random.uniform(-0.091376, -0.090305)
    # switching ra and dec to degrees
    ra1 = ra1 * (180.0 / math.pi)
    dec1 = dec1 * (180.0 / math.pi)
    # making a list of galaxies within a redshift range of the given z, in the selected field, and above the mass limit
    data_tmp = data[data['field'] == gal_field]
    mask = ((np.abs(data_tmp['z'] - z) <= delta_z) &
            (data_tmp['lmass'] >= min_mass))
    lst_gal = data_tmp[mask]
    lst_galr = lst_gal[(((lst_gal['vj'] < 0.92) & (lst_gal['uv'] > 1.3)) |
                        ((lst_gal['vj'] > 0.8) & (lst_gal['vj'] < 1.6) &
                         (lst_gal['uv'] > (0.88 * lst_gal['vj'] + 0.49))))]
    lst_galb = lst_gal[(((lst_gal['vj'] < 0.92) & (lst_gal['uv'] < 1.3)) |
                        ((lst_gal['vj'] > 0.8) & (lst_gal['vj'] < 1.6) &
                         (lst_gal['uv'] < (0.88 * lst_gal['vj'] + 0.49))) |
                        (lst_gal['vj'] > 1.5))]
    # finding the various aperture radii in arcminutes based on the given z
    kpc_per_arcmin = cosmo.kpc_proper_per_arcmin(z)
    arcmin_per_kpc = kpc_per_arcmin**(-1)
    arcmin = arcmin_per_kpc * (R * u.kpc)
    # calculating the angular separation from the random point to each galaxy
    sc0 = SkyCoord(ra1 * u.deg, dec1 * u.deg)
    sc1 = SkyCoord(lst_galr['ra'] * u.deg, lst_galr['dec'] * u.deg)
    sc2 = SkyCoord(lst_galb['ra'] * u.deg, lst_galb['dec'] * u.deg)
    sep1 = sc0.separation(sc1).to(u.arcmin)
    sep2 = sc0.separation(sc2).to(u.arcmin)
    # counting how many separations fall within each aperture radius
    nn1 = np.empty(len(R))
    nn2 = np.empty(len(R))
    for ii, r in enumerate(arcmin):
        nn1[ii] = np.sum(sep1 <= r)
        nn2[ii] = np.sum(sep2 <= r)
    # nn1 is the density list for quiescent galaxies, nn2 for star-forming
    return [nn1, nn2]
def Counts(gal_id, gal_field, z, R=10**np.linspace(1.2, 3.6, 13), delta_z=0.1, min_mass=9.415):
    from astropy.coordinates.sky_coordinate import SkyCoord
    from astropy import units as u
    # making a list of galaxies within a redshift range of the given z, in the selected field, and above the mass limit
    data_tmp = data_flagged[data_flagged['field'] == gal_field]
    mask = ((np.abs(data_tmp['z_peak'] - z) <= delta_z) &
            (data_tmp['id'] != gal_id) &
            (data_tmp['lmass'] >= min_mass))
    lst_gal = data_tmp[mask]
    # separating the satellite galaxies into four bins based on mass
    lst_gal1 = lst_gal[(lst_gal['lmass'] < 9.8)]
    lst_gal2 = lst_gal[((lst_gal['lmass'] < 10.3) & (lst_gal['lmass'] > 9.8))]
    lst_gal3 = lst_gal[((lst_gal['lmass'] < 10.8) & (lst_gal['lmass'] > 10.3))]
    lst_gal4 = lst_gal[((lst_gal['lmass'] < 11.8) & (lst_gal['lmass'] > 10.8))]
    # finding the various aperture radii in arcminutes based on the given z
    kpc_per_arcmin = cosmo.kpc_proper_per_arcmin(z)
    arcmin_per_kpc = kpc_per_arcmin**(-1)
    arcmin = arcmin_per_kpc * (R * u.kpc)
    # retrieving RA and DEC of the given galaxy
    p1 = data_tmp[(data_tmp['id'] == gal_id)]
    # calculating the angular separation to each galaxy in each mass bin
    sc0 = SkyCoord(p1['ra'] * u.deg, p1['dec'] * u.deg)
    sc1 = SkyCoord(lst_gal1['ra'] * u.deg, lst_gal1['dec'] * u.deg)
    sc2 = SkyCoord(lst_gal2['ra'] * u.deg, lst_gal2['dec'] * u.deg)
    sc3 = SkyCoord(lst_gal3['ra'] * u.deg, lst_gal3['dec'] * u.deg)
    sc4 = SkyCoord(lst_gal4['ra'] * u.deg, lst_gal4['dec'] * u.deg)
    sep1 = sc0.separation(sc1).to(u.arcmin)
    sep2 = sc0.separation(sc2).to(u.arcmin)
    sep3 = sc0.separation(sc3).to(u.arcmin)
    sep4 = sc0.separation(sc4).to(u.arcmin)
    # counting how many separations fall within each aperture radius
    nn1 = np.empty(len(R))
    nn2 = np.empty(len(R))
    nn3 = np.empty(len(R))
    nn4 = np.empty(len(R))
    for ii, r in enumerate(arcmin):
        nn1[ii] = np.sum(sep1 <= r)
        nn2[ii] = np.sum(sep2 <= r)
        nn3[ii] = np.sum(sep3 <= r)
        nn4[ii] = np.sum(sep4 <= r)
    # returning four lists of counts per radius, ordered from the lowest to the highest mass bin
    return [nn1, nn2, nn3, nn4]
def nth_nearest(gal_id, gal_field, N):
    # creating a redshift range about the chosen galaxy
    z_un = data_flagged[(data_flagged['id'] == gal_id) & (data_flagged['field'] == gal_field)]
    z = z_un['z_peak'][0]
    z_bin = data_flagged[((data_flagged['z_peak'] >= (z - 0.08)) &
                          (data_flagged['z_peak'] <= (z + 0.08)))]
    # create a list of ids of galaxies in the z range with lmass above 9.415
    lst_id = []
    for gal in z_bin:
        if (gal['id'] != gal_id) and (gal['field'] == gal_field):
            if gal['lmass'] >= 9.415:
                lst_id.append([gal['id'], gal['field']])
    # finding the kpc-per-radian ratio at the given redshift z
    kpc_arcmin = cosmo.kpc_proper_per_arcmin(z)
    kpc_degrees = kpc_arcmin * 60
    kpc_radians = kpc_degrees / (math.pi / 180)
    kpc_radian = kpc_radians.value
    # convert the target position from degrees to radians
    ra1 = (z_un['ra'][0]) * (math.pi / 180)
    dec1 = (z_un['dec'][0]) * (math.pi / 180)
    # making a list of angular distances (in radians) to the galaxies in the z bin
    lst_radians = []
    for gal in lst_id:
        # pulling the necessary info of each galaxy in the range
        position_info = data_flagged[(data_flagged['id'] == gal[0]) & (data_flagged['field'] == gal[1])]
        ra = (position_info['ra'][0]) * (math.pi / 180)
        dec = (position_info['dec'][0]) * (math.pi / 180)
        # small-angle distance to the given galaxy, in radians
        del_dec = dec - dec1
        del_ra = ra - ra1
        mean_dec = (dec + dec1) / 2.0
        del_radians = math.sqrt(del_dec**2 + (del_ra * math.cos(mean_dec))**2)
        lst_radians.append(del_radians)
    lst_radians.sort()
    # distance to the Nth nearest galaxy, converted to kpc, then the implied surface density
    r_n_rad = lst_radians[N - 1]
    r_n = r_n_rad * kpc_radian
    sig = N / (math.pi * (r_n**2))
    return sig
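# The per-object loop above can be collapsed into one vectorized separation
# call; a sketch under the same column and cut assumptions (data_flagged,
# the 0.08 redshift window, and the 9.415 mass limit hard-coded above).
# SkyCoord.separation is the exact great-circle angle, which agrees with
# the small-angle formula above for small separations.
def nth_nearest_fast(gal_id, gal_field, N):
    target = data_flagged[(data_flagged['id'] == gal_id) & (data_flagged['field'] == gal_field)]
    z = target['z_peak'][0]
    zbin = data_flagged[(np.abs(data_flagged['z_peak'] - z) <= 0.08) &
                        (data_flagged['field'] == gal_field) &
                        (data_flagged['id'] != gal_id) &
                        (data_flagged['lmass'] >= 9.415)]
    sc0 = SkyCoord(target['ra'][0] * u.deg, target['dec'][0] * u.deg)
    sc = SkyCoord(zbin['ra'] * u.deg, zbin['dec'] * u.deg)
    r_n_rad = np.sort(sc0.separation(sc).radian)[N - 1]   # angle to the Nth neighbor
    kpc_per_radian = cosmo.kpc_proper_per_arcmin(z).value * 60. * 180. / math.pi
    return N / (math.pi * (r_n_rad * kpc_per_radian)**2)  # surface density N / (pi r_N^2)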
def rand_counts(gal_field, z, R=10**np.linspace(1.2, 3.6, 13), delta_z=0.1, min_mass=9.415):
    # picking a random location within the field for the galaxy number density (coordinates in radians)
    if gal_field == 'AEGIS':
        ra1 = random.uniform(3.746000, 3.756821)
        dec1 = random.uniform(0.920312, 0.925897)
    elif gal_field == 'COSMOS':
        ra1 = random.uniform(2.619737, 2.620718)
        dec1 = random.uniform(0.038741, 0.043811)
    elif gal_field == 'GOODS-N':
        ra1 = random.uniform(3.298072, 3.307597)
        dec1 = random.uniform(1.084787, 1.087936)
    elif gal_field == 'GOODS-S':
        ra1 = random.uniform(0.925775, 0.929397)
        dec1 = random.uniform(-0.487098, -0.483591)
    elif gal_field == 'UDS':
        ra1 = random.uniform(0.59815, 0.602889)
        dec1 = random.uniform(-0.091376, -0.090305)
    from astropy.coordinates.sky_coordinate import SkyCoord
    from astropy import units as u
    # switching ra and dec to degrees
    ra1 = ra1 * (180.0 / math.pi)
    dec1 = dec1 * (180.0 / math.pi)
    # making a list of galaxies within a redshift range of the given z and above the mass limit
    data_tmp = data_flagged[data_flagged['field'] == gal_field]
    mask = (np.abs(data_tmp['z_peak'] - z) <= delta_z) & (data_tmp['lmass'] >= min_mass)
    lst_gal = data_tmp[mask]
    # converting the radii R (kpc) to arcminutes at the given redshift z
    kpc_per = cosmo.kpc_proper_per_arcmin(z)
    arcmin_per = kpc_per**(-1)
    arcmin = arcmin_per * (R * u.kpc)
    # calculating the angular separation from the random point to each galaxy
    sc0 = SkyCoord(ra1 * u.deg, dec1 * u.deg)
    sc = SkyCoord(lst_gal['ra'] * u.deg, lst_gal['dec'] * u.deg)
    sep = sc.separation(sc0).to(u.arcmin)
    nn = np.empty(len(R))
    for ii, r in enumerate(arcmin):
        nn[ii] = np.sum(sep <= r)
    return nn
def Counts_q(gal_id, gal_field, z, R=10**np.linspace(1.2, 3.6, 13), delta_z=0.1, min_mass=9.415):
    # making a list of galaxies within a redshift range of the given z, in the selected field, and above the mass limit
    data_tmp = data1[data1["field"] == gal_field]
    mask = ((np.abs(data_tmp["z"] - z) <= delta_z) &
            (data_tmp["id"] != gal_id) &
            (data_tmp["lmass"] >= min_mass))
    # making the list of all satellites and the list of satellites that are quiescent
    lst_gal = data_tmp[mask]
    lst_galr = lst_gal[(((lst_gal["vj"] < 0.92) & (lst_gal["uv"] > 1.3)) |
                        ((lst_gal["vj"] > 0.8) & (lst_gal["vj"] < 1.6) &
                         (lst_gal["uv"] > (0.88 * lst_gal["vj"] + 0.49))))]
    # finding the various aperture radii in arcminutes based on the given z
    kpc_per_arcmin = cosmo.kpc_proper_per_arcmin(z)
    arcmin_per_kpc = kpc_per_arcmin ** (-1)
    arcmin = arcmin_per_kpc * (R * u.kpc)
    # retrieving RA and DEC of the given galaxy
    p1 = data_tmp[(data_tmp["id"] == gal_id)]
    # calculating the angular separation to each galaxy in the two lists
    sc0 = SkyCoord(p1["ra"] * u.deg, p1["dec"] * u.deg)
    sc1 = SkyCoord(lst_galr["ra"] * u.deg, lst_galr["dec"] * u.deg)
    sc2 = SkyCoord(lst_gal["ra"] * u.deg, lst_gal["dec"] * u.deg)
    sep1 = sc0.separation(sc1).to(u.arcmin)
    sep2 = sc0.separation(sc2).to(u.arcmin)
    # counting how many separations fall within each aperture radius
    nnr = np.empty(len(R))
    nn = np.empty(len(R))
    for ii, r in enumerate(arcmin):
        nnr[ii] = np.sum(sep1 <= r)
        nn[ii] = np.sum(sep2 <= r)
    # calculating the quiescent fraction per radius; apertures with zero total
    # counts get a sentinel value of 5 that is filtered out later
    lst_q = []
    for i in range(len(nnr)):
        if nn[i] == 0:
            lst_q.append(5)
        else:
            lst_q.append(nnr[i] / nn[i])  # was nnr[i] / nnb[i]; nnb is undefined here
    return lst_q
def cosmoScale(redshift, WMAP9=False, H0=70.0, Om0=0.30, Planck15=False):
    """
    Get the angular scale (kpc/") at redshift z.

    This is simply a wrapper of astropy.cosmology;
    the input redshift can be an array.
    """
    from astropy import units as u
    if WMAP9:
        from astropy.cosmology import WMAP9 as cosmo
    elif Planck15:
        from astropy.cosmology import Planck15 as cosmo
    else:
        from astropy.cosmology import FlatLambdaCDM
        cosmo = FlatLambdaCDM(H0=H0, Om0=Om0)
    scale = cosmo.kpc_proper_per_arcmin(redshift).to(u.kpc / u.arcsec)
    return scale.value
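# Usage: scalar in, float out; array in, ndarray out. Values are kpc per
# arcsec (at z ~ 0.5 the scale is of order 6 kpc/").
print(cosmoScale(0.5))                                   # default flat LCDM, H0=70, Om0=0.3
print(cosmoScale(np.array([0.5, 1.0, 2.0]), WMAP9=True))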
def calc_density_operator(rad, sourcereg, pars, z):
    # requires gamma from scipy.special; rad is in arcmin
    # Select values in the source region
    kpcp = cosmo.kpc_proper_per_arcmin(z).value
    rfit = rad[sourcereg] * kpcp
    npt = len(rfit)
    npars = len(pars[:, 0])
    # Compute linear combination of basis functions in the source region
    beta = np.repeat(pars[:, 0], npt).reshape(npars, npt)
    rc = np.repeat(pars[:, 1], npt).reshape(npars, npt)
    base = 1. + np.power(rfit / rc, 2)
    expon = -3. * beta
    func_base = np.power(base, expon)
    cfact = gamma(3 * beta) / gamma(3 * beta - 0.5) / np.sqrt(np.pi) / rc
    fng = func_base * cfact
    # Recast into the full matrix and add a column for the background
    nptot = len(rad)
    Ktot = np.zeros((nptot, npars + 1))
    Ktot[0:npt, 0:npars] = fng.T
    Ktot[:, npars] = 0.0
    return Ktot
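# The basis functions above are single-beta profiles,
# n^2(r) = (1 + (r/rc)^2)^(-3*beta), and cfact is the analytic line-of-sight
# projection factor Gamma(3b) / (sqrt(pi) * Gamma(3b - 1/2) * rc).
# A hypothetical call with made-up radii and a small (beta, rc) grid:
rad_demo = np.linspace(0.1, 10., 50)              # radial bins in arcmin
sourcereg_demo = np.where(rad_demo < 8.)          # fit region inside 8 arcmin
pars_demo = np.array([[0.6, 100.], [0.7, 150.]])  # rows of (beta, rc in kpc)
K_demo = calc_density_operator(rad_demo, sourcereg_demo, pars_demo, z=0.1)
# K_demo has shape (len(rad_demo), npars + 1); the last column is the background term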
def ProjectionFactor(self, z, betaparams, region_size=1.):
    pixsize = self.data.pixsize
    npar = len(betaparams)
    if npar == 4:
        print('We will use a single beta profile')
        betaparams[1] = betaparams[1] / pixsize
        betaparams[2] = 0.
    elif npar == 6:
        print('We will use a double beta profile')
        betaparams[1] = betaparams[1] / pixsize
        betaparams[2] = betaparams[2] / pixsize
        betaparams[4] = 0.
    else:
        print('Invalid number of SB parameters')
        return
    fmask = fits.open('mask.fits')
    mask = fmask[0].data
    data_size = mask.shape
    fmask.close()
    kpcp = cosmo.kpc_proper_per_arcmin(z).value
    Mpcpix = 1000. / kpcp / self.data.pixsize  # 1 Mpc in pixels
    regsizepix = region_size * Mpcpix
    if regsizepix > data_size[0] / 2:
        print('Error: region size larger than image size')
        return
    minx = int(np.round(data_size[1] / 2 - regsizepix))
    maxx = int(np.round(data_size[1] / 2 + regsizepix))
    miny = int(np.round(data_size[0] / 2 - regsizepix))
    maxy = int(np.round(data_size[0] / 2 + regsizepix))
    msk = mask[miny:maxy, minx:maxx]
    npix = len(msk)
    minscale = 2                # minimum scale of 2 pixels
    maxscale = regsizepix / 2.  # at least 4 resolution elements on a side
    scale = np.logspace(np.log10(minscale), np.log10(maxscale), 10)  # 10 scales logarithmically spaced
    self.cfact = calc_projection_factor(npix, msk, betaparams, scale)
    return self.cfact
def within_radius(ra, dec, z, zerr, neighbors, search_rad=2 * u.Mpc, zerr_cutoff=False, in_z=True):
    if in_z:
        if in_z == True:
            # Only include galaxies whose redshifts fall within errors of the bent double
            neighbors = neighbors[np.where([in_error(z, zerr, i['photo_z'], i['photo_zerr'])
                                            for i in neighbors])[0]]
        else:
            # in_z is a number: include galaxies in a redshift slab of half-width in_z
            neighbors = neighbors[np.where(abs(z - neighbors['photo_z']) < in_z)]
    # Find the value that corresponds to search_rad in arcsec
    kpcperarcmin = cosmo.kpc_proper_per_arcmin(z)
    rad_arcsec = (search_rad / kpcperarcmin).to(u.arcsec).value
    # Only select galaxies that fall within search_rad and update the neighbors table
    near_sky = np.where(angsep(ra, dec, neighbors['RA'], neighbors['DEC']) * 3600 < rad_arcsec)[0]
    neighbors = neighbors[near_sky]
    # If a redshift error cutoff is specified, exclude galaxies with errors above it
    if zerr_cutoff:
        neighbors = neighbors[np.where(neighbors['photo_zerr'] < zerr_cutoff)[0]]
    return neighbors
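# A hypothetical call: companions of a source at z = 0.3 within 1 Mpc whose
# photo-z agree within errors. `catalog` stands in for a table with 'RA',
# 'DEC', 'photo_z' and 'photo_zerr' columns, and angsep() is assumed to
# return separations in degrees (hence the factor 3600 above).
nearby = within_radius(ra=150.1, dec=2.2, z=0.3, zerr=0.02,
                       neighbors=catalog, search_rad=1 * u.Mpc, zerr_cutoff=0.05)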
def matchFC(cat1, cat2, sigmaz, printprocess):  # , dz, opts):
    # Read cat1 (rich1 = Mhalo)
    id1, ra1, dec1, z1, ngal1, rich1, r2001 = N.loadtxt(
        cat1, dtype='|S20,float,float,float,float,float,float',
        usecols=(0, 1, 2, 3, 4, 5, 6), unpack=True)
    # Sort the catalogue putting the most massive first
    be = (-rich1).ravel().argsort()
    id1, ra1, dec1, z1, ngal1, rich1, r2001 = \
        id1[be], ra1[be], dec1[be], z1[be], ngal1[be], rich1[be], r2001[be]
    # Read cat2 (rich2 = SNR)
    id2, ra2, dec2, z2, ngal2, rich2, r2002 = N.loadtxt(
        cat2, dtype='|S20,float,float,float,float,float,float',
        usecols=(0, 1, 2, 3, 4, 5, 6), unpack=True)
    kpcmin = cosmo.kpc_proper_per_arcmin(z2)
    r2002 = (z2 * 0. + 1.0) * 1000. / kpcmin.value  # 1 Mpc in arcminutes
    # Sort the catalogue putting the first-ranked first
    be = (-rich2).ravel().argsort()
    id2, ra2, dec2, z2, ngal2, rich2, r2002 = \
        id2[be], ra2[be], dec2[be], z2[be], ngal2[be], rich2[be], r2002[be]
    c2 = C.SkyCoord(ra=ra2 * U.degree, dec=dec2 * U.degree)
    origID, origRA, origDEC, origZ, origNGAL, origRICH, origRAD = [], [], [], [], [], [], []
    matchID, matchRA, matchDEC, matchZ, matchNGAL, matchRICH, matchDISTI = [], [], [], [], [], [], []
    kpcmin1 = cosmo.kpc_proper_per_arcmin(z1)
    r2001Mpc = kpcmin1.value * r2001 / 1000.
    null = float(-99.)
    for i in range(len(id1)):
        if printprocess == 'yes':
            if i / 100. == int(i / 100.):
                print(i)
        c1 = C.SkyCoord(ra=ra1[i] * U.degree, dec=dec1[i] * U.degree)
        sep = c1.separation(c2)  # arcmin
        sepV = sep.arcminute
        # match within twice the 2R radius and |dz| < 2*sigmaz*(1+z)
        ja = (N.less_equal((N.array(sepV) - 2. * r2001[i]), 0.0) *
              N.less_equal(abs(z2 - z1[i]) - 2. * sigmaz * (1. + z1[i]), 0.))
        id2S, ra2S, dec2S, z2S, ngal2S, rich2S, r2002S, sepS = B.multicompress(
            ja, (id2, ra2, dec2, z2, ngal2, rich2, r2002, sepV))
        origID.append(id1[i])
        origRA.append(ra1[i])
        origDEC.append(dec1[i])
        origZ.append(z1[i])
        origNGAL.append(ngal1[i])
        origRICH.append(rich1[i])
        origRAD.append(r2001Mpc[i])
        if len(ra2S) > 0:
            c2F = C.SkyCoord(ra=ra2S * U.degree, dec=dec2S * U.degree)
            # Search for friends of friends
            for j in range(len(id2S)):
                matchFID, matchFRA, matchFDEC, matchFZ = [], [], [], []
                matchFNGAL, matchFRICH, matchFDISTI = [], [], []
                c1F = C.SkyCoord(ra=ra2S[j] * U.degree, dec=dec2S[j] * U.degree)
                sepF = c1F.separation(c2F)  # arcmin
                sepVF = sepF.arcminute
                # again twice the 2R radius
                ja = (N.less_equal((N.array(sepVF) - 2. * r2002S[j]), 0.0) *
                      N.less_equal(abs(z2S - z2S[j]) - 2. * sigmaz * (1. + z2S[j]), 0.))
                id2F, ra2F, dec2F, z2F, ngal2F, rich2F, r2002F, sepF = B.multicompress(
                    ja, (id2S, ra2S, dec2S, z2S, ngal2S, rich2S, r2002S, sepS))
                if len(id2F) > 0:
                    matchFID.append(id2F)
                    matchFRA.append(ra2F)
                    matchFDEC.append(dec2F)
                    matchFZ.append(z2F)
                    matchFNGAL.append(ngal2F)
                    matchFRICH.append(rich2F)
                    # Append the distances of the FoF
                    kpcmin2F = cosmo.kpc_proper_per_arcmin(z2F)
                    distiMpcF = kpcmin2F.value * sepF / 1000.
                    matchFDISTI.append(distiMpcF)
            # Sort the matches putting the most massive first and select it
            if len(matchFID) > 0:
                matchFID = N.concatenate(matchFID, axis=0)
                matchFRA = N.concatenate(matchFRA, axis=0)
                matchFDEC = N.concatenate(matchFDEC, axis=0)
                matchFZ = N.concatenate(matchFZ, axis=0)
                matchFNGAL = N.concatenate(matchFNGAL, axis=0)
                matchFRICH = N.concatenate(matchFRICH, axis=0)
                matchFDISTI = N.concatenate(matchFDISTI, axis=0)
                be = (-matchFRICH).ravel().argsort()
                # be = (matchFDISTI).ravel().argsort()
                matchFID = matchFID[be[0]]
                matchFRA = matchFRA[be[0]]
                matchFDEC = matchFDEC[be[0]]
                matchFZ = matchFZ[be[0]]
                matchFNGAL = matchFNGAL[be[0]]
                matchFRICH = matchFRICH[be[0]]
                matchFDISTI = matchFDISTI[be[0]]
                matchID.append(matchFID)
                matchRA.append(matchFRA)
                matchDEC.append(matchFDEC)
                matchZ.append(matchFZ)
                matchNGAL.append(matchFNGAL)
                matchRICH.append(matchFRICH)
                matchDISTI.append(matchFDISTI)
            if len(matchFID) == 0:
                matchID.append(null)
                matchRA.append(null)
                matchDEC.append(null)
                matchZ.append(null)
                matchNGAL.append(null)
                matchRICH.append(null)
                matchDISTI.append(null)
        # Save all entries even if not matched (necessary for the completeness & purity computations)
        if len(ra2S) == 0:
            matchID.append(null)
            matchRA.append(null)
            matchDEC.append(null)
            matchZ.append(null)
            matchNGAL.append(null)
            matchRICH.append(null)
            matchDISTI.append(null)
    # Check how many are matched
    ja = N.greater_equal((N.array(matchDISTI)), 0.0)
    la = N.compress(ja, N.array(matchDISTI))
    print(len(la), 'matched out of', len(ra1))
    return (origID, origRA, origDEC, origZ, origNGAL, origRICH,
            matchID, matchRA, matchDEC, matchZ, matchNGAL, matchRICH,
            matchDISTI, origRAD)
def jwst_nirspec_figure(alph=0.1, Q=1.0, dx=0, dy=0, Npix=400.0, do_psf=False):
    natch_z2 = 'nref11/RD0020/mcrx.fits'
    extra_z2 = 'nref11_refine200kpc_z4to2/RD0020/mcrx.fits'
    extra_z3 = 'nref11_refine200kpc_z4to2/RD0017/mcrx.fits'
    cs = 'CAMERA3-NONSCATTER'
    natch_z2_o = snapshot()
    extra_z2_o = snapshot()
    extra_z3_o = snapshot()
    rf = 21  # F200W 0.75
    gf = 19  # F115W 0.40
    bf = 4   # ACS/F775W rest far-UV 0.25
    il = 1310
    in2 = 1316
    ic = 1296
    fudge = 1.0
    lfact = 1.0
    mid = np.int64(400.0 / 2)
    if Npix is not None:
        delt = np.int64(Npix / 2)
    filen = 'nirspec.pdf'
    extra_z2_o.ha = fudge * pyfits.open(extra_z2)[cs].data[
        il, dx + mid - delt:dx + mid + delt, dy + mid - delt:dy + mid + delt] * lfact
    extra_z3_o.ha = fudge * pyfits.open(extra_z3)[cs].data[
        il, dx + mid - delt:dx + mid + delt, dy + mid - delt:dy + mid + delt] * lfact
    extra_z2_o.n2 = fudge * pyfits.open(extra_z2)[cs].data[
        in2, dx + mid - delt:dx + mid + delt, dy + mid - delt:dy + mid + delt] * lfact
    extra_z3_o.n2 = fudge * pyfits.open(extra_z3)[cs].data[
        in2, dx + mid - delt:dx + mid + delt, dy + mid - delt:dy + mid + delt] * lfact
    extra_z2_o.c = fudge * pyfits.open(extra_z2)[cs].data[
        ic, dx + mid - delt:dx + mid + delt, dy + mid - delt:dy + mid + delt] * lfact
    extra_z3_o.c = fudge * pyfits.open(extra_z3)[cs].data[
        ic, dx + mid - delt:dx + mid + delt, dy + mid - delt:dy + mid + delt] * lfact
    natch_z2_o.ha = fudge * pyfits.open(natch_z2)[cs].data[
        il, dx + mid - delt:dx + mid + delt, dy + mid - delt:dy + mid + delt] * lfact
    natch_z2_o.n2 = fudge * pyfits.open(natch_z2)[cs].data[
        in2, dx + mid - delt:dx + mid + delt, dy + mid - delt:dy + mid + delt] * lfact
    natch_z2_o.c = fudge * pyfits.open(natch_z2)[cs].data[
        ic, dx + mid - delt:dx + mid + delt, dy + mid - delt:dy + mid + delt] * lfact
    kpc_per_arcsec_z2 = WMAP9.kpc_proper_per_arcmin(2.0).value / 60.0
    kpc_per_arcsec_z3 = WMAP9.kpc_proper_per_arcmin(2.75).value / 60.0
    kpc_per_pix = 0.125

    # high-res version first
    spinelw = 1.0
    f1 = pyplot.figure(figsize=(10.0, 10.0), dpi=200)
    pyplot.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0, wspace=0.0, hspace=0.0)

    axi = f1.add_subplot(2, 2, 1)
    axi.set_xticks([])
    axi.set_yticks([])
    axi.annotate('JWST-Nirspec/IFU', (0.50, 0.88), xycoords='axes fraction',
                 color='Black', ha='center', va='center', fontsize=25)
    rat = np.log10(natch_z2_o.n2 / natch_z2_o.ha)
    vmin = -0.5
    vmax = 0.0
    themap = cm.viridis
    rat = np.where(rat > vmax, -1000.0, rat)
    Zm = ma.masked_where(rat < -100.0, rat)
    cscale_function = pycolors.Normalize(vmin=vmin, vmax=vmax, clip=True)
    axi.imshow(np.fliplr(np.transpose(Zm)), interpolation='nearest', origin='upper',
               vmin=vmin, vmax=vmax, cmap=themap)
    for ss in axi.spines:
        s = axi.spines[ss]
        s.set_color('white')
        s.set_linewidth(spinelw)

    axi = f1.add_subplot(2, 2, 2)
    axi.set_xticks([])
    axi.set_yticks([])
    rat = np.log10(extra_z2_o.n2 / extra_z2_o.ha)
    vmin = -0.5
    vmax = 0.0
    themap = cm.viridis
    rat = np.where(rat > vmax, -1000.0, rat)
    Zm = ma.masked_where(rat < -100.0, rat)
    cscale_function = pycolors.Normalize(vmin=vmin, vmax=vmax, clip=True)
    axi.imshow(np.fliplr(np.transpose(Zm)), interpolation='nearest', origin='upper',
               vmin=vmin, vmax=vmax, cmap=themap)
    for ss in axi.spines:
        s = axi.spines[ss]
        s.set_color('white')
        s.set_linewidth(spinelw)

    axi = f1.add_subplot(2, 2, 4)
    axi.set_xticks([])
    axi.set_yticks([])
    rat = np.log10(extra_z3_o.n2 / extra_z3_o.ha)
    vmin = -0.5
    vmax = 0.0
    themap = cm.viridis
    rat = np.where(rat > vmax, -1000.0, rat)
    Zm = ma.masked_where(rat < -100.0, rat)
    cscale_function = pycolors.Normalize(vmin=vmin, vmax=vmax, clip=True)
    obj = axi.imshow(np.fliplr(np.transpose(Zm)), interpolation='nearest', origin='upper',
                     vmin=vmin, vmax=vmax, cmap=themap)
    gth.make_colorbar(obj, f1, title=r'$log_{10} [NII]/H \alpha $', ticks=[-0.5, 0.0],
                      loc=[0.60, 0.50, 0.3, 0.05], fontsize=25)
    for ss in axi.spines:
        s = axi.spines[ss]
        s.set_color('white')
        s.set_linewidth(spinelw)

    pyplot.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0, wspace=0.0, hspace=0.0)
    f1.savefig(filen, dpi=200)
    pyplot.close(f1)
    return
Create a custom cosmology object:

>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> cosmo
FlatLambdaCDM(H0=70, Om0=0.3, Ode0=0.7)

Compute the comoving volume to z=6.5 in cubic Mpc using this cosmology:

>>> cosmo.comoving_volume(6.5)
2521696198211.6924

Compute the age of the universe in Gyr using the pre-defined WMAP 5-year and WMAP 9-year cosmologies:

>>> from astropy.cosmology import WMAP5, WMAP9
>>> WMAP5.age(0)
13.723782349795023
>>> WMAP9.age(0)
13.768899510689097

Create a cosmology with a varying w:

>>> from astropy.cosmology import Flatw0waCDM
>>> cosmo = Flatw0waCDM(H0=70, Om0=0.3, w0=-1, wa=0.2)

Find the separation in proper kpc at z=4 corresponding to 10 arcsec in this cosmology, compared to a WMAP9 cosmology:

>>> cosmo.kpc_proper_per_arcmin(4) * 10 / 60.
68.87214405278925
>>> WMAP9.kpc_proper_per_arcmin(4) * 10 / 60.
71.21374615575363
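The quantity returned by kpc_proper_per_arcmin carries astropy units, so conversions stay explicit; a small sketch (outputs omitted):

>>> from astropy import units as u
>>> scale = WMAP9.kpc_proper_per_arcmin(4)   # Quantity in kpc / arcmin
>>> scale.to(u.kpc / u.arcsec)               # the same scale per arcsecond
>>> (10 * u.arcsec * scale).to(u.kpc)        # proper size subtended by 10 arcsec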
def begin(index):
    # index has the form '<id>-<mgi>-<color>'
    i = int(index.split('-')[0])
    mgi = int(index.split('-')[1])
    color = index.split('-')[2]
    print(index)
    # (alternate dataset paths kept for reference)
    #filename = 'Test Data Extract/' + str(i) + '.fit'
    #filename = str(i) + '-g.fit'
    #filename = '/data/marvels/billzhu/2175 Dataset/' + color + '/' + str(index) + '.fit'
    #filename = '/data/marvels/billzhu/2175 Reference Dataset/' + color + '/' + str(index) + '.fit'
    filename = '/data/marvels/billzhu/Reference Dataset/0.37 - 0.55/' + color + '/' + str(index) + '.fit'
    #filename = '/data/marvels/billzhu/MG II Dataset/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '.fit'
    hdulist = fits.open(filename)
    #qlist = fits.open('MG II Test Cut/' + str(i) + '_MG.fit')
    #qlist = fits.open('/data/marvels/billzhu/2175 Quasar Cut/' + color + '/' + str(index) + '_DUST.fit')
    #qlist = fits.open('/data/marvels/billzhu/2175 Reference Quasar Cut/' + color + '/' + str(index) + '_REF.fit')
    qlist = fits.open('/data/marvels/billzhu/Reference Quasar Cut/0.37 - 0.55/' + color + '/' + str(index) + '_REF.fit')
    #qlist = fits.open('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_MG.fit')
    qx = qlist[0].header['XCOORD']
    qy = qlist[0].header['YCOORD']
    obj_id = qlist[0].header['ID']
    mean1 = qlist[0].header['MEAN_BKG']
    median1 = qlist[0].header['MED_BKG']
    std1 = qlist[0].header['STD_BKG']
    redshift = qlist[0].header['ZABS']
    #print("%f, %f" % (x, y))
    qlist.close()

    scidata = hdulist[0].data.astype(float)
    #print(sigma_clipped_stats(scidata, sigma=3.0, maxiters=5))

    pointer = 0
    if color == 'g':
        pointer = 1
    if color == 'r':
        pointer = 2
    if color == 'i':
        pointer = 3
    if color == 'z':
        pointer = 4
    if color == 'u':
        pointer = 0

    #bkg_sigma = mad_std(scidata)
    try:
        #obj_table = Table.read('/data/marvels/billzhu/2175 Obj/' + str(i) + '-' + str(mgi) + '.fit', hdu=1)
        #obj_table = Table.read('/data/marvels/billzhu/2175 Reference Obj/' + str(i) + '-' + str(mgi) + '.fit', hdu=1)
        #obj_table = Table.read('/data/marvels/billzhu/MG II Obj/0.37 - 0.55/' + str(i) + '_Obj.fit', hdu=1)
        obj_table = Table.read('/data/marvels/billzhu/Reference Obj/0.37 - 0.55/' + str(i) + '_Obj.fit', hdu=1)
    except:
        print(str(i) + ' No Table')
        return

    #line_data = linecache.getline('Full Data.txt', i).split()
    #line_data = linecache.getline('DR12 QSO.txt', i).split()
    #obj_id = int(line_data[52])
    quasar = obj_table[obj_id - 1]
    try:
        print("%d, %f" % (obj_id, obj_table['M_rr_cc'][obj_id - 1][pointer]))
    except:
        print("can't print table")
        return

    # If no quasar is found, the field image is deemed corrupt and not used
    if quasar == 0:
        print(str(i) + ' No quasar')
        return

    # Calculate the 18th-magnitude count threshold from the psField flux20 calibration
    mag18 = 0
    header = hdulist[0].header
    pstable = Table.read('/data/marvels/billzhu/Reference psField/0.37 - 0.55/' + str(i) + '_psField.fit', hdu=7)
    mag20 = pstable['flux20']
    if color == 'g':
        mag18 = mag20[1] * 10**(8. - 18 / 2.5)
    if color == 'r':
        mag18 = mag20[2] * 10**(8. - 18 / 2.5)
    if color == 'i':
        mag18 = mag20[3] * 10**(8. - 18 / 2.5)
    if color == 'z':
        mag18 = mag20[4] * 10**(8. - 18 / 2.5)

    #qsocut = fits.open('/data/marvels/billzhu/2175 Quasar Cut/' + color + '/' + str(index) + '_DUST.fit')
    #qsocut = fits.open('/data/marvels/billzhu/2175 Reference Quasar Cut/' + color + '/' + str(index) + '_REF.fit')
    qsocut = fits.open('/data/marvels/billzhu/Reference Quasar Cut/0.37 - 0.55/' + color + '/' + str(index) + '_REF.fit')
    #qsocut = fits.open('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_MG.fit')
    qsodata = qsocut[0].data.astype(float)
    if qsodata[50, 50] < 5:
        return

    print('reached')
    largearr = []
    stars = []
    chunk_size = 50
    diff_fwhm = 1000000
    counter = 0
    scale = cosmo.kpc_proper_per_arcmin(redshift) * u.arcmin / u.kiloparsec * 0.396 / 60  # kpc per pixel
    #filedir = '/data/marvels/billzhu/background/' + color + '/'
    filedir = '/data/marvels/billzhu/Reference Background/' + color + '/'
    for j in range(len(obj_table)):
        sx = obj_table['colc'][j][pointer]
        sy = obj_table['rowc'][j][pointer]
        flags1 = detflags(obj_table['objc_flags'][j])
        flags2 = detflags(obj_table['objc_flags2'][j])
        try:
            if (obj_table['objc_type'][j] == 6 and flags1[12] == False and flags1[17] == False
                    and flags1[18] == False and flags2[27] == False
                    and distance(sx, sy, qx, qy) > 5
                    and inbounds(sx + chunk_size + 6, sy + chunk_size + 6)
                    and inbounds(sx - chunk_size - 5, sy - chunk_size - 5)
                    and obj_table['psfCounts'][j][pointer] > mag18
                    and obj_table['M_rr_cc'][j][pointer] > 0
                    and abs(obj_table['M_rr_cc'][j][pointer] - obj_table['M_rr_cc'][obj_id - 1][pointer])
                        < 0.1 * obj_table['M_rr_cc'][obj_id - 1][pointer]):
                """
                preshift = scidata[int(sy - 10) : int(sy + 11), int(sx - 10) : int(sx + 11)]
                xc, yc = centroid_2dg(preshift, mask = None)
                xc += quasar['colc'][pointer] - 10
                yc += quasar['rowc'][pointer] - 10
                """
                xc = obj_table['colc'][j][pointer]
                yc = obj_table['rowc'][j][pointer]
                #preshift = scidata[int(yc - chunk_size - 5) : int(yc + chunk_size + 6), int(xc - chunk_size - 5) : int(xc + chunk_size + 6)]
                #print("%f, %f" % (xc, yc))
                xu = xc + 350.0
                xl = xc - 350.0
                yu = yc + 350.0
                yl = yc - 350.0
                xc1 = xc
                yc1 = yc
                if xu >= 2048:
                    xu = 2047
                if xl < 0:
                    xl = 0
                else:
                    xc1 = xc - int(xc) + 350.0
                if yu >= 1489:
                    yu = 1488
                if yl < 0:
                    yl = 0
                else:
                    yc1 = yc - int(yc) + 350.0
                #print("%f, %f, %f, %f, %f, %f" % (xc1, yc1, xl, yl, xu, yu))
                scidata2 = np.array(scidata[int(yl):int(yu), int(xl):int(xu)])
                visited = np.zeros((len(scidata2), len(scidata2[0])), dtype=bool)
                scidata2 = checkInner(scidata2, obj_table, xc, yc, xc1, yc1, mean1, std1, visited, pointer)
                bkg_stats = calc_background_stats(scidata2, xc1, yc1, int(400 / scale), int(500 / scale))
                print(bkg_stats)
                if scidata2[int(yc1), int(xc1)] - bkg_stats[0] > 20:
                    bkg_stats_arr.append([bkg_stats])
                    #print("%f, %f" % (xc1, yc1))
                    coldefs = calc_arcsec_stats(scidata2, xc1, yc1, counter, color)
                    bkg_col = fits.Column(
                        name=str(round(float(400 / scale * 0.396), 3)) + '-' +
                             str(round(float(500 / scale * 0.396), 3)) + ' arcsec bkg stats',
                        format='D', unit='counts', array=bkg_stats)
                    coldefs.add_col(bkg_col)
                    hdu = fits.BinTableHDU.from_columns(coldefs)
                    hdu.writeto(filedir + index + '_' + str(counter) + '.fits', overwrite=True)
                    #print(counter)
                    counter += 1
                else:
                    print("ERASED")
        except:
            print('EXCEPTION')
            continue
def PS(self, z, region_size=1., radius_in=0., radius_out=1.):
    kpcp = cosmo.kpc_proper_per_arcmin(z).value
    Mpcpix = 1000. / kpcp / self.data.pixsize  # 1 Mpc in pixels
    regsizepix = region_size * Mpcpix

    # Set the scales
    minscale = 2  # minimum scale of 2 pixels
    maxscale = regsizepix / 2.
    scale = np.logspace(np.log10(minscale), np.log10(maxscale), 10)  # 10 scales logarithmically spaced
    sckpc = scale * self.data.pixsize * kpcp
    kr = 1. / np.sqrt(2. * np.pi**2) * np.divide(1., scale)  # Eq. A5 of Arevalo et al. 2012

    # Define the region where the power spectrum will be extracted
    fmask = fits.open('mask.fits')
    mask = fmask[0].data
    data_size = mask.shape
    fmask.close()
    y, x = np.indices(data_size)
    rads = np.hypot(y - data_size[0] / 2., x - data_size[1] / 2.)
    region = np.where(np.logical_and(
        np.logical_and(rads > radius_in * Mpcpix, rads <= radius_out * Mpcpix),
        mask > 0.0))

    # Extract the PS from the various images
    nsc = len(scale)
    ps, psnoise, amp, eamp = np.empty(nsc), np.empty(nsc), np.empty(nsc), np.empty(nsc)
    vals = []
    nreg = 20  # Number of subregions for the bootstrap calculation
    for i in range(nsc):
        # Read images
        fco = fits.open('conv_scale_%d_kpc.fits' % (int(np.round(sckpc[i]))))
        convimg = fco[0].data.astype(float)
        fco.close()
        fmod = fits.open('conv_model_%d_kpc.fits' % (int(np.round(sckpc[i]))))
        convmod = fmod[0].data.astype(float)
        fmod.close()
        print('Computing the power at scale', sckpc[i], 'kpc')
        ps[i], psnoise[i], vps = calc_ps(region, convimg, convmod, kr[i], nreg)
        vals.append(vps)

    # Bootstrap the data and compute the covariance matrix
    print('Computing the covariance matrix...')
    nboot = int(1e4)  # number of bootstrap resamplings
    cov = do_bootstrap(vals, nboot)

    # Compute the eigenvalues of the covariance matrix to verify that it is positive definite
    la, v = np.linalg.eig(cov)
    print('Eigenvalues: ', la)
    eps = np.empty(nsc)
    for i in range(nsc):
        eps[i] = np.sqrt(cov[i, i])
    amp = np.sqrt(np.abs(ps) * 2. * np.pi * kr**2 / cf)
    eamp = 1. / 2. * np.power(np.abs(ps) * 2. * np.pi * kr**2 / cf, -0.5) * 2. * np.pi * kr**2 / cf * eps
    self.kpix = kr
    self.k = 1. / np.sqrt(2. * np.pi**2) * np.divide(1., sckpc)
    self.ps = ps
    self.eps = eps
    self.psnoise = psnoise
    self.amp = amp
    self.eamp = eamp
    self.cov = cov
if 'RA' in prihdr.comments['CD1_1']:
    raDegColPix = prihdr['CD1_1']
    raDegRowPix = prihdr['CD1_2']
    decDegColPix = prihdr['CD2_1']
    decDegRowPix = prihdr['CD2_2']
else:
    decDegColPix = prihdr['CD1_1']
    decDegRowPix = prihdr['CD1_2']
    raDegColPix = prihdr['CD2_1']
    raDegRowPix = prihdr['CD2_2']

tot_ra = raDegRowPix * 42
tot_dec = decDegColPix * 42
scale = cosmo.kpc_proper_per_arcmin(0.37) * 60.  # kpc per degree
x_kpc = scale * tot_ra
y_kpc = scale * tot_dec
print(lowest_redshift)

refdirs = os.listdir('/data/marvels/billzhu/Reference PSF Subtract/0.37 - 0.55/')

# For each quasar, both absorption and reference, calculate the distance, kpc across the image, and scale
for f in refdirs:
    index = int(f.split('_')[0])
    if index == lindex:
        continue
# The enclosing definition was lost in extraction; a plausible signature is
# reconstructed here (the name and arguments are assumptions). The body counts
# the pixels inside the annulus between radius2 and radius1 around (50, 50).
def annulus_pixel_count(scidata, radius1, radius2):
    length = 0
    mean, median, stddev = sigma_clipped_stats(scidata, sigma=3.0, iters=5)
    for i in range(len(scidata)):
        for j in range(len(scidata[0])):
            if distance(i, j, 50, 50) <= radius1 and distance(i, j, 50, 50) >= radius2:
                length += 1
    return length

# load the data here:
hdulist = fits.open('MGSUBComb62-g.fit')
scidata = hdulist[0].data.astype(float)
scale = cosmo.kpc_proper_per_arcmin(0.48) * u.arcmin / u.kiloparsec * 0.396 / 60  # kpc/pixel

# get the size of the image:
width = scidata.shape[0]
max_boundary = width / scale * 0.5
print(max_boundary)

fig, ax = plt.subplots()
boundaries = [3, 10, 13, 16, 19, 27, 37, 51, 67, 100, 140]  # kpc

# calculate the sky error here:
sky_count = photoncount(scidata, boundaries[-1] / scale, max_boundary / scale)
sky_flux = sky_count / 2000 / 10**8
def Mgas(self, radius, plot=True, outfile=None):
    if self.samples is None or self.z is None or self.cf is None:
        print('Error: no gas density profile found')
        return
    prof = self.profile
    kpcp = cosmo.kpc_proper_per_arcmin(self.z).value
    rkpc = prof.bins * kpcp
    erkpc = prof.ebins * kpcp
    nhconv = mh * mu_e * nhc * kpc**3 / msun  # Msun/kpc^3
    rad = prof.bins
    sourcereg = np.where(rad < self.bkglim)
    transf = 4. * (1. + self.z)**2 * (180. * 60.)**2 / np.pi / 1e-14 / nhc / Mpc * 1e3
    pardens = list_params_density(rad, sourcereg, self.z)
    Kdens = calc_density_operator(rad, sourcereg, pardens, self.z)
    # All gas density profiles
    alldens = np.sqrt(np.dot(Kdens, np.exp(self.samples.T)) / self.cf * transf)  # [0:nptfit, :]
    # Matrix containing the integration volumes
    volmat = np.repeat(4. * np.pi * rkpc**2 * 2. * erkpc,
                       alldens.shape[1]).reshape(len(prof.bins), alldens.shape[1])
    # Compute the Mgas profile as a cumulative sum over the volume
    mgas = np.cumsum(alldens * nhconv * volmat, axis=0)
    # Interpolate at the radius of interest
    f = interp1d(rkpc, mgas, axis=0)
    mgasdist = f(radius)
    mg, mgl, mgh = np.percentile(mgasdist, [50., 50. - 68.3 / 2., 50. + 68.3 / 2.])
    if plot:
        plt.clf()
        fig = plt.figure(figsize=(13, 10))
        ax_size = [0.14, 0.12, 0.85, 0.85]
        ax = fig.add_axes(ax_size)
        ax.minorticks_on()
        ax.tick_params(length=20, width=1, which='major', direction='in', right='True', top='True')
        ax.tick_params(length=10, width=1, which='minor', direction='in', right='True', top='True')
        for item in (ax.get_xticklabels() + ax.get_yticklabels()):
            item.set_fontsize(22)
        # plt.yscale('log')
        plt.hist(mgasdist, bins=30)
        plt.xlabel('$M_{gas} [M_\odot]$', fontsize=40)
        plt.ylabel('Frequency', fontsize=40)
        if outfile is not None:
            plt.savefig(outfile)
        else:
            plt.show()
    return mg, mgl, mgh
def within_radius_prob(ra, dec, z, zerr, neighbors, search_rad=2 * u.Mpc,
                       zerr_cutoff=False, least_prob=.67, zdif=.1):
    '''
    Finds all galaxies within search_rad Mpc of the bent double.

    search_rad: search radius, in Mpc
    zerr_cutoff: maximum allowed redshift error
    least_prob: lowest allowed probability for two galaxies to be at the same z
    '''
    # cut neighbors with a z difference larger than zdif
    neighbors = neighbors[np.where(abs(z - neighbors['photo_z']) < zdif)]
    # Find the value that corresponds to search_rad in arcsec
    kpcperarcmin = cosmo.kpc_proper_per_arcmin(z)
    rad_arcsec = (search_rad / kpcperarcmin).to(u.arcsec).value
    # Only select galaxies that fall within search_rad and update the neighbors table
    near_sky = np.where(angsep(ra, dec, neighbors['RA'], neighbors['DEC']) * 3600 < rad_arcsec)[0]
    neighbors = neighbors[near_sky]
    # If a redshift error cutoff is specified, exclude galaxies with errors above it
    if zerr_cutoff:
        neighbors = neighbors[np.where(neighbors['photo_zerr'] < zerr_cutoff)[0]]
    gi = []
    for i in range(len(neighbors)):
        # Save the redshifts and errors to two different arrays
        vs = np.array([z, neighbors[i]['photo_z']])
        es = np.array([zerr, neighbors[i]['photo_zerr']])
        # Possible start and end points of the integration range
        pos = [vs[0] - es[0] * 4, vs[0] + es[0] * 4,
               vs[1] + es[1] * 4, vs[1] - es[1] * 4]
        # Integration grid spanning the min and max of the previous list
        x = np.linspace(min(pos), max(pos), 10000)
        # Gaussian distributions
        g1 = norm.pdf(x, vs[0], es[0])
        g2 = norm.pdf(x, vs[1], es[1])
        # Maximum possible overlap, used to normalize
        max_p = g1 * norm.pdf(x, vs[0], es[1])
        max_integ = np.trapz(max_p, x)
        # Normalized probability
        prob = np.trapz(g1 * g2, x) / max_integ
        if prob > least_prob:
            gi.append(i)
    neighbors = neighbors[gi]
    return neighbors
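# The overlap test in the loop above, factored into a standalone helper for
# clarity (a sketch; the function name is ours, not from the original code):
def same_z_probability(z1, e1, z2, e2):
    # Overlap integral of the two photo-z Gaussians, normalized by the
    # overlap that a perfect match (equal means) would give.
    x = np.linspace(min(z1 - 4 * e1, z2 - 4 * e2), max(z1 + 4 * e1, z2 + 4 * e2), 10000)
    g1, g2 = norm.pdf(x, z1, e1), norm.pdf(x, z2, e2)
    max_integ = np.trapz(g1 * norm.pdf(x, z1, e2), x)
    return np.trapz(g1 * g2, x) / max_integ
# e.g. same_z_probability(0.30, 0.02, 0.31, 0.03) -> a value in (0, 1]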
def absolute_FoF_prob(bd_ra, bd_dec, bd_z, bd_zerr, neighbors, search_rad=200.,
                      zerr_cutoff=False, least_prob=.67, zdif=.1, cutoff=5000.):
    '''
    FoF algorithm that applies a probability-based selection in redshift space.

    search_rad: linking length, in kpc
    zerr_cutoff: maximum allowed redshift error
    least_prob: lowest allowed probability for two galaxies to be at the same z
    '''
    # cut neighbors with a z difference larger than zdif
    neighbors = neighbors[np.where(abs(bd_z - neighbors['photo_z']) < zdif)]
    # implement the redshift error cutoff if specified
    if zerr_cutoff:
        neighbors = neighbors[np.where(neighbors['photo_zerr'] < zerr_cutoff)[0]]
    # Calculate the initial search radius, in degrees
    init_dist = search_rad / (cosmo.kpc_proper_per_arcmin(bd_z).value * 60.)
    # Find galaxies that fall within this radius of the bent double
    group_ni = np.where(angsep(bd_ra, bd_dec, neighbors['RA'], neighbors['DEC']) < init_dist)[0]
    group_ni = list(group_ni)
    max_dist = cutoff / (cosmo.kpc_proper_per_arcmin(bd_z).value * 60.)
    # loop through the indices of galaxies within the search radius of the bent double
    for n in group_ni:
        i = neighbors[n]
        # Calculate a new search distance from the selected neighbor
        sdist = search_rad / (cosmo.kpc_proper_per_arcmin((i['photo_z'] + bd_z) / 2).value * 60.)
        if cutoff:
            i_neighbors = np.where((angsep(i['RA'], i['DEC'], neighbors['RA'], neighbors['DEC']) < sdist) &
                                   (angsep(bd_ra, bd_dec, neighbors['RA'], neighbors['DEC']) < max_dist))[0]
        else:
            # Find the neighbors of this neighbor
            i_neighbors = np.where(angsep(i['RA'], i['DEC'], neighbors['RA'], neighbors['DEC']) < sdist)[0]
        # Add any new neighbors to the list being looped over, if they are not already in it
        for j in i_neighbors:
            if j not in group_ni:
                group_ni.append(j)
    neighbors = neighbors[group_ni]
    # apply the probability selection to the remaining neighbors
    gi = []
    for i in range(len(neighbors)):
        # Save the redshifts and errors to two different arrays
        vs = np.array([bd_z, neighbors[i]['photo_z']])
        es = np.array([bd_zerr, neighbors[i]['photo_zerr']])
        # Possible start and end points of the integration range
        pos = [vs[0] - es[0] * 4, vs[0] + es[0] * 4,
               vs[1] + es[1] * 4, vs[1] - es[1] * 4]
        # Integration grid spanning the min and max of the previous list
        x = np.linspace(min(pos), max(pos), 10000)
        # Gaussian distributions
        g1 = norm.pdf(x, vs[0], es[0])
        g2 = norm.pdf(x, vs[1], es[1])
        # Maximum possible overlap, used to normalize
        max_p = g1 * norm.pdf(x, vs[0], es[1])
        max_integ = np.trapz(max_p, x)
        # Normalized probability
        prob = np.trapz(g1 * g2, x) / max_integ
        if prob > least_prob:
            gi.append(i)
    neighbors = neighbors[gi]
    # final table of neighbors
    return neighbors
# (the enclosing loop header over candidate events was lost in extraction;
#  cand_i / cand_info_i are the event key and its record)
if cand_i in candidate_hosts:
    continue

# some events do not have complete RA/Dec info.
if not (cand_info_i['ra'] and cand_info_i['dec']):
    continue

# construct coord
crd_i = SkyCoord(ra=cand_info_i['ra'], dec=cand_info_i['dec'], unit=('hour', 'deg'))

# New 190506: use 30 kpc redshift cut.
zred_i = np.abs(float(cand_info_i['redshift']))
try:
    ksc_i = cosmo.kpc_proper_per_arcmin(zred_i).value / 60.  # kpc/asec
    rad_i = min(30. / ksc_i, 120.)
except:
    rad_i = 120.

# search catalogs. (30" limit)
tab_list_i = Vizier.query_region(crd_i, radius=rad_i * u.arcsec, catalog=vizier_cats)
sources_i = OrderedDict([('search_radius', rad_i)])
for cat_name_i, tab_i in tab_list_i._dict.items():
    sources_i[cat_name_i] = list()
    for rec_j in tab_i:
        sources_i[cat_name_i].append(as_tuple(rec_j))
fig, ax = plt.subplots()
hdulist = fits.open('2175SUBComb22-g.fit')
scidata = hdulist[0].data.astype(float)
mean, median, stddev = sigma_clipped_stats(scidata, sigma=3.0, iters=5)
print(median)
#scidata -= median
spline = interpolate.interp2d(np.arange(len(scidata)), np.arange(len(scidata)), scidata)
scidata = spline(np.arange(0, len(scidata), 0.1), np.arange(0, len(scidata), 0.1))
#scidata *= 1.15
SBarray = []
outter = []
scale = cosmo.kpc_proper_per_arcmin(1.44) * u.arcmin / u.kiloparsec * 0.396 / 60  # kpc/pixel
print(scale)
#print(cosmo.kpc_proper_per_arcmin(1.03))
#print(cosmo.kpc_proper_per_arcmin(5))
#print("%s" % ('%.4E' % Decimal(photoncount(scidata, 250, 200))))
#print(-2.5 * math.log10(photoncount(scidata, 250, 200)) + 2.5 * math.log10(scale**2))
#print(-2.5 * math.log10(2/(10**8 * 2000)) + 2.5 * np.log10(scale**2))
#print(scale)
for j in range(1, 12):
    #print(5 * j / scale)
    f = photoncount(scidata, 60 * (25 / 3)**((1.0 / 8) * j) / scale,
                    60 * (25 / 3)**((1.0 / 8) * (j - 1)) / scale)
warnings.simplefilter("ignore", RuntimeWarning)

version = '0_1_2'
nsa_not_gz = fits.getdata('../fits/nsa_v{0}_not_in_GZ_all_in_one.fits'.format(version), 1)
N = len(nsa_not_gz)

t = Table()
t['coords.0'] = nsa_not_gz['RA']
t['coords.1'] = nsa_not_gz['DEC']

# Calculate absolute size in kpc
size = WMAP9.kpc_proper_per_arcmin(nsa_not_gz['Z']).to(u.kpc / u.arcsec) * (nsa_not_gz['PETROTHETA'] * u.arcsec)
size[nsa_not_gz['Z'] < 0] = -99. * u.kpc

# Calculate absolute and apparent magnitude
absmag_r = nsa_not_gz['ABSMAG'][:, 4].astype(float)
mag = 22.5 - 2.5 * np.log10(nsa_not_gz['NMGY']).astype(float)
mag[~np.isfinite(mag)] = -99.
fluxarr = nsa_not_gz['PETROFLUX'][:, 4].astype(float)

url_stub = "http://www.galaxyzoo.org.s3.amazonaws.com/subjects/sdss_lost_set"
for imgtype in ('standard', 'inverted', 'thumbnail'):
    lst = url_stub + '/{0}/'.format(imgtype) + nsa_not_gz['IAUNAME'] + '.jpeg'
    t['location.{0}'.format(imgtype)] = lst
def MexicanHat(self, modimg_file, z, region_size=1., factshift=1.5):
    # Function to compute the Mexican Hat convolution
    # Inputs:
    #   modimg_file: model image file
    #   z: redshift
    #   region_size: size of the region of interest in Mpc
    #   factshift: size of the border around the region
    imgo = self.data.img
    expo = self.data.exposure
    bkg = self.data.bkg
    pixsize = self.data.pixsize
    # Read the model image
    fmod = fits.open(modimg_file)
    modimg = fmod[0].data.astype(float)
    # Define the mask
    nonz = np.where(expo > 0.0)
    masko = np.copy(expo)
    masko[nonz] = 1.0
    imgt = np.copy(imgo)
    noexp = np.where(expo == 0.0)
    imgt[noexp] = 0.0
    # Set the region of interest
    x_c = self.profile.cra  # Center coordinates
    y_c = self.profile.cdec
    kpcp = cosmo.kpc_proper_per_arcmin(z).value
    Mpcpix = 1000. / kpcp / pixsize  # 1 Mpc in pixels
    regsizepix = region_size * Mpcpix
    self.regsize = regsizepix
    minx = int(np.round(x_c - factshift * regsizepix))
    maxx = int(np.round(x_c + factshift * regsizepix + 1))
    miny = int(np.round(y_c - factshift * regsizepix))
    maxy = int(np.round(y_c + factshift * regsizepix + 1))
    if minx < 0:
        minx = 0
    if miny < 0:
        miny = 0
    if maxx > self.data.axes[1]:
        maxx = self.data.axes[1]
    if maxy > self.data.axes[0]:
        maxy = self.data.axes[0]
    img = np.nan_to_num(np.divide(imgt[miny:maxy, minx:maxx], modimg[miny:maxy, minx:maxx]))
    mask = masko[miny:maxy, minx:maxx]
    self.size = img.shape
    self.mask = mask
    fmod[0].data = mask
    fmod.writeto('mask.fits', overwrite=True)
    # Simulate a perfect model with Poisson noise
    randmod = np.random.poisson(modimg[miny:maxy, minx:maxx])
    simmod = np.nan_to_num(np.divide(randmod, modimg[miny:maxy, minx:maxx]))
    # Set the scales
    minscale = 2                # minimum scale of 2 pixels
    maxscale = regsizepix / 2.  # at least 4 resolution elements on a side
    scale = np.logspace(np.log10(minscale), np.log10(maxscale), 10)  # 10 scales logarithmically spaced
    sckpc = scale * pixsize * kpcp
    # Convolve images
    for i in range(len(scale)):
        sc = scale[i]
        print('Convolving with scale', sc)
        convimg, convmod = calc_mexicanhat(sc, img, mask, simmod)
        # Save images
        fmod[0].data = convimg
        fmod.writeto('conv_scale_%d_kpc.fits' % (int(np.round(sckpc[i]))), overwrite=True)
        fmod[0].data = convmod
        fmod.writeto('conv_model_%d_kpc.fits' % (int(np.round(sckpc[i]))), overwrite=True)
    fmod.close()
gzpath = '/Users/willettk/Astronomy/Research/GalaxyZoo'
version = '1_0_0'
nsa_decals = fits.getdata('%s/decals/nsa_decals_v%s_goodimgs.fits' % (gzpath, version), 1)
N = len(nsa_decals)

t = Table()
t['coords.0'] = nsa_decals['RA']
t['coords.1'] = nsa_decals['DEC']

# Calculate absolute size in kpc
size = [WMAP9.kpc_proper_per_arcmin(z).to(u.kpc / u.arcsec) * (r * u.arcsec)
        if z > 0 else -99. * u.kpc
        for z, r in zip(nsa_decals['Z'], nsa_decals['PETROTHETA'])]

# Calculate absolute and apparent magnitude
absmag_r = [float(x[4]) for x in nsa_decals['ABSMAG']]
mag_faruv = [22.5 - 2.5 * np.log10(x[0]) for x in nsa_decals['NMGY']]
mag_nearuv = [22.5 - 2.5 * np.log10(x[1]) for x in nsa_decals['NMGY']]
mag_u = [22.5 - 2.5 * np.log10(x[2]) for x in nsa_decals['NMGY']]
mag_g = [22.5 - 2.5 * np.log10(x[3]) for x in nsa_decals['NMGY']]
mag_r = [22.5 - 2.5 * np.log10(x[4]) for x in nsa_decals['NMGY']]
mag_i = [22.5 - 2.5 * np.log10(x[5]) for x in nsa_decals['NMGY']]
mag_z = [22.5 - 2.5 * np.log10(x[6]) for x in nsa_decals['NMGY']]
sizearr = [s.value for s in size]
def scale_based_on_redshift(z):
    # 1000 * (1/0.2) * (1/1000) * 60 / (kpc/arcmin) reduces to pixels per
    # proper kpc for a 0.2 arcsec/pixel plate scale
    scale_now = 1000 * (1 / 0.2) * (1 / 1000) * (60) * 1 / (cosmo.kpc_proper_per_arcmin(z).value)
    return scale_now
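# The same conversion written with explicit units (the 0.2"/pixel plate
# scale is an assumption read off the constants above):
from astropy import units as u

def scale_pix_per_kpc(z, pixscale=0.2 * u.arcsec):
    arcsec_per_kpc = (1. / cosmo.kpc_proper_per_arcmin(z)).to(u.arcsec / u.kpc)
    return (arcsec_per_kpc * u.kpc / pixscale).decompose().value  # pixels per proper kpc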
hdu.writeto(outfile, overwrite=True)

outfile = 'imaging/out_sigma_convolved_' + str(sdss) + '.fits'
hdu = fits.PrimaryHDU((camera_data_sigma))
hdu_number = 0
hdu.writeto(outfile, overwrite=True)
hdr = fits.getheader(outfile, hdu_number)
hdr['EXPTIME'] = 1
hdr['EXPTIME']
hdu.writeto(outfile, overwrite=True)

# 24 μm; 0.396 arcsec per pixel
pixelscale = 0.396
redshift = df['redshift'][i]
kpc_arcmin = cosmo.kpc_proper_per_arcmin(redshift)  # insert the redshift
# Divide the pixelscale (arcsec) by kpc/arcsec to get the kpc size of pixels
kpc_pix = pixelscale / (kpc_arcmin.value / 60)  # this is the number of kpc/pixel

# Run SExtractor before galfit to determine good guess input parameters
write_sex_default(str(sdss))
run_sex(str(sdss))
sex_out = sex_params_galfit(str(sdss))
# output if there are 2 bright sources, i.e. 2 stellar bulges:
# return sex_pet_r, minor, flux, x_max, y_max, n_bulges, eff_radius_1, B_A_1, PA_1, back_1, x_max_2, y_max_2, mag_max_2, eff_radius_2, B_A_2, PA_2
# output if 1 bulge:
# return sex_pet_r, minor, flux, x_max, y_max, n_bulges, eff_radius_1, B_A_1, PA_1, back_1
nearest_src, survey_coverage = OrderedDict(), OrderedDict()

# for candidate events
for event_i, event_info_i in tqdm(cand_events.items(), total=len(cand_events)):
    # nearby sources and dataset coverage for this event:
    srcs_i, coverage_i = list(), list()
    # construct coord
    crd_i = SkyCoord(ra=event_info_i['ra'], dec=event_info_i['dec'], unit=('hour', 'deg'))
    # scale of projected distance
    kpc_per_asec_i = cosmo.kpc_proper_per_arcmin(float(event_info_i['redshift'])).value / 60.
    # for Vizier sources:
    tabs_i = cand_hosts_v[event_i]
    for cat_j, tab_j in tabs_i.items():
        if cat_j == 'search_radius':
            continue
        ra_colid_j, dec_colid_j = radec_cols[cat_j][0]
        radec_units_j = radec_cols[cat_j][1]
        for rec_k in tab_j:
            try:
                crd_k = SkyCoord(ra=rec_k[ra_colid_j], dec=rec_k[dec_colid_j], unit=radec_units_j)
            except:
                continue  # not my fault :)
def begin(index):
    print(index)
    i = int(index.split('-')[0])
    #mgi = int(index.split('-')[1])
    color = index.split('-')[1]
    try:
        #hdulist = fits.open('/data/marvels/billzhu/2175 Dataset/' + color + '/' + str(index) + '.fit')
        hdulist = fits.open('/data/marvels/billzhu/MG II Dataset/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '.fit')
        #hdulist = fits.open('/data/marvels/billzhu/Reference Dataset/0.37 - 0.55/' + color + '/' + str(i) + '-' + str(mgi) + '-' + color + '.fit')
        scidata = hdulist[0].data.astype(float)
        #obj_table = Table.read('/data/marvels/billzhu/2175 Obj/' + str(i) + '-' + str(mgi) + '.fit', hdu=1)
        obj_table = Table.read('/data/marvels/billzhu/MG II Obj/0.37 - 0.55/' + str(i) + '_Obj.fit', hdu=1)
        #obj_table = Table.read('/data/marvels/billzhu/Reference Obj/0.37 - 0.55/' + str(i) + '_Obj.fit', hdu=1)
        test = obj_table['colc'][0]
    except:
        print(str(i) + ' Can not get table')
        return

    line_data = linecache.getline('Full Data.txt', i).split()
    obj_id = int(line_data[52])
    """
    print(dr12_table[mgi]['OBJ_ID'])
    obj_id = objid_extract(int(dr12_table[mgi]['OBJ_ID']), False)['id']
    print("%f, %f" % (obj_id, len(obj_table)))
    if obj_id >= len(obj_table):
        print('obj_id over table length')
        return
    """
    quasar = obj_table[obj_id - 1]
    pointer = 0
    if color == 'g':
        pointer = 1
    if color == 'r':
        pointer = 2
    if color == 'i':
        pointer = 3
    if color == 'z':
        pointer = 4
    if color == 'u':
        pointer = 0
    chunk_size = 50
    try:
        # placeholder header cards; the background and redshift values are filled in below
        hdulist[0].header.append(('XCOORD', int(quasar['colc'][pointer]), 'x coordinate of quasar in image'), end=True)
        hdulist[0].header.append(('YCOORD', int(quasar['rowc'][pointer]), 'y coordinate of quasar in image'), end=True)
        hdulist[0].header.append(('ID', obj_id, 'id of the quasar in the fpObj file'), end=True)
        hdulist[0].header.append(('MEAN_BKG', obj_id, '3 sigma 5 iters mean background level'), end=True)
        hdulist[0].header.append(('MED_BKG', obj_id, '3 sigma 5 iters median background level'), end=True)
        hdulist[0].header.append(('STD_BKG', obj_id, '3 sigma 5 iters std of background level'), end=True)
        hdulist[0].header.append(('ZABS', obj_id, 'redshift of absorber'), end=True)
    except:
        print(str(i) + ' Unable to get coords')
        #return

    if inbounds(quasar['colc'][pointer] + chunk_size + 6, quasar['rowc'][pointer] + chunk_size + 6) and \
       inbounds(quasar['colc'][pointer] - chunk_size - 5, quasar['rowc'][pointer] - chunk_size - 5):
        xc = quasar['colc'][pointer]
        yc = quasar['rowc'][pointer]
        mean1, median1, std1 = sigma_clipped_stats(scidata, sigma=3.0, iters=5)
        # Approximate mag-20 count values for the griz bands
        """
        mag20 = 0
        if color == 'g':
            mag20 = 2500
        if color == 'r':
            mag20 = 1900
        if color == 'i':
            mag20 = 1400
        if color == 'z':
            mag20 = 300
        """
        pstable = Table.read('/data/marvels/billzhu/MG II psField/0.37 - 0.55/' + str(i) + '_psField.fit', hdu=7)
        mag20 = pstable['flux20']
        RA = float(linecache.getline('Full Data.txt', i).split()[1])
        DEC = float(linecache.getline('Full Data.txt', i).split()[2])
        redshift = 0
        for j in range(len(mgtable)):
            if abs(mgtable['RA'][j] - RA) < 0.0001 and mgtable['DEC'][j] - DEC < 0.0001 and \
               mgtable['ZABS'][j][0] >= 0.37 and mgtable['ZABS'][j][0] < 0.55:
                redshift = mgtable['ZABS'][j][0]
                break
        if redshift == 0:
            return
        #redshift = float(line_data[3])
        thresh = thresh_mags[int(redshift / 0.005)]
        thresh = mag20 * 10**(8 - thresh / 2.5)
        image = scidata[int(yc - chunk_size - 5):int(yc + chunk_size + 6),
                        int(xc - chunk_size - 5):int(xc + chunk_size + 6)]
        scidata = checkInner(scidata, obj_table, xc, yc, mean1, std1, pointer, thresh)
        scale = cosmo.kpc_proper_per_arcmin(redshift) * u.arcmin / u.kiloparsec * 0.396 / 60
        print(thresh)
        image -= calc_background(scidata, xc, yc, int(400 / scale), int(500 / scale))
        #mean1, median1, std1 = sigma_clipped_stats(image, sigma=3.0, iters=10)
        print("%f, %f" % (quasar['colc'][pointer], quasar['rowc'][pointer]))
        print("%f, %f, %f" % (mean1, median1, std1))
        #image -= quasar['sky'][pointer] + 1000
        spline = interpolate.interp2d(
            np.arange(int(xc - chunk_size - 5), int(xc + chunk_size + 6)),
            np.arange(int(yc - chunk_size - 5), int(yc + chunk_size + 6)),
            image)
        xrang = np.arange(xc - chunk_size, xc + chunk_size + 1)
        yrang = np.arange(yc - chunk_size, yc + chunk_size + 1)
        if len(xrang) > 2 * chunk_size + 1:
            xrang = xrang[:-1]
        if len(yrang) > 2 * chunk_size + 1:
            yrang = yrang[:-1]
        shifted = spline(xrang, yrang)
        shifted = perimeter(shifted, mean1, std1)
        #mean1, median1, std1 = sigma_clipped_stats(shifted, sigma=3.0, iters=10)
        #shifted -= median1
        hdulist[0].header['XCOORD'] = xc
        hdulist[0].header['YCOORD'] = yc
        hdulist[0].header['MEAN_BKG'] = mean1
        hdulist[0].header['MED_BKG'] = median1
        hdulist[0].header['STD_BKG'] = std1
        hdulist[0].header['ZABS'] = redshift
        try:
            #shifted /= hdulist[0].header['NMGY']
            #fits.writeto('/data/marvels/billzhu/2175 Quasar Cut/' + color + '/' + str(index) + '_DUST.fit', shifted, hdulist[0].header, overwrite=True)
            fits.writeto('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_MG.fit',
                         shifted, hdulist[0].header, overwrite=True)
            #fits.writeto('/data/marvels/billzhu/Reference Quasar Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + str(mgi) + '-' + color + '_REF.fit', shifted, hdulist[0].header, overwrite=True)
        except:
            print('Header is corrupt')
            return
    else:
        print('Can not cut in bounds')
        return
def jwst_image_figure(alph=0.1, Q=1.0, dx=0, dy=0, Npix=1000.0, do_psf=False,
                      cs='CAMERA5-BROADBAND-NONSCATTER', label=''):
    nref11_z2 = 'nref11/RD0020/broadbandz.fits'
    nref11_z3 = 'nref11/RD0017/broadbandz.fits'
    extra_z2 = 'nref11_refine200kpc_z4to2/RD0020/broadbandz.fits'
    extra_z3 = 'nref11_refine200kpc_z4to2/RD0017/broadbandz.fits'

    nref11_z2_o = snapshot()
    nref11_z3_o = snapshot()
    extra_z2_o = snapshot()
    extra_z3_o = snapshot()

    rf = 21  # F200W 0.75
    gf = 19  # F115W 0.40
    bf = 4   # ACS/F775W rest far-UV 0.25

    fudge = 1.0
    bfact = 1.5
    gfact = 0.5 / (0.115**2)
    rfact = 2.0 / (0.200**2)

    mid = np.int64(1000.0 / 2)
    if Npix is not None:
        delt = np.int64(Npix / 2)

    # cut the same Npix x Npix window out of each band of each snapshot
    def load_rgb(obj, fn):
        data = pyfits.open(fn)[cs].data
        window = (slice(dx + mid - delt, dx + mid + delt),
                  slice(dy + mid - delt, dy + mid + delt))
        obj.r = fudge * data[rf][window] * rfact
        obj.g = fudge * data[gf][window] * gfact
        obj.b = fudge * data[bf][window] * bfact

    load_rgb(nref11_z2_o, nref11_z2)
    load_rgb(nref11_z3_o, nref11_z3)
    print(np.max(nref11_z2_o.r), np.max(nref11_z2_o.b))
    load_rgb(extra_z2_o, extra_z2)
    load_rgb(extra_z3_o, extra_z3)
    print(extra_z2_o.r.shape)

    kpc_per_arcsec_z2 = WMAP9.kpc_proper_per_arcmin(2.0).value / 60.0
    kpc_per_arcsec_z3 = WMAP9.kpc_proper_per_arcmin(2.75).value / 60.0
    kpc_per_pix = 0.05

    if do_psf == False:
        filen = 'images_hires_' + label + '_' + cs[0:7] + '.pdf'
        nref11_z2_o.rgbthing = make_color_image.make_interactive(
            nref11_z2_o.b, nref11_z2_o.g, nref11_z2_o.r, alph, Q)
        nref11_z3_o.rgbthing = make_color_image.make_interactive(
            nref11_z3_o.b, nref11_z3_o.g, nref11_z3_o.r, alph, Q)
        extra_z2_o.rgbthing = make_color_image.make_interactive(
            extra_z2_o.b, extra_z2_o.g, extra_z2_o.r, alph, Q)
        extra_z3_o.rgbthing = make_color_image.make_interactive(
            extra_z3_o.b, extra_z3_o.g, extra_z3_o.r, alph, Q)
    else:
        filen = 'images_psf_' + label + '_' + cs[0:7] + '.pdf'
        psf_fwhm_arcsec = 0.05
        fwhm_pixels_z2 = psf_fwhm_arcsec * kpc_per_arcsec_z2 / kpc_per_pix
        fwhm_pixels_z3 = psf_fwhm_arcsec * kpc_per_arcsec_z3 / kpc_per_pix
        print('KPC per as, z2: ', kpc_per_arcsec_z2)
        print('FWHM arcsec: ', psf_fwhm_arcsec)
        print('FWHM pixels z2: ', fwhm_pixels_z2)
        print('FWHM pixels z3: ', fwhm_pixels_z3)
        nref11_z2_o.rgbthing = make_color_image.make_interactive(
            nref11_z2_o.b, nref11_z2_o.g, nref11_z2_o.r, alph, Q,
            fwhm_pixels=fwhm_pixels_z2)
        nref11_z3_o.rgbthing = make_color_image.make_interactive(
            nref11_z3_o.b, nref11_z3_o.g, nref11_z3_o.r, alph, Q,
            fwhm_pixels=fwhm_pixels_z3)
        extra_z2_o.rgbthing = make_color_image.make_interactive(
            extra_z2_o.b, extra_z2_o.g, extra_z2_o.r, alph, Q,
            fwhm_pixels=fwhm_pixels_z2)
        extra_z3_o.rgbthing = make_color_image.make_interactive(
            extra_z3_o.b, extra_z3_o.g, extra_z3_o.r, alph, Q,
            fwhm_pixels=fwhm_pixels_z3)

    # high-res version first
    spinelw = 1.0
    f1 = pyplot.figure(figsize=(10.0, 10.0), dpi=200)
    pyplot.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0,
                           wspace=0.0, hspace=0.0)

    axi = f1.add_subplot(2, 2, 1)
    axi.set_xticks([])
    axi.set_yticks([])
    axi.imshow(nref11_z2_o.rgbthing, interpolation='nearest', origin='lower')
    axi.annotate('HST-ACS/F775W', (0.25, 0.94), xycoords='axes fraction',
                 color='Blue', ha='center', va='center', fontsize=20)
    axi.annotate('JWST-NC/F115W', (0.25, 0.88), xycoords='axes fraction',
                 color='Green', ha='center', va='center', fontsize=20)
    axi.annotate('JWST-NC/F200W', (0.25, 0.82), xycoords='axes fraction',
                 color='Red', ha='center', va='center', fontsize=20)
    #axi.annotate(anfk,(0.5,0.15),xycoords='axes fraction',color=anc,ha='center',va='center',fontsize=12,backgroundcolor='Black')
    for ss in axi.spines:
        s = axi.spines[ss]
        s.set_color('white')
        s.set_linewidth(spinelw)

    axi = f1.add_subplot(2, 2, 2)
    axi.set_xticks([])
    axi.set_yticks([])
    axi.imshow(extra_z2_o.rgbthing, interpolation='nearest', origin='lower')
    # 10 kpc scale bar (1 kpc for the zoomed version)
    if label == 'zoom':
        axi.plot([delt - 10, delt + 10], [delt / 3, delt / 3], marker='None',
                 linestyle='solid', color='White', lw=5)
        axi.annotate('1 kpc', (delt, delt / 4), xycoords='data',
                     color='White', ha='center', va='center', fontsize=20)
        axi.annotate('z = 2', (0.5 * delt, 2 * delt * 0.90), xycoords='data',
                     color='White', ha='center', va='center', fontsize=20)
    else:
        axi.plot([delt - 100, delt + 100], [delt / 3, delt / 3], marker='None',
                 linestyle='solid', color='White', lw=5)
        axi.annotate('10 kpc', (delt, delt / 4), xycoords='data',
                     color='White', ha='center', va='center', fontsize=20)
        axi.annotate('z = 2', (0.5 * delt, 2 * delt * 0.90), xycoords='data',
                     color='White', ha='center', va='center', fontsize=20)
    if do_psf == False:
        axi.annotate('No PSF', (1.5 * delt, 2 * delt * 0.90), xycoords='data',
                     color='White', ha='center', va='center', fontsize=20)
    else:
        axi.annotate('0.05" FWHM', (1.5 * delt, 2 * delt * 0.90),
                     xycoords='data', color='White', ha='center', va='center',
                     fontsize=20)
    for ss in axi.spines:
        s = axi.spines[ss]
        s.set_color('white')
        s.set_linewidth(spinelw)

    axi = f1.add_subplot(2, 2, 3)
    axi.imshow(nref11_z3_o.rgbthing, interpolation='nearest', origin='lower')
    axi.set_xticks([])
    axi.set_yticks([])
    axi.annotate('natural refine', (delt, 0.25 * delt), xycoords='data',
                 color='White', ha='center', va='center', fontsize=20)
    for ss in axi.spines:
        s = axi.spines[ss]
        s.set_color('white')
        s.set_linewidth(spinelw)

    axi = f1.add_subplot(2, 2, 4)
    axi.set_xticks([])
    axi.set_yticks([])
    axi.imshow(extra_z3_o.rgbthing, interpolation='nearest', origin='lower')
    axi.annotate('z = 2.75', (0.5 * delt, 2 * delt * 0.90), xycoords='data',
                 color='White', ha='center', va='center', fontsize=20)
    axi.annotate('forced refine', (delt, 0.25 * delt), xycoords='data',
                 color='White', ha='center', va='center', fontsize=20)
    for ss in axi.spines:
        s = axi.spines[ss]
        s.set_color('white')
        s.set_linewidth(spinelw)

    pyplot.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0,
                           wspace=0.0, hspace=0.0)
    f1.savefig(filen, dpi=200)
    pyplot.close(f1)
    return
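# Hedged usage sketch for jwst_image_figure(): it assumes the snapshot class,
# the make_color_image module, and the hard-coded broadbandz.fits paths above
# are importable/readable from the working directory.
jwst_image_figure(label='field')               # -> images_hires_field_CAMERA5.pdf
jwst_image_figure(do_psf=True, label='field')  # -> images_psf_field_CAMERA5.pdf
jwst_image_figure(Npix=200, label='zoom')      # zoomed variant with a 1 kpc scale bar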
import numpy as np
import scipy as sp
import pickle
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from astropy.cosmology import WMAP9 as cosmo
from astropy import units as u

scale = cosmo.kpc_proper_per_arcmin(0.37) * u.arcmin / u.kiloparsec * 0.396 / 60
b400 = 400 / scale * 0.396
b500 = 500 / scale * 0.396
b10 = 10 / scale * 0.396
b100 = 100 / scale * 0.396

scale = cosmo.kpc_proper_per_arcmin(0.55) * u.arcmin / u.kiloparsec * 0.396 / 60
t400 = 400 / scale * 0.396
t500 = 500 / scale * 0.396

fig, ax = plt.subplots()
majorLocator = MultipleLocator(10)
majorFormatter = FormatStrFormatter('%.1f')
minorLocator = MultipleLocator(2.5)
minorFormatter = FormatStrFormatter('%f')

with open('g-bkg.txt', 'rb') as fp:
    arr = pickle.load(fp)
with open('g2-bkg.txt', 'rb') as fp2:
    for s in survey:
        zcol = 'z_%s_%s' % (zt, s)
        mask = np.isfinite(data[zcol]) & (data[zcol] > 0.) & np.logical_not(prevmask)
        blankz[mask] = data[zcol][mask]
        prevmask = mask
    return blankz

zarr = choose_redshift(data)

# Calculate absolute size in kpc
from astropy.cosmology import WMAP9
from astropy import units as u

size = [WMAP9.kpc_proper_per_arcmin(z).to(u.kpc/u.arcsec) * (r * u.arcsec)
        if z > 0 else -99. * u.kpc
        for z, r in zip(zarr, data['KRON_RADIUS_I'])]
# re = 0.162 * data['FLUX_RADIUS1_B_1']**1.87

absmag = [zmag * u.mag - WMAP9.distmod(z) if z > 0 else -99. * u.mag
          for z, zmag in zip(zarr, data['Z_1'])]

data.rename_column('hubble_id_1',    'hubble_id')
data.rename_column('coords_ra_1',    'ra')
data.rename_column('coords_dec_1',   'dec')
data.rename_column('KRON_RADIUS_I',  'kron_radius_I')
data.rename_column('FLUX_RADIUS1_I', 'flux_radius1_I')
data.rename_column('B_1',            'B')
data.rename_column('V_1',            'V')
data.rename_column('I_1',            'I')
data.rename_column('Z_1',            'Z')
data.rename_column('group_type_1',   'group_type')
data.rename_column('retire_at_1',    'retire_at')
data.rename_column('survey_1',       'survey')
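# A minimal sketch of the angular-to-physical size conversion used above,
# vectorized with astropy units; `zs` and `radii_arcsec` are hypothetical
# arrays standing in for zarr and data['KRON_RADIUS_I'].
import numpy as np
from astropy.cosmology import WMAP9
from astropy import units as u

def angular_to_kpc(zs, radii_arcsec):
    """Convert angular radii (arcsec) to proper sizes (kpc) at redshifts zs."""
    zs = np.atleast_1d(zs)
    radii = np.atleast_1d(radii_arcsec) * u.arcsec
    out = np.full(len(zs), -99.) * u.kpc          # sentinel for unusable redshifts
    good = zs > 0
    scale = WMAP9.kpc_proper_per_arcmin(zs[good]).to(u.kpc / u.arcsec)
    out[good] = scale * radii[good]
    return out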
def matchC(cat1, cat2, sigmaz, printprocess):  # , dz, opts):
    # Read cat1
    # rich1 = Mhalo
    id1, ra1, dec1, z1, ngal1, rich1, r2001 = N.loadtxt(
        cat1, dtype='|S20,float,float,float,float,float,float',
        usecols=(0, 1, 2, 3, 4, 5, 6), unpack=True)

    # Sort the catalogue putting the most massive first
    be = (-rich1).ravel().argsort()
    id1 = id1[be]
    ra1 = ra1[be]
    dec1 = dec1[be]
    z1 = z1[be]
    ngal1 = ngal1[be]
    rich1 = rich1[be]
    r2001 = r2001[be]

    # Read cat2
    # rich2 = SNR
    id2, ra2, dec2, z2, ngal2, rich2, r2002 = N.loadtxt(
        cat2, dtype='|S20,float,float,float,float,float,float',
        usecols=(0, 1, 2, 3, 4, 5, 6), unpack=True)

    # Sort the catalogue putting the first-ranked first
    be = (-rich2).ravel().argsort()
    id2 = id2[be]
    ra2 = ra2[be]
    dec2 = dec2[be]
    z2 = z2[be]
    ngal2 = ngal2[be]
    rich2 = rich2[be]
    r2002 = r2002[be]

    c2 = C.SkyCoord(ra=ra2 * U.degree, dec=dec2 * U.degree)

    origID = []
    origRA = []
    origDEC = []
    origZ = []
    origNGAL = []
    origRICH = []
    matchID = []
    matchRA = []
    matchDEC = []
    matchZ = []
    matchNGAL = []
    matchRICH = []
    matchDISTI = []
    origRAD = []

    kpcmin1 = cosmo.kpc_proper_per_arcmin(z1)
    r2001Mpc = kpcmin1.value * r2001 / 1000.
    null = float(-99.)

    for i in range(len(id1)):
        # for i in range(84100, 84200):
        # for i in range(0, 299):
        if printprocess == 'yes':
            if i / 100. == int(i / 100.):
                print(i)
        c1 = C.SkyCoord(ra=ra1[i] * U.degree, dec=dec1[i] * U.degree)
        sep = c1.separation(c2)  # arcmins
        sepV = sep.arcminute
        # ja = N.less_equal((N.array(sepV)-r2001[i]),0.0)*N.less_equal(abs(z2-z1[i])-2.*sigmaz*(1.+z1[i]),0.)
        ja = N.less_equal((N.array(sepV) - 2. * r2001[i]), 0.0) * N.less_equal(
            abs(z2 - z1[i]) - 2. * sigmaz * (1. + z1[i]), 0.)  # twice the 2R radius
        # ja = N.less_equal((N.array(sepV)-2.*r2001[i]), 0.0)  # twice the 2R radius
        id2S, ra2S, dec2S, z2S, ngal2S, rich2S, sepS = B.multicompress(
            ja, (id2, ra2, dec2, z2, ngal2, rich2, sepV))
        origID.append(id1[i])
        origRA.append(ra1[i])
        origDEC.append(dec1[i])
        origZ.append(z1[i])
        origNGAL.append(ngal1[i])
        origRICH.append(rich1[i])
        origRAD.append(r2001Mpc[i])
        # picks up the most massive (the files are sorted that way from the start)
        if len(ra2S) > 0:
            matchID.append(id2S[0])
            matchRA.append(ra2S[0])
            matchDEC.append(dec2S[0])
            matchZ.append(z2S[0])
            matchNGAL.append(ngal2S[0])
            matchRICH.append(rich2S[0])
            kpcmin2 = cosmo.kpc_proper_per_arcmin(z2S[0])
            distiMpc = kpcmin2.value * sepS[0] / 1000.
            matchDISTI.append(distiMpc)
        # save all entries even if not matched (necessary for the
        # Completeness & Purity computations)
        if len(ra2S) == 0:
            matchID.append(null)
            matchRA.append(null)
            matchDEC.append(null)
            matchZ.append(null)
            matchNGAL.append(null)
            matchRICH.append(null)
            matchDISTI.append(null)

    # Check how many are matched
    ja = N.greater_equal((N.array(matchDISTI)), 0.0)
    la = N.compress(ja, N.array(matchDISTI))
    print(len(la), 'matched out of', len(ra1))

    return (origID, origRA, origDEC, origZ, origNGAL, origRICH,
            matchID, matchRA, matchDEC, matchZ, matchNGAL, matchRICH,
            matchDISTI, origRAD)
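# Hedged usage sketch for matchC(): the two file names are hypothetical; each
# catalogue must be an ASCII table whose first seven columns are
# id, ra, dec, z, ngal, richness, r200 (as read by N.loadtxt above).
(origID, origRA, origDEC, origZ, origNGAL, origRICH,
 matchID, matchRA, matchDEC, matchZ, matchNGAL, matchRICH,
 matchDISTI, origRAD) = matchC('clusters_mass.cat', 'clusters_snr.cat',
                               sigmaz=0.03, printprocess='no')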
def rand_counts(gal_field, z, R=10**np.linspace(1.2, 3.6, 13), delta_z=0.1,
                min_mass=9.415):
    #picking a random location for galaxy number density#
    if gal_field == 'AEGIS':
        ra1 = random.uniform(3.746000, 3.756821)
        dec1 = random.uniform(0.920312, 0.925897)
    elif gal_field == 'COSMOS':
        ra1 = random.uniform(2.619737, 2.620718)
        dec1 = random.uniform(0.038741, 0.043811)
    elif gal_field == 'GOODS-N':
        ra1 = random.uniform(3.298072, 3.307597)
        dec1 = random.uniform(1.084787, 1.087936)
    elif gal_field == 'GOODS-S':
        ra1 = random.uniform(0.925775, 0.929397)
        dec1 = random.uniform(-0.487098, -0.483591)
    elif gal_field == 'UDS':
        ra1 = random.uniform(0.59815, 0.602889)
        dec1 = random.uniform(-0.091376, -0.090305)

    from astropy.coordinates.sky_coordinate import SkyCoord
    from astropy import units as u

    #switching ra and dec from radians to degrees#
    ra1 = ra1 * (180.0 / math.pi)
    dec1 = dec1 * (180.0 / math.pi)

    #making a list of galaxies within a redshift range of the given z, in the selected field, and above the mass limit#
    data_tmp = data_flagged[data_flagged['field'] == gal_field]

    #binning the satellites based on mass#
    mask = ((np.abs(data_tmp['z_peak'] - z) <= delta_z) &
            (data_tmp['lmass'] >= min_mass))
    lst_gal = data_tmp[mask]
    lst_gal1 = lst_gal[(lst_gal['lmass'] < 9.8)]
    lst_gal2 = lst_gal[((lst_gal['lmass'] < 10.3) & (lst_gal['lmass'] > 9.8))]
    lst_gal3 = lst_gal[((lst_gal['lmass'] < 10.8) & (lst_gal['lmass'] > 10.3))]
    lst_gal4 = lst_gal[((lst_gal['lmass'] < 11.8) & (lst_gal['lmass'] > 10.8))]

    #finding the various aperture radii in arcminutes based on the given z#
    kpc_per_arcmin = cosmo.kpc_proper_per_arcmin(z)
    arcmin_per_kpc = kpc_per_arcmin**(-1)
    arcmin = arcmin_per_kpc * (R * u.kpc)

    #calculating the angular separation to each galaxy in lst_gal#
    sc0 = SkyCoord(ra1 * u.deg, dec1 * u.deg)
    sc1 = SkyCoord(lst_gal1['ra'] * u.deg, lst_gal1['dec'] * u.deg)
    sc2 = SkyCoord(lst_gal2['ra'] * u.deg, lst_gal2['dec'] * u.deg)
    sc3 = SkyCoord(lst_gal3['ra'] * u.deg, lst_gal3['dec'] * u.deg)
    sc4 = SkyCoord(lst_gal4['ra'] * u.deg, lst_gal4['dec'] * u.deg)
    sep1 = sc0.separation(sc1).to(u.arcmin)
    sep2 = sc0.separation(sc2).to(u.arcmin)
    sep3 = sc0.separation(sc3).to(u.arcmin)
    sep4 = sc0.separation(sc4).to(u.arcmin)

    #counting the separations that fall within each aperture radius in 'arcmin'#
    nn1 = np.empty(len(R))
    nn2 = np.empty(len(R))
    nn3 = np.empty(len(R))
    nn4 = np.empty(len(R))
    for ii, r in enumerate(arcmin):
        nn1[ii] = np.sum(sep1 <= r)
        nn2[ii] = np.sum(sep2 <= r)
        nn3[ii] = np.sum(sep3 <= r)
        nn4[ii] = np.sum(sep4 <= r)

    #returning four lists of counts per radius, lowest mass bin first#
    return [nn1, nn2, nn3, nn4]
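# A minimal sketch of how rand_counts() can be used to build a background
# estimate: average the counts from many random pointings in the same field.
# The number of draws (1000) and the mean stacking are illustrative
# assumptions, not part of the original pipeline; rand_counts() itself still
# relies on the module-level data_flagged, random, math, and cosmo objects.
def mean_background_counts(gal_field, z, ndraws=1000):
    draws = [rand_counts(gal_field, z) for _ in range(ndraws)]
    # each draw is [nn1, nn2, nn3, nn4]; average each mass bin over all draws
    return np.mean(np.array(draws), axis=0)  # shape (4, len(R))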
def begin(index):
    #i = int(index)
    i = int(index.split('-')[0])
    #mgi = int(index.split('-')[1])
    color = index.split('-')[1]

    #try:
    print(index)
    #filename = 'Test Data Extract/' + str(i) + '.fit'
    #filename = str(i) + '-g.fit'
    #filename = '/data/marvels/billzhu/2175 Dataset/' + color + '/' + str(index) + '.fit'
    #filename = '/data/marvels/billzhu/2175 Reference Dataset/' + color + '/' + str(index) + '.fit'
    #filename = '/data/marvels/billzhu/Reference Dataset/0.37 - 0.55/' + color + '/' + str(index) + '.fit'
    filename = ('/data/marvels/billzhu/MG II Dataset/0.37 - 0.55/' + color +
                '/' + str(i) + '-' + color + '.fit')
    hdulist = fits.open(filename)

    #qlist = fits.open('MG II Test Cut/' + str(i) + '_MG.fit')
    #qlist = fits.open('/data/marvels/billzhu/2175 Quasar Cut/' + color + '/' + str(index) + '_DUST.fit')
    #qlist = fits.open('/data/marvels/billzhu/2175 Reference Quasar Cut/' + color + '/' + str(index) + '_REF.fit')
    #qlist = fits.open('/data/marvels/billzhu/Reference Quasar Cut/0.37 - 0.55/' + color + '/' + str(index) + '_REF.fit')
    qlist = fits.open('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' +
                      color + '/' + str(i) + '-' + color + '_MG.fit')
    qx = qlist[0].header['XCOORD']
    qy = qlist[0].header['YCOORD']
    obj_id = qlist[0].header['ID']
    mean1 = qlist[0].header['MEAN_BKG']
    median1 = qlist[0].header['MED_BKG']
    std1 = qlist[0].header['STD_BKG']
    redshift = qlist[0].header['ZABS']
    #print("%f, %f" % (x, y))
    qlist.close()
    #except:
    #    print("No coordinates")
    #    return

    # Load the science image once up front to save time
    scidata = hdulist[0].data.astype(float)

    """
    if 'SKY' in hdulist[0].header.keys():
        scidata -= float(hdulist[0].header['SOFTBIAS'])
        scidata -= float(hdulist[0].header['SKY'])
    else:
        mean, median, std = sigma_clipped_stats(scidata, sigma=3.0, maxiters=5)
        scidata -= median
        print(str(i) + ' No sky')
        #return
    """
    #print(sigma_clipped_stats(scidata, sigma=3.0, maxiters=5))

    # Map the SDSS filter name onto its column index in the object table
    pointer = 0
    if color == 'g':
        pointer = 1
    if color == 'r':
        pointer = 2
    if color == 'i':
        pointer = 3
    if color == 'z':
        pointer = 4
    if color == 'u':
        pointer = 0

    #bkg_sigma = mad_std(scidata)
    try:
        #print('/data/marvels/billzhu/MG II Obj/0.37 - 0.55/' + str(i) + '.fit')
        #obj_table = Table.read('/data/marvels/billzhu/2175 Obj/' + str(i) + '-' + str(mgi) + '.fit', hdu=1)
        #obj_table = Table.read('/data/marvels/billzhu/2175 Reference Obj/' + str(i) + '-' + str(mgi) + '.fit', hdu=1)
        obj_table = Table.read('/data/marvels/billzhu/MG II Obj/0.37 - 0.55/' +
                               str(i) + '_Obj.fit', hdu=1)
        #obj_table = Table.read('/data/marvels/billzhu/Reference Obj/0.37 - 0.55/' + str(i) + '_Obj.fit', hdu=1)
    except:
        print(str(i) + ' No Table')
        return

    #line_data = linecache.getline('Full Data.txt', i).split()
    #line_data = linecache.getline('DR12 QSO.txt', i).split()
    #print(len(line_data))
    #obj_id = int(line_data[52])
    quasar = obj_table[obj_id - 1]

    try:
        print("%d, %f" % (obj_id, obj_table['M_rr_cc'][obj_id - 1][pointer]))
    except:
        print("can't print table")
        return

    # If no quasar is found, the field image is deemed corrupt and not used
    if quasar == 0:
        print(str(i) + ' No quasar')
        return

    # Calculate the 18th-magnitude flux threshold from the psField flux20 zeropoint
    mag18 = 0
    header = hdulist[0].header
    pstable = Table.read('/data/marvels/billzhu/MG II psField/0.37 - 0.55/' +
                         str(i) + '_psField.fit', hdu=7)
    mag20 = pstable['flux20']
    if color == 'g':
        mag18 = mag20[1] * 10**(8. - 18 / 2.5)
    if color == 'r':
        mag18 = mag20[2] * 10**(8. - 18 / 2.5)
    if color == 'i':
        mag18 = mag20[3] * 10**(8. - 18 / 2.5)
    if color == 'z':
        mag18 = mag20[4] * 10**(8. - 18 / 2.5)
    #mag18 = 10**(4.5/2.5)

    #qsocut = fits.open('/data/marvels/billzhu/2175 Quasar Cut/' + color + '/' + str(index) + '_DUST.fit')
    #qsocut = fits.open('/data/marvels/billzhu/2175 Reference Quasar Cut/' + color + '/' + str(index) + '_REF.fit')
    #qsocut = fits.open('/data/marvels/billzhu/Reference Quasar Cut/0.37 - 0.55/' + color + '/' + str(index) + '_REF.fit')
    qsocut = fits.open('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' +
                       color + '/' + str(i) + '-' + color + '_MG.fit')
    qsodata = qsocut[0].data.astype(float)

    """
    if 'SKY' in hdulist[0].header.keys():
        qsodata -= float(hdulist[0].header['SOFTBIAS'])
        qsodata -= float(hdulist[0].header['SKY'])
    else:
        qsodata -= median
    """
    #qsodata -= obj_table['sky'][obj_id - 1][pointer]
    #qsodata -= 1000

    if qsodata[50, 50] < 5:
        print('bad QSO')
        return

    print('reached')

    """
    for r in range(len(qsodata)):
        for c in range(len(qsodata)):
            if distance(r, c, 150, 150) > 150:
                background.append(qsodata[r, c])
    """
    #print(sigma_clipped_stats(qsodata, sigma=3.0, maxiters=5))

    # Recalculate the sigma stats after background subtraction
    #mean, median, std = sigma_clipped_stats(scidata, sigma=3.0, maxiters=5)
    #print("%f, %f, %f" % (mean, median, std))

    largearr = []
    stars = []
    chunk_size = 50
    diff_fwhm = 1000000
    #psf_fwhm = 100
    #qsovisited = connected(qsodata, 50, 50, mean + 1 * std, np.zeros((101, 101), dtype=bool))
    #qmax = np.max(qsodata)
    counter = 0
    #qsocount = photonCount(50, 50, 15, qsodata)
    #mean1, median1, std1 = sigma_clipped_stats(scidata, sigma=3.0, maxiters=5)

    # kpc per pixel at the absorber redshift (0.396"/pix SDSS scale)
    scale = cosmo.kpc_proper_per_arcmin(redshift) * u.arcmin / u.kiloparsec * 0.396 / 60

    # Select clean point sources (flag cuts), brighter than 18th magnitude,
    # well inside the frame, away from the quasar, and within 10% of the
    # quasar's adaptive moment
    for j in range(len(obj_table)):
        sx = obj_table['colc'][j][pointer]
        sy = obj_table['rowc'][j][pointer]
        flags1 = detflags(obj_table['objc_flags'][j])
        flags2 = detflags(obj_table['objc_flags2'][j])

        #try:
        if (obj_table['objc_type'][j] == 6 and flags1[12] == False
                and flags1[17] == False and flags1[18] == False
                and flags2[27] == False and distance(sx, sy, qx, qy) > 5
                and inbounds(sx + chunk_size + 6, sy + chunk_size + 6)
                and inbounds(sx - chunk_size - 5, sy - chunk_size - 5)
                and obj_table['psfCounts'][j][pointer] > mag18
                and obj_table['M_rr_cc'][j][pointer] > 0
                and abs(obj_table['M_rr_cc'][j][pointer] -
                        obj_table['M_rr_cc'][obj_id - 1][pointer]) <
                    0.1 * obj_table['M_rr_cc'][obj_id - 1][pointer]):
            #try:
            """
            preshift = scidata[int(sy - 10) : int(sy + 11), int(sx - 10) : int(sx + 11)]
            xc, yc = centroid_2dg(preshift, mask=None)
            xc += quasar['colc'][pointer] - 10
            yc += quasar['rowc'][pointer] - 10
            """
            xc = obj_table['colc'][j][pointer]
            yc = obj_table['rowc'][j][pointer]
            #preshift = scidata[int(yc - chunk_size - 5) : int(yc + chunk_size + 6), int(xc - chunk_size - 5) : int(xc + chunk_size + 6)]

            # Clip a 700x700 working region around the star, tracking the
            # star's coordinates inside that region
            xu = xc + 350.0
            xl = xc - 350.0
            yu = yc + 350.0
            yl = yc - 350.0
            xc1 = xc
            yc1 = yc
            if xu >= 2048:
                xu = 2047
            if xl < 0:
                xl = 0
            else:
                xc1 = xc - int(xc) + 350.0
            if yu >= 1489:
                yu = 1488
            if yl < 0:
                yl = 0
            else:
                yc1 = yc - int(yc) + 350.0

            scidata2 = np.array(scidata[int(yl):int(yu), int(xl):int(xu)])
            visited = np.zeros((len(scidata2), len(scidata2[0])), dtype=bool)
            scidata2 = checkInner(scidata2, obj_table, xc, yc, xc1, yc1,
                                  mean1, std1, visited, pointer)
            preshift = scidata2[int(yc1 - chunk_size - 5):int(yc1 + chunk_size + 6),
                                int(xc1 - chunk_size - 5):int(xc1 + chunk_size + 6)]
            preshift -= calc_background(scidata2, xc1, yc1,
                                        int(400 / scale), int(500 / scale))
            spline = interpolate.interp2d(
                np.arange(int(xc1 - chunk_size - 5), int(xc1 + chunk_size + 6)),
                np.arange(int(yc1 - chunk_size - 5), int(yc1 + chunk_size + 6)),
                preshift)
            xrang = np.arange(xc1 - chunk_size, xc1 + chunk_size + 1)
            yrang = np.arange(yc1 - chunk_size, yc1 + chunk_size + 1)
            if len(xrang) > 2 * chunk_size + 1:
                xrang = xrang[:-1]
            if len(yrang) > 2 * chunk_size + 1:
                yrang = yrang[:-1]
            shifted1 = spline(xrang, yrang)

            if shifted1[chunk_size][chunk_size] > 20:
                #shifted1 = normalize(shifted1, 50, 50, np.max(shifted1), np.max(qsodata), mean1 + 5 * std1, np.zeros((101, 101), dtype=bool))
                mean, median, std = sigma_clipped_stats(shifted1, sigma=3.0,
                                                        maxiters=5)
                visited = connected(
                    shifted1, 50, 50, mean + 1 * std,
                    np.zeros((101, 101), dtype=bool),
                    2.4 * obj_table['iso_a'][j][pointer] / 0.396 + 3)
                #print(visited)
                #shifted1 = gaussian_filter(shifted1, sigma=std)
                #smax = np.max(shifted1)
                #objcount = photonCount(chunk_size, chunk_size, 15, shifted1)
                #fits.writeto('Test' + str(j) + '.fit', shifted1, overwrite=True)

                # Rescale the star cutout to the quasar's PSF flux
                for r in range(len(visited)):
                    for c in range(len(visited)):
                        #if distance(r, c, 50, 50) < 1.2 * obj_table['iso_a'][obj_id - 1][pointer]/2 + 3:
                        shifted1[c, r] /= obj_table['psfCounts'][j][pointer]
                        shifted1[c, r] *= obj_table['psfCounts'][obj_id - 1][pointer]

                #shifted1 /= objcount  # obj_table['psfCounts'][j][pointer]
                #shifted1 *= qsocount  # obj_table['psfCounts'][obj_id - 1][pointer]
                #fits.writeto('Test0' + str(counter) + '.fit', shifted1, overwrite=True)
                #print("%d, %f, %f" % (counter, obj_table['M_rr_cc'][j][pointer], obj_table['iso_a'][j][pointer]))
                #counter += 1
                #shifted1 -= np.median(shifted1)
                largearr.append(np.reshape(shifted1, (2 * chunk_size + 1)**2))
                #print(True)
                #stars.append(shifted1)
            else:
                print("ERASED")
        #except:
        #    continue

    largearr = np.array(largearr)
    print(np.shape(largearr))

    # Set the number of components in the PCA; use incremental PCA (IPCA)
    # for efficiency and speed
    numcomp = len(largearr)

    # Need a healthy number of sources for the PSF fitting in order to
    # decrease noise, setting at 5% threshold
    #if len(largearr) < 10:
    #    print('No Sources')
    #    return

    print(numcomp)
    mean_vector = []
    #print(np.shape(largearr))
    try:
        for j in range(0, (2 * chunk_size + 1)**2):
            mean_vector.append(np.mean(largearr[:, j]))
    except:
        print("NO SOURCE FOUND")
        return

    largearr -= mean_vector

    ipca = IncrementalPCA(n_components=numcomp)
    ipca.fit(largearr)
    ipca_comp = ipca.components_
    #print(np.shape(ipca_comp))

    # Only fit the central portion of the quasar, since the full cutout may
    # be overfit given the strength of the IPCA components
    new_comp = []
    for j in range(len(largearr)):
        temp = np.reshape(ipca_comp[j, :],
                          (2 * chunk_size + 1, 2 * chunk_size + 1))
        new_comp.append(np.reshape(
            temp[chunk_size - 6:chunk_size + 7, chunk_size - 6:chunk_size + 7],
            169))

    new_comp = np.array(new_comp)
    new_comp = new_comp.T
    print(np.shape(new_comp))
    ipca_comp = ipca_comp.T
    #print(ipca_comp)
    #print(np.shape(largearr[0, :]))
    #print(np.shape(ipca_comp))

    take_final = numcomp

    # Final fitting of the first n components, as determined by take_final,
    # to the quasar in order to build the PSF model
    print(np.shape(ipca_comp))
    qsodata1 = np.reshape(qsodata, (2 * chunk_size + 1)**2)
    qsodata1 -= mean_vector
    qsodata1 = np.reshape(qsodata1, (2 * chunk_size + 1, 2 * chunk_size + 1))
    coeff = np.dot(
        np.reshape(qsodata1[chunk_size - 6:chunk_size + 7,
                            chunk_size - 6:chunk_size + 7], 169),
        new_comp)
    #coeff = np.dot(qsodata, ipca_comp)

    final_fit = np.dot(ipca_comp[:, 0:take_final], coeff[0:take_final])
    final_fit += mean_vector
    final_fit = np.reshape(final_fit, (2 * chunk_size + 1, 2 * chunk_size + 1))
    #final_fit /= len(largearr)

    qsodata1 = np.reshape(qsodata1, (2 * chunk_size + 1)**2)
    qsodata1 += mean_vector
    qsodata1 = np.reshape(qsodata1, (2 * chunk_size + 1, 2 * chunk_size + 1))

    """
    background = []
    for r in range(len(final_fit)):
        for c in range(len(final_fit)):
            if distance(r, c, 50, 50) > 50:
                background.append(final_fit[r, c])
    """
    #mean, median, stddev = sigma_clipped_stats(final_fit, sigma=3.0, maxiters=10)
    #final_fit -= median
    #final_fit = gaussian_filter(final_fit, sigma=stddev)

    """
    fmax = np.max(final_fit)
    qmax = np.max(qsodata)
    for r in range(len(qsovisited)):
        for c in range(len(qsovisited)):
            if qsovisited[r][c] == True:
                final_fit[r][c] /= fmax
                final_fit[r][c] *= qmax
    print(np.min(final_fit))
    print(np.min(qsodata))
    """

    # Section to normalize the PSF fit by photon count, but this is
    # unnecessary since a correct PCA fits better
    """
    gauss_fit = photutils.fit_2dgaussian(final_fit[40:61, 40:61], mask=None)
    fit_fwhm = 2*np.sqrt(2*np.log(2))*np.sqrt(gauss_fit.x_stddev)
    #print(fit_fwhm)
    print("%f, %f" % (fwhm_x, fit_fwhm))
    print("%f, %f" % (np.max(qsodata), np.max(final_fit)))
    ffwhm = max(fwhm_x, fit_fwhm)
    ffphoton_1sig = photonCount(50, 50, 4 * fit_fwhm, final_fit)
    qsophoton_1sig = photonCount(50, 50, 4 * fit_fwhm, qsodata)
    print("%f, %f" % (qsophoton_1sig, ffphoton_1sig))

    for j in range(len(final_fit)):
        for k in range(len(final_fit)):
            if distance(50, 50, j, k) < 4 * fit_fwhm:
                final_fit[j][k] /= ffphoton_1sig
                final_fit[j][k] *= qsophoton_1sig
    """

    print("%f, %f" % (np.max(qsodata), np.max(final_fit)))
    #final_fit /= ffphoton_1sig
    #final_fit *= qsophoton_1sig

    """
    line_data = linecache.getline('Full Data.txt', i).split()
    if color == 'g':
        mag = float(line_data[6])
    if color == 'r':
        mag = float(line_data[8])
    if color == 'i':
        mag = float(line_data[10])
    if color == 'z':
        mag = float(line_data[12])
    if color == 'u':
        mag = float(line_data[4])

    #try:
    multiplier = 10**(8 - mag / 2.5) * header['FLUX20']
    multiplier1 = quasar['psfCounts'][pointer]
    print(multiplier)
    print(str(multiplier1 - header['SKY']))
    final_fit /= ffphoton_1sig
    final_fit *= multiplier
    #except:
    #    final_fit *= qsodata[50, 50]
    #    return
    print("%f, %f" % (np.max(qsodata), np.max(final_fit)))
    """

    # Final residual from subtracting the PSF model from the QSO
    residue = qsodata - final_fit
    #mean, median, stddev = sigma_clipped_stats(residue, sigma=3.0, maxiters=10)
    #residue -= median

    try:
        #fits.writeto('/data/marvels/billzhu/2175 PSF Cut/' + color + '/' + str(index) + '_PSF.fit', final_fit, qsocut[0].header, overwrite=True)
        #fits.writeto('/data/marvels/billzhu/2175 PSF Subtract/' + color + '/' + str(index) + '_SUB.fit', residue, qsocut[0].header, overwrite=True)
        #fits.writeto('/data/marvels/billzhu/2175 Reference PSF Cut/' + color + '/' + str(index) + '_PSF.fit', final_fit, hdulist[0].header, overwrite=True)
        #fits.writeto('/data/marvels/billzhu/2175 Reference PSF Subtract/' + color + '/' + str(index) + '_SUB.fit', residue, hdulist[0].header, overwrite=True)
        #fits.writeto('/data/marvels/billzhu/Reference PSF Cut/0.37 - 0.55/' + color + '/' + str(index) + '_PSF.fit', final_fit, hdulist[0].header, overwrite=True)
        #fits.writeto('/data/marvels/billzhu/Reference PSF Subtract/0.37 - 0.55/' + color + '/' + str(index) + '_SUB.fit', residue, hdulist[0].header, overwrite=True)
        fits.writeto('/data/marvels/billzhu/MG II PSF Cut/0.37 - 0.55/' + color +
                     '/' + str(i) + '-' + color + '_PSF.fit', final_fit,
                     hdulist[0].header, overwrite=True)
        fits.writeto('/data/marvels/billzhu/MG II PSF Subtract/0.37 - 0.55/' +
                     color + '/' + str(i) + '-' + color + '_SUB.fit', residue,
                     hdulist[0].header, overwrite=True)
        #fits.writeto('Reference Subtract/' + str(i) + '_SUB.fit', residue, hdulist[0].header, overwrite=True)
        #fits.writeto('Reference PSF Cut/' + str(i) + '_PSF.fit', final_fit, hdulist[0].header, overwrite=True)
        print('\n')
        print("DONE TO BOTTOM")
    except:
        print('HEADER IS CORRUPT')
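# A minimal, self-contained sketch of the PCA PSF-modelling step used in
# begin(): stack star cutouts as rows, mean-subtract, fit IncrementalPCA,
# then reconstruct a target from its projection onto the components. The
# synthetic Gaussian "stars" below are illustrative stand-ins only.
import numpy as np
from sklearn.decomposition import IncrementalPCA

rng = np.random.default_rng(0)
yy, xx = np.mgrid[-50:51, -50:51]
stars = [np.exp(-(xx**2 + yy**2) / (2 * s**2)) + 0.01 * rng.standard_normal(xx.shape)
         for s in rng.uniform(2.8, 3.2, 20)]       # 20 fake 101x101 star cutouts
stack = np.array([s.ravel() for s in stars])       # shape (20, 101*101)
mean_vec = stack.mean(axis=0)
stack -= mean_vec                                  # analogue of mean_vector subtraction

ipca = IncrementalPCA(n_components=len(stack))
ipca.fit(stack)

target = stars[0].ravel() - mean_vec               # pretend this is the quasar cutout
coeff = ipca.components_ @ target                  # project onto the components
psf_model = (ipca.components_.T @ coeff + mean_vec).reshape(101, 101)
residual = stars[0] - psf_model                    # analogue of `residue`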
def fit_ellipse_for_source(
    friendid=None,
    detectid=None,
    coords=None,
    shotid=None,
    subcont=True,
    convolve_image=False,
    pixscale=pixscale,
    imsize=imsize,
    wave_range=None,
):
    if detectid is not None:
        global deth5

        detectid_obj = detectid

        if detectid_obj <= 2190000000:
            det_info = deth5.root.Detections.read_where("detectid == detectid_obj")[0]
            linewidth = det_info["linewidth"]
            wave_obj = det_info["wave"]
            redshift = wave_obj / (1216) - 1
        else:
            det_info = conth5.root.Detections.read_where("detectid == detectid_obj")[0]
            redshift = 0
            wave_obj = 4500

        coords_obj = SkyCoord(det_info["ra"], det_info["dec"], unit="deg")
        shotid_obj = det_info["shotid"]
        fwhm = surveyh5.root.Survey.read_where("shotid == shotid_obj")["fwhm_virus"][0]
        amp = det_info["multiframe"]

        if wave_range is not None:
            wave_range_obj = wave_range
        else:
            if detectid_obj <= 2190000000:
                wave_range_obj = [wave_obj - 2 * linewidth, wave_obj + 2 * linewidth]
            else:
                wave_range_obj = [4100, 4200]

        if coords is not None:
            coords_obj = coords

        if shotid is not None:
            shotid_obj = shotid
            fwhm = surveyh5.root.Survey.read_where("shotid == shotid_obj")[
                "fwhm_virus"
            ][0]

        try:
            hdu = make_narrowband_image(
                coords=coords_obj,
                shotid=shotid_obj,
                wave_range=wave_range_obj,
                imsize=imsize * u.arcsec,
                pixscale=pixscale * u.arcsec,
                subcont=subcont,
                convolve_image=convolve_image,
                include_error=True,
            )
        except:
            print("Could not make narrowband image for {}".format(detectid))
            return np.nan, np.nan, np.nan

    elif friendid is not None:
        global friend_cat

        sel = friend_cat["friendid"] == friendid
        group = friend_cat[sel]
        coords_obj = SkyCoord(ra=group["icx"][0] * u.deg, dec=group["icy"][0] * u.deg)
        wave_obj = group["icz"][0]
        redshift = wave_obj / (1216) - 1
        linewidth = group["linewidth"][0]
        shotid_obj = group["shotid"][0]
        fwhm = group["fwhm"][0]
        amp = group["multiframe"][0]

        if wave_range is not None:
            wave_range_obj = wave_range
        else:
            wave_range_obj = [wave_obj - 2 * linewidth, wave_obj + 2 * linewidth]

        if shotid is not None:
            shotid_obj = shotid
            fwhm = surveyh5.root.Survey.read_where("shotid == shotid_obj")[
                "fwhm_virus"
            ][0]

        try:
            hdu = make_narrowband_image(
                coords=coords_obj,
                shotid=shotid_obj,
                wave_range=wave_range_obj,
                imsize=imsize * u.arcsec,
                pixscale=pixscale * u.arcsec,
                subcont=subcont,
                convolve_image=convolve_image,
                include_error=True,
            )
        except:
            print("Could not make narrowband image for {}".format(friendid))
            return np.nan, np.nan, np.nan

    elif coords is not None:
        coords_obj = coords

        if wave_range is not None:
            wave_range_obj = wave_range
        else:
            print(
                "You need to supply wave_range=[wave_start, wave_end] for collapsed image"
            )

        if shotid is not None:
            shotid_obj = shotid
            fwhm = surveyh5.root.Survey.read_where("shotid == shotid_obj")[
                "fwhm_virus"
            ][0]
        else:
            print("Enter the shotid to use (eg. 20200123003)")

        hdu = make_narrowband_image(
            coords=coords_obj,
            shotid=shotid_obj,
            wave_range=wave_range_obj,
            imsize=imsize * u.arcsec,
            pixscale=pixscale * u.arcsec,
            subcont=subcont,
            convolve_image=convolve_image,
            include_error=True,
        )
    else:
        print("You must provide a detectid, friendid or coords/wave_range/shotid")
        return np.nan, np.nan, np.nan

    w = wcs.WCS(hdu[0].header)

    if friendid is not None:
        sel_friend_group = friend_cat["friendid"] == friendid
        group = friend_cat[sel_friend_group]
        eps = 1 - group["a2"][0] / group["b2"][0]
        pa = group["pa"][0] * np.pi / 180.0 - 90
        sma = group["a"][0] * 3600 / pixscale

        coords = SkyCoord(ra=group["icx"][0] * u.deg, dec=group["icy"][0] * u.deg)
        wave_obj = group["icz"][0]
        redshift = wave_obj / (1216) - 1
        linewidth = np.nanmedian(group["linewidth"])
        shotid_obj = group["shotid"][0]
        fwhm = group["fwhm"][0]

        geometry = EllipseGeometry(
            x0=w.wcs.crpix[0], y0=w.wcs.crpix[0], sma=sma, eps=eps, pa=pa
        )
    else:
        geometry = EllipseGeometry(
            x0=w.wcs.crpix[0], y0=w.wcs.crpix[0], sma=20, eps=0.2, pa=20.0
        )

    # geometry.find_center(hdu.data)
    # aper = EllipticalAperture((geometry.x0, geometry.y0), geometry.sma,
    #                           geometry.sma*(1 - geometry.eps), geometry.pa)
    # plt.imshow(hdu.data, origin='lower')
    # aper.plot(color='white')

    ellipse = Ellipse(hdu[0].data)
    isolist = ellipse.fit_image()
    iso_tab = isolist.to_table()

    if len(iso_tab) == 0:
        geometry.find_center(hdu[0].data, verbose=False, threshold=0.5)
        ellipse = Ellipse(hdu[0].data, geometry)
        isolist = ellipse.fit_image()
        iso_tab = isolist.to_table()

    if len(iso_tab) == 0:
        return np.nan, np.nan, np.nan

    try:
        # compute isophotes manually in steps of 2 pixels
        ellipse = Ellipse(hdu[0].data)  # reset ellipse
        iso_list = []
        for sma in np.arange(1, 60, 2):
            iso = ellipse.fit_isophote(sma)
            if np.isnan(iso.intens):
                # print('break at {}'.format(sma))
                break
            else:
                iso_list.append(iso)
        isolist = IsophoteList(iso_list)
        iso_tab = isolist.to_table()
    except:
        return np.nan, np.nan, np.nan

    try:
        model_image = build_ellipse_model(hdu[0].data.shape, isolist)
        residual = hdu[0].data - model_image
    except:
        return np.nan, np.nan, np.nan

    sma = iso_tab["sma"] * pixscale
    const_arcsec_to_kpc = cosmo.kpc_proper_per_arcmin(redshift).value / 60.0

    def arcsec_to_kpc(sma):
        dist = const_arcsec_to_kpc * sma
        return dist

    def kpc_to_arcsec(dist):
        sma = dist / const_arcsec_to_kpc
        return sma

    dist_kpc = (
        sma * u.arcsec.to(u.arcmin) * u.arcmin * cosmo.kpc_proper_per_arcmin(redshift)
    )
    dist_arcsec = kpc_to_arcsec(dist_kpc)

    # print(shotid_obj, fwhm)
    # s_exp1d = models.Exponential1D(amplitude=0.2, tau=-50)

    alpha = 3.5
    s_moffat = models.Moffat1D(
        amplitude=1,
        gamma=(0.5 * fwhm) / np.sqrt(2 ** (1.0 / alpha) - 1.0),
        x_0=0.0,
        alpha=alpha,
        fixed={"amplitude": False, "x_0": True, "gamma": True, "alpha": True},
    )

    s_init = models.Exponential1D(amplitude=0.2, tau=-50)

    fit = fitting.LevMarLSQFitter()
    s_r = fit(s_init, dist_kpc, iso_tab["intens"])

    # Fitting can be done using the uncertainties as weights. To get the
    # standard weighting of 1/unc^2 for the case of Gaussian errors, the
    # weights to pass to the fitting are 1/unc.
    # fitted_line = fit(line_init, x, y, weights=1.0/yunc)
    # s_r = fit(s_init, dist_kpc, iso_tab['intens'])  # , weights=iso_tab['intens']/iso_tab['intens_err'])

    print(s_r)

    try:
        r_n = -1.0 * s_r.tau  # _0  # * const_arcsec_to_kpc
    except:
        r_n = np.nan
    # r_n = -1. * s_r.tau_0

    try:
        sel_iso = np.where(dist_kpc >= 2 * r_n)[0][0]
    except:
        sel_iso = -1

    aper = EllipticalAperture(
        (isolist.x0[sel_iso], isolist.y0[sel_iso]),
        isolist.sma[sel_iso],
        isolist.sma[sel_iso] * (1 - isolist.eps[sel_iso]),
        isolist.pa[sel_iso],
    )

    phottable = aperture_photometry(hdu[0].data, aper, error=hdu[1].data)
    flux = phottable["aperture_sum"][0] * 10 ** -17 * u.erg / (u.cm ** 2 * u.s)
    flux_err = phottable["aperture_sum_err"][0] * 10 ** -17 * u.erg / (u.cm ** 2 * u.s)

    lum_dist = cosmo.luminosity_distance(redshift).to(u.cm)
    lum = flux * 4.0 * np.pi * lum_dist ** 2
    lum_err = flux_err * 4.0 * np.pi * lum_dist ** 2

    if detectid:
        name = detectid
    elif friendid:
        name = friendid

    # Get image data from Elixer
    catlib = catalogs.CatalogLibrary()
    try:
        cutout = catlib.get_cutouts(
            position=coords_obj,
            side=imsize,
            aperture=None,
            dynamic=False,
            filter=["r", "g", "f606W"],
            first=True,
            allow_bad_image=False,
            allow_web=True,
        )[0]
    except:
        print("Could not get imaging for " + str(name))

    zscale = ZScaleInterval(contrast=0.5, krej=1.5)
    vmin, vmax = zscale.get_limits(values=hdu[0].data)

    fig = plt.figure(figsize=(20, 12))
    fig.suptitle(
        "{} ra={:3.2f}, dec={:3.2f}, wave={:5.2f}, z={:3.2f}, mf={}".format(
            name, coords_obj.ra.value, coords_obj.dec.value, wave_obj, redshift, amp
        ),
        fontsize=22,
    )

    ax1 = fig.add_subplot(231, projection=w)
    plt.imshow(hdu[0].data, vmin=vmin, vmax=vmax)
    plt.xlabel("RA")
    plt.ylabel("Dec")
    plt.colorbar()
    plt.title("Image summed across 4*linewidth")

    ax2 = fig.add_subplot(232, projection=w)
    plt.imshow(model_image, vmin=vmin, vmax=vmax)
    plt.xlabel("RA")
    plt.ylabel("Dec")
    plt.colorbar()
    plt.title("model")

    ax3 = fig.add_subplot(233, projection=w)
    plt.imshow(residual, vmin=vmin, vmax=vmax)
    plt.xlabel("RA")
    plt.ylabel("Dec")
    plt.colorbar()
    plt.title("residuals (image-model)")

    # fig = plt.figure(figsize=(10,5))
    im_zscale = ZScaleInterval(contrast=0.5, krej=2.5)
    im_vmin, im_vmax = im_zscale.get_limits(values=cutout["cutout"].data)

    ax4 = fig.add_subplot(234, projection=cutout["cutout"].wcs)
    plt.imshow(
        cutout["cutout"].data,
        vmin=im_vmin,
        vmax=im_vmax,
        origin="lower",
        cmap=plt.get_cmap("gray"),
        interpolation="none",
    )
    plt.text(
        0.8,
        0.9,
        cutout["instrument"] + cutout["filter"],
        transform=ax4.transAxes,
        fontsize=20,
        color="w",
    )
    plt.contour(hdu[0].data, transform=ax4.get_transform(w))
    plt.xlabel("RA")
    plt.ylabel("Dec")
    aper.plot(
        color="white", linestyle="dashed", linewidth=2, transform=ax4.get_transform(w)
    )

    ax5 = fig.add_subplot(235)
    plt.errorbar(
        dist_kpc.value,
        iso_tab["intens"],
        yerr=iso_tab["intens_err"] * iso_tab["intens"],
        linestyle="none",
        marker="o",
        label="Lya SB profile",
    )
    plt.plot(dist_kpc, s_r(dist_kpc), color="r", label="Lya exp SB model", linewidth=2)
    plt.xlabel("Semi-major axis (kpc)")
    # plt.xlabel('Semi-major axis (arcsec)')
    plt.ylabel("Flux ({})".format(10 ** -17 * (u.erg / (u.s * u.cm ** 2))))
    plt.text(0.4, 0.7, "r_n={:3.2f}".format(r_n), transform=ax5.transAxes, fontsize=16)
    plt.text(
        0.4, 0.6, "L_lya={:3.2e}".format(lum), transform=ax5.transAxes, fontsize=16
    )
    secax = ax5.secondary_xaxis("top", functions=(kpc_to_arcsec, kpc_to_arcsec))
    secax.set_xlabel("Semi-major axis (arcsec)")
    # secax.set_xlabel('Semi-major axis (kpc)')
    plt.xlim(0, 100)

    # plt.plot(sma, s_r(sma), label='moffat psf')
    # plt.plot(dist_kpc.value, s1(kpc_to_arcsec(dist_kpc.value)),
    #          linestyle='dashed', linewidth=2,
    #          color='green', label='PSF seeing:{:3.2f}'.format(fwhm))

    # These two are the exact same
    # s1 = models.Moffat1D()
    # s1.amplitude = iso_tab['intens'][0]
    # alpha = 3.5
    # s1.gamma = 0.5*(fwhm*const_arcsec_to_kpc) / np.sqrt(2 ** (1.0 / alpha) - 1.0)
    # s1.alpha = alpha
    # plt.plot(r_1d, moffat_1d, color='orange')
    # plt.plot(dist_kpc.value, (s1(dist_kpc.value)),
    #          linestyle='dashed', linewidth=2,
    #          color='blue', label='PSF seeing:{:3.2f}'.format(fwhm))

    E = Extract()
    E.load_shot(shotid_obj)
    moffat_psf = E.moffat_psf(seeing=fwhm, boxsize=imsize, scale=pixscale)
    moffat_shape = np.shape(moffat_psf)
    xcen = int(moffat_shape[1] / 2)
    ycen = int(moffat_shape[2] / 2)
    moffat_1d = (
        moffat_psf[0, xcen:-1, ycen] / moffat_psf[0, xcen, ycen] * iso_tab["intens"][0]
    )
    r_1d = moffat_psf[1, xcen:-1, ycen]
    E.close()

    plt.plot(
        arcsec_to_kpc(pixscale * np.arange(80)),
        iso_tab["intens"][0] * (moffat_psf[0, 80:-1, 80] / moffat_psf[0, 80, 80]),
        linestyle="dashed",
        color="green",
        label="PSF seeing:{:3.2f}".format(fwhm),
    )
    plt.legend()

    if friendid is not None:
        ax6 = fig.add_subplot(236, projection=cutout["cutout"].wcs)
        plot_friends(friendid, friend_cat, cutout, ax=ax6, label=False)
    plt.savefig("fit2d_{}.png".format(name))

    # filename = 'param_{}.txt'.format(name)
    # np.savetxt(filename, (r_n.value, lum.value))

    return r_n, lum, lum_err
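# Hedged usage sketch: assuming the module-level HETDEX handles (deth5,
# surveyh5, conth5, friend_cat, ...) are already open, a single detection can
# be fit like this; the detectid value is hypothetical.
r_n, lum, lum_err = fit_ellipse_for_source(detectid=2100000000)
if np.isfinite(lum_err):
    print(r_n, lum, lum_err)  # exponential scale length and Lya luminosity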
gzpath = '/Users/willettk/Astronomy/Research/GalaxyZoo'
version = '1_0_0'
nsa_decals = fits.getdata('%s/decals/nsa_decals_v%s_goodimgs.fits' % (gzpath, version), 1)
N = len(nsa_decals)

t = Table()
t['coords.0'] = nsa_decals['RA']
t['coords.1'] = nsa_decals['DEC']

# Calculate absolute size in kpc
size = [WMAP9.kpc_proper_per_arcmin(z).to(u.kpc/u.arcsec) * (r * u.arcsec)
        if z > 0 else -99. * u.kpc
        for z, r in zip(nsa_decals['Z'], nsa_decals['PETROTHETA'])]

# Calculate absolute and apparent magnitudes
absmag_r = [float(x[4]) for x in nsa_decals['ABSMAG']]
mag_faruv = [22.5 - 2.5*np.log10(x[0]) for x in nsa_decals['NMGY']]
mag_nearuv = [22.5 - 2.5*np.log10(x[1]) for x in nsa_decals['NMGY']]
mag_u = [22.5 - 2.5*np.log10(x[2]) for x in nsa_decals['NMGY']]
mag_g = [22.5 - 2.5*np.log10(x[3]) for x in nsa_decals['NMGY']]
mag_r = [22.5 - 2.5*np.log10(x[4]) for x in nsa_decals['NMGY']]
mag_i = [22.5 - 2.5*np.log10(x[5]) for x in nsa_decals['NMGY']]
mag_z = [22.5 - 2.5*np.log10(x[6]) for x in nsa_decals['NMGY']]

sizearr = [s.value for s in size]
fluxarr = [x[4] for x in nsa_decals['PETROFLUX']]
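# A small sketch of the nanomaggy-to-AB-magnitude conversion used above
# (m = 22.5 - 2.5 log10 f_nmgy), vectorized and guarded against nonpositive
# fluxes; the -99. sentinel mirrors the convention used for `size`.
import numpy as np

def nmgy_to_mag(flux_nmgy):
    flux_nmgy = np.asarray(flux_nmgy, dtype=float)
    mags = np.full(flux_nmgy.shape, -99.)
    good = flux_nmgy > 0
    mags[good] = 22.5 - 2.5 * np.log10(flux_nmgy[good])
    return mags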
def begin(index):
    #i = int(index)
    i = int(index.split('-')[0])
    #mgi = int(index.split('-')[1])
    color = index.split('-')[1]

    #try:
    print(index)
    #filename = 'Test Data Extract/' + str(i) + '.fit'
    #filename = str(i) + '-g.fit'
    #filename = '/data/marvels/billzhu/2175 Dataset/' + color + '/' + str(index) + '.fit'
    #filename = '/data/marvels/billzhu/2175 Reference Dataset/' + color + '/' + str(index) + '.fit'
    #filename = '/data/marvels/billzhu/Reference Dataset/0.37 - 0.55/' + color + '/' + str(index) + '.fit'
    filename = ('/data/marvels/billzhu/MG II Dataset/0.37 - 0.55/' + color +
                '/' + str(i) + '-' + color + '.fit')
    hdulist = fits.open(filename)
    scidata = hdulist[0].data.astype(float)
    #print(sigma_clipped_stats(scidata, sigma=3.0, iters=5))

    # Map the SDSS filter name onto its column index in the object table
    pointer = 0
    if color == 'g':
        pointer = 1
    if color == 'r':
        pointer = 2
    if color == 'i':
        pointer = 3
    if color == 'z':
        pointer = 4
    if color == 'u':
        pointer = 0

    try:
        #print('/data/marvels/billzhu/MG II Obj/0.37 - 0.55/' + str(i) + '.fit')
        #obj_table = Table.read('/data/marvels/billzhu/2175 Obj/' + str(i) + '-' + str(mgi) + '.fit', hdu=1)
        #obj_table = Table.read('/data/marvels/billzhu/2175 Reference Obj/' + str(i) + '-' + str(mgi) + '.fit', hdu=1)
        obj_table = Table.read('/data/marvels/billzhu/MG II Obj/0.37 - 0.55/' +
                               str(i) + '_Obj.fit', hdu=1)
        #obj_table = Table.read('/data/marvels/billzhu/Reference Obj/0.37 - 0.55/' + str(i) + '.fit', hdu=1)
    except:
        print(str(i) + ' No Table')
        return

    #line_data = linecache.getline('Full Data.txt', i).split()
    #line_data = linecache.getline('DR12 QSO.txt', i).split()
    #print(len(line_data))
    #obj_id = int(line_data[52])

    qlist = fits.open('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' +
                      color + '/' + str(i) + '-' + color + '_MG.fit')
    qx = qlist[0].header['XCOORD']
    qy = qlist[0].header['YCOORD']
    obj_id = qlist[0].header['ID']
    mean1 = qlist[0].header['MEAN_BKG']
    median1 = qlist[0].header['MED_BKG']
    std1 = qlist[0].header['STD_BKG']
    #print("%f, %f" % (x, y))
    qlist.close()

    print("%d, %f" % (obj_id, obj_table['M_rr_cc'][obj_id - 1][pointer]))

    # Calculate the 18th-magnitude flux threshold; fall back to per-band
    # approximations when FLUX20 is missing from the header
    mag18 = 0
    header = hdulist[0].header
    try:
        mag18 = header['FLUX20'] * 10**(8. - 18 / 2.5)
    except:
        if color == 'g':
            mag18 = 15000
        if color == 'r':
            mag18 = 10500
        if color == 'i':
            mag18 = 8800
        if color == 'z':
            mag18 = 1900
        print(str(i) + ' MAG20 APPROX = 15000')

    print('reached')

    largearr = []
    stars = []
    chunk_size = 50
    diff_fwhm = 1000000
    counter = 0
    #qsocount = photonCount(50, 50, 15, star_image)
    #mean1, median1, std1 = sigma_clipped_stats(scidata, sigma=3.0, iters=5)

    chosen = -1

    # Find the MG II absorber redshift for this quasar by positional match
    RA = float(linecache.getline('Full Data.txt', i).split()[1])
    DEC = float(linecache.getline('Full Data.txt', i).split()[2])
    redshift = 0
    for j in range(len(mgtable)):
        if (abs(mgtable['RA'][j] - RA) < 0.0001
                and abs(mgtable['DEC'][j] - DEC) < 0.0001
                and mgtable['ZABS'][j][0] >= 0.37
                and mgtable['ZABS'][j][0] < 0.55):
            redshift = mgtable['ZABS'][j][0]
            break
    if redshift == 0:
        return

    # kpc per pixel at the absorber redshift (0.396"/pix SDSS scale)
    scale = cosmo.kpc_proper_per_arcmin(redshift) * u.arcmin / u.kiloparsec * 0.396 / 60

    # First pass: pick a reference star that passes the flag, brightness,
    # bounds, and adaptive-moment cuts
    for j in range(len(obj_table)):
        sx = obj_table['colc'][j][pointer]
        sy = obj_table['rowc'][j][pointer]
        flags1 = detflags(obj_table['objc_flags'][j])
        flags2 = detflags(obj_table['objc_flags2'][j])

        #try:
        if (j != obj_id - 1 and obj_table['objc_type'][j] == 6
                and flags1[12] == False and flags1[17] == False
                and flags1[18] == False and flags2[27] == False
                and distance(sx, sy, qx, qy) > 5
                and inbounds(sx + chunk_size + 6, sy + chunk_size + 6)
                and inbounds(sx - chunk_size - 5, sy - chunk_size - 5)
                and obj_table['psfCounts'][j][pointer] > mag18
                and obj_table['M_rr_cc'][j][pointer] > 0
                and abs(obj_table['M_rr_cc'][j][pointer] -
                        obj_table['M_rr_cc'][obj_id - 1][pointer]) <
                    0.1 * obj_table['M_rr_cc'][obj_id - 1][pointer]):
            print(j)
            #try:
            qx = obj_table['colc'][j][pointer]
            qy = obj_table['rowc'][j][pointer]
            #mean1, median1, std1 = sigma_clipped_stats(scidata, sigma=3.0, iters=5)
            #preshift = scidata[int(yc - chunk_size - 5) : int(yc + chunk_size + 6), int(xc - chunk_size - 5) : int(xc + chunk_size + 6)]
            xu = qx + 350.0
            xl = qx - 350.0
            yu = qy + 350.0
            yl = qy - 350.0
            xc1 = qx
            yc1 = qy
            if xu + 350 >= 2048:
                xu = 2047
            if xl - 350 < 0:
                xl = 0
            else:
                xc1 = qx - int(qx) + 350.0
            if yu + 350 >= 1489:
                yu = 1488
            if yl - 350 < 0:
                yl = 0
            else:
                yc1 = qy - int(qy) + 350.0

            scidata2 = np.array(scidata[int(yl):int(yu), int(xl):int(xu)])
            visited = np.zeros((len(scidata2), len(scidata2[0])), dtype=bool)
            scidata2 = checkInner(scidata2, obj_table, qx, qy, xc1, yc1,
                                  mean1, std1, visited, pointer)
            preshift = scidata2[int(yc1 - chunk_size - 5):int(yc1 + chunk_size + 6),
                                int(xc1 - chunk_size - 5):int(xc1 + chunk_size + 6)]
            preshift -= calc_background(scidata2, xc1, yc1,
                                        int(400 / scale), int(500 / scale))
            spline = interpolate.interp2d(
                np.arange(int(xc1 - chunk_size - 5), int(xc1 + chunk_size + 6)),
                np.arange(int(yc1 - chunk_size - 5), int(yc1 + chunk_size + 6)),
                preshift)
            xrang = np.arange(xc1 - chunk_size, xc1 + chunk_size + 1)
            yrang = np.arange(yc1 - chunk_size, yc1 + chunk_size + 1)
            if len(xrang) > 2 * chunk_size + 1:
                xrang = xrang[:-1]
            if len(yrang) > 2 * chunk_size + 1:
                yrang = yrang[:-1]
            star_image = spline(xrang, yrang)
            if star_image[chunk_size, chunk_size] < 20:
                continue
            chosen = j
            print('reached point 1')
            break
        #except:
        #    continue

    if chosen == -1:
        print("NO SOURCES FOUND")
        return

    visited = connected(star_image, 50, 50, mean1 + 1 * std1,
                        np.zeros((101, 101), dtype=bool),
                        3 * obj_table['iso_a'][chosen][pointer] / 0.396 + 3)
    #print(np.shape(star_image))

    # Second pass: stack every other qualifying star, rescaled to the chosen
    # reference star's PSF flux
    for j in range(len(obj_table)):
        sx = obj_table['colc'][j][pointer]
        sy = obj_table['rowc'][j][pointer]
        flags2 = detflags(obj_table['objc_flags2'][j])
        flags1 = detflags(obj_table['objc_flags'][j])

        #try:
        if (j != chosen and j != obj_id - 1 and obj_table['objc_type'][j] == 6
                and flags1[12] == False and flags1[17] == False
                and flags1[18] == False and flags2[27] == False
                and distance(sx, sy, qx, qy) > 5
                and inbounds(sx + chunk_size + 6, sy + chunk_size + 6)
                and inbounds(sx - chunk_size - 5, sy - chunk_size - 5)
                and obj_table['psfCounts'][j][pointer] > mag18
                and obj_table['M_rr_cc'][j][pointer] > 0
                and abs(obj_table['M_rr_cc'][j][pointer] -
                        obj_table['M_rr_cc'][chosen][pointer]) <
                    0.1 * obj_table['M_rr_cc'][chosen][pointer]):
            xc = obj_table['colc'][j][pointer]
            yc = obj_table['rowc'][j][pointer]
            #preshift = scidata[int(yc - chunk_size - 5) : int(yc + chunk_size + 6), int(xc - chunk_size - 5) : int(xc + chunk_size + 6)]
            xu = xc + 350.0
            xl = xc - 350.0
            yu = yc + 350.0
            yl = yc - 350.0
            xc1 = xc
            yc1 = yc
            if xu >= 2048:
                xu = 2047
            if xl < 0:
                xl = 0
            else:
                xc1 = xc - int(xc) + 350.0
            if yu >= 1489:
                yu = 1488
            if yl < 0:
                yl = 0
            else:
                yc1 = yc - int(yc) + 350.0

            scidata2 = np.array(scidata[int(yl):int(yu), int(xl):int(xu)])
            visited1 = np.zeros((len(scidata2), len(scidata2[0])), dtype=bool)
            scidata2 = checkInner(scidata2, obj_table, xc, yc, xc1, yc1,
                                  mean1, std1, visited1, pointer)
            preshift = scidata2[int(yc1 - chunk_size - 5):int(yc1 + chunk_size + 6),
                                int(xc1 - chunk_size - 5):int(xc1 + chunk_size + 6)]
            preshift -= calc_background(scidata2, xc1, yc1,
                                        int(400 / scale), int(500 / scale))
            spline = interpolate.interp2d(
                np.arange(int(xc1 - chunk_size - 5), int(xc1 + chunk_size + 6)),
                np.arange(int(yc1 - chunk_size - 5), int(yc1 + chunk_size + 6)),
                preshift)
            xrang = np.arange(xc1 - chunk_size, xc1 + chunk_size + 1)
            yrang = np.arange(yc1 - chunk_size, yc1 + chunk_size + 1)
            if len(xrang) > 2 * chunk_size + 1:
                xrang = xrang[:-1]
            if len(yrang) > 2 * chunk_size + 1:
                yrang = yrang[:-1]
            shifted1 = spline(xrang, yrang)

            if shifted1[chunk_size][chunk_size] > 20:
                #shifted1 = normalize(shifted1, 50, 50, np.max(shifted1), np.max(star_image), mean1 + 5 * std1, np.zeros((101, 101), dtype=bool))
                #print(visited)
                #shifted1 = gaussian_filter(shifted1, sigma=std)
                #smax = np.max(shifted1)
                #objcount = photonCount(chunk_size, chunk_size, 15, shifted1)
                #fits.writeto('Test' + str(j) + '.fit', shifted1, overwrite=True)
                for r in range(len(visited)):
                    for c in range(len(visited)):
                        if visited[c, r] == True:
                            shifted1[c, r] /= obj_table['psfCounts'][j][pointer]
                            shifted1[c, r] *= obj_table['psfCounts'][chosen][pointer]
                #shifted1 /= objcount  # obj_table['psfCounts'][j][pointer]
                #shifted1 *= qsocount  # obj_table['psfCounts'][obj_id - 1][pointer]
                #fits.writeto('Test00' + str(counter) + '.fit', scidata2, overwrite=True)
                #print("%d, %f, %f" % (counter, obj_table['M_rr_cc'][j][pointer], obj_table['iso_a'][j][pointer]))
                #counter += 1
                #shifted1 -= np.median(shifted1)
                largearr.append(np.reshape(shifted1, (2 * chunk_size + 1)**2))
                #print(True)
                #stars.append(shifted1)
        #except:
        #    continue

    largearr = np.array(largearr)
    print(np.shape(largearr))

    # Set the number of components in the PCA; use incremental PCA (IPCA)
    # for efficiency and speed
    numcomp = len(largearr)

    # Need a healthy number of sources for the PSF fitting in order to
    # decrease noise, setting at 5% threshold
    #if len(largearr) < 10:
    #    print('No Sources')
    #    return

    print(numcomp)
    mean_vector = []
    #print(np.shape(largearr))
    try:
        for j in range(0, (2 * chunk_size + 1)**2):
            mean_vector.append(np.mean(largearr[:, j]))
    except:
        print("NO SOURCE FOUND")
        return

    largearr -= mean_vector

    ipca = IncrementalPCA(n_components=numcomp)
    ipca.fit(largearr)
    ipca_comp = ipca.components_
    #print(np.shape(ipca_comp))

    # Only fit the central portion of the reference star, since the full
    # cutout may be overfit given the strength of the IPCA components
    new_comp = []
    for j in range(len(largearr)):
        temp = np.reshape(ipca_comp[j, :],
                          (2 * chunk_size + 1, 2 * chunk_size + 1))
        new_comp.append(np.reshape(
            temp[chunk_size - 6:chunk_size + 7, chunk_size - 6:chunk_size + 7],
            169))

    new_comp = np.array(new_comp)
    new_comp = new_comp.T
    print(np.shape(new_comp))
    ipca_comp = ipca_comp.T
    #print(ipca_comp)
    #print(np.shape(largearr[0, :]))
    #print(np.shape(ipca_comp))

    take_final = numcomp

    # Final fitting of the first n components, as determined by take_final,
    # to the reference star in order to build the PSF model
    print(np.shape(ipca_comp))
    star_image1 = np.reshape(star_image, (2 * chunk_size + 1)**2)
    star_image1 -= mean_vector
    star_image1 = np.reshape(star_image1, (2 * chunk_size + 1, 2 * chunk_size + 1))
    coeff = np.dot(
        np.reshape(star_image1[chunk_size - 6:chunk_size + 7,
                               chunk_size - 6:chunk_size + 7], 169),
        new_comp)
    #coeff = np.dot(star_image, ipca_comp)

    final_fit = np.dot(ipca_comp[:, 0:take_final], coeff[0:take_final])
    final_fit += mean_vector
    final_fit = np.reshape(final_fit, (2 * chunk_size + 1, 2 * chunk_size + 1))
    #final_fit /= len(largearr)

    star_image1 = np.reshape(star_image1, (2 * chunk_size + 1)**2)
    star_image1 += mean_vector
    star_image1 = np.reshape(star_image1, (2 * chunk_size + 1, 2 * chunk_size + 1))

    print("%f, %f" % (np.max(star_image), np.max(final_fit)))

    # Final residual from subtracting the PSF model from the reference star
    residue = star_image - final_fit
    #mean, median, stddev = sigma_clipped_stats(residue, sigma=3.0, iters=10)
    #residue -= median

    try:
        #fits.writeto('/data/marvels/billzhu/2175 PSF Cut/' + color + '/' + str(index) + '_PSF.fit', final_fit, qsocut[0].header, overwrite=True)
        #fits.writeto('/data/marvels/billzhu/2175 PSF Subtract/' + color + '/' + str(index) + '_SUB.fit', residue, qsocut[0].header, overwrite=True)
        #fits.writeto('/data/marvels/billzhu/2175 Reference PSF Cut/' + color + '/' + str(index) + '_PSF.fit', final_fit, hdulist[0].header, overwrite=True)
        #fits.writeto('/data/marvels/billzhu/2175 Reference PSF Subtract/' + color + '/' + str(index) + '_SUB.fit', residue, hdulist[0].header, overwrite=True)
        #fits.writeto('/data/marvels/billzhu/Reference PSF Cut/0.37 - 0.55/' + color + '/' + str(index) + '_PSF.fit', final_fit, hdulist[0].header, overwrite=True)
        #fits.writeto('/data/marvels/billzhu/Reference PSF Subtract/0.37 - 0.55/' + color + '/' + str(index) + '_SUB.fit', residue, hdulist[0].header, overwrite=True)
        fits.writeto('/data/marvels/billzhu/Star PCA Cut/' + color + '/' +
                     str(i) + '-' + color + '_PSF.fit', final_fit,
                     hdulist[0].header, overwrite=True)
        fits.writeto('/data/marvels/billzhu/Star PCA Subtract/' + color + '/' +
                     str(i) + '-' + color + '_SUB.fit', residue,
                     hdulist[0].header, overwrite=True)
        #fits.writeto('Reference Subtract/' + str(i) + '_SUB.fit', residue, hdulist[0].header, overwrite=True)
        #fits.writeto('Reference PSF Cut/' + str(i) + '_PSF.fit', final_fit, hdulist[0].header, overwrite=True)
        print('\n')
        print("DONE TO BOTTOM")
    except:
        print('HEADER IS CORRUPT')
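# Hedged usage sketch: begin() expects an index of the form '<row>-<band>'
# (the row number below is hypothetical) and reads/writes the hard-coded
# /data/marvels/billzhu/... paths above.
if __name__ == '__main__':
    begin('137-g')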