Example #1
import os

import numpy as np
import fitsio
# assuming pydl's spheregroup module, as used explicitly in the later examples
from pydl.pydlutils import spheregroup


def gaia_read(ra, dec, radius, dr='dr1'):
    """Read Gaia sources within a given radius of a position

    Parameters
    ----------
    ra : np.float64
        right ascension to search around (degrees, J2000)
    dec : np.float64
        declination to search around (degrees, J2000)
    radius : np.float64
        radius of search (degrees)
    dr : string
        name of data release (default 'dr1')

    Returns
    -------
    data : ndarray
        array with Gaia data for matching objects

    Comments
    --------
    Requires data in $GAIA_DATA to have been processed by gaia_run()
    into files partitioned by RA/Dec
    """
    gaia_path = os.path.join(os.getenv('GAIA_DATA'), dr, 'gaia_source',
                             'fits_sorted')
    gaia_index = fitsio.read(os.path.join(gaia_path, 'gaia-index.fits'))
    ra_arr = np.array([np.float64(ra)])
    dec_arr = np.array([np.float64(dec)])
    (iindex, i0, dindex) = spheregroup.spherematch(gaia_index['racen'],
                                                   gaia_index['deccen'],
                                                   ra_arr,
                                                   dec_arr,
                                                   np.float64(radius) + 1.,
                                                   maxmatch=0)
    if (len(iindex) == 0):
        return None
    data = None
    for (ira, idec) in zip(gaia_index['ira'][iindex],
                           gaia_index['idec'][iindex]):
        filename = _partition_filename(ira, idec)
        if (os.path.isfile(filename)):
            gaia = fitsio.read(filename, ext=1)
            (igaia, i0, dgaia) = spheregroup.spherematch(gaia['ra'],
                                                         gaia['dec'],
                                                         ra_arr,
                                                         dec_arr,
                                                         np.float64(radius),
                                                         maxmatch=0)
            if (len(igaia) > 0):
                tmp_data = gaia[igaia]
                if (data is None):
                    data = np.zeros(0, dtype=gaia[0].dtype)
                data = np.append(data, tmp_data)
    return data
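
A minimal usage sketch for gaia_read; the position and radius below are hypothetical, and $GAIA_DATA must point at a directory already processed by gaia_run():

# Hypothetical search position (degrees, J2000) and radius (degrees).
data = gaia_read(ra=180.0, dec=30.0, radius=0.5, dr='dr1')
if data is not None:
    print('%i Gaia sources within 0.5 deg' % len(data))
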
Example #2

    def _iSEDfitMatch(self):
        ''' Match the GroupCat galaxies with iSEDfit galaxy properties from
        John Moustakas's MFData objects. The matching is done using PyDL's
        spherematch.
        '''
        # import SDSS MFdata catalog
        mfdata = mrdfits(code_dir(), 'dat/observations/mfdata_all_supergrid01_sdss.fits.gz') 
        spherematch_time = time.time()
        match1, match2, d_match = spherematch(
                self.ra, self.dec, mfdata.ra, mfdata.dec, 0.001)
        print('spherematch took ', time.time() - spherematch_time)

        iSEDfit_mass = np.repeat(-999., len(self.ra))
        iSEDfit_SFR = np.repeat(-999., len(self.ra))
        iSEDfit_SSFR = np.repeat(-999., len(self.ra))
        
        if np.max(self.z[match1] - mfdata.z[match2]) > 0.1:
            raise ValueError('matched galaxies have inconsistent redshifts')
        #wrong = np.argmax(self.z[match1] - mfdata.z[match2])
        #print self.ra[match1[wrong]], self.dec[match1[wrong]], self.z[match1[wrong]]
        #print mfdata.ra[match2[wrong]], mfdata.dec[match2[wrong]], mfdata.z[match2[wrong]]
        iSEDfit_mass[match1] = mfdata.mass[match2]
        iSEDfit_SFR[match1] = mfdata.sfr[match2]
        iSEDfit_SSFR[match1] = iSEDfit_SFR[match1]-iSEDfit_mass[match1]
        
        setattr(self, 'iSEDfit_mass', iSEDfit_mass) 
        setattr(self, 'iSEDfit_sfr', iSEDfit_SFR) 
        setattr(self, 'iSEDfit_ssfr', iSEDfit_SSFR) 
        return None
Example #3
    def ClosestStarsPoint(self, cRA, cDec, radius):
        """
        Return a SubTable of stars that are
        within `radius` of the point (cRA, cDec)
        """

        tRA = self.tab['RA']
        tDec = self.tab['Dec']

        indices = spherematch(tRA, tDec, cRA, cDec,
                              maxmatch=0, matchlength=radius)[0]

        return SubTable(self, self.tab['Index'][indices])
Example #4
    def ClosestStarsPoint(self, cRA, cDec, radius):
        """
        Return a SubTable of stars that are
        within `radius` of the point (cRA, cDec)
        """

        tRA = self.tab['RA']
        tDec = self.tab['Dec']

        indices = spherematch(tRA,
                              tDec,
                              cRA,
                              cDec,
                              maxmatch=0,
                              matchlength=radius)[0]

        return SubTable(self, self.tab['Index'][indices])
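
Both versions keep only element [0] of the spherematch return. As a reminder of pydl's interface (a standalone sketch, not taken from any catalog above): spherematch returns index arrays into the first and second coordinate sets plus the angular separations in degrees.

import numpy as np
from pydl.pydlutils.spheregroup import spherematch

ra1 = np.array([10.0, 10.001, 50.0])    # degrees
dec1 = np.array([20.0, 20.001, -5.0])
ra2 = np.array([10.0005])
dec2 = np.array([20.0005])

# m1 indexes (ra1, dec1); m2 indexes (ra2, dec2); d12 is in degrees.
m1, m2, d12 = spherematch(ra1, dec1, ra2, dec2, 0.01, maxmatch=0)
print(m1, m2, d12)  # both nearby set-1 points match the single set-2 point
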
Example #5
import time

import numpy as np
from pydl.pydlutils.spheregroup import spherematch


def BOSS_fibercollision(ra, dec):
    ''' Apply BOSS fiber collisions to a set of (ra, dec) positions.
    '''
    fib_angscale = 0.01722  # 62'' fiber collision angular scale
    t0 = time.time()
    m1, m2, d12 = spherematch(ra, dec, ra, dec, fib_angscale, maxmatch=2)
    print('spherematch takes %f sec' % (time.time() - t0))

    notitself = (d12 > 0.0)

    # only ~60% of galaxies within the angular scale are fiber collided
    # since 40% are in overlapping regions with substantially lower
    # fiber collision rates
    notoverlap = (np.random.uniform(size=len(m1)) > 0.6)

    fibcollided = np.zeros(len(ra)).astype(bool)
    fibcollided[m1[notitself & notoverlap]] = True
    return fibcollided
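
A usage sketch for BOSS_fibercollision with synthetic positions (the coordinates below are illustrative only):

import numpy as np

ra = np.random.uniform(150., 151., size=1000)
dec = np.random.uniform(20., 21., size=1000)
collided = BOSS_fibercollision(ra, dec)
print('%.1f%% of galaxies flagged as fiber collided' % (100. * collided.mean()))
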
Example #6

    def ClosestStars(self, center_index, radius):
        """
        Return a "SubTable" object of stars that are
        within `radius` of the `center_index` star
        """

        tRA = self.tab['RA']
        tDec = self.tab['Dec']
        cRA = np.array([self.tab['RA'][center_index]])
        cDec = np.array([self.tab['Dec'][center_index]])

        indices = spherematch(tRA,
                              tDec,
                              cRA,
                              cDec,
                              maxmatch=0,
                              matchlength=radius)[0]

        return SubTable(self, self.tab['Index'][indices])
Example #7

    def _Match_OtherSFR(self, lit='salim2016'):
        ''' Match galaxies with UV SSFR and stellar masses from Salim et al. (2007).
        Matching is done using PyDL's spherematch.
        '''
        # import other data 
        if lit == 'salim2016': 
            nsa_file = ''.join(['/mount/sirocco1/hahn/cenque/', 'GSWLC-A1.dat'])
            nsa_ra, nsa_dec, nsa_z, nsa_mass, nsa_sfr = np.loadtxt(nsa_file, unpack=True, usecols=[5, 6, 7, 9, 11])
        elif lit == 'uv': 
            nsa_file = ''.join(['/mount/sirocco1/hahn/cenque/', 'nsaid_ra_dec_z_mass_uvssfr']) 
            nsa_ra, nsa_dec, nsa_z, nsa_mass, nsa_uvssfr = np.loadtxt(nsa_file, unpack=True, usecols=[1,2,3,4,5])
            print(nsa_uvssfr.min(), nsa_uvssfr.max(), np.mean(nsa_uvssfr))
            nsa_mass = np.log10(nsa_mass)
            nsa_sfr = (nsa_mass + nsa_uvssfr)  # UV SFR

        spherematch_time = time.time()
        match1, match2, d_match = spherematch(self.ra, self.dec, nsa_ra, nsa_dec, 0.001)
        print('spherematch took ', time.time() - spherematch_time)
        print(float(len(match1)) / float(len(self.ra)))
        
        salim_match = np.repeat(-999, len(self.ra))
        salim_mass = np.repeat(-999., len(self.ra))
        salim_SFR = np.repeat(-999., len(self.ra))
        salim_SSFR = np.repeat(-999., len(self.ra))
        if np.max(self.z[match1] - nsa_z[match2]) > 0.05:
            raise ValueError('matched galaxies have inconsistent redshifts')
        salim_match[match1] = match2
        if lit == 'uv': 
            salim_mass[match1] = self.mass[match1]
        else: 
            salim_mass[match1] = nsa_mass[match2]
        salim_SFR[match1] = nsa_sfr[match2]
        salim_SSFR[match1] = salim_SFR[match1] - salim_mass[match1]
        
        setattr(self, lit+'_match', salim_match) 
        setattr(self, lit+'_mass', salim_mass) 
        setattr(self, lit+'_sfr', salim_SFR) 
        setattr(self, lit+'_ssfr', salim_SSFR) 
        return None
Example #8
def Build_MPAJHU_TinkerCatalog_ASCII(Mrcut=18):
    ''' Append MPA-JHU SSFR values to the Tinker et al. (2011) catalog.
    The main purpose is to try to reproduce the Kauffmann et al. (2013) results. 
    Galaxies are matched to each other through spherematch. 
    '''
    # import Tinker et al. (2011) catalog with specified Mr cut
    catalog = TinkerCatalog(Mrcut=Mrcut)

    # import MPA-JHU catalog
    mpajhu_gals = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_info_dr7_v5_2.fit']))
    # SFR total
    mpajhu_sfrtot = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_totsfr_dr7_v5_2.fits']))
    # SFR fiber
    mpajhu_sfrfib = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_fibsfr_dr7_v5_2.fits']))
    # SSFR total
    mpajhu_ssfrtot = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_totspecsfr_dr7_v5_2.fits']))
    # SSFR fiber
    mpajhu_ssfrfib = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_fibspecsfr_dr7_v5_2.fits']))
    # stellar mass total
    mpajhu_masstot = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'totlgm_dr7_v5_2.fit']))
    # stellar mass fiber
    mpajhu_massfib = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'fiblgm_dr7_v5_2.fit']))

    t_spherematch = time.time()
    match = spherematch(catalog['ra'], catalog['dec'], mpajhu_gals.ra,
                        mpajhu_gals.dec, 0.000833333)
    print('Spherematch with matchlength = ', 0.000833333)
    print('takes ', time.time() - t_spherematch, 'seconds')
    print(1. - float(len(match[0])) / float(len(catalog['ra'])),
          'of the VAGC galaxies')
    print('do not have matches, likely due to fiber collisions')
    if len(match[0]) != len(np.unique(match[0])):
        raise ValueError('some galaxies matched more than one MPA-JHU object')

    # save the MPAJHU indices, just in case
    catalog['mpajhu_index'] = np.repeat(-999, len(catalog['ra']))
    catalog['mpajhu_index'][match[0]] = match[1]

    # append SFR, SSFR, and mass values to catalog
    for col in [
            'sfr_tot_mpajhu', 'sfr_fib_mpajhu', 'ssfr_tot_mpajhu',
            'ssfr_fib_mpajhu', 'mass_tot_mpajhu', 'mass_fib_mpajhu'
    ]:  # initialize arrays
        catalog[col] = np.repeat(-999., len(catalog['ra']))

    catalog['sfr_tot_mpajhu'][match[0]] = mpajhu_sfrtot.median[match[1]]
    catalog['sfr_fib_mpajhu'][match[0]] = mpajhu_sfrfib.median[match[1]]
    catalog['ssfr_tot_mpajhu'][match[0]] = mpajhu_ssfrtot.median[match[1]]
    catalog['ssfr_fib_mpajhu'][match[0]] = mpajhu_ssfrfib.median[match[1]]
    catalog['mass_tot_mpajhu'][match[0]] = mpajhu_masstot.median[match[1]]
    catalog['mass_fib_mpajhu'][match[0]] = mpajhu_massfib.median[match[1]]
    print(mpajhu_massfib.median[match[1]])

    first_cols = [
        'id_gal', 'ra', 'dec', 'z', 'mass', 'sfr', 'ssfr', 'mass_tot_mpajhu',
        'mass_fib_mpajhu', 'sfr_tot_mpajhu', 'sfr_fib_mpajhu',
        'ssfr_tot_mpajhu', 'ssfr_fib_mpajhu'
    ]

    data_fmt = []
    data_list = []
    for i_key, key in enumerate(first_cols):
        data_list.append(catalog[key])
        if key == 'id_gal':
            data_fmt.append('%i')
        else:
            data_fmt.append('%10.5f')

    later_cols = []
    for key in catalog.keys():
        if key not in first_cols:
            later_cols.append(key)

    for key in later_cols:
        data_list.append(catalog[key])
        if 'id' in key:
            data_fmt.append('%i')
        elif 'index' in key:
            data_fmt.append('%i')
        elif key == 'n_sersic':
            data_fmt.append('%i')
        elif key == 'stellmass':
            data_fmt.append('%1.5e')
        else:
            data_fmt.append('%10.5f')

    str_header = ', '.join(first_cols + later_cols)

    M_cut = Tinker_Masscut(Mrcut)
    mpajhu_tinker_file = ''.join([
        UT.dir_dat(), 'tinker2011catalogs/', 'GroupCat.Mr',
        str(Mrcut), '.Mass',
        str(M_cut), '.D360.MPAJHU.dat'
    ])
    np.savetxt(mpajhu_tinker_file, (np.vstack(np.array(data_list))).T,
               fmt=data_fmt,
               delimiter='\t',
               header=str_header)
    return None
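
The ASCII writer above relies on np.savetxt accepting one format string per column; a minimal, self-contained sketch of that pattern (the file and column names are made up):

import numpy as np

ids = np.arange(5)                    # integer column
ra = np.random.uniform(0., 360., 5)   # float columns
dec = np.random.uniform(-90., 90., 5)

# Stack the columns as rows, transpose, and give one fmt per column,
# as Build_MPAJHU_TinkerCatalog_ASCII does above.
np.savetxt('example.dat', np.vstack([ids, ra, dec]).T,
           fmt=['%i', '%10.5f', '%10.5f'], delimiter='\t',
           header='id, ra, dec')
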
Example #9
def Build_VAGCdr72_MPAJHU(Ascii=False):
    ''' Build VAGC dr72 with cross referenced MPAJHU stellar masses 
    and SSFRs.
    '''
    # import VAGC dr72bright34
    vagc_dr72 = VAGCdr72bright34_Catalog()
    print('%i VAGC dr72bright34 galaxies' % len(vagc_dr72['ra']))

    # import MPA-JHU catalog
    mpajhu_gals = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_info_dr7_v5_2.fit']))
    # SFR total
    mpajhu_sfrtot = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_totsfr_dr7_v5_2.fits']))
    # SFR fiber
    mpajhu_sfrfib = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_fibsfr_dr7_v5_2.fits']))
    # SSFR total
    mpajhu_ssfrtot = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_totspecsfr_dr7_v5_2.fits']))
    # SSFR fiber
    mpajhu_ssfrfib = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_fibspecsfr_dr7_v5_2.fits']))
    # stellar mass total
    mpajhu_masstot = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'totlgm_dr7_v5_2.fit']))
    # stellar mass fiber
    mpajhu_massfib = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'fiblgm_dr7_v5_2.fit']))

    catalog = {}
    catalog['ra'] = vagc_dr72['ra']
    catalog['dec'] = vagc_dr72['dec']
    catalog['z'] = vagc_dr72['z']
    for i_band, band in enumerate(['u', 'g', 'r', 'i', 'z']):
        catalog['M_' + band] = vagc_dr72['M_' + band]

    t_spherematch = time.time()
    match = spherematch(catalog['ra'], catalog['dec'], mpajhu_gals.ra,
                        mpajhu_gals.dec, 0.000833333)
    print('Spherematch with matchlength = ', 0.000833333)
    print('takes ', time.time() - t_spherematch, 'seconds')
    print(1. - float(len(match[0])) / float(len(catalog['ra'])),
          'of the VAGC galaxies')
    print('do not have matches')
    if len(match[0]) != len(np.unique(match[0])):
        raise ValueError('some galaxies matched more than one MPA-JHU object')

    # save the MPAJHU indices, just in case
    catalog['mpajhu_index'] = np.repeat(-999, len(catalog['ra']))
    catalog['mpajhu_index'][match[0]] = match[1]

    # append SFR, SSFR, and mass values to catalog
    for col in [
            'sfr_tot', 'sfr_fib', 'ssfr_tot', 'ssfr_fib', 'mass_tot',
            'mass_fib'
    ]:  # initialize arrays
        catalog[col] = np.repeat(-999., len(catalog['ra']))

    catalog['sfr_tot'][match[0]] = mpajhu_sfrtot.median[match[1]]
    catalog['sfr_fib'][match[0]] = mpajhu_sfrfib.median[match[1]]
    catalog['ssfr_tot'][match[0]] = mpajhu_ssfrtot.median[match[1]]
    catalog['ssfr_fib'][match[0]] = mpajhu_ssfrfib.median[match[1]]
    catalog['mass_tot'][match[0]] = mpajhu_masstot.median[match[1]]
    catalog['mass_fib'][match[0]] = mpajhu_massfib.median[match[1]]

    mpajhu_file = ''.join(
        [UT.dir_dat(), 'vagc/', 'VAGCdr72.MPAJHU.nocut.hdf5'])

    f = h5py.File(mpajhu_file, 'w')
    grp = f.create_group('data')
    for key in catalog.keys():
        grp.create_dataset(key, data=catalog[key])
    f.close()

    if Ascii:  # write to ASCII (for Jeremy)
        mpajhu_file = ''.join(
            [UT.dir_dat(), 'vagc/', 'VAGCdr72.MPAJHU.nocut.dat'])
        column_order = [
            'ra', 'dec', 'z', 'mass_tot', 'sfr_tot', 'ssfr_tot', 'mass_fib',
            'sfr_fib', 'ssfr_fib'
        ]
        data_list = []
        data_fmt = ['%10.5f' for i in range(len(column_order))]
        str_header = ''
        for col in column_order:
            data_list.append(catalog[col])
            if 'mass' in col:
                str_header += ' ' + col + ' (Msun),'
            elif 'sfr' in col:
                if 'ssfr' not in col:
                    str_header += ' ' + col + ' (Msun/yr),'
                else:
                    str_header += ' ' + col + ','
            else:
                str_header += ' ' + col + ','
        np.savetxt(mpajhu_file, (np.vstack(np.array(data_list))).T,
                   fmt=data_fmt,
                   delimiter='\t',
                   header=str_header)
    return None
Example #10
def Build_KauffmannParent():
    ''' Try to create the parent sample of Kauffmann et al. (2013)
    '''
    # import VAGC dr72bright34
    vagc_dr72 = VAGCdr72bright34_Catalog()

    # import MPA-JHU catalog
    mpajhu_gals = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_info_dr7_v5_2.fit']))
    # SFR total
    mpajhu_sfrtot = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_totsfr_dr7_v5_2.fits']))
    # SFR fiber
    mpajhu_sfrfib = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_fibsfr_dr7_v5_2.fits']))
    # SSFR total
    mpajhu_ssfrtot = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_totspecsfr_dr7_v5_2.fits']))
    # SSFR fiber
    mpajhu_ssfrfib = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_fibspecsfr_dr7_v5_2.fits']))
    # stellar mass total
    mpajhu_masstot = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'totlgm_dr7_v5_2.fit']))
    # stellar mass fiber
    mpajhu_massfib = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'fiblgm_dr7_v5_2.fit']))

    catalog = {}
    catalog['ra'] = vagc_dr72['ra']
    catalog['dec'] = vagc_dr72['dec']
    catalog['z'] = vagc_dr72['z']
    for i_band, band in enumerate(['u', 'g', 'r', 'i', 'z']):
        catalog['M_' + band] = vagc_dr72['M_' + band]

    # pre cut
    cut_z = (catalog['z'] > 0.017) & (catalog['z'] < 0.03)
    pre_cuts = np.where(cut_z)  # stellar-mass and absolute-magnitude cuts are applied after matching
    for key in catalog.keys():
        catalog[key] = catalog[key][pre_cuts]

    t_spherematch = time.time()
    match = spherematch(catalog['ra'], catalog['dec'], mpajhu_gals.ra,
                        mpajhu_gals.dec, 0.000833333)
    print('Spherematch with matchlength = ', 0.000833333)
    print('takes ', time.time() - t_spherematch, 'seconds')
    print(1. - float(len(match[0])) / float(len(catalog['ra'])),
          'of the VAGC galaxies')
    print('do not have matches')
    if len(match[0]) != len(np.unique(match[0])):
        raise ValueError('some galaxies matched more than one MPA-JHU object')

    # save the MPAJHU indices, just in case
    catalog['mpajhu_index'] = np.repeat(-999, len(catalog['ra']))
    catalog['mpajhu_index'][match[0]] = match[1]

    # append SFR, SSFR, and mass values to catalog
    for col in [
            'sfr_tot_mpajhu', 'sfr_fib_mpajhu', 'ssfr_tot_mpajhu',
            'ssfr_fib_mpajhu', 'mass_tot_mpajhu', 'mass_fib_mpajhu'
    ]:  # initialize arrays
        catalog[col] = np.repeat(-999., len(catalog['ra']))

    catalog['sfr_tot_mpajhu'][match[0]] = mpajhu_sfrtot.median[match[1]]
    catalog['sfr_fib_mpajhu'][match[0]] = mpajhu_sfrfib.median[match[1]]
    catalog['ssfr_tot_mpajhu'][match[0]] = mpajhu_ssfrtot.median[match[1]]
    catalog['ssfr_fib_mpajhu'][match[0]] = mpajhu_ssfrfib.median[match[1]]
    catalog['mass_tot_mpajhu'][match[0]] = mpajhu_masstot.median[match[1]]
    catalog['mass_fib_mpajhu'][match[0]] = mpajhu_massfib.median[match[1]]

    # Kauffmann et al. (2013) cuts
    cut_stellarmass = (catalog['mass_tot_mpajhu'] > 9.25)
    cut_absmag = (catalog['M_r'] < -16.) & (catalog['M_r'] > -24.)
    cut_match = (catalog['mpajhu_index'] != -999)

    final_cuts = np.where(cut_stellarmass & cut_absmag & cut_match)
    for key in catalog.keys():
        catalog[key] = catalog[key][final_cuts]

    mpajhu_file = ''.join(
        [UT.dir_dat(), 'vagc/', 'VAGCdr72.Kauff2013cut.hdf5'])

    f = h5py.File(mpajhu_file, 'w')
    grp = f.create_group('data')
    for key in catalog.keys():
        grp.create_dataset(key, data=catalog[key])
    f.close()
    return None
Example #11
def Build_MPAJHU_TinkerCatalog(Mrcut=18):
    ''' Append MPA-JHU SSFR values to the Tinker et al. (2011) catalog.
    The main purpose is to try to reproduce the Kauffmann et al. (2013) results. 
    Galaxies are matched to each other through spherematch. 
    '''
    # import Tinker et al. (2011) catalog with specified Mr cut
    catalog = TinkerCatalog(Mrcut=Mrcut)

    # import MPA-JHU catalog
    mpajhu_gals = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_info_dr7_v5_2.fit']))
    # SFR total
    mpajhu_sfrtot = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_totsfr_dr7_v5_2.fits']))
    # SFR fiber
    mpajhu_sfrfib = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_fibsfr_dr7_v5_2.fits']))
    # SSFR total
    mpajhu_ssfrtot = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_totspecsfr_dr7_v5_2.fits']))
    # SSFR fiber
    mpajhu_ssfrfib = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'gal_fibspecsfr_dr7_v5_2.fits']))
    # stellar mass total
    mpajhu_masstot = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'totlgm_dr7_v5_2.fit']))
    # stellar mass fiber
    mpajhu_massfib = mrdfits(''.join(
        [UT.dir_dat(), 'mpa_jhu/', 'fiblgm_dr7_v5_2.fit']))

    t_spherematch = time.time()
    match = spherematch(catalog['ra'], catalog['dec'], mpajhu_gals.ra,
                        mpajhu_gals.dec, 0.000833333)
    print('Spherematch with matchlength = ', 0.000833333)
    print('takes ', time.time() - t_spherematch, 'seconds')
    print(1. - float(len(match[0])) / float(len(catalog['ra'])),
          'of the VAGC galaxies')
    print('do not have matches, likely due to fiber collisions')
    if len(match[0]) != len(np.unique(match[0])):
        raise ValueError('some galaxies matched more than one MPA-JHU object')

    # save the MPAJHU indices, just in case
    catalog['mpajhu_index'] = np.repeat(-999, len(catalog['ra']))
    catalog['mpajhu_index'][match[0]] = match[1]

    # append SFR, SSFR, and mass values to catalog
    for col in [
            'sfr_tot_mpajhu', 'sfr_fib_mpajhu', 'ssfr_tot_mpajhu',
            'ssfr_fib_mpajhu', 'mass_tot_mpajhu', 'mass_fib_mpajhu'
    ]:  # initialize arrays
        catalog[col] = np.repeat(-999., len(catalog['ra']))

    catalog['sfr_tot_mpajhu'][match[0]] = mpajhu_sfrtot.median[match[1]]
    catalog['sfr_fib_mpajhu'][match[0]] = mpajhu_sfrfib.median[match[1]]
    catalog['ssfr_tot_mpajhu'][match[0]] = mpajhu_ssfrtot.median[match[1]]
    catalog['ssfr_fib_mpajhu'][match[0]] = mpajhu_ssfrfib.median[match[1]]
    catalog['mass_tot_mpajhu'][match[0]] = mpajhu_masstot.median[match[1]]
    catalog['mass_fib_mpajhu'][match[0]] = mpajhu_massfib.median[match[1]]

    # trim galaxies without matches
    hasmatch = np.where(catalog['mpajhu_index'] != -999)
    for key in catalog.keys():
        key_val = catalog[key]
        catalog[key] = key_val[hasmatch]
    catalog['mpajhu_tinker_index'] = hasmatch[0]

    M_cut = Tinker_Masscut(Mrcut)
    mpajhu_tinker_file = ''.join([
        UT.dir_dat(), 'tinker2011catalogs/', 'GroupCat.Mr',
        str(Mrcut), '.Mass',
        str(M_cut), '.D360.MPAJHU.hdf5'
    ])

    f = h5py.File(mpajhu_tinker_file, 'w')
    grp = f.create_group('data')
    for key in catalog.keys():
        grp.create_dataset(key, data=catalog[key])

    f.close()
    return None
Example #12
def _fanuc_check(plateid=None):
    """Read in plDrillPos file and make sure holes do not overlap

    Parameters
    ----------
    plateid : np.int32, int
        plate ID

    Returns
    -------
    ok : boolean
        True if OK, False if not

    Notes 
    -----
    Assumes 5 arcmin is the largest separation it needs to check

    For the off-axis acquisition camera at LCO,
     it assumes a 55 x 40 mm footprint.
    """
    offaxis_xsize = 55.
    offaxis_ysize = 40.
    drillpos_template = 'plDrillPos-{plate}.par' + post_str
    drillpos_name = drillpos_template.format(plate=plateid)
    dpos = yanny.yanny(drillpos_name)
    (m1, m2, d12) = spheregroup.spherematch(dpos['DRILLPOS']['ra'],
                                            dpos['DRILLPOS']['dec'],
                                            dpos['DRILLPOS']['ra'],
                                            dpos['DRILLPOS']['dec'],
                                            5. / 60., maxmatch=0)
    for indx1, indx2 in zip(m1, m2):
        if(indx1 != indx2):
            dx = (dpos['DRILLPOS']['xDrill'][indx1] -
                  dpos['DRILLPOS']['xDrill'][indx2])
            dy = (dpos['DRILLPOS']['yDrill'][indx1] -
                  dpos['DRILLPOS']['yDrill'][indx2])
            d12 = np.sqrt(dx**2 + dy**2)
            limit = 0.5 * (dpos['DRILLPOS']['holeDiam'][indx1] +
                           dpos['DRILLPOS']['holeDiam'][indx2])
            if(d12 < limit):
                conflict = True
                holeType1 = dpos['DRILLPOS']['holeType'][indx1].decode()
                holeType2 = dpos['DRILLPOS']['holeType'][indx2].decode()
                # Special case for acquisition camera
                if(holeType1 == "ACQUISITION_OFFAXIS" or
                   holeType2 == "ACQUISITION_OFFAXIS"):
                    conflict = False
                    x1 = dpos['DRILLPOS']['xDrill'][indx1]
                    x2 = dpos['DRILLPOS']['xDrill'][indx2]
                    y1 = dpos['DRILLPOS']['yDrill'][indx1]
                    y2 = dpos['DRILLPOS']['yDrill'][indx2]
                    if(holeType1 == "ACQUISITION_OFFAXIS"):
                        holeDiam = dpos['DRILLPOS']['holeDiam'][indx2]
                    else:
                        holeDiam = dpos['DRILLPOS']['holeDiam'][indx1]
                    if(np.abs(x1 - x2) < 0.5 * (offaxis_xsize + holeDiam) and
                       np.abs(y1 - y2) < 0.5 * (offaxis_ysize + holeDiam)):
                        conflict = True
                if(conflict is True):
                    return(False)
    return(True)
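
The acquisition-camera special case above reduces to a box-vs-circle proximity test; a standalone sketch of that test (the function name is illustrative, not from the plate design code):

def box_circle_overlap(x1, y1, x2, y2, xsize, ysize, diam):
    """Return True if a hole of diameter diam at (x2, y2) can overlap a
    rectangular footprint of xsize x ysize centered at (x1, y1); the hole
    is treated as its bounding square, as _fanuc_check effectively does."""
    return (abs(x1 - x2) < 0.5 * (xsize + diam) and
            abs(y1 - y2) < 0.5 * (ysize + diam))

# LCO off-axis camera footprint (55 x 40 mm) vs. a 3 mm hole 28 mm away in x:
print(box_circle_overlap(0., 0., 28., 0., 55., 40., 3.))  # True (28 < 29)
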
Example #13
    def _Build(self, field, dr_gama=3, dr_legacy=7, silent=True):
        ''' Get Legacy Survey photometry for the GAMA DR`dr_gama` photo+spec
        objects from the sweep files. This is meant to run on NERSC, but you
        can also download the sweep files manually and specify the directory
        where they are located.
        '''
        from pydl.pydlutils.spheregroup import spherematch
        if dr_legacy == 5:
            sweep_n_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr5/sweep/5.0/'
            sweep_s_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr5/sweep/5.0/'
            tractor_n_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr5/tractor/'
            tractor_s_dir = tractor_n_dir  # dr5 has no north/south split
        elif dr_legacy == 7:
            sweep_n_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr7/sweep/7.1/'
            sweep_s_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr7/sweep/7.1/'
            tractor_n_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr7/tractor/'
            tractor_s_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr7/tractor/'
        elif dr_legacy == 8:
            sweep_n_dir = \
                    '/global/project/projectdirs/cosmo/data/legacysurvey/dr8/north/sweep/8.0/'
            sweep_s_dir = \
                    '/global/project/projectdirs/cosmo/data/legacysurvey/dr8/south/sweep/8.0/'
            tractor_n_dir = \
                    '/global/project/projectdirs/cosmo/data/legacysurvey/dr8/north/tractor/'
            tractor_s_dir = \
                    '/global/project/projectdirs/cosmo/data/legacysurvey/dr8/south/tractor/'

        # read in the names of the sweep files
        fsweep = ''.join([UT.dat_dir(), 'legacy/', field, '.sweep_list.dat'])
        if not os.path.isfile(fsweep):
            _ = self._getSweeps(field, silent=silent)
        sweep_files = np.loadtxt(fsweep, unpack=True, usecols=[0], dtype='S')
        if not silent:
            print("there are %i sweep files in the %s GAMA region" %
                  (len(sweep_files), field))

        # read in GAMA objects
        gama = GAMA()
        gama_data = gama.Read(field, data_release=dr_gama, silent=silent)

        sweep_dict = {}
        gama_photo_dict, gama_spec_dict, gama_kcorr0_dict, gama_kcorr1_dict = {}, {}, {}, {}
        # loop through the files and only keep ones that spherematch with GAMA objects
        for i_f, f in enumerate(sweep_files):
            # read in sweep object
            for sweep_dir in [sweep_n_dir, sweep_s_dir]:
                fsweep = os.path.join(sweep_dir, f.decode('unicode_escape'))
                if os.path.isfile(fsweep): break
            sweep = fits.open(fsweep)[1].data
            if not silent: print('matching %s' % fsweep)

            # spherematch the sweep objects with GAMA objects
            if len(sweep['ra']) > len(gama_data['photo']['ra']):
                match = spherematch(sweep['ra'], sweep['dec'],
                                    gama_data['photo']['ra'],
                                    gama_data['photo']['dec'], 0.000277778)
            else:
                match_inv = spherematch(gama_data['photo']['ra'],
                                        gama_data['photo']['dec'], sweep['ra'],
                                        sweep['dec'], 0.000277778)
                match = [match_inv[1], match_inv[0], match_inv[2]]

            if not silent:
                print('%i matches from the %s sweep file' % (len(match[0]), f))

            # save sweep photometry to `sweep_dict`
            for key in sweep.names:
                if i_f == 0:
                    sweep_dict[key.lower()] = sweep[key][match[0]]
                else:
                    sweep_dict[key.lower()] = np.concatenate(
                        [sweep_dict[key.lower()], sweep[key][match[0]]])

            # save matching GAMA data ('photo', 'spec', and kcorrects)
            for gkey, gdict in zip(
                ['photo', 'spec', 'kcorr_z0.0', 'kcorr_z0.1'], [
                    gama_photo_dict, gama_spec_dict, gama_kcorr0_dict,
                    gama_kcorr1_dict
                ]):
                for key in gama_data[gkey].keys():
                    if i_f == 0:
                        gdict[key] = gama_data[gkey][key][match[1]]
                    else:
                        gdict[key] = np.concatenate(
                            [gdict[key], gama_data[gkey][key][match[1]]])

            del sweep  # free memory? (apparently not really)

        if not silent:
            print('========================')
            print('%i objects out of %i GAMA objects matched' %
                  (len(sweep_dict['ra']), len(gama_data['photo']['dec'])))

        assert len(sweep_dict['ra']) == len(gama_photo_dict['ra'])
        assert len(sweep_dict['ra']) == len(gama_spec_dict['ra'])
        assert len(sweep_dict['ra']) == len(gama_kcorr0_dict['mass'])
        assert len(sweep_dict['ra']) == len(gama_kcorr1_dict['mass'])

        # writeout all the GAMA objects without sweep objects
        if not silent:
            nosweep = ~np.in1d(gama_data['photo']['objid'],
                               gama_photo_dict['objid'])
            f_nosweep = ''.join([
                UT.dat_dir(), 'GAMAdr',
                str(dr_gama), '.', field, '.LEGACYdr',
                str(dr_legacy), '.nosweep_match.fits'
            ])
            print('========================')
            print(
                'Writing out RA, Dec of %i GAMA objects without Legacy sweep objects to %s'
                % (np.sum(nosweep), f_nosweep))
            tb = aTable([
                gama_data['photo']['ra'][nosweep],
                gama_data['photo']['dec'][nosweep]
            ],
                        names=('ra', 'dec'))
            tb.meta[
                'COMMENTS'] = 'RA, Dec of GAMA objects without matches in Legacy DR5 sweep'
            tb.write(f_nosweep, format='fits', overwrite=True)
            #np.savetxt(f_nosweep, np.array([gama_data['photo']['ra'], gama_data['photo']['dec']]).T, header='RA, Dec')

        # read apfluxes from tractor catalogs
        try:
            apflux_dict = self._getTractorApflux(sweep_dict['brickname'],
                                                 sweep_dict['objid'],
                                                 tractor_dir=tractor_n_dir)
        except ValueError:
            apflux_dict = self._getTractorApflux(sweep_dict['brickname'],
                                                 sweep_dict['objid'],
                                                 tractor_dir=tractor_s_dir)
        assert apflux_dict['apflux_g'].shape[0] == len(sweep_dict['brickname'])

        # save data to hdf5 file
        if not silent:
            print('writing to %s' %
                  self._File(field, dr_gama=dr_gama, dr_legacy=dr_legacy))
        f = h5py.File(self._File(field, dr_gama=dr_gama, dr_legacy=dr_legacy),
                      'w')
        grp_gp = f.create_group('gama-photo')
        grp_gs = f.create_group('gama-spec')
        grp_k0 = f.create_group('gama-kcorr-z0.0')
        grp_k1 = f.create_group('gama-kcorr-z0.1')
        grp_lp = f.create_group('legacy-photo')

        for key in sweep_dict.keys():
            self._h5py_create_dataset(grp_lp, key, sweep_dict[key])
        for key in apflux_dict.keys():  # additional apflux data.
            self._h5py_create_dataset(grp_lp, key, apflux_dict[key])
        for key in gama_photo_dict.keys():
            grp_gp.create_dataset(key, data=gama_photo_dict[key])
        for key in gama_spec_dict.keys():
            grp_gs.create_dataset(key, data=gama_spec_dict[key])
        for key in gama_kcorr0_dict.keys():
            grp_k0.create_dataset(key, data=gama_kcorr0_dict[key])
        for key in gama_kcorr1_dict.keys():
            grp_k1.create_dataset(key, data=gama_kcorr1_dict[key])
        f.close()
        return None
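
Note how the sweep matching above always passes the larger coordinate set to spherematch first and then swaps the returned index arrays back; a stripped-down sketch of that pattern (the source does not say why the ordering matters, so treat it as a spherematch performance/behavior consideration):

from pydl.pydlutils.spheregroup import spherematch

def ordered_spherematch(ra_a, dec_a, ra_b, dec_b, matchlength):
    """Call spherematch with the larger set first, then swap the index
    arrays so the return order is always (into a, into b, separations)."""
    if len(ra_a) >= len(ra_b):
        m_a, m_b, d = spherematch(ra_a, dec_a, ra_b, dec_b, matchlength)
    else:
        m_b, m_a, d = spherematch(ra_b, dec_b, ra_a, dec_a, matchlength)
    return m_a, m_b, d
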
Example #14
    def _construct(self, overwrite=False, silent=True):
        ''' construct postprocessed catalog from the ascii files described in
        http://sdss.physics.nyu.edu/lss/dr72/bright/34/lss/README.dr72bright34
        '''
        if not overwrite and os.path.isfile(self.file):
            print("%s already exists; to overwrite specify `overwrite=True`" %
                  self.file)
            return None

        # lss catalog
        flss = os.path.join(os.path.dirname(self.file), 'lss.dr72bright34.dat')
        # photometric data
        fphoto = os.path.join(os.path.dirname(self.file),
                              'photoinfo.dr72bright34.dat')
        # vmax data
        fvmax = os.path.join(os.path.dirname(self.file),
                             'vmax.dr72bright34.dat')

        # read the data
        ra, dec, cz = np.loadtxt(flss, unpack=True, usecols=[3, 4, 5])

        Mu, Mg, Mr, Mi, Mz, mu_50, r50_r90 = np.loadtxt(
            fphoto, unpack=True, usecols=[1, 2, 3, 4, 5, 6, 7])

        vmax, zmin, zmax = np.loadtxt(fvmax, unpack=True, usecols=[1, 2, 3])

        cat, des = {}, {}
        cat['ra'] = ra
        des['ra'] = ['deg', 'right ascension']
        cat['dec'] = dec
        des['dec'] = ['deg', 'declination']
        cat['cz'] = cz
        des['cz'] = ['km/s', 'c * redshift']
        cat['redshift'] = cz / 2.998e5
        des['redshift'] = ['', 'redshift']
        cat['M_u'] = Mu
        des['M_u'] = ['mag', 'u-band absolute magnitude kcorrected to z=0.1']
        cat['M_g'] = Mg
        des['M_g'] = ['mag', 'g-band absolute magnitude kcorrected to z=0.1']
        cat['M_r'] = Mr
        des['M_r'] = ['mag', 'r-band absolute magnitude kcorrected to z=0.1']
        cat['M_i'] = Mi
        des['M_i'] = ['mag', 'i-band absolute magnitude kcorrected to z=0.1']
        cat['M_z'] = Mz
        des['M_z'] = ['mag', 'z-band absolute magnitude kcorrected to z=0.1']
        cat['mu_50'] = mu_50
        des['mu_50'] = [
            '',
            'r-band half-light Petrosian surface brightness, (1+z)^4- and K-corrected'
        ]
        cat['r50_r90'] = r50_r90
        des['r50_r90'] = ['', 'inverse concentration parameter']
        cat['vmax'] = vmax
        des['vmax'] = ['', 'total volume over which this galaxy is observable']
        cat['zmin'] = zmin
        des['zmin'] = ['', 'min redshift observable for a galaxy with absmag']
        cat['zmax'] = zmax
        des['zmax'] = ['', 'max redshift observable for a galaxy with absmag']

        if self.cross_nsa:
            from astropy import units as U
            from pydl.pydlutils.spheregroup import spherematch

            # read NSA
            nsa = Astrologs('nsa')

            # now let's spherematch VAGC with NSA with 3'' match length
            match_length = (3 * U.arcsec).to(U.degree).value
            m_nsa, m_vagc, dmatch = spherematch(nsa.data['ra'],
                                                nsa.data['dec'],
                                                cat['ra'],
                                                cat['dec'],
                                                match_length,
                                                maxmatch=0)
            if not silent:
                print('%i of %i VAGC galaxies have NSA matches' %
                      (len(m_vagc), len(cat['ra'])))

            i_nsa = np.zeros(len(ra)).astype(int)
            for i in range(len(cat['ra'])):
                targ = (m_vagc == i)
                if np.sum(targ) > 1:
                    zmatch = (np.abs(nsa.data['redshift'][m_nsa[targ]] -
                                     cat['redshift'][i]) < 0.001)
                    i_nsa[i] = m_nsa[targ][zmatch][0]
                elif np.sum(targ) == 0:
                    i_nsa[i] = -999
                else:
                    i_nsa[i] = m_nsa[targ]
            hasmatch = (i_nsa != -999)

            for name in nsa.data.keys():
                nsa_col_data = nsa.data[name]

                if 'S' in str(nsa_col_data.dtype):
                    blank = np.array(['-999']).astype(nsa_col_data.dtype)
                else:
                    blank = np.array([-999.]).astype(nsa_col_data.dtype)

                empty_shape = list(nsa_col_data.shape)
                empty_shape[0] = len(cat['ra'])
                empty = np.tile(blank[0], empty_shape)
                empty[hasmatch] = nsa_col_data[i_nsa[hasmatch]]

                cat['NSA_%s' % name] = empty
                des['NSA_%s' % name] = nsa.meta[name]
                des['NSA_%s' %
                    name][1] = 'from NSA; ' + des['NSA_%s' % name][1]

        # save to hdf5
        self._save_to_hdf5(cat, self.file, meta=des, silent=silent)
        return None
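
The per-galaxy loop above resolves many-to-one spherematch results by preferring the match closest in redshift; a compact sketch of the same idea (a hypothetical helper that mirrors, but is not taken from, the loop in _construct):

import numpy as np

def resolve_matches(m_src, m_dst, z_src, z_dst, dz=0.001):
    """For each of the len(z_dst) targets, keep one source index:
    the first whose redshift agrees within dz when there are several
    candidates, or -999 when there is none."""
    i_src = np.repeat(-999, len(z_dst))
    for i in range(len(z_dst)):
        cand = m_src[m_dst == i]
        if len(cand) == 1:
            i_src[i] = cand[0]
        elif len(cand) > 1:
            good = cand[np.abs(z_src[cand] - z_dst[i]) < dz]
            if len(good) > 0:
                i_src[i] = good[0]
    return i_src
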
Example #15
    def _construct(self, overwrite=False, silent=True):
        ''' construct postprocessed catalogs from Jeremy's *.galdata_corr and *.prob
        files 

        *.prob is the galaxy catalog with probability of being a satellite
        (official stats assume >0.5 is a satellite).

        *.galdata_corr are galaxy properties. This file is in the same order
         as the *.prob file

        '''
        if not overwrite and os.path.isfile(self.file):
            print("%s already exists; to overwrite specify `overwrite=True`" %
                  self.file)
            return None

        fprob = os.path.join(os.path.dirname(self.file),
                             'pca_groups_M%s.prob' % self.mlim)
        fgald = os.path.join(os.path.dirname(self.file),
                             'pca_groups_M%s.galdata_corr' % self.mlim)

        # read in .prob file, which has the following data columns
        # 1) foo
        # 2) gal id
        # 3) group id
        # 4) id of central galaxy
        # 5) r-band magnitude h=1
        # 6) P_sat (>0.5 means a satellite)
        # 7) halo mass [Msol/h]
        # 8) foo
        # 9) foo
        # 10) foo
        # 11) projected separation of gal from central, in units of Rhalo
        # 12) projected separation of gal in units of radians
        # 13) angular radius of halo
        galid, grpid, cenid, rmag, psat, mhalo, sep_rhalo, sep_rad, r_halo = \
                np.loadtxt(fprob, unpack=True, usecols=[1, 2, 3, 4, 5, 6, 10, 11, 12])

        # read in .galdata_corr file, which has the following data columns
        # this file is line matched to the prob file
        # 1) foo
        # 2) gal id
        # 3) M_r
        # 4) M_g
        # 5) cz [km/s]
        # 6) Dn4000  (from MPA/JHU catalog)
        # 7) H_delta EW (same source)
        # 8) log sSFR (1/yr same source, but uses kcorrect stellar mass for "s")
        # 9) stellar mass (Msol/h^2, from kcorrect)
        # 10) ra
        # 11) dec
        # 12) velocity dispersion (from VAGC)
        # 13) signal to noise of spectrum (from VAGC)
        # 14) sersic index (from VAGC)
        Mg, cz, Dn4000, Hdel_ew, logSSFR, Ms, ra, dec, vdisp, sn, sersic = \
                np.loadtxt(fgald, unpack=True, usecols=range(3,14))

        cat, des = {}, {}
        cat['galid'] = galid
        des['galid'] = ['', 'galaxy id']
        cat['groupid'] = grpid
        des['groupid'] = ['', 'group id']
        cat['centralid'] = cenid
        des['centralid'] = ['', 'id of central galaxy']
        cat['M_r'] = rmag
        des['M_r'] = ['mag', 'r-band magnitude h=1']
        cat['M_g'] = Mg
        des['M_g'] = ['mag', 'g-band magnitude h=1']
        cat['P_sat'] = psat
        des['P_sat'] = ['', 'P_sat > 0.5 are satellite']
        cat['iscen'] = (psat <= 0.5)  # is central
        des['iscen'] = ['', 'True: central; False: satellite']
        cat['M_halo'] = mhalo
        des['M_halo'] = ['Msun/h', 'halo mass']
        cat['sep_rhalo'] = sep_rhalo
        des['sep_rhalo'] = ['R_halo', 'proj. sep. from central']
        cat['sep_rad'] = sep_rad
        des['sep_rad'] = ['radian', 'proj. sep. from central']
        cat['r_halo'] = r_halo
        des['r_halo'] = ['?', 'angular radius of halo']
        cat['cz'] = cz
        des['cz'] = ['km/s', 'c * redshift']
        cat['redshift'] = cz / 2.998e5
        des['redshift'] = ['', 'redshift']
        cat['Dn4000'] = Dn4000
        des['Dn4000'] = ['', 'Dn4000 from MPAJHU']
        cat['H_delta_EW'] = Hdel_ew
        des['H_delta_EW'] = ['', 'H_delta EW MPAJHU']
        cat['log.ssfr'] = logSSFR
        des['log.ssfr'] = ['dex', 'MPAJHU but using kcorrect M*']
        cat['M_star'] = Ms * 0.7**2
        des['M_star'] = ['Msun', 'kcorrect stellar mass']
        cat['log.M_star'] = np.log10(cat['M_star'])
        des['log.M_star'] = ['dex', 'log stellar mass']
        cat['ra'] = ra * (180. / np.pi)
        des['ra'] = ['deg', 'right ascension']
        cat['dec'] = dec * (180. / np.pi)
        des['dec'] = ['deg', 'declination']
        cat['vdisp'] = vdisp
        des['vdisp'] = ['km/s', 'velocity dispersion from VAGC']
        cat['s2n'] = sn
        des['s2n'] = ['', 'signal-to-noise of spectrum from VAGC']
        cat['sersic'] = sersic
        des['sersic'] = ['', 'sersic index from VAGC']

        if self.cross_nsa:
            from astropy import units as U
            from pydl.pydlutils.spheregroup import spherematch

            # read NSA
            nsa = Astrologs('nsa')

            # now let's spherematch VAGC with NSA with 3'' match length
            match_length = (3 * U.arcsec).to(U.degree).value
            m_nsa, m_vagc, dmatch = spherematch(nsa.data['ra'],
                                                nsa.data['dec'],
                                                cat['ra'],
                                                cat['dec'],
                                                match_length,
                                                maxmatch=0)
            if not silent:
                print('%i of %i VAGC galaxies have NSA matches' %
                      (len(m_vagc), len(cat['ra'])))

            i_nsa = np.zeros(len(ra)).astype(int)
            for i in range(len(cat['ra'])):
                targ = (m_vagc == i)
                if np.sum(targ) > 1:
                    zmatch = (np.abs(nsa.data['redshift'][m_nsa[targ]] -
                                     cat['redshift'][i]) < 0.001)
                    i_nsa[i] = m_nsa[targ][zmatch][0]
                elif np.sum(targ) == 0:
                    i_nsa[i] = -999
                else:
                    i_nsa[i] = m_nsa[targ]
            hasmatch = (i_nsa != -999)

            for name in nsa.data.keys():
                nsa_col_data = nsa.data[name]

                if 'S' in str(nsa_col_data.dtype):
                    blank = np.array(['-999']).astype(nsa_col_data.dtype)
                else:
                    blank = np.array([-999.]).astype(nsa_col_data.dtype)

                empty_shape = list(nsa_col_data.shape)
                empty_shape[0] = len(cat['ra'])
                empty = np.tile(blank[0], empty_shape)
                empty[hasmatch] = nsa_col_data[i_nsa[hasmatch]]

                cat['NSA_%s' % name] = empty
                des['NSA_%s' % name] = nsa.meta[name]
                des['NSA_%s' %
                    name][1] = 'from NSA; ' + des['NSA_%s' % name][1]

        # save to hdf5
        self._save_to_hdf5(cat, self.file, meta=des, silent=silent)
        return None