Example No. 1
def G(age, Z, iso):
    """G for a given age and metallicity"""
    Zs = iso.Zs()
    tZ = Zs[numpy.argmin(numpy.fabs(Z - Zs))]
    p = iso(age, tZ)
    jk = p["J"] - p["Ks"]
    indx = (
        (jk < 0.8)
        * (jk > 0.5)
        * (tZ <= 0.06)
        * (tZ <= rc.jkzcut(jk, upper=True))
        * (tZ >= rc.jkzcut(jk))
        * (p["logg"] >= rc.loggteffcut(10.0 ** p["logTe"], tZ, upper=False))
        * (p["logg"] <= rc.loggteffcut(10.0 ** p["logTe"], tZ, upper=True))
    )
    outG = G_jordi(p["g"], p["g"] - p["z"])
    # Average over the IMF
    sindx = numpy.argsort(p["M_ini"])
    outG = outG[sindx]
    int_IMF = p["int_IMF"][sindx]
    w = (numpy.roll(int_IMF, -1) - int_IMF) / (int_IMF[-1] - int_IMF[0])
    w = w[indx]
    outG = outG[indx]
    return (
        numpy.nansum(w * outG) / numpy.nansum(w),
        numpy.sqrt(numpy.nansum(w * outG ** 2.0) / numpy.nansum(w) - (numpy.nansum(w * outG) / numpy.nansum(w)) ** 2.0),
    )
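
A minimal usage sketch (not part of the original): it assumes `iso` is an isochrone object that exposes `.Zs()` and is callable as `iso(age, Z)`, returning a record with the 'J', 'Ks', 'logg', 'logTe', 'g', 'z', 'M_ini', and 'int_IMF' columns used above, and that `numpy`, `rc`, and `G_jordi` are importable from the surrounding module; the age and metallicity values are purely illustrative.

# Hypothetical call; `iso` must provide the interface described above.
age, Z = 2.0, 0.02  # illustrative values only
meanG, sigG = G(age, Z, iso)
print("IMF-averaged G = %.3f +/- %.3f" % (meanG, sigG))
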
Example No. 2
def rgsample(dr='13'):
    data= apread.allStar(main=True,exclude_star_bad=True,exclude_star_warn=True,rmdups=True)
    jk= data['J0']-data['K0']
    z= isodist.FEH2Z(data['METALS'],zsolar=0.017)
    z[z > 0.024]= 0.024
    logg= data['LOGG']
    indx= ((jk >= 0.8)#+(z < rcmodel.jkzcut(jk-0.1,upper=False))
            +(logg > rcmodel.loggteffcut(data['TEFF'],z,upper=True)))
    rgindx=indx*(data['METALS'] > -.8)
    return data[rgindx]
Example No. 3
def rgsample(dr="13"):
    data = apread.allStar(main=True, exclude_star_bad=True, exclude_star_warn=True, dr=dr)  # rmdups=True,dr=dr)
    jk = data["J0"] - data["K0"]
    z = isodist.FEH2Z(data["METALS"], zsolar=0.017)
    z[z > 0.024] = 0.024
    logg = data["LOGG"]
    indx = (jk >= 0.8) + (  # +(z < rcmodel.jkzcut(jk-0.1,upper=False))
        logg > rcmodel.loggteffcut(data["TEFF"], z, upper=True)
    )
    rgindx = indx * (data["METALS"] > -0.8)
    return data[rgindx]
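
As a quick, illustrative usage of `rgsample` (assuming the `apread`, `isodist`, and `rcmodel` modules imported by the surrounding script are available and the APOGEE allStar file can be read locally):

# Illustrative only: build the red-giant comparison sample and report its size
# and metallicity range, using the same field names as in the function above.
rg = rgsample(dr="13")
print("Selected %i red-giant stars" % len(rg))
print("[M/H] range: %.2f to %.2f" % (rg["METALS"].min(), rg["METALS"].max()))
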
Example No. 4
def G(age, Z, iso):
    """G for a given age and metallicity"""
    Zs = iso.Zs()
    tZ = Zs[numpy.argmin(numpy.fabs(Z - Zs))]
    p = iso(age, tZ)
    jk = p['J'] - p['Ks']
    indx = (
        (jk < 0.8)
        * (jk > 0.5)
        * (tZ <= 0.06)
        * (tZ <= rc.jkzcut(jk, upper=True))
        * (tZ >= rc.jkzcut(jk))
        * (p['logg'] >= rc.loggteffcut(10.**p['logTe'], tZ, upper=False))
        * (p['logg'] <= rc.loggteffcut(10.**p['logTe'], tZ, upper=True))
    )
    outG = G_jordi(p['g'], p['g'] - p['z'])
    # Average over the IMF
    sindx = numpy.argsort(p['M_ini'])
    outG = outG[sindx]
    int_IMF = p['int_IMF'][sindx]
    w = (numpy.roll(int_IMF, -1) - int_IMF) / (int_IMF[-1] - int_IMF[0])
    w = w[indx]
    outG = outG[indx]
    return (numpy.nansum(w * outG) / numpy.nansum(w),
            numpy.sqrt(
                numpy.nansum(w * outG**2.) / numpy.nansum(w) -
                (numpy.nansum(w * outG) / numpy.nansum(w))**2.))
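
A variation on the earlier usage sketch, under the same interface assumptions (`iso` exposes `.Zs()` and is callable as `iso(age, Z)`); the age grid and metallicity below are purely illustrative.

# Hypothetical age scan at fixed Z; ages are in whatever units `iso` expects.
for age in [1.0, 2.0, 4.0, 8.0]:
    meanG, sigG = G(age, 0.02, iso)
    print("age %.1f: G = %.3f +/- %.3f" % (age, meanG, sigG))
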
Example No. 5
def make_rcsample(parser):
    options,args= parser.parse_args()
    savefilename= options.savefilename
    if savefilename is None:
        #Create savefilename if not given
        savefilename= os.path.join(appath._APOGEE_DATA,
                                   'rcsample_'+appath._APOGEE_REDUX+'.fits')
        print "Saving to %s ..." % savefilename
    #Read the base-sample
    data= apread.allStar(adddist=_ADDHAYDENDIST,rmdups=options.rmdups)
    #Remove a bunch of fields that we do not want to keep
    data= esutil.numpy_util.remove_fields(data,
                                          ['TARGET_ID',
                                           'FILE',
                                           'AK_WISE',
                                           'SFD_EBV',
                                           'SYNTHVHELIO_AVG',
                                           'SYNTHVSCATTER',
                                           'SYNTHVERR',
                                           'SYNTHVERR_MED',
                                           'RV_TEFF',
                                           'RV_LOGG',
                                           'RV_FEH',
                                           'RV_CCFWHM',
                                           'RV_AUTOFWHM',
                                           'SYNTHSCATTER',
                                           'CHI2_THRESHOLD',
                                           'APSTAR_VERSION',
                                           'ASPCAP_VERSION',
                                           'RESULTS_VERSION',
                                           'REDUCTION_ID',
                                           'SRC_H',
                                           'PM_SRC'])
    if not appath._APOGEE_REDUX.lower() == 'current' \
            and int(appath._APOGEE_REDUX[1:]) < 500:
        data= esutil.numpy_util.remove_fields(data,
                                              ['ELEM'])
    #Select red-clump stars
    jk= data['J0']-data['K0']
    z= isodist.FEH2Z(data['METALS'],zsolar=0.017)
    if appath._APOGEE_REDUX.lower() == 'current' \
            or int(appath._APOGEE_REDUX[1:]) > 600:
        from apogee.tools import paramIndx
        if False:
            #Use my custom logg calibration that's correct for the RC
            logg= (1.-0.042)*data['FPARAM'][:,paramIndx('logg')]-0.213
            lowloggindx= data['FPARAM'][:,paramIndx('logg')] < 1.
            logg[lowloggindx]= data['FPARAM'][lowloggindx,paramIndx('logg')]-0.255
            hiloggindx= data['FPARAM'][:,paramIndx('logg')] > 3.8
            logg[hiloggindx]= data['FPARAM'][hiloggindx,paramIndx('logg')]-0.3726
        else:
            #Use my custom logg calibration that's correct on average
            logg= (1.+0.03)*data['FPARAM'][:,paramIndx('logg')]-0.37
            lowloggindx= data['FPARAM'][:,paramIndx('logg')] < 1.
            logg[lowloggindx]= data['FPARAM'][lowloggindx,paramIndx('logg')]-0.34
            hiloggindx= data['FPARAM'][:,paramIndx('logg')] > 3.8
            logg[hiloggindx]= data['FPARAM'][hiloggindx,paramIndx('logg')]-0.256
    else:
        logg= data['LOGG']
    indx= (jk < 0.8)*(jk >= 0.5)\
        *(z <= 0.06)\
        *(z <= rcmodel.jkzcut(jk,upper=True))\
        *(z >= rcmodel.jkzcut(jk))\
        *(logg >= rcmodel.loggteffcut(data['TEFF'],z,upper=False))\
        *(logg <= rcmodel.loggteffcut(data['TEFF'],z,upper=True))
    data= data[indx]
    #Add more aggressive flag cut
    data= esutil.numpy_util.add_fields(data,[('ADDL_LOGG_CUT',numpy.int32)])
    data['ADDL_LOGG_CUT']= ((data['TEFF']-4800.)/1000.+2.75) > data['LOGG']
    if options.loggcut:
        data= data[data['ADDL_LOGG_CUT'] == 1]
    print "Making catalog of %i objects ..." % len(data)
    #Add distances
    data= esutil.numpy_util.add_fields(data,[('RC_DIST', float),
                                             ('RC_DM', float),
                                             ('RC_GALR', float),
                                             ('RC_GALPHI', float),
                                             ('RC_GALZ', float)])
    rcd= rcmodel.rcdist()
    jk= data['J0']-data['K0']
    z= isodist.FEH2Z(data['METALS'],zsolar=0.017)
    data['RC_DIST']= rcd(jk,z,appmag=data['K0'])*options.distfac
    data['RC_DM']= 5.*numpy.log10(data['RC_DIST'])+10.
    XYZ= bovy_coords.lbd_to_XYZ(data['GLON'],
                                data['GLAT'],
                                data['RC_DIST'],
                                degree=True)
    R,phi,Z= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],
                                          XYZ[:,1],
                                          XYZ[:,2],
                                          Xsun=8.,Zsun=0.025)
    data['RC_GALR']= R
    data['RC_GALPHI']= phi
    data['RC_GALZ']= Z
    #Save
    fitsio.write(savefilename,data,clobber=True)
    if not options.nostat:
        #Determine statistical sample and add flag
        apo= apogee.select.apogeeSelect()
        statIndx= apo.determine_statistical(data)
        mainIndx= apread.mainIndx(data)
        data= esutil.numpy_util.add_fields(data,[('STAT',numpy.int32),
                                                 ('INVSF',float)])
        data['STAT']= 0
        data['STAT'][statIndx*mainIndx]= 1
        for ii in range(len(data)):
            if (statIndx*mainIndx)[ii]:
                data['INVSF'][ii]= 1./apo(data['LOCATION_ID'][ii],
                                          data['H'][ii])
            else:
                data['INVSF'][ii]= -1.
    if options.nopm:
        fitsio.write(savefilename,data,clobber=True)       
        return None
    #Get proper motions
    from astroquery.vizier import Vizier
    import astroquery
    from astropy import units as u
    import astropy.coordinates as coord
    pmfile= savefilename.split('.')[0]+'_pms.fits'
    if os.path.exists(pmfile):
        pmdata= fitsio.read(pmfile,1)
    else:
        pmdata= numpy.recarray(len(data),
                               formats=['f8','f8','f8','f8','f8','f8','i4'],
                               names=['RA','DEC','PMRA','PMDEC',
                                      'PMRA_ERR','PMDEC_ERR','PMMATCH'])
        rad= u.Quantity(4./3600.,u.degree)
        v= Vizier(columns=['RAJ2000','DEJ2000','pmRA','pmDE','e_pmRA','e_pmDE'])
        for ii in range(len(data)):
            #if ii > 100: break
            sys.stdout.write('\r'+"Getting pm data for point %i / %i" % (ii+1,len(data)))
            sys.stdout.flush()
            pmdata.RA[ii]= data['RA'][ii]
            pmdata.DEC[ii]= data['DEC'][ii]
            co= coord.ICRS(ra=data['RA'][ii],
                           dec=data['DEC'][ii],
                           unit=(u.degree, u.degree))
            trying= True
            while trying:
                try:
                    tab= v.query_region(co,rad,catalog='I/322') #UCAC-4 catalog
                except astroquery.exceptions.TimeoutError:
                    pass
                else:
                    trying= False
            if len(tab) == 0:
                pmdata.PMMATCH[ii]= 0
                print "Didn't find a match for %i ..." % ii
                continue
            else:
                pmdata.PMMATCH[ii]= len(tab)
                if len(tab[0]['pmRA']) > 1:
                    print "Found more than 1 match for %i ..." % ii
            try:
                pmdata.PMRA[ii]= float(tab[0]['pmRA'])
            except TypeError:
                jj= 1
                while len(tab[0]['pmRA']) > 1 and jj < 4: 
                    trad= u.Quantity((4.-jj)/3600.,u.degree)
                    trying= True
                    while trying:
                        try:
                            tab= v.query_region(co,trad,catalog='I/322') #UCAC-4 catalog
                        except astroquery.exceptions.TimeoutError:
                            pass
                        else:
                            trying= False
                    jj+= 1
                if len(tab) == 0:
                    pmdata.PMMATCH[ii]= 0
                    print "Didn't find a unambiguous match for %i ..." % ii
                    continue               
                pmdata.PMRA[ii]= float(tab[0]['pmRA'])
            pmdata.PMDEC[ii]= float(tab[0]['pmDE'])
            pmdata.PMRA_ERR[ii]= float(tab[0]['e_pmRA'])
            pmdata.PMDEC_ERR[ii]= float(tab[0]['e_pmDE'])
            if numpy.isnan(float(tab[0]['pmRA'])): pmdata.PMMATCH[ii]= 0
        sys.stdout.write('\r'+_ERASESTR+'\r')
        sys.stdout.flush()
        fitsio.write(pmfile,pmdata,clobber=True)
        #To make sure we're using the same format below
        pmdata= fitsio.read(pmfile,1)
    #Match proper motions
    try: #These already exist currently, but may not always exist
        data= esutil.numpy_util.remove_fields(data,['PMRA','PMDEC'])
    except ValueError:
        pass
    data= esutil.numpy_util.add_fields(data,[('PMRA', numpy.float),
                                             ('PMDEC', numpy.float),
                                             ('PMRA_ERR', numpy.float),
                                             ('PMDEC_ERR', numpy.float),
                                             ('PMMATCH',numpy.int32)])
    data['PMMATCH']= 0
    h=esutil.htm.HTM()
    m1,m2,d12 = h.match(pmdata['RA'],pmdata['DEC'],
                        data['RA'],data['DEC'],
                        2./3600.,maxmatch=1)
    data['PMRA'][m2]= pmdata['PMRA'][m1]
    data['PMDEC'][m2]= pmdata['PMDEC'][m1]
    data['PMRA_ERR'][m2]= pmdata['PMRA_ERR'][m1]
    data['PMDEC_ERR'][m2]= pmdata['PMDEC_ERR'][m1]
    data['PMMATCH'][m2]= pmdata['PMMATCH'][m1].astype(numpy.int32)
    pmindx= data['PMMATCH'] == 1
    data['PMRA'][True-pmindx]= -9999.99
    data['PMDEC'][True-pmindx]= -9999.99
    data['PMRA_ERR'][True-pmindx]= -9999.99
    data['PMDEC_ERR'][True-pmindx]= -9999.99
    #Calculate Galactocentric velocities
    data= esutil.numpy_util.add_fields(data,[('GALVR', numpy.float),
                                             ('GALVT', numpy.float),
                                             ('GALVZ', numpy.float)])
    lb= bovy_coords.radec_to_lb(data['RA'],data['DEC'],degree=True)
    XYZ= bovy_coords.lbd_to_XYZ(lb[:,0],lb[:,1],data['RC_DIST'],degree=True)
    pmllpmbb= bovy_coords.pmrapmdec_to_pmllpmbb(data['PMRA'],data['PMDEC'],
                                                data['RA'],data['DEC'],
                                                degree=True)
    vxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(data['VHELIO_AVG'],
                                             pmllpmbb[:,0],
                                             pmllpmbb[:,1],
                                             lb[:,0],lb[:,1],data['RC_DIST'],
                                             degree=True)
    vR, vT, vZ= bovy_coords.vxvyvz_to_galcencyl(vxvyvz[:,0],
                                                vxvyvz[:,1],
                                                vxvyvz[:,2],
                                                8.-XYZ[:,0],
                                                XYZ[:,1],
                                                XYZ[:,2]+0.025,
                                                vsun=[-11.1,30.24*8.,7.25])#Assumes proper motion of Sgr A* and R0=8 kpc, zo= 25 pc
    data['GALVR']= vR
    data['GALVT']= vT
    data['GALVZ']= vZ
    data['GALVR'][True-pmindx]= -9999.99
    data['GALVT'][True-pmindx]= -9999.99
    data['GALVZ'][True-pmindx]= -9999.99
    #Get proper motions
    pmfile= savefilename.split('.')[0]+'_pms_ppmxl.fits'
    if os.path.exists(pmfile):
        pmdata= fitsio.read(pmfile,1)
    else:
        pmdata= numpy.recarray(len(data),
                               formats=['f8','f8','f8','f8','f8','f8','i4'],
                               names=['RA','DEC','PMRA','PMDEC',
                                      'PMRA_ERR','PMDEC_ERR','PMMATCH'])
        rad= u.Quantity(4./3600.,u.degree)
        v= Vizier(columns=['RAJ2000','DEJ2000','pmRA','pmDE','e_pmRA','e_pmDE'])
        for ii in range(len(data)):
            #if ii > 100: break
            sys.stdout.write('\r'+"Getting pm data for point %i / %i" % (ii+1,len(data)))
            sys.stdout.flush()
            pmdata.RA[ii]= data['RA'][ii]
            pmdata.DEC[ii]= data['DEC'][ii]
            co= coord.ICRS(ra=data['RA'][ii],
                           dec=data['DEC'][ii],
                           unit=(u.degree, u.degree))
            trying= True
            while trying:
                try:
                    tab= v.query_region(co,rad,catalog='I/317') #PPMXL catalog
                except astroquery.exceptions.TimeoutError:
                    pass
                else:
                    trying= False
            if len(tab) == 0:
                pmdata.PMMATCH[ii]= 0
                print "Didn't find a match for %i ..." % ii
                continue
            else:
                pmdata.PMMATCH[ii]= len(tab)
                if len(tab[0]['pmRA']) > 1:
                    pass
                    #print "Found more than 1 match for %i ..." % ii
            try:
                pmdata.PMRA[ii]= float(tab[0]['pmRA'])
            except TypeError:
                #Find nearest
                cosdists= numpy.zeros(len(tab[0]['pmRA']))
                for jj in range(len(tab[0]['pmRA'])):
                    cosdists[jj]= cos_sphere_dist(tab[0]['RAJ2000'][jj],
                                                  tab[0]['DEJ2000'][jj],
                                                  data['RA'][ii],
                                                  data['DEC'][ii])
                closest= numpy.argmax(cosdists)
                pmdata.PMRA[ii]= float(tab[0]['pmRA'][closest])
                pmdata.PMDEC[ii]= float(tab[0]['pmDE'][closest])
                pmdata.PMRA_ERR[ii]= float(tab[0]['e_pmRA'][closest])
                pmdata.PMDEC_ERR[ii]= float(tab[0]['e_pmDE'][closest])
                if numpy.isnan(float(tab[0]['pmRA'][closest])): pmdata.PMMATCH[ii]= 0
            else:
                pmdata.PMDEC[ii]= float(tab[0]['pmDE'])
                pmdata.PMRA_ERR[ii]= float(tab[0]['e_pmRA'])
                pmdata.PMDEC_ERR[ii]= float(tab[0]['e_pmDE'])
                if numpy.isnan(float(tab[0]['pmRA'])): pmdata.PMMATCH[ii]= 0
        sys.stdout.write('\r'+_ERASESTR+'\r')
        sys.stdout.flush()
        fitsio.write(pmfile,pmdata,clobber=True)
        #To make sure we're using the same format below
        pmdata= fitsio.read(pmfile,1)
    #Match proper motions to ppmxl
    data= esutil.numpy_util.add_fields(data,[('PMRA_PPMXL', numpy.float),
                                             ('PMDEC_PPMXL', numpy.float),
                                             ('PMRA_ERR_PPMXL', numpy.float),
                                             ('PMDEC_ERR_PPMXL', numpy.float),
                                             ('PMMATCH_PPMXL',numpy.int32)])
    data['PMMATCH_PPMXL']= 0
    h=esutil.htm.HTM()
    m1,m2,d12 = h.match(pmdata['RA'],pmdata['DEC'],
                        data['RA'],data['DEC'],
                        2./3600.,maxmatch=1)
    data['PMRA_PPMXL'][m2]= pmdata['PMRA'][m1]
    data['PMDEC_PPMXL'][m2]= pmdata['PMDEC'][m1]
    data['PMRA_ERR_PPMXL'][m2]= pmdata['PMRA_ERR'][m1]
    data['PMDEC_ERR_PPMXL'][m2]= pmdata['PMDEC_ERR'][m1]
    data['PMMATCH_PPMXL'][m2]= pmdata['PMMATCH'][m1].astype(numpy.int32)
    pmindx= data['PMMATCH_PPMXL'] == 1
    data['PMRA_PPMXL'][True-pmindx]= -9999.99
    data['PMDEC_PPMXL'][True-pmindx]= -9999.99
    data['PMRA_ERR_PPMXL'][True-pmindx]= -9999.99
    data['PMDEC_ERR_PPMXL'][True-pmindx]= -9999.99
    #Calculate Galactocentric velocities
    data= esutil.numpy_util.add_fields(data,[('GALVR_PPMXL', numpy.float),
                                             ('GALVT_PPMXL', numpy.float),
                                             ('GALVZ_PPMXL', numpy.float)])
    lb= bovy_coords.radec_to_lb(data['RA'],data['DEC'],degree=True)
    XYZ= bovy_coords.lbd_to_XYZ(lb[:,0],lb[:,1],data['RC_DIST'],degree=True)
    pmllpmbb= bovy_coords.pmrapmdec_to_pmllpmbb(data['PMRA_PPMXL'],
                                                data['PMDEC_PPMXL'],
                                                data['RA'],data['DEC'],
                                                degree=True)
    vxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(data['VHELIO_AVG'],
                                             pmllpmbb[:,0],
                                             pmllpmbb[:,1],
                                             lb[:,0],lb[:,1],data['RC_DIST'],
                                             degree=True)
    vR, vT, vZ= bovy_coords.vxvyvz_to_galcencyl(vxvyvz[:,0],
                                                vxvyvz[:,1],
                                                vxvyvz[:,2],
                                                8.-XYZ[:,0],
                                                XYZ[:,1],
                                                XYZ[:,2]+0.025,
                                                vsun=[-11.1,30.24*8.,7.25])#Assumes proper motion of Sgr A* and R0=8 kpc, zo= 25 pc
    data['GALVR_PPMXL']= vR
    data['GALVT_PPMXL']= vT
    data['GALVZ_PPMXL']= vZ
    data['GALVR_PPMXL'][True-pmindx]= -9999.99
    data['GALVT_PPMXL'][True-pmindx]= -9999.99
    data['GALVZ_PPMXL'][True-pmindx]= -9999.99
    #Save
    fitsio.write(savefilename,data,clobber=True)
    return None
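
`make_rcsample` expects an `optparse`-style parser whose options carry at least the attributes read above (`savefilename`, `rmdups`, `loggcut`, `distfac`, `nostat`, `nopm`). A hypothetical driver, with illustrative flag names and defaults, might look like this:

# Hypothetical driver; option names mirror the attributes accessed in
# make_rcsample above, flag spellings and defaults are illustrative.
from optparse import OptionParser

def get_options():
    parser = OptionParser()
    parser.add_option("--savefilename", dest="savefilename", default=None,
                      help="output FITS file (default: derived from the APOGEE redux)")
    parser.add_option("--rmdups", action="store_true", dest="rmdups", default=False,
                      help="remove duplicate entries from the base allStar sample")
    parser.add_option("--addl-logg-cut", action="store_true", dest="loggcut", default=False,
                      help="apply the more aggressive ADDL_LOGG_CUT flag cut")
    parser.add_option("--distfac", dest="distfac", type="float", default=1.0,
                      help="multiplicative factor applied to the RC distances")
    parser.add_option("--nostat", action="store_true", dest="nostat", default=False,
                      help="skip the statistical-sample determination")
    parser.add_option("--nopm", action="store_true", dest="nopm", default=False,
                      help="skip the proper-motion matching")
    return parser

if __name__ == "__main__":
    make_rcsample(get_options())
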
Example No. 6
       (numpy.sum(rcclumpseismo) + numpy.sum(rcnoclumpseismo)))
 print(
     "%i / %i = %i%% APOKASC CLUMP stars are in the RC catalog (COMPLETENESS)"
     % (numpy.sum(rcclumpseismo), numpy.sum(clumpseismo),
        float(numpy.sum(rcclumpseismo)) / numpy.sum(clumpseismo) * 100.))
 print(
     "%i / %i = %i%% APOKASC non-CLUMP stars out of all stars in the RC catalog with evolutionary measurements are in the RC catalog (CONTAMINATION)"
     % (numpy.sum(rcnoclumpseismo), numpy.sum(rcnoclumpseismo) +
        numpy.sum(rcclumpseismo), float(numpy.sum(rcnoclumpseismo)) /
        (numpy.sum(rcnoclumpseismo) + numpy.sum(rcclumpseismo)) * 100.))
 rcindx = data['RC'] == 1
 #Statistics using simple seismo logg cut
 kascLoggTag = 'KASC_RG_LOGG_SCALE_2'
 try:
     clumplogg= (data[kascLoggTag] > 1.8)\
         *(data[kascLoggTag] < rcmodel.loggteffcut(data['TEFF'],
                                                   isodist.FEH2Z(data['METALS'],zsolar=0.017),upper=True))#2.8)
 except ValueError:
     kascLoggTag = 'LOGGRG'
     clumplogg= (data[kascLoggTag] > 1.8)\
         *(data[kascLoggTag] < rcmodel.loggteffcut(data['TEFF'],
                                                   isodist.FEH2Z(data['METALS'],zsolar=0.017),upper=True))#2.8)
 if False:
     rcclumplogg = clumplogg * (data['RC'] == 1)
     print("%i / %i = %i%% APOKASC logg clump stars are in the RC catalog" %
           (numpy.sum(rcclumplogg), numpy.sum(clumplogg),
            float(numpy.sum(rcclumplogg)) / numpy.sum(clumplogg) * 100))
     rcnoclumplogg = (True ^ clumplogg) * (data['RC'] == 1)
     print(
         "%i / %i = %i%% APOKASC logg non-clump stars are in the RC catalog"
         % (numpy.sum(rcnoclumplogg), numpy.sum(True ^ clumplogg),
            float(numpy.sum(rcnoclumplogg)) / numpy.sum(True ^ clumplogg) *
Example No. 7
def make_rcsample(parser):
    options, args = parser.parse_args()
    savefilename = options.savefilename
    if savefilename is None:
        #Create savefilename if not given
        savefilename = os.path.join(
            appath._APOGEE_DATA, 'rcsample_' + appath._APOGEE_REDUX + '.fits')
        print("Saving to %s ..." % savefilename)
    #Read the base-sample
    data = apread.allStar(adddist=_ADDHAYDENDIST, rmdups=options.rmdups)
    #Remove a bunch of fields that we do not want to keep
    data = esutil.numpy_util.remove_fields(data, [
        'TARGET_ID', 'FILE', 'AK_WISE', 'SFD_EBV', 'SYNTHVHELIO_AVG',
        'SYNTHVSCATTER', 'SYNTHVERR', 'SYNTHVERR_MED', 'RV_TEFF', 'RV_LOGG',
        'RV_FEH', 'RV_ALPHA', 'RV_CARB', 'RV_CCFWHM', 'RV_AUTOFWHM',
        'SYNTHSCATTER', 'STABLERV_CHI2', 'STABLERV_RCHI2',
        'STABLERV_CHI2_PROB', 'CHI2_THRESHOLD', 'APSTAR_VERSION',
        'ASPCAP_VERSION', 'RESULTS_VERSION', 'WASH_M', 'WASH_M_ERR', 'WASH_T2',
        'WASH_T2_ERR', 'DDO51', 'DDO51_ERR', 'IRAC_3_6', 'IRAC_3_6_ERR',
        'IRAC_4_5', 'IRAC_4_5_ERR', 'IRAC_5_8', 'IRAC_5_8_ERR', 'IRAC_8_0',
        'IRAC_8_0_ERR', 'WISE_4_5', 'WISE_4_5_ERR', 'TARG_4_5', 'TARG_4_5_ERR',
        'WASH_DDO51_GIANT_FLAG', 'WASH_DDO51_STAR_FLAG', 'REDUCTION_ID',
        'SRC_H', 'PM_SRC'
    ])
    if not appath._APOGEE_REDUX.lower() == 'current' \
            and not 'l30' in appath._APOGEE_REDUX \
            and int(appath._APOGEE_REDUX[1:]) < 500:
        data = esutil.numpy_util.remove_fields(data, ['ELEM'])
    #Select red-clump stars
    jk = data['J0'] - data['K0']
    z = isodist.FEH2Z(data['METALS'], zsolar=0.017)
    if 'l30' in appath._APOGEE_REDUX:
        logg = data['LOGG']
    elif appath._APOGEE_REDUX.lower() == 'current' \
            or int(appath._APOGEE_REDUX[1:]) > 600:
        from apogee.tools import paramIndx
        if False:
            #Use my custom logg calibration that's correct for the RC
            logg = (1. - 0.042) * data['FPARAM'][:, paramIndx('logg')] - 0.213
            lowloggindx = data['FPARAM'][:, paramIndx('logg')] < 1.
            logg[lowloggindx] = data['FPARAM'][lowloggindx,
                                               paramIndx('logg')] - 0.255
            hiloggindx = data['FPARAM'][:, paramIndx('logg')] > 3.8
            logg[hiloggindx] = data['FPARAM'][hiloggindx,
                                              paramIndx('logg')] - 0.3726
        else:
            #Use my custom logg calibration that's correct on average
            logg = (1. + 0.03) * data['FPARAM'][:, paramIndx('logg')] - 0.37
            lowloggindx = data['FPARAM'][:, paramIndx('logg')] < 1.
            logg[lowloggindx] = data['FPARAM'][lowloggindx,
                                               paramIndx('logg')] - 0.34
            hiloggindx = data['FPARAM'][:, paramIndx('logg')] > 3.8
            logg[hiloggindx] = data['FPARAM'][hiloggindx,
                                              paramIndx('logg')] - 0.256
    else:
        logg = data['LOGG']
    indx = (
        (jk < 0.8)
        * (jk >= 0.5)
        * (z <= 0.06)
        * (z <= rcmodel.jkzcut(jk, upper=True))
        * (z >= rcmodel.jkzcut(jk))
        * (logg >= rcmodel.loggteffcut(data['TEFF'], z, upper=False))
        * (logg <= rcmodel.loggteffcut(data['TEFF'], z, upper=True))
    )
    data = data[indx]
    #Add more aggressive flag cut
    data = esutil.numpy_util.add_fields(data, [('ADDL_LOGG_CUT', numpy.int32)])
    data['ADDL_LOGG_CUT'] = (
        (data['TEFF'] - 4800.) / 1000. + 2.75) > data['LOGG']
    if options.loggcut:
        data = data[data['ADDL_LOGG_CUT'] == 1]
    print("Making catalog of %i objects ..." % len(data))
    #Add distances
    data = esutil.numpy_util.add_fields(data, [('RC_DIST', float),
                                               ('RC_DM', float),
                                               ('RC_GALR', float),
                                               ('RC_GALPHI', float),
                                               ('RC_GALZ', float)])
    rcd = rcmodel.rcdist()
    jk = data['J0'] - data['K0']
    z = isodist.FEH2Z(data['METALS'], zsolar=0.017)
    data['RC_DIST'] = rcd(jk, z, appmag=data['K0']) * options.distfac
    data['RC_DM'] = 5. * numpy.log10(data['RC_DIST']) + 10.
    XYZ = bovy_coords.lbd_to_XYZ(data['GLON'],
                                 data['GLAT'],
                                 data['RC_DIST'],
                                 degree=True)
    R, phi, Z = bovy_coords.XYZ_to_galcencyl(XYZ[:, 0],
                                             XYZ[:, 1],
                                             XYZ[:, 2],
                                             Xsun=8.,
                                             Zsun=0.025)
    data['RC_GALR'] = R
    data['RC_GALPHI'] = phi
    data['RC_GALZ'] = Z
    #Save
    fitsio.write(savefilename, data, clobber=True)
    # Add Tycho-2 matches
    if options.tyc2:
        data = esutil.numpy_util.add_fields(data, [('TYC2MATCH', numpy.int32),
                                                   ('TYC1', numpy.int32),
                                                   ('TYC2', numpy.int32),
                                                   ('TYC3', numpy.int32)])
        data['TYC2MATCH'] = 0
        data['TYC1'] = -1
        data['TYC2'] = -1
        data['TYC3'] = -1
        # Write positions
        posfilename = tempfile.mktemp('.csv', dir=os.getcwd())
        resultfilename = tempfile.mktemp('.csv', dir=os.getcwd())
        with open(posfilename, 'w') as csvfile:
            wr = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
            wr.writerow(['RA', 'DEC'])
            for ii in range(len(data)):
                wr.writerow([data[ii]['RA'], data[ii]['DEC']])
        # Send to CDS for matching
        result = open(resultfilename, 'w')
        try:
            subprocess.check_call([
                'curl', '-X', 'POST', '-F', 'request=xmatch', '-F',
                'distMaxArcsec=2', '-F', 'RESPONSEFORMAT=csv', '-F',
                'cat1=@%s' % os.path.basename(posfilename), '-F', 'colRA1=RA',
                '-F', 'colDec1=DEC', '-F', 'cat2=vizier:Tycho2',
                'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'
            ],
                                  stdout=result)
        except subprocess.CalledProcessError:
            os.remove(posfilename)
            if os.path.exists(resultfilename):
                result.close()
                os.remove(resultfilename)
        result.close()
        # Directly match on input RA
        ma = numpy.loadtxt(resultfilename,
                           delimiter=',',
                           skiprows=1,
                           usecols=(1, 2, 7, 8, 9))
        iis = numpy.arange(len(data))
        mai = [iis[data['RA'] == ma[ii, 0]][0] for ii in range(len(ma))]
        data['TYC2MATCH'][mai] = 1
        data['TYC1'][mai] = ma[:, 2]
        data['TYC2'][mai] = ma[:, 3]
        data['TYC3'][mai] = ma[:, 4]
        os.remove(posfilename)
        os.remove(resultfilename)
    if not options.nostat:
        #Determine statistical sample and add flag
        apo = apogee.select.apogeeSelect()
        statIndx = apo.determine_statistical(data)
        mainIndx = apread.mainIndx(data)
        data = esutil.numpy_util.add_fields(data, [('STAT', numpy.int32),
                                                   ('INVSF', float)])
        data['STAT'] = 0
        data['STAT'][statIndx * mainIndx] = 1
        for ii in range(len(data)):
            if (statIndx * mainIndx)[ii]:
                data['INVSF'][ii] = 1. / apo(data['LOCATION_ID'][ii],
                                             data['H'][ii])
            else:
                data['INVSF'][ii] = -1.
    if options.nopm:
        fitsio.write(savefilename, data, clobber=True)
        return None
    #Get proper motions, in a somewhat roundabout way
    pmfile = savefilename.split('.')[0] + '_pms.fits'
    if os.path.exists(pmfile):
        pmdata = fitsio.read(pmfile, 1)
    else:
        pmdata = numpy.recarray(
            len(data),
            formats=['f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'i4'],
            names=[
                'RA', 'DEC', 'PMRA', 'PMDEC', 'PMRA_ERR', 'PMDEC_ERR',
                'PMMATCH'
            ])
        # Write positions, again ...
        posfilename = tempfile.mktemp('.csv', dir=os.getcwd())
        resultfilename = tempfile.mktemp('.csv', dir=os.getcwd())
        with open(posfilename, 'w') as csvfile:
            wr = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
            wr.writerow(['RA', 'DEC'])
            for ii in range(len(data)):
                wr.writerow([data[ii]['RA'], data[ii]['DEC']])
        # Send to CDS for matching
        result = open(resultfilename, 'w')
        try:
            subprocess.check_call([
                'curl', '-X', 'POST', '-F', 'request=xmatch', '-F',
                'distMaxArcsec=4', '-F', 'RESPONSEFORMAT=csv', '-F',
                'cat1=@%s' % os.path.basename(posfilename), '-F', 'colRA1=RA',
                '-F', 'colDec1=DEC', '-F', 'cat2=vizier:UCAC4',
                'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'
            ],
                                  stdout=result)
        except subprocess.CalledProcessError:
            os.remove(posfilename)
            if os.path.exists(resultfilename):
                result.close()
                os.remove(resultfilename)
        result.close()
        # Match back and only keep the closest one
        ma = numpy.loadtxt(resultfilename,
                           delimiter=',',
                           skiprows=1,
                           converters={
                               15: lambda s: float(s.strip() or -9999),
                               16: lambda s: float(s.strip() or -9999),
                               17: lambda s: float(s.strip() or -9999),
                               18: lambda s: float(s.strip() or -9999)
                           },
                           usecols=(4, 5, 15, 16, 17, 18))
        h = esutil.htm.HTM()
        m1, m2, d12 = h.match(data['RA'],
                              data['DEC'],
                              ma[:, 0],
                              ma[:, 1],
                              4. / 3600.,
                              maxmatch=1)
        pmdata['PMMATCH'] = 0
        pmdata['RA'] = data['RA']
        pmdata['DEC'] = data['DEC']
        pmdata['PMMATCH'][m1] = 1
        pmdata['PMRA'][m1] = ma[m2, 2]
        pmdata['PMDEC'][m1] = ma[m2, 3]
        pmdata['PMRA_ERR'][m1] = ma[m2, 4]
        pmdata['PMDEC_ERR'][m1] = ma[m2, 5]
        pmdata['PMMATCH'][(pmdata['PMRA'] == -9999) \
                          +(pmdata['PMDEC'] == -9999) \
                          +(pmdata['PMRA_ERR'] == -9999) \
                          +(pmdata['PMDEC_ERR'] == -9999)]= 0
        fitsio.write(pmfile, pmdata, clobber=True)
        #To make sure we're using the same format below
        pmdata = fitsio.read(pmfile, 1)
        os.remove(posfilename)
        os.remove(resultfilename)
    #Match proper motions
    try:  #These already exist currently, but may not always exist
        data = esutil.numpy_util.remove_fields(data, ['PMRA', 'PMDEC'])
    except ValueError:
        pass
    data = esutil.numpy_util.add_fields(data, [('PMRA', numpy.float),
                                               ('PMDEC', numpy.float),
                                               ('PMRA_ERR', numpy.float),
                                               ('PMDEC_ERR', numpy.float),
                                               ('PMMATCH', numpy.int32)])
    data['PMMATCH'] = 0
    h = esutil.htm.HTM()
    m1, m2, d12 = h.match(pmdata['RA'],
                          pmdata['DEC'],
                          data['RA'],
                          data['DEC'],
                          2. / 3600.,
                          maxmatch=1)
    data['PMRA'][m2] = pmdata['PMRA'][m1]
    data['PMDEC'][m2] = pmdata['PMDEC'][m1]
    data['PMRA_ERR'][m2] = pmdata['PMRA_ERR'][m1]
    data['PMDEC_ERR'][m2] = pmdata['PMDEC_ERR'][m1]
    data['PMMATCH'][m2] = pmdata['PMMATCH'][m1].astype(numpy.int32)
    pmindx = data['PMMATCH'] == 1
    data['PMRA'][True - pmindx] = -9999.99
    data['PMDEC'][True - pmindx] = -9999.99
    data['PMRA_ERR'][True - pmindx] = -9999.99
    data['PMDEC_ERR'][True - pmindx] = -9999.99
    #Calculate Galactocentric velocities
    data = esutil.numpy_util.add_fields(data, [('GALVR', numpy.float),
                                               ('GALVT', numpy.float),
                                               ('GALVZ', numpy.float)])
    lb = bovy_coords.radec_to_lb(data['RA'], data['DEC'], degree=True)
    XYZ = bovy_coords.lbd_to_XYZ(lb[:, 0],
                                 lb[:, 1],
                                 data['RC_DIST'],
                                 degree=True)
    pmllpmbb = bovy_coords.pmrapmdec_to_pmllpmbb(data['PMRA'],
                                                 data['PMDEC'],
                                                 data['RA'],
                                                 data['DEC'],
                                                 degree=True)
    vxvyvz = bovy_coords.vrpmllpmbb_to_vxvyvz(data['VHELIO_AVG'],
                                              pmllpmbb[:, 0],
                                              pmllpmbb[:, 1],
                                              lb[:, 0],
                                              lb[:, 1],
                                              data['RC_DIST'],
                                              degree=True)
    vR, vT, vZ = bovy_coords.vxvyvz_to_galcencyl(
        vxvyvz[:, 0],
        vxvyvz[:, 1],
        vxvyvz[:, 2],
        8. - XYZ[:, 0],
        XYZ[:, 1],
        XYZ[:, 2] + 0.025,
        vsun=[-11.1, 30.24 * 8.,
              7.25])  #Assumes proper motion of Sgr A* and R0=8 kpc, zo= 25 pc
    data['GALVR'] = vR
    data['GALVT'] = vT
    data['GALVZ'] = vZ
    data['GALVR'][True - pmindx] = -9999.99
    data['GALVT'][True - pmindx] = -9999.99
    data['GALVZ'][True - pmindx] = -9999.99
    #Get PPMXL proper motions, in a somewhat roundabout way
    pmfile = savefilename.split('.')[0] + '_pms_ppmxl.fits'
    if os.path.exists(pmfile):
        pmdata = fitsio.read(pmfile, 1)
    else:
        pmdata = numpy.recarray(
            len(data),
            formats=['f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'i4'],
            names=[
                'RA', 'DEC', 'PMRA', 'PMDEC', 'PMRA_ERR', 'PMDEC_ERR',
                'PMMATCH'
            ])
        # Write positions, again ...
        posfilename = tempfile.mktemp('.csv', dir=os.getcwd())
        resultfilename = tempfile.mktemp('.csv', dir=os.getcwd())
        with open(posfilename, 'w') as csvfile:
            wr = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
            wr.writerow(['RA', 'DEC'])
            for ii in range(len(data)):
                wr.writerow([data[ii]['RA'], data[ii]['DEC']])
        # Send to CDS for matching
        result = open(resultfilename, 'w')
        try:
            subprocess.check_call([
                'curl', '-X', 'POST', '-F', 'request=xmatch', '-F',
                'distMaxArcsec=4', '-F', 'RESPONSEFORMAT=csv', '-F',
                'cat1=@%s' % os.path.basename(posfilename), '-F', 'colRA1=RA',
                '-F', 'colDec1=DEC', '-F', 'cat2=vizier:PPMXL',
                'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'
            ],
                                  stdout=result)
        except subprocess.CalledProcessError:
            os.remove(posfilename)
            if os.path.exists(resultfilename):
                result.close()
                os.remove(resultfilename)
        result.close()
        # Match back and only keep the closest one
        ma = numpy.loadtxt(resultfilename,
                           delimiter=',',
                           skiprows=1,
                           converters={
                               15: lambda s: float(s.strip() or -9999),
                               16: lambda s: float(s.strip() or -9999),
                               17: lambda s: float(s.strip() or -9999),
                               18: lambda s: float(s.strip() or -9999)
                           },
                           usecols=(4, 5, 15, 16, 19, 20))
        h = esutil.htm.HTM()
        m1, m2, d12 = h.match(data['RA'],
                              data['DEC'],
                              ma[:, 0],
                              ma[:, 1],
                              4. / 3600.,
                              maxmatch=1)
        pmdata['PMMATCH'] = 0
        pmdata['RA'] = data['RA']
        pmdata['DEC'] = data['DEC']
        pmdata['PMMATCH'][m1] = 1
        pmdata['PMRA'][m1] = ma[m2, 2]
        pmdata['PMDEC'][m1] = ma[m2, 3]
        pmdata['PMRA_ERR'][m1] = ma[m2, 4]
        pmdata['PMDEC_ERR'][m1] = ma[m2, 5]
        pmdata['PMMATCH'][(pmdata['PMRA'] == -9999) \
                          +(pmdata['PMDEC'] == -9999) \
                          +(pmdata['PMRA_ERR'] == -9999) \
                          +(pmdata['PMDEC_ERR'] == -9999)]= 0
        fitsio.write(pmfile, pmdata, clobber=True)
        #To make sure we're using the same format below
        pmdata = fitsio.read(pmfile, 1)
        os.remove(posfilename)
        os.remove(resultfilename)
    #Match proper motions to ppmxl
    data = esutil.numpy_util.add_fields(data,
                                        [('PMRA_PPMXL', numpy.float),
                                         ('PMDEC_PPMXL', numpy.float),
                                         ('PMRA_ERR_PPMXL', numpy.float),
                                         ('PMDEC_ERR_PPMXL', numpy.float),
                                         ('PMMATCH_PPMXL', numpy.int32)])
    data['PMMATCH_PPMXL'] = 0
    h = esutil.htm.HTM()
    m1, m2, d12 = h.match(pmdata['RA'],
                          pmdata['DEC'],
                          data['RA'],
                          data['DEC'],
                          2. / 3600.,
                          maxmatch=1)
    data['PMRA_PPMXL'][m2] = pmdata['PMRA'][m1]
    data['PMDEC_PPMXL'][m2] = pmdata['PMDEC'][m1]
    data['PMRA_ERR_PPMXL'][m2] = pmdata['PMRA_ERR'][m1]
    data['PMDEC_ERR_PPMXL'][m2] = pmdata['PMDEC_ERR'][m1]
    data['PMMATCH_PPMXL'][m2] = pmdata['PMMATCH'][m1].astype(numpy.int32)
    pmindx = data['PMMATCH_PPMXL'] == 1
    data['PMRA_PPMXL'][True - pmindx] = -9999.99
    data['PMDEC_PPMXL'][True - pmindx] = -9999.99
    data['PMRA_ERR_PPMXL'][True - pmindx] = -9999.99
    data['PMDEC_ERR_PPMXL'][True - pmindx] = -9999.99
    #Calculate Galactocentric velocities
    data = esutil.numpy_util.add_fields(data, [('GALVR_PPMXL', numpy.float),
                                               ('GALVT_PPMXL', numpy.float),
                                               ('GALVZ_PPMXL', numpy.float)])
    lb = bovy_coords.radec_to_lb(data['RA'], data['DEC'], degree=True)
    XYZ = bovy_coords.lbd_to_XYZ(lb[:, 0],
                                 lb[:, 1],
                                 data['RC_DIST'],
                                 degree=True)
    pmllpmbb = bovy_coords.pmrapmdec_to_pmllpmbb(data['PMRA_PPMXL'],
                                                 data['PMDEC_PPMXL'],
                                                 data['RA'],
                                                 data['DEC'],
                                                 degree=True)
    vxvyvz = bovy_coords.vrpmllpmbb_to_vxvyvz(data['VHELIO_AVG'],
                                              pmllpmbb[:, 0],
                                              pmllpmbb[:, 1],
                                              lb[:, 0],
                                              lb[:, 1],
                                              data['RC_DIST'],
                                              degree=True)
    vR, vT, vZ = bovy_coords.vxvyvz_to_galcencyl(
        vxvyvz[:, 0],
        vxvyvz[:, 1],
        vxvyvz[:, 2],
        8. - XYZ[:, 0],
        XYZ[:, 1],
        XYZ[:, 2] + 0.025,
        vsun=[-11.1, 30.24 * 8.,
              7.25])  #Assumes proper motion of Sgr A* and R0=8 kpc, zo= 25 pc
    data['GALVR_PPMXL'] = vR
    data['GALVT_PPMXL'] = vT
    data['GALVZ_PPMXL'] = vZ
    data['GALVR_PPMXL'][True - pmindx] = -9999.99
    data['GALVT_PPMXL'][True - pmindx] = -9999.99
    data['GALVZ_PPMXL'][True - pmindx] = -9999.99
    #Save
    fitsio.write(savefilename, data, clobber=True)
    return None
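
Once the catalog has been written, one might read it back and select the cleanest subsample. The sketch below is illustrative only: it assumes the catalog was built with the statistical flags and UCAC-4 proper motions included (i.e. without --nostat/--nopm), uses the column names and the -9999.99 sentinel defined above, and `rcfile` stands for whatever path the catalog was saved to.

# Illustrative post-processing of the saved catalog (column names as above).
import fitsio
import numpy

rcfile = 'rcsample.fits'  # hypothetical path to the catalog written above
cat = fitsio.read(rcfile, 1)
good = (cat['STAT'] == 1) * (cat['PMMATCH'] == 1)
print("%i / %i stars in the statistical sample with UCAC-4 proper motions"
      % (numpy.sum(good), len(cat)))
print("Median Galactocentric rotation velocity: %.1f km/s"
      % numpy.median(cat['GALVT'][good]))
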
Example No. 8
def make_rcsample(parser):
    options,args= parser.parse_args()
    savefilename= options.savefilename
    if savefilename is None:
        #Create savefilename if not given
        savefilename= os.path.join(appath._APOGEE_DATA,
                                   'rcsample_'+appath._APOGEE_REDUX+'.fits')
        print("Saving to %s ..." % savefilename)
    #Read the base-sample
    data= apread.allStar(adddist=_ADDHAYDENDIST,rmdups=options.rmdups)
    #Remove a bunch of fields that we do not want to keep
    data= esutil.numpy_util.remove_fields(data,
                                          ['TARGET_ID',
                                           'FILE',
                                           'AK_WISE',
                                           'SFD_EBV',
                                           'SYNTHVHELIO_AVG',
                                           'SYNTHVSCATTER',
                                           'SYNTHVERR',
                                           'SYNTHVERR_MED',
                                           'RV_TEFF',
                                           'RV_LOGG',
                                           'RV_FEH',
                                           'RV_ALPHA',
                                           'RV_CARB',
                                           'RV_CCFWHM',
                                           'RV_AUTOFWHM',
                                           'SYNTHSCATTER',
                                           'STABLERV_CHI2',
                                           'STABLERV_RCHI2',
                                           'STABLERV_CHI2_PROB',
                                           'CHI2_THRESHOLD',
                                           'APSTAR_VERSION',
                                           'ASPCAP_VERSION',
                                           'RESULTS_VERSION',
                                           'WASH_M',
                                           'WASH_M_ERR',
                                           'WASH_T2',
                                           'WASH_T2_ERR',
                                           'DDO51',
                                           'DDO51_ERR',
                                           'IRAC_3_6',
                                           'IRAC_3_6_ERR',
                                           'IRAC_4_5',
                                           'IRAC_4_5_ERR',
                                           'IRAC_5_8',
                                           'IRAC_5_8_ERR',
                                           'IRAC_8_0',
                                           'IRAC_8_0_ERR',
                                           'WISE_4_5',
                                           'WISE_4_5_ERR',
                                           'TARG_4_5',
                                           'TARG_4_5_ERR',
                                           'WASH_DDO51_GIANT_FLAG',
                                           'WASH_DDO51_STAR_FLAG',
                                           'REDUCTION_ID',
                                           'SRC_H',
                                           'PM_SRC'])
    if not appath._APOGEE_REDUX.lower() == 'current' \
            and not 'l30' in appath._APOGEE_REDUX \
            and int(appath._APOGEE_REDUX[1:]) < 500:
        data= esutil.numpy_util.remove_fields(data,
                                              ['ELEM'])
    #Select red-clump stars
    jk= data['J0']-data['K0']
    z= isodist.FEH2Z(data['METALS'],zsolar=0.017)
    if 'l30' in appath._APOGEE_REDUX:
        logg= data['LOGG']
    elif appath._APOGEE_REDUX.lower() == 'current' \
            or int(appath._APOGEE_REDUX[1:]) > 600:
        from apogee.tools import paramIndx
        if False:
            #Use my custom logg calibration that's correct for the RC
            logg= (1.-0.042)*data['FPARAM'][:,paramIndx('logg')]-0.213
            lowloggindx= data['FPARAM'][:,paramIndx('logg')] < 1.
            logg[lowloggindx]= data['FPARAM'][lowloggindx,paramIndx('logg')]-0.255
            hiloggindx= data['FPARAM'][:,paramIndx('logg')] > 3.8
            logg[hiloggindx]= data['FPARAM'][hiloggindx,paramIndx('logg')]-0.3726
        else:
            #Use my custom logg calibration that's correct on average
            logg= (1.+0.03)*data['FPARAM'][:,paramIndx('logg')]-0.37
            lowloggindx= data['FPARAM'][:,paramIndx('logg')] < 1.
            logg[lowloggindx]= data['FPARAM'][lowloggindx,paramIndx('logg')]-0.34
            hiloggindx= data['FPARAM'][:,paramIndx('logg')] > 3.8
            logg[hiloggindx]= data['FPARAM'][hiloggindx,paramIndx('logg')]-0.256
    else:
        logg= data['LOGG']
    indx= (jk < 0.8)*(jk >= 0.5)\
        *(z <= 0.06)\
        *(z <= rcmodel.jkzcut(jk,upper=True))\
        *(z >= rcmodel.jkzcut(jk))\
        *(logg >= rcmodel.loggteffcut(data['TEFF'],z,upper=False))\
        *(logg <= rcmodel.loggteffcut(data['TEFF'],z,upper=True))
    data= data[indx]
    #Add more aggressive flag cut
    data= esutil.numpy_util.add_fields(data,[('ADDL_LOGG_CUT',numpy.int32)])
    data['ADDL_LOGG_CUT']= ((data['TEFF']-4800.)/1000.+2.75) > data['LOGG']
    if options.loggcut:
        data= data[data['ADDL_LOGG_CUT'] == 1]
    print("Making catalog of %i objects ..." % len(data))
    #Add distances
    data= esutil.numpy_util.add_fields(data,[('RC_DIST', float),
                                             ('RC_DM', float),
                                             ('RC_GALR', float),
                                             ('RC_GALPHI', float),
                                             ('RC_GALZ', float)])
    rcd= rcmodel.rcdist()
    jk= data['J0']-data['K0']
    z= isodist.FEH2Z(data['METALS'],zsolar=0.017)
    data['RC_DIST']= rcd(jk,z,appmag=data['K0'])*options.distfac
    data['RC_DM']= 5.*numpy.log10(data['RC_DIST'])+10.
    XYZ= bovy_coords.lbd_to_XYZ(data['GLON'],
                                data['GLAT'],
                                data['RC_DIST'],
                                degree=True)
    R,phi,Z= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],
                                          XYZ[:,1],
                                          XYZ[:,2],
                                          Xsun=8.,Zsun=0.025)
    data['RC_GALR']= R
    data['RC_GALPHI']= phi
    data['RC_GALZ']= Z
    #Save
    fitsio.write(savefilename,data,clobber=True)
    # Add Tycho-2 matches
    if options.tyc2:
        data= esutil.numpy_util.add_fields(data,[('TYC2MATCH',numpy.int32),
                                                 ('TYC1',numpy.int32),
                                                 ('TYC2',numpy.int32),
                                                 ('TYC3',numpy.int32)])
        data['TYC2MATCH']= 0
        data['TYC1']= -1
        data['TYC2']= -1
        data['TYC3']= -1
        # Write positions
        posfilename= tempfile.mktemp('.csv',dir=os.getcwd())
        resultfilename= tempfile.mktemp('.csv',dir=os.getcwd())
        with open(posfilename,'w') as csvfile:
            wr= csv.writer(csvfile,delimiter=',',quoting=csv.QUOTE_MINIMAL)
            wr.writerow(['RA','DEC'])
            for ii in range(len(data)):
                wr.writerow([data[ii]['RA'],data[ii]['DEC']])
        # Send to CDS for matching
        result= open(resultfilename,'w')
        try:
            subprocess.check_call(['curl',
                                   '-X','POST',
                                   '-F','request=xmatch',
                                   '-F','distMaxArcsec=2',
                                   '-F','RESPONSEFORMAT=csv',
                                   '-F','cat1=@%s' % os.path.basename(posfilename),
                                   '-F','colRA1=RA',
                                   '-F','colDec1=DEC',
                                   '-F','cat2=vizier:Tycho2',
                                   'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'],
                                  stdout=result)
        except subprocess.CalledProcessError:
            os.remove(posfilename)
            if os.path.exists(resultfilename):
                result.close()
                os.remove(resultfilename)
        result.close()
        # Directly match on input RA
        ma= numpy.loadtxt(resultfilename,delimiter=',',skiprows=1,
                          usecols=(1,2,7,8,9))
        iis= numpy.arange(len(data))
        mai= [iis[data['RA'] == ma[ii,0]][0] for ii in range(len(ma))]
        data['TYC2MATCH'][mai]= 1
        data['TYC1'][mai]= ma[:,2]
        data['TYC2'][mai]= ma[:,3]
        data['TYC3'][mai]= ma[:,4]
        os.remove(posfilename)
        os.remove(resultfilename)
    if not options.nostat:
        #Determine statistical sample and add flag
        apo= apogee.select.apogeeSelect()
        statIndx= apo.determine_statistical(data)
        mainIndx= apread.mainIndx(data)
        data= esutil.numpy_util.add_fields(data,[('STAT',numpy.int32),
                                                 ('INVSF',float)])
        data['STAT']= 0
        data['STAT'][statIndx*mainIndx]= 1
        for ii in range(len(data)):
            if (statIndx*mainIndx)[ii]:
                data['INVSF'][ii]= 1./apo(data['LOCATION_ID'][ii],
                                          data['H'][ii])
            else:
                data['INVSF'][ii]= -1.
    if options.nopm:
        fitsio.write(savefilename,data,clobber=True)       
        return None
    #Get proper motions, in a somewhat roundabout way
    pmfile= savefilename.split('.')[0]+'_pms.fits'
    if os.path.exists(pmfile):
        pmdata= fitsio.read(pmfile,1)
    else:
        pmdata= numpy.recarray(len(data),
                               formats=['f8','f8','f8','f8','f8','f8','i4'],
                               names=['RA','DEC','PMRA','PMDEC',
                                      'PMRA_ERR','PMDEC_ERR','PMMATCH'])
        # Write positions, again ...
        posfilename= tempfile.mktemp('.csv',dir=os.getcwd())
        resultfilename= tempfile.mktemp('.csv',dir=os.getcwd())
        with open(posfilename,'w') as csvfile:
            wr= csv.writer(csvfile,delimiter=',',quoting=csv.QUOTE_MINIMAL)
            wr.writerow(['RA','DEC'])
            for ii in range(len(data)):
                wr.writerow([data[ii]['RA'],data[ii]['DEC']])
        # Send to CDS for matching
        result= open(resultfilename,'w')
        try:
            subprocess.check_call(['curl',
                                   '-X','POST',
                                   '-F','request=xmatch',
                                   '-F','distMaxArcsec=4',
                                   '-F','RESPONSEFORMAT=csv',
                                   '-F','cat1=@%s' % os.path.basename(posfilename),
                                   '-F','colRA1=RA',
                                   '-F','colDec1=DEC',
                                   '-F','cat2=vizier:UCAC4',
                                   'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'],
                                  stdout=result)
        except subprocess.CalledProcessError:
            os.remove(posfilename)
            if os.path.exists(resultfilename):
                result.close()
                os.remove(resultfilename)
        result.close()
        # Match back and only keep the closest one
        ma= numpy.loadtxt(resultfilename,delimiter=',',skiprows=1,
                          converters={15: lambda s: float(s.strip() or -9999),
                                      16: lambda s: float(s.strip() or -9999),
                                      17: lambda s: float(s.strip() or -9999),
                                      18: lambda s: float(s.strip() or -9999)},
                          usecols=(4,5,15,16,17,18))
        h=esutil.htm.HTM()
        m1,m2,d12 = h.match(data['RA'],data['DEC'],
                            ma[:,0],ma[:,1],4./3600.,maxmatch=1)
        pmdata['PMMATCH']= 0
        pmdata['RA']= data['RA']
        pmdata['DEC']= data['DEC']
        pmdata['PMMATCH'][m1]= 1
        pmdata['PMRA'][m1]= ma[m2,2]
        pmdata['PMDEC'][m1]= ma[m2,3]
        pmdata['PMRA_ERR'][m1]= ma[m2,4]
        pmdata['PMDEC_ERR'][m1]= ma[m2,5]
        pmdata['PMMATCH'][(pmdata['PMRA'] == -9999) \
                          +(pmdata['PMDEC'] == -9999) \
                          +(pmdata['PMRA_ERR'] == -9999) \
                          +(pmdata['PMDEC_ERR'] == -9999)]= 0
        fitsio.write(pmfile,pmdata,clobber=True)
        #To make sure we're using the same format below
        pmdata= fitsio.read(pmfile,1)
        os.remove(posfilename)
        os.remove(resultfilename)
    #Match proper motions
    try: #These already exist currently, but may not always exist
        data= esutil.numpy_util.remove_fields(data,['PMRA','PMDEC'])
    except ValueError:
        pass
    data= esutil.numpy_util.add_fields(data,[('PMRA', numpy.float),
                                             ('PMDEC', numpy.float),
                                             ('PMRA_ERR', numpy.float),
                                             ('PMDEC_ERR', numpy.float),
                                             ('PMMATCH',numpy.int32)])
    data['PMMATCH']= 0
    h=esutil.htm.HTM()
    m1,m2,d12 = h.match(pmdata['RA'],pmdata['DEC'],
                        data['RA'],data['DEC'],
                        2./3600.,maxmatch=1)
    data['PMRA'][m2]= pmdata['PMRA'][m1]
    data['PMDEC'][m2]= pmdata['PMDEC'][m1]
    data['PMRA_ERR'][m2]= pmdata['PMRA_ERR'][m1]
    data['PMDEC_ERR'][m2]= pmdata['PMDEC_ERR'][m1]
    data['PMMATCH'][m2]= pmdata['PMMATCH'][m1].astype(numpy.int32)
    pmindx= data['PMMATCH'] == 1
    data['PMRA'][True-pmindx]= -9999.99
    data['PMDEC'][True-pmindx]= -9999.99
    data['PMRA_ERR'][True-pmindx]= -9999.99
    data['PMDEC_ERR'][True-pmindx]= -9999.99
    #Calculate Galactocentric velocities
    data= esutil.numpy_util.add_fields(data,[('GALVR', numpy.float),
                                             ('GALVT', numpy.float),
                                             ('GALVZ', numpy.float)])
    lb= bovy_coords.radec_to_lb(data['RA'],data['DEC'],degree=True)
    XYZ= bovy_coords.lbd_to_XYZ(lb[:,0],lb[:,1],data['RC_DIST'],degree=True)
    pmllpmbb= bovy_coords.pmrapmdec_to_pmllpmbb(data['PMRA'],data['PMDEC'],
                                                data['RA'],data['DEC'],
                                                degree=True)
    vxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(data['VHELIO_AVG'],
                                             pmllpmbb[:,0],
                                             pmllpmbb[:,1],
                                             lb[:,0],lb[:,1],data['RC_DIST'],
                                             degree=True)
    vR, vT, vZ= bovy_coords.vxvyvz_to_galcencyl(vxvyvz[:,0],
                                                vxvyvz[:,1],
                                                vxvyvz[:,2],
                                                8.-XYZ[:,0],
                                                XYZ[:,1],
                                                XYZ[:,2]+0.025,
                                                vsun=[-11.1,30.24*8.,7.25])#Assumes proper motion of Sgr A* and R0=8 kpc, zo= 25 pc
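    # Aside (not in the original code): the tangential component of vsun,
    # 30.24*8., is the proper motion of Sgr A* (~6.38 mas/yr) converted with
    # 4.74 km/s per (mas/yr kpc) and R0 = 8 kpc, i.e. ~242 km/s for the Sun's
    # total rotation velocity; see the in-line comment above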
    data['GALVR']= vR
    data['GALVT']= vT
    data['GALVZ']= vZ
    data['GALVR'][~pmindx]= -9999.99
    data['GALVT'][~pmindx]= -9999.99
    data['GALVZ'][~pmindx]= -9999.99
    #Get PPMXL proper motions, in a somewhat roundabout way
    pmfile= savefilename.split('.')[0]+'_pms_ppmxl.fits'
    if os.path.exists(pmfile):
        pmdata= fitsio.read(pmfile,1)
    else:
        pmdata= numpy.recarray(len(data),
                               formats=['f8','f8','f8','f8','f8','f8','i4'],
                               names=['RA','DEC','PMRA','PMDEC',
                                      'PMRA_ERR','PMDEC_ERR','PMMATCH'])
        # Write positions, again ...
        posfilename= tempfile.mktemp('.csv',dir=os.getcwd())
        resultfilename= tempfile.mktemp('.csv',dir=os.getcwd())
        with open(posfilename,'w') as csvfile:
            wr= csv.writer(csvfile,delimiter=',',quoting=csv.QUOTE_MINIMAL)
            wr.writerow(['RA','DEC'])
            for ii in range(len(data)):
                wr.writerow([data[ii]['RA'],data[ii]['DEC']])
        # Send to CDS for matching
        result= open(resultfilename,'w')
        try:
            subprocess.check_call(['curl',
                                   '-X','POST',
                                   '-F','request=xmatch',
                                   '-F','distMaxArcsec=4',
                                   '-F','RESPONSEFORMAT=csv',
                                   '-F','cat1=@%s' % os.path.basename(posfilename),
                                   '-F','colRA1=RA',
                                   '-F','colDec1=DEC',
                                   '-F','cat2=vizier:PPMXL',
                                   'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'],
                                  stdout=result)
        except subprocess.CalledProcessError:
            os.remove(posfilename)
            if os.path.exists(resultfilename):
                result.close()
                os.remove(resultfilename)
        result.close()
        # Match back and only keep the closest one
        ma= numpy.loadtxt(resultfilename,delimiter=',',skiprows=1,
                          converters={15: lambda s: float(s.strip() or -9999),
                                      16: lambda s: float(s.strip() or -9999),
                                      17: lambda s: float(s.strip() or -9999),
                                      18: lambda s: float(s.strip() or -9999),
                                      19: lambda s: float(s.strip() or -9999),
                                      20: lambda s: float(s.strip() or -9999)},
                          usecols=(4,5,15,16,19,20))
        h=esutil.htm.HTM()
        m1,m2,d12 = h.match(data['RA'],data['DEC'],
                            ma[:,0],ma[:,1],4./3600.,maxmatch=1)
        pmdata['PMMATCH']= 0
        pmdata['RA']= data['RA']
        pmdata['DEC']= data['DEC']
        pmdata['PMMATCH'][m1]= 1
        pmdata['PMRA'][m1]= ma[m2,2]
        pmdata['PMDEC'][m1]= ma[m2,3]
        pmdata['PMRA_ERR'][m1]= ma[m2,4]
        pmdata['PMDEC_ERR'][m1]= ma[m2,5]
        pmdata['PMMATCH'][(pmdata['PMRA'] == -9999) \
                          +(pmdata['PMDEC'] == -9999) \
                          +(pmdata['PMRA_ERR'] == -9999) \
                          +(pmdata['PMDEC_ERR'] == -9999)]= 0
        fitsio.write(pmfile,pmdata,clobber=True)
        #To make sure we're using the same format below
        pmdata= fitsio.read(pmfile,1)
        os.remove(posfilename)
        os.remove(resultfilename)
    #Match proper motions to ppmxl
    data= esutil.numpy_util.add_fields(data,[('PMRA_PPMXL', float),
                                             ('PMDEC_PPMXL', float),
                                             ('PMRA_ERR_PPMXL', float),
                                             ('PMDEC_ERR_PPMXL', float),
                                             ('PMMATCH_PPMXL',numpy.int32)])
    data['PMMATCH_PPMXL']= 0
    h=esutil.htm.HTM()
    m1,m2,d12 = h.match(pmdata['RA'],pmdata['DEC'],
                        data['RA'],data['DEC'],
                        2./3600.,maxmatch=1)
    data['PMRA_PPMXL'][m2]= pmdata['PMRA'][m1]
    data['PMDEC_PPMXL'][m2]= pmdata['PMDEC'][m1]
    data['PMRA_ERR_PPMXL'][m2]= pmdata['PMRA_ERR'][m1]
    data['PMDEC_ERR_PPMXL'][m2]= pmdata['PMDEC_ERR'][m1]
    data['PMMATCH_PPMXL'][m2]= pmdata['PMMATCH'][m1].astype(numpy.int32)
    pmindx= data['PMMATCH_PPMXL'] == 1
    data['PMRA_PPMXL'][~pmindx]= -9999.99
    data['PMDEC_PPMXL'][~pmindx]= -9999.99
    data['PMRA_ERR_PPMXL'][~pmindx]= -9999.99
    data['PMDEC_ERR_PPMXL'][~pmindx]= -9999.99
    #Calculate Galactocentric velocities
    data= esutil.numpy_util.add_fields(data,[('GALVR_PPMXL', float),
                                             ('GALVT_PPMXL', float),
                                             ('GALVZ_PPMXL', float)])
    lb= bovy_coords.radec_to_lb(data['RA'],data['DEC'],degree=True)
    XYZ= bovy_coords.lbd_to_XYZ(lb[:,0],lb[:,1],data['RC_DIST'],degree=True)
    pmllpmbb= bovy_coords.pmrapmdec_to_pmllpmbb(data['PMRA_PPMXL'],
                                                data['PMDEC_PPMXL'],
                                                data['RA'],data['DEC'],
                                                degree=True)
    vxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(data['VHELIO_AVG'],
                                             pmllpmbb[:,0],
                                             pmllpmbb[:,1],
                                             lb[:,0],lb[:,1],data['RC_DIST'],
                                             degree=True)
    vR, vT, vZ= bovy_coords.vxvyvz_to_galcencyl(vxvyvz[:,0],
                                                vxvyvz[:,1],
                                                vxvyvz[:,2],
                                                8.-XYZ[:,0],
                                                XYZ[:,1],
                                                XYZ[:,2]+0.025,
                                                vsun=[-11.1,30.24*8.,7.25])#Assumes proper motion of Sgr A* and R0=8 kpc, zo= 25 pc
    data['GALVR_PPMXL']= vR
    data['GALVT_PPMXL']= vT
    data['GALVZ_PPMXL']= vZ
    data['GALVR_PPMXL'][~pmindx]= -9999.99
    data['GALVT_PPMXL'][~pmindx]= -9999.99
    data['GALVZ_PPMXL'][~pmindx]= -9999.99
    #Save
    fitsio.write(savefilename,data,clobber=True)
    return None
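The example above shells out to curl to query the CDS xMatch sync service. As a hedged aside, the same POST can be made directly from Python; the sketch below is a minimal illustration, not the code used in the catalog build, and it assumes the third-party requests package. The URL and the form fields (request, distMaxArcsec, RESPONSEFORMAT, cat1, colRA1, colDec1, cat2) are taken from the curl invocation above; the function name is illustrative only.

import requests

def cds_xmatch(posfilename, resultfilename, cat2='vizier:UCAC4',
               dist_max_arcsec=4):
    """Upload a CSV of RA,DEC positions to the CDS xMatch sync API and write
    the CSV response to resultfilename."""
    with open(posfilename, 'rb') as posfile:
        response = requests.post(
            'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync',
            data={'request': 'xmatch',
                  'distMaxArcsec': str(dist_max_arcsec),
                  'RESPONSEFORMAT': 'csv',
                  'colRA1': 'RA',
                  'colDec1': 'DEC',
                  'cat2': cat2},
            files={'cat1': posfile})
    response.raise_for_status()  # surface HTTP errors instead of parsing an empty result file
    with open(resultfilename, 'w') as resultfile:
        resultfile.write(response.text)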
Example no. 9
0
 def __init__(self,
              loggmin=None,loggmax=None,
              imfmodel='lognormalChabrier2001',
              Z=None,
              expsfh=False,band='Ks',
              dontgather=False,
              basti=False,
              parsec=True,
              minage=None,
              maxage=10.,
              jkmin=None,
              stage=None,
              eta=None):
     """
     NAME:
        __init__
     PURPOSE:
        initialize isocmodel
     INPUT:
        Z= metallicity (if not set, use flat prior in Z over all Z; can be list)
        loggmin= if set, cut logg at this minimum
         loggmax= if set, cut logg at this maximum; if 'rc', use the Teff- and Z-dependent upper cut appropriate for the APOGEE RC sample
         imfmodel= (default: 'lognormalChabrier2001') IMF model to use (see isodist.imf code for options)
         band= band to use for M_X (JHK)
         expsfh= if True, use an exponentially-declining star-formation history (can also be set to a decay time-scale in Gyr; if simply True, an 8 Gyr time-scale is used)
         dontgather= if True, don't gather surrounding Zs
         basti= if True, use BaSTI isochrones (if False, use PARSEC or Padova)
         parsec= if True (=default), use PARSEC isochrones; if False, use Padova
         stage= if set, only use this evolutionary stage
        minage= (None) minimum log10 of age/yr
        maxage= (10.) maximum log10 of age/yr
        jkmin= (None) if set, only consider J-Ks greater than this
        eta= (None) mass-loss efficiency parameter
     OUTPUT:
        object
     HISTORY:
        2012-11-07 - Written - Bovy (IAS)
     """
     self._band= band
     self._loggmin= loggmin
     self._loggmax= loggmax
     # Note: bool is a subclass of int, so exclude it here to let expsfh=True
     # fall through to the 8 Gyr default below
     if isinstance(expsfh,(int,float,numpy.float32,numpy.float64)) \
             and not isinstance(expsfh,bool):
         self._expsfh= expsfh
     elif expsfh:
         self._expsfh= 8.
     else:
         self._expsfh= False
     self._Z= Z
     self._imfmodel= imfmodel
     self._basti= basti
     self._eta= eta
     if isinstance(loggmax,str) and loggmax.lower() == 'rc':
         from apogee.samples.rc import loggteffcut
     #Read isochrones
     if basti:
         zs= numpy.array([0.0001,0.0003,0.0006,0.001,0.002,0.004,0.008,
                          0.01,0.0198,0.03,0.04])
     elif parsec:
         zs= numpy.arange(0.0005,0.06005,0.0005)
     else:
         zs= numpy.arange(0.0005,0.03005,0.0005)
     if Z is None:
         Zs= zs
     elif isinstance(Z,float):
         if basti or dontgather:
             Zs= [Z]
         elif Z < 0.001 or Z > 0.0295:
             Zs= [Z] 
         elif Z < 0.0015 or Z > 0.029:
             Zs= [Z-0.0005,Z,Z+0.0005] #build up statistics
         elif Z < 0.01:
             Zs= [Z-0.001,Z-0.0005,Z,Z+0.0005,Z+0.001] #build up statistics
         else:
             Zs= [Z-0.0005,Z,Z+0.0005] #build up statistics
     else:
         Zs= Z #Z given as a list of metallicities
     if basti:
         p= isodist.BastiIsochrone(Z=Zs,eta=eta)
     else:
         p= isodist.PadovaIsochrone(Z=Zs,parsec=parsec,eta=eta)
     if basti:
         #Force BaSTI to have equal age sampling
         lages= list(numpy.log10(numpy.arange(0.1,1.,0.1))+9.)
         lages.extend(list(numpy.log10(numpy.arange(1.0,10.5,0.5))+9.))
         lages= numpy.array(lages)
     #Get relevant data
     sample= []
     weights= []
     massweights= []
     loggs= []
     teffs= []
     pmasses= []
     plages= []
     pjks= []
     for logage in p.logages():
         if logage > maxage: continue
         if not minage is None and logage < minage: continue
         if basti and numpy.sum((logage == lages)) == 0: continue
         for zz in range(len(Zs)):
             thisiso= p(logage,Zs[zz],asrecarray=True,stage=stage)
             if len(thisiso.M_ini) == 0: continue
             #Calculate int_IMF for this IMF model
             if not imfmodel == 'lognormalChabrier2001': #That would be the default
                 if imfmodel == 'exponentialChabrier2001':
                     int_IMF= isodist.imf.exponentialChabrier2001(thisiso.M_ini,int=True)
                 elif imfmodel == 'kroupa2003':
                     int_IMF= isodist.imf.kroupa2003(thisiso.M_ini,int=True)
                 elif imfmodel == 'chabrier2003':
                     int_IMF= isodist.imf.chabrier2003(thisiso.M_ini,int=True)
                 else:
                     raise IOError("imfmodel option not understood (non-existing model)")
             elif basti:
                 int_IMF= isodist.imf.lognormalChabrier2001(thisiso.M_ini,int=True)
             else:
                 int_IMF= thisiso.int_IMF
             dN= (numpy.roll(int_IMF,-1)-int_IMF)/(int_IMF[-1]-int_IMF[0])/10**(logage-7.)
             dmass= thisiso.M_ini*(numpy.roll(int_IMF,-1)-int_IMF)/numpy.sum((thisiso.M_ini*(numpy.roll(int_IMF,-1)-int_IMF))[:-1])/10**(logage-7.)
             for ii in range(1,len(int_IMF)-1):
                 if basti:
                     JK= 0.996*(thisiso.J[ii]-thisiso.K[ii])+0.00923
                 else:
                     JK= thisiso.J[ii]-thisiso.Ks[ii]
                 if not jkmin is None and JK < jkmin: continue
                 if band.lower() == 'h':
                     if basti:
                         raise NotImplementedError("'H' not implemented for BaSTI yet")
                         J= JK+thisiso.K[ii]-0.044
                         H= J-(0.980*(thisiso.J[ii]-thisiso.H[ii])-0.045)
                     else:
                         H= thisiso.H[ii]
                 elif band.lower() == 'j':
                     if basti:
                         raise NotImplementedError("'J' not implemented for BaSTI yet")
                         J= JK+thisiso.K[ii]-0.044
                     else:
                         H= thisiso.J[ii]
                 elif band.lower() == 'k' or band.lower() == 'ks':
                     if basti:
                         H= thisiso.K[ii]-0.046
                     else:
                         H= thisiso.Ks[ii]
                 if JK < 0.3 \
                         or (isinstance(loggmax,str) and loggmax == 'rc' and (thisiso['logg'][ii] > loggteffcut(10.**thisiso['logTe'][ii],Zs[zz],upper=True))) \
                         or (not isinstance(loggmax,str) and not loggmax is None and thisiso['logg'][ii] > loggmax) \
                         or (not loggmin is None and thisiso['logg'][ii] < loggmin):
                     continue
                 if dN[ii] > 0.: 
                     sample.append([JK,H])
                     loggs.append([thisiso.logg[ii]])
                     teffs.append([10.**thisiso.logTe[ii]])
                     pmasses.append(thisiso.M_ini[ii])
                     plages.append(logage)
                     pjks.append(JK)
                     if basti: #BaSTI is sampled uniformly in age, not logage, but has a finer sampling below 1 Gyr
                         if logage < 9.:
                             if self._expsfh:
                                 weights.append(dN[ii]/5.*numpy.exp((10.**(logage-7.))/self._expsfh/100.)) #e.g., Binney (2010)
                                 massweights.append(dmass[ii]/5.*numpy.exp((10.**(logage-7.))/self._expsfh/100.)) #e.g., Binney (2010)
                             else:
                                 weights.append(dN[ii]/5.)
                                 massweights.append(dmass[ii]/5.)
                         else:
                             if self._expsfh:
                                 weights.append(dN[ii]*numpy.exp((10.**(logage-7.))/self._expsfh/100.)) #e.g., Binney (2010)
                                 massweights.append(dmass[ii]*numpy.exp((10.**(logage-7.))/self._expsfh/100.)) #e.g., Binney (2010)
                             else:
                                 weights.append(dN[ii])
                                 massweights.append(dmass[ii])
                     else:
                         if self._expsfh:
                             weights.append(dN[ii]*10**(logage-7.)*numpy.exp((10.**(logage-7.))/self._expsfh/100.)) #e.g., Binney (2010)
                             massweights.append(dmass[ii]*10**(logage-7.)*numpy.exp((10.**(logage-7.))/self._expsfh/100.)) #e.g., Binney (2010)
                         else:
                             weights.append(dN[ii]*10**(logage-7.))
                             massweights.append(dmass[ii]*10**(logage-7.))
                 else: 
                     continue #no use in continuing here   
     #Form array
     sample= numpy.array(sample)
     loggs= numpy.array(loggs)
     teffs= numpy.array(teffs)
     pmasses= numpy.array(pmasses)
     plages= numpy.array(plages)-9.
     pjks= numpy.array(pjks)
     weights= numpy.array(weights)
     massweights= numpy.array(massweights)
     #Cut out low weights
     if False:
         indx= (weights > 10.**-5.*numpy.sum(weights))
     else:
         indx= numpy.ones(len(weights),dtype='bool')
     self._sample= sample[indx,:]
     self._weights= weights[indx]
     self._massweights= massweights[indx]
     self._loggs= loggs[indx]
     self._teffs= teffs[indx]
     self._masses= pmasses[indx]
     self._lages= plages[indx]
     self._jks= pjks[indx]
     #Setup KDE
     self._kde= dens_kde.densKDE(self._sample,w=self._weights,
                                 h=2.*self._sample.shape[0]**(-1./5.),#h='scott',
                                 kernel='biweight',
                                 variable=True,variablenitt=3,
                                 variableexp=0.5)
     return None
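Most of the work in this initializer goes into assigning a weight to every isochrone point before the KDE is built. As a hedged aside, the sketch below isolates that weight for the PARSEC/Padova branch (isochrones spaced uniformly in log10 age); int_IMF is the cumulative-IMF column of a single isochrone and the helper name is illustrative, not part of the class.

import numpy

def isochrone_point_weights(int_IMF, logage, expsfh=False):
    # Number fraction in each initial-mass bin, normalized by the time
    # interval that an isochrone on a uniform log10(age) grid represents
    dN = ((numpy.roll(int_IMF, -1) - int_IMF)
          / (int_IMF[-1] - int_IMF[0]) / 10.**(logage - 7.))
    # Re-weight by that same time interval to get this age's contribution
    w = dN * 10.**(logage - 7.)
    if expsfh:
        # Optional exponentially-declining star-formation history; the exponent
        # is the age in Gyr divided by the decay time-scale expsfh (in Gyr)
        w = w * numpy.exp((10.**(logage - 7.)) / expsfh / 100.)
    return w

Because numpy.roll wraps around, the last element of dN is meaningless, which is why the loop above only visits indices 1 through len(int_IMF)-2.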
Example no. 10
0
 def __init__(self,
              loggmin=None,
              loggmax=None,
              imfmodel='lognormalChabrier2001',
              Z=None,
              expsfh=False,
              band='Ks',
              dontgather=False,
              basti=False,
              parsec=True,
              minage=None,
              maxage=10.,
              jkmin=None,
              stage=None,
              eta=None):
     """
     NAME:
        __init__
     PURPOSE:
        initialize isocmodel
     INPUT:
        Z= metallicity (if not set, use flat prior in Z over all Z; can be list)
        loggmin= if set, cut logg at this minimum
         loggmax= if set, cut logg at this maximum; if 'rc', use the Teff- and Z-dependent upper cut appropriate for the APOGEE RC sample
         imfmodel= (default: 'lognormalChabrier2001') IMF model to use (see isodist.imf code for options)
         band= band to use for M_X (JHK)
         expsfh= if True, use an exponentially-declining star-formation history (can also be set to a decay time-scale in Gyr; if simply True, an 8 Gyr time-scale is used)
         dontgather= if True, don't gather surrounding Zs
         basti= if True, use BaSTI isochrones (if False, use PARSEC or Padova)
         parsec= if True (=default), use PARSEC isochrones; if False, use Padova
         stage= if set, only use this evolutionary stage
        minage= (None) minimum log10 of age/yr
        maxage= (10.) maximum log10 of age/yr
        jkmin= (None) if set, only consider J-Ks greater than this
        eta= (None) mass-loss efficiency parameter
     OUTPUT:
        object
     HISTORY:
        2012-11-07 - Written - Bovy (IAS)
     """
     self._band = band
     self._loggmin = loggmin
     self._loggmax = loggmax
     # Note: bool is a subclass of int, so exclude it here to let expsfh=True
     # fall through to the 8 Gyr default below
     if isinstance(expsfh, (int, float, numpy.float32, numpy.float64)) \
             and not isinstance(expsfh, bool):
         self._expsfh = expsfh
     elif expsfh:
         self._expsfh = 8.
     else:
         self._expsfh = False
     self._Z = Z
     self._imfmodel = imfmodel
     self._basti = basti
     self._eta = eta
     if isinstance(loggmax, str) and loggmax.lower() == 'rc':
         from apogee.samples.rc import loggteffcut
     #Read isochrones
     if basti:
         zs = numpy.array([
             0.0001, 0.0003, 0.0006, 0.001, 0.002, 0.004, 0.008, 0.01,
             0.0198, 0.03, 0.04
         ])
     elif parsec:
         zs = numpy.arange(0.0005, 0.06005, 0.0005)
     else:
         zs = numpy.arange(0.0005, 0.03005, 0.0005)
     if Z is None:
         Zs = zs
     elif isinstance(Z, float):
         if basti or dontgather:
             Zs = [Z]
         elif Z < 0.001 or Z > 0.0295:
             Zs = [Z]
         elif Z < 0.0015 or Z > 0.029:
             Zs = [Z - 0.0005, Z, Z + 0.0005]  #build up statistics
         elif Z < 0.01:
             Zs = [Z - 0.001, Z - 0.0005, Z, Z + 0.0005,
                   Z + 0.001]  #build up statistics
          else:
              Zs = [Z - 0.0005, Z, Z + 0.0005]  #build up statistics
      else:
          Zs = Z  #Z given as a list of metallicities
     if basti:
         p = isodist.BastiIsochrone(Z=Zs, eta=eta)
     else:
         p = isodist.PadovaIsochrone(Z=Zs, parsec=parsec, eta=eta)
     if basti:
         #Force BaSTI to have equal age sampling
         lages = list(numpy.log10(numpy.arange(0.1, 1., 0.1)) + 9.)
         lages.extend(list(numpy.log10(numpy.arange(1.0, 10.5, 0.5)) + 9.))
         lages = numpy.array(lages)
     #Get relevant data
     sample = []
     weights = []
     massweights = []
     loggs = []
     teffs = []
     pmasses = []
     plages = []
     pjks = []
     for logage in p.logages():
         if logage > maxage: continue
         if not minage is None and logage < minage: continue
         if basti and numpy.sum((logage == lages)) == 0: continue
         for zz in range(len(Zs)):
             thisiso = p(logage, Zs[zz], asrecarray=True, stage=stage)
             if len(thisiso.M_ini) == 0: continue
             #Calculate int_IMF for this IMF model
             if not imfmodel == 'lognormalChabrier2001':  #That would be the default
                 if imfmodel == 'exponentialChabrier2001':
                     int_IMF = isodist.imf.exponentialChabrier2001(
                         thisiso.M_ini, int=True)
                 elif imfmodel == 'kroupa2003':
                     int_IMF = isodist.imf.kroupa2003(thisiso.M_ini,
                                                      int=True)
                 elif imfmodel == 'chabrier2003':
                     int_IMF = isodist.imf.chabrier2003(thisiso.M_ini,
                                                        int=True)
                 else:
                     raise IOError(
                         "imfmodel option not understood (non-existing model)"
                     )
             elif basti:
                 int_IMF = isodist.imf.lognormalChabrier2001(thisiso.M_ini,
                                                             int=True)
             else:
                 int_IMF = thisiso.int_IMF
             dN = (numpy.roll(int_IMF, -1) -
                   int_IMF) / (int_IMF[-1] - int_IMF[0]) / 10**(logage - 7.)
             dmass = thisiso.M_ini * (
                 numpy.roll(int_IMF, -1) - int_IMF) / numpy.sum(
                     (thisiso.M_ini *
                      (numpy.roll(int_IMF, -1) - int_IMF))[:-1]) / 10**(
                          logage - 7.)
             for ii in range(1, len(int_IMF) - 1):
                 if basti:
                     JK = 0.996 * (thisiso.J[ii] - thisiso.K[ii]) + 0.00923
                 else:
                     JK = thisiso.J[ii] - thisiso.Ks[ii]
                 if not jkmin is None and JK < jkmin: continue
                 if band.lower() == 'h':
                     if basti:
                         raise NotImplementedError(
                             "'H' not implemented for BaSTI yet")
                         J = JK + thisiso.K[ii] - 0.044
                         H = J - (0.980 *
                                  (thisiso.J[ii] - thisiso.H[ii]) - 0.045)
                     else:
                         H = thisiso.H[ii]
                 elif band.lower() == 'j':
                     if basti:
                         raise NotImplementedError(
                             "'J' not implemented for BaSTI yet")
                         J = JK + thisiso.K[ii] - 0.044
                     else:
                         H = thisiso.J[ii]
                 elif band.lower() == 'k' or band.lower() == 'ks':
                     if basti:
                         H = thisiso.K[ii] - 0.046
                     else:
                         H = thisiso.Ks[ii]
                 if JK < 0.3 \
                         or (isinstance(loggmax,str) and loggmax == 'rc' and (thisiso['logg'][ii] > loggteffcut(10.**thisiso['logTe'][ii],Zs[zz],upper=True))) \
                         or (not isinstance(loggmax,str) and not loggmax is None and thisiso['logg'][ii] > loggmax) \
                         or (not loggmin is None and thisiso['logg'][ii] < loggmin):
                     continue
                 if dN[ii] > 0.:
                     sample.append([JK, H])
                     loggs.append([thisiso.logg[ii]])
                     teffs.append([10.**thisiso.logTe[ii]])
                     pmasses.append(thisiso.M_ini[ii])
                     plages.append(logage)
                     pjks.append(JK)
                     if basti:  #BaSTI is sampled uniformly in age, not logage, but has a finer sampling below 1 Gyr
                         if logage < 9.:
                             if self._expsfh:
                                 weights.append(dN[ii] / 5. * numpy.exp(
                                     (10.**(logage - 7.)) / self._expsfh /
                                     100.))  #e.g., Binney (2010)
                                 massweights.append(
                                     dmass[ii] / 5. * numpy.exp(
                                         (10.**(logage - 7.)) / self._expsfh
                                         / 100.))  #e.g., Binney (2010)
                             else:
                                 weights.append(dN[ii] / 5.)
                                 massweights.append(dmass[ii] / 5.)
                         else:
                             if self._expsfh:
                                 weights.append(dN[ii] * numpy.exp(
                                     (10.**(logage - 7.)) / self._expsfh /
                                     100.))  #e.g., Binney (2010)
                                 massweights.append(dmass[ii] * numpy.exp(
                                     (10.**(logage - 7.)) / self._expsfh /
                                     100.))  #e.g., Binney (2010)
                             else:
                                 weights.append(dN[ii])
                                 massweights.append(dmass[ii])
                     else:
                         if self._expsfh:
                             weights.append(
                                 dN[ii] * 10**(logage - 7.) * numpy.exp(
                                     (10.**(logage - 7.)) / self._expsfh /
                                     100.))  #e.g., Binney (2010)
                             massweights.append(
                                 dmass[ii] * 10**(logage - 7.) * numpy.exp(
                                     (10.**(logage - 7.)) / self._expsfh /
                                     100.))  #e.g., Binney (2010)
                         else:
                             weights.append(dN[ii] * 10**(logage - 7.))
                             massweights.append(dmass[ii] *
                                                10**(logage - 7.))
                 else:
                     continue  #no use in continuing here
     #Form array
     sample = numpy.array(sample)
     loggs = numpy.array(loggs)
     teffs = numpy.array(teffs)
     pmasses = numpy.array(pmasses)
     plages = numpy.array(plages) - 9.
     pjks = numpy.array(pjks)
     weights = numpy.array(weights)
     massweights = numpy.array(massweights)
     #Cut out low weights
     if False:
         indx = (weights > 10.**-5. * numpy.sum(weights))
     else:
         indx = numpy.ones(len(weights), dtype='bool')
     self._sample = sample[indx, :]
     self._weights = weights[indx]
     self._massweights = massweights[indx]
     self._loggs = loggs[indx]
     self._teffs = teffs[indx]
     self._masses = pmasses[indx]
     self._lages = plages[indx]
     self._jks = pjks[indx]
     #Setup KDE
     self._kde = dens_kde.densKDE(
         self._sample,
         w=self._weights,
         h=2. * self._sample.shape[0]**(-1. / 5.),  #h='scott',
         kernel='biweight',
         variable=True,
         variablenitt=3,
         variableexp=0.5)
     return None
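Both copies of this initializer end by fitting a weighted, variable-bandwidth biweight KDE in the (J-Ks, M_band) plane with dens_kde.densKDE. As a rough, hedged illustration of that last step only, the sketch below uses scipy.stats.gaussian_kde (a Gaussian kernel with a single global bandwidth, so not an exact equivalent; the weights keyword needs SciPy >= 1.2) with the same n**(-1/5) bandwidth scaling; the function name and the example numbers are illustrative.

import numpy
from scipy import stats

def fit_color_magnitude_kde(sample, weights):
    """sample: (npoints, 2) array of [J-Ks, M_band]; weights: (npoints,) array."""
    npoints = sample.shape[0]
    bandwidth_factor = 2. * npoints**(-1. / 5.)  # same scaling as h above
    # gaussian_kde expects the data as (ndim, npoints)
    return stats.gaussian_kde(sample.T, bw_method=bandwidth_factor,
                              weights=weights)

# e.g., evaluate the relative density at J-Ks = 0.65, M = -1.6:
# kde = fit_color_magnitude_kde(model._sample, model._weights)
# print(kde([[0.65], [-1.6]]))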
Example no. 11
0
 noseismo= seismoState == 'UNKNOWN'
 noclumpseismo= (seismoState == 'RGB') \
     + (seismoState == 'DWARF/SUBGIANT')
 rcclumpseismo= clumpseismo*(data['RC'] == 1)#*(((data['TEFF']-4800.)/1000.+2.75) > data['LOGG'])
 rcnoclumpseismo= noclumpseismo*(data['RC'] == 1)#*(((data['TEFF']-4800.)/1000.+2.75) > data['LOGG'])
 #Statistics using evolutionary state measurements
 print("%i APOKASC stars have evolutionary state measurements" % (numpy.sum(clumpseismo)+numpy.sum(noclumpseismo)))
 print("%i APOKASC RC stars have evolutionary state measurements" % (numpy.sum(rcclumpseismo)+numpy.sum(rcnoclumpseismo)))
 print("%i / %i = %i%% APOKASC CLUMP stars are in the RC catalog (COMPLETENESS)" % (numpy.sum(rcclumpseismo),numpy.sum(clumpseismo),float(numpy.sum(rcclumpseismo))/numpy.sum(clumpseismo)*100.))
 print("%i / %i = %i%% of RC catalog stars with evolutionary state measurements are APOKASC non-CLUMP stars (CONTAMINATION)" % (numpy.sum(rcnoclumpseismo),numpy.sum(rcnoclumpseismo)+numpy.sum(rcclumpseismo),float(numpy.sum(rcnoclumpseismo))/(numpy.sum(rcnoclumpseismo)+numpy.sum(rcclumpseismo))*100.))
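 # Aside (not part of the original script): minimal helpers spelling out the
 # two statistics printed above; the function names are illustrative only
 def apokasc_completeness(rcclumpseismo, clumpseismo):
     # fraction of seismically-confirmed clump stars that are in the RC catalog
     return float(numpy.sum(rcclumpseismo))/numpy.sum(clumpseismo)
 def apokasc_contamination(rcnoclumpseismo, rcclumpseismo):
     # fraction of RC-catalog stars with a seismic classification that are not clump stars
     return float(numpy.sum(rcnoclumpseismo))\
         /(numpy.sum(rcnoclumpseismo)+numpy.sum(rcclumpseismo))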
 rcindx= data['RC'] == 1
 #Statistics using simple seismo logg cut
 kascLoggTag= 'KASC_RG_LOGG_SCALE_2'
 try:
     clumplogg= (data[kascLoggTag] > 1.8)\
         *(data[kascLoggTag] < rcmodel.loggteffcut(data['TEFF'],
                                                   isodist.FEH2Z(data['METALS'],zsolar=0.017),upper=True))#2.8)
 except ValueError:
     kascLoggTag= 'LOGGRG'
     clumplogg= (data[kascLoggTag] > 1.8)\
         *(data[kascLoggTag] < rcmodel.loggteffcut(data['TEFF'],
                                                   isodist.FEH2Z(data['METALS'],zsolar=0.017),upper=True))#2.8)
 if False:
     rcclumplogg= clumplogg*(data['RC'] == 1)
     print("%i / %i = %i%% APOKASC logg clump stars are in the RC catalog" % (numpy.sum(rcclumplogg),numpy.sum(clumplogg),float(numpy.sum(rcclumplogg))/numpy.sum(clumplogg)*100))
     rcnoclumplogg= (~clumplogg)*(data['RC'] == 1)
     print("%i / %i = %i%% APOKASC logg non-clump stars are in the RC catalog" % (numpy.sum(rcnoclumplogg),numpy.sum(~clumplogg),float(numpy.sum(rcnoclumplogg))/numpy.sum(~clumplogg)*100.))
     print("%i / %i = %i%% APOKASC logg non-clump stars out of all stars are in the RC catalog" % (numpy.sum(rcnoclumplogg),numpy.sum(data['RC'] == 1),float(numpy.sum(rcnoclumplogg))/numpy.sum(data['RC'] == 1)*100.))
 bloggindx= (data['LOGG'] >= 1.8)*\
     (data['LOGG'] <= rcmodel.loggteffcut(data['TEFF'],data['METALS'],
                                          upper=True))
 gloggindx= (data[kascLoggTag] < 1.8)+\
Example no. 12
0
def make_rcsample(parser):
    options, args = parser.parse_args()
    savefilename = options.savefilename
    if savefilename is None:
        #Create savefilename if not given
        savefilename = os.path.join(
            appath._APOGEE_DATA, 'rcsample_' + appath._APOGEE_REDUX + '.fits')
        print("Saving to %s ..." % savefilename)
    #Read the base-sample
    data = apread.allStar(adddist=_ADDHAYDENDIST, rmdups=options.rmdups)
    #Remove a bunch of fields that we do not want to keep
    data = esutil.numpy_util.remove_fields(data, [
        'TARGET_ID', 'FILE', 'AK_WISE', 'SFD_EBV', 'SYNTHVHELIO_AVG',
        'SYNTHVSCATTER', 'SYNTHVERR', 'SYNTHVERR_MED', 'RV_TEFF', 'RV_LOGG',
        'RV_FEH', 'RV_ALPHA', 'RV_CARB', 'RV_CCFWHM', 'RV_AUTOFWHM',
        'SYNTHSCATTER', 'STABLERV_CHI2', 'STABLERV_RCHI2',
        'STABLERV_CHI2_PROB', 'CHI2_THRESHOLD', 'APSTAR_VERSION',
        'ASPCAP_VERSION', 'RESULTS_VERSION', 'WASH_M', 'WASH_M_ERR', 'WASH_T2',
        'WASH_T2_ERR', 'DDO51', 'DDO51_ERR', 'IRAC_3_6', 'IRAC_3_6_ERR',
        'IRAC_4_5', 'IRAC_4_5_ERR', 'IRAC_5_8', 'IRAC_5_8_ERR', 'IRAC_8_0',
        'IRAC_8_0_ERR', 'WISE_4_5', 'WISE_4_5_ERR', 'TARG_4_5', 'TARG_4_5_ERR',
        'WASH_DDO51_GIANT_FLAG', 'WASH_DDO51_STAR_FLAG', 'REDUCTION_ID',
        'SRC_H', 'PM_SRC'
    ])
    # More
    if appath._APOGEE_REDUX.lower() == 'l33':
        data = esutil.numpy_util.remove_fields(data, [
            'GAIA_SOURCE_ID', 'GAIA_PARALLAX', 'GAIA_PARALLAX_ERROR',
            'GAIA_PMRA', 'GAIA_PMRA_ERROR', 'GAIA_PMDEC', 'GAIA_PMDEC_ERROR',
            'GAIA_PHOT_G_MEAN_MAG', 'GAIA_PHOT_BP_MEAN_MAG',
            'GAIA_PHOT_RP_MEAN_MAG', 'GAIA_RADIAL_VELOCITY',
            'GAIA_RADIAL_VELOCITY_ERROR', 'GAIA_R_EST', 'GAIA_R_LO',
            'GAIA_R_HI', 'TEFF_SPEC', 'LOGG_SPEC'
        ])
    if not appath._APOGEE_REDUX.lower() == 'current' \
            and not 'l3' in appath._APOGEE_REDUX \
            and int(appath._APOGEE_REDUX[1:]) < 500:
        data = esutil.numpy_util.remove_fields(data, ['ELEM'])
    #Select red-clump stars
    jk = data['J0'] - data['K0']
    z = isodist.FEH2Z(data['METALS'], zsolar=0.017)
    if 'l31' in appath._APOGEE_REDUX:
        logg = data['LOGG']
    elif 'l30' in appath._APOGEE_REDUX:
        logg = data['LOGG']
    elif appath._APOGEE_REDUX.lower() == 'current' \
            or int(appath._APOGEE_REDUX[1:]) > 600:
        if False:
            #Use my custom logg calibration that's correct for the RC
            logg = (1. - 0.042) * data['FPARAM'][:, paramIndx('logg')] - 0.213
            lowloggindx = data['FPARAM'][:, paramIndx('logg')] < 1.
            logg[lowloggindx] = data['FPARAM'][lowloggindx,
                                               paramIndx('logg')] - 0.255
            hiloggindx = data['FPARAM'][:, paramIndx('logg')] > 3.8
            logg[hiloggindx] = data['FPARAM'][hiloggindx,
                                              paramIndx('logg')] - 0.3726
        else:
            #Use my custom logg calibration that's correct on average
            logg = (1. + 0.03) * data['FPARAM'][:, paramIndx('logg')] - 0.37
            lowloggindx = data['FPARAM'][:, paramIndx('logg')] < 1.
            logg[lowloggindx] = data['FPARAM'][lowloggindx,
                                               paramIndx('logg')] - 0.34
            hiloggindx = data['FPARAM'][:, paramIndx('logg')] > 3.8
            logg[hiloggindx] = data['FPARAM'][hiloggindx,
                                              paramIndx('logg')] - 0.256
    else:
        logg = data['LOGG']
    indx= (jk < 0.8)*(jk >= 0.5)\
        *(z <= 0.06)\
        *(z <= rcmodel.jkzcut(jk,upper=True))\
        *(z >= rcmodel.jkzcut(jk))\
        *(logg >= rcmodel.loggteffcut(data['TEFF'],z,upper=False))\
        *(logg+0.1*('l31' in appath._APOGEE_REDUX
                    or 'l33' in appath._APOGEE_REDUX) \
              <= rcmodel.loggteffcut(data['TEFF'],z,upper=True))
    data = data[indx]
    #Add more aggressive flag cut
    data = esutil.numpy_util.add_fields(data, [('ADDL_LOGG_CUT', numpy.int32)])
    data['ADDL_LOGG_CUT'] = (
        (data['TEFF'] - 4800.) / 1000. + 2.75) > data['LOGG']
    if options.loggcut:
        data = data[data['ADDL_LOGG_CUT'] == 1]
    print("Making catalog of %i objects ..." % len(data))
    #Add distances
    data = esutil.numpy_util.add_fields(data, [('RC_DIST', float),
                                               ('RC_DM', float),
                                               ('RC_GALR', float),
                                               ('RC_GALPHI', float),
                                               ('RC_GALZ', float)])
    rcd = rcmodel.rcdist()
    jk = data['J0'] - data['K0']
    z = isodist.FEH2Z(data['METALS'], zsolar=0.017)
    data['RC_DIST'] = rcd(jk, z, appmag=data['K0']) * options.distfac
    data['RC_DM'] = 5. * numpy.log10(data['RC_DIST']) + 10.
    XYZ = bovy_coords.lbd_to_XYZ(data['GLON'],
                                 data['GLAT'],
                                 data['RC_DIST'],
                                 degree=True)
    RphiZ = bovy_coords.XYZ_to_galcencyl(XYZ[:, 0],
                                         XYZ[:, 1],
                                         XYZ[:, 2],
                                         Xsun=8.15,
                                         Zsun=0.0208)
    R = RphiZ[:, 0]
    phi = RphiZ[:, 1]
    Z = RphiZ[:, 2]
    data['RC_GALR'] = R
    data['RC_GALPHI'] = phi
    data['RC_GALZ'] = Z
    #Save
    fitswrite(savefilename, data, clobber=True)
    # Add Tycho-2 matches
    if options.tyc2:
        data = esutil.numpy_util.add_fields(data, [('TYC2MATCH', numpy.int32),
                                                   ('TYC1', numpy.int32),
                                                   ('TYC2', numpy.int32),
                                                   ('TYC3', numpy.int32)])
        data['TYC2MATCH'] = 0
        data['TYC1'] = -1
        data['TYC2'] = -1
        data['TYC3'] = -1
        # Write positions
        posfilename = tempfile.mktemp('.csv', dir=os.getcwd())
        resultfilename = tempfile.mktemp('.csv', dir=os.getcwd())
        with open(posfilename, 'w') as csvfile:
            wr = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
            wr.writerow(['RA', 'DEC'])
            for ii in range(len(data)):
                wr.writerow([data[ii]['RA'], data[ii]['DEC']])
        # Send to CDS for matching
        result = open(resultfilename, 'w')
        try:
            subprocess.check_call([
                'curl', '-X', 'POST', '-F', 'request=xmatch', '-F',
                'distMaxArcsec=2', '-F', 'RESPONSEFORMAT=csv', '-F',
                'cat1=@%s' % os.path.basename(posfilename), '-F', 'colRA1=RA',
                '-F', 'colDec1=DEC', '-F', 'cat2=vizier:Tycho2',
                'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'
            ],
                                  stdout=result)
        except subprocess.CalledProcessError:
            os.remove(posfilename)
            if os.path.exists(resultfilename):
                result.close()
                os.remove(resultfilename)
        result.close()
        # Directly match on input RA
        ma = numpy.loadtxt(resultfilename,
                           delimiter=',',
                           skiprows=1,
                           usecols=(1, 2, 7, 8, 9))
        iis = numpy.arange(len(data))
        mai = [iis[data['RA'] == ma[ii, 0]][0] for ii in range(len(ma))]
        data['TYC2MATCH'][mai] = 1
        data['TYC1'][mai] = ma[:, 2]
        data['TYC2'][mai] = ma[:, 3]
        data['TYC3'][mai] = ma[:, 4]
        os.remove(posfilename)
        os.remove(resultfilename)
    if not options.nostat:
        #Determine statistical sample and add flag
        apo = apogee.select.apogeeSelect()
        statIndx = apo.determine_statistical(data)
        mainIndx = apread.mainIndx(data)
        data = esutil.numpy_util.add_fields(data, [('STAT', numpy.int32),
                                                   ('INVSF', float)])
        data['STAT'] = 0
        data['STAT'][statIndx * mainIndx] = 1
        for ii in range(len(data)):
            if (statIndx * mainIndx)[ii]:
                data['INVSF'][ii] = 1. / apo(data['LOCATION_ID'][ii],
                                             data['H'][ii])
            else:
                data['INVSF'][ii] = -1.
    if options.nopm:
        fitswrite(savefilename, data, clobber=True)
        return None
    data = _add_proper_motions(data, savefilename)
    # Save
    fitswrite(savefilename, data, clobber=True)
    return None
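A brief, hedged recap of the distance bookkeeping in this function: RC_DIST is in kpc, so the distance modulus is 5*log10(d/10 pc) = 5*log10(d_kpc) + 10, and the Galactocentric cylindrical coordinates follow from (GLON, GLAT, RC_DIST) through the same galpy bovy_coords calls with Xsun=8.15 kpc and Zsun=0.0208 kpc. The standalone helper below is only a sketch of that step; its name is illustrative.

import numpy
from galpy.util import bovy_coords

def rc_distance_quantities(glon, glat, dist_kpc):
    """glon, glat in degrees and dist_kpc in kpc, all 1D arrays."""
    dm = 5. * numpy.log10(dist_kpc) + 10.  # distance modulus for d in kpc
    XYZ = bovy_coords.lbd_to_XYZ(glon, glat, dist_kpc, degree=True)
    RphiZ = bovy_coords.XYZ_to_galcencyl(XYZ[:, 0], XYZ[:, 1], XYZ[:, 2],
                                         Xsun=8.15, Zsun=0.0208)
    return dm, RphiZ[:, 0], RphiZ[:, 1], RphiZ[:, 2]

# e.g., a star at 3 kpc has DM = 5*log10(3) + 10 ~ 12.4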