def get_apogee(dr='16', use_astroNN=True):
    """Return the APOGEE allStar file for a data release, with duplicates removed.

    Parameters
    ----------
    dr : str or int
        Data release to select (converted to str internally).
    use_astroNN : bool
        If True, use astroNN abundances where they exist (DR14 and later);
        for earlier releases this silently falls back to ASPCAP abundances.

    Returns
    -------
    allStar structure with one row per unique APOGEE_ID.
    NOTE: numpy.unique sorts, so rows come back ordered by APOGEE_ID,
    keeping the first occurrence of each ID.
    """
    dr = str(dr)
    # Change to relevant data release
    change_dr(dr)
    # astroNN abundances only exist for DR14+; fold the availability check
    # into the flag instead of duplicating the allStar call in two branches.
    use_astroNN = use_astroNN and int(dr) >= 14
    allStar = apread.allStar(rmcommissioning=True, rmdups=False,
                             use_astroNN=use_astroNN)
    # Manually remove duplicates
    apids, inds = np.unique(allStar['APOGEE_ID'], return_index=True)
    return allStar[inds]
def getAllTargets():
    '''
    Return the identifiers of all targets in the DR13 allStar file.

    :return: apogeeIDs, locationIDs -- note the order: APOGEE_ID first,
             LOCATION_ID second (the previous docstring listed them reversed).
    '''
    data = apread.allStar(dr='13')
    return data['APOGEE_ID'], data['LOCATION_ID']
def apogee(**kwargs):
    """
    PURPOSE:
       read the APOGEE allStar file
    INPUT:
       IF the apogee package is not installed:
          dr= (13) SDSS data release
       ELSE you can use the same keywords as apogee.tools.read.allstar:
          rmcommissioning= (default: True) only use post-commissioning data
          main= (default: False) only select stars in the main survey
          exclude_star_bad= (False) remove stars with STAR_BAD set in ASPCAPFLAG
          exclude_star_warn= (False) remove stars with STAR_WARN set in ASPCAPFLAG
          ak= (default: True) only use objects for which dereddened mags exist
          akvers= 'targ' (default) or 'wise': use AK_TARG or AK_WISE
          rmnovisits= (False) remove stars with no good visits
          adddist= (default: False) add distances
          distredux= (default: DR default) reduction the distances are based on
          rmdups= (False) remove duplicates (very slow)
          raw= (False) just return the raw file, read w/ fitsio
    OUTPUT:
       allStar data
    HISTORY:
       2013-09-06 - Written - Bovy (IAS)
    """
    if _APOGEE_LOADED:
        # Full-featured path through the jobovy/apogee package.
        return apread.allStar(**kwargs)
    # Fallback: fetch and read the raw FITS file directly.
    warnings.warn("Falling back on simple APOGEE interface; for more functionality, install the jobovy/apogee package")
    release = kwargs.get('dr', 13)
    local_path = path.apogeePath(dr=release)
    if not os.path.exists(local_path):
        download.apogee(dr=release)
    return fitsio.read(local_path, 1)
def read_caldata(filename='../cldata/aj485195t4_mrt.txt'):
    """Read the cluster calibration table and match it against allStar.

    Renames the table columns to allStar-style names, then for every star
    (Pleiades members are skipped) looks up LOCATION_ID, FE_H and the
    average [alpha/Fe] in the raw allStar file.

    Parameters
    ----------
    filename : str
        Path to the ApJ machine-readable table.

    Returns
    -------
    astropy Table with LOCATION_ID, FE_H and alpha/Fe columns added.

    Raises
    ------
    ValueError
        If a star has no allStar match, or matches more than one location.
    """
    data = astropy.io.ascii.read(filename)
    data.rename_column('Cluster', 'CLUSTER')
    data.remove_column('Teff')
    data.rename_column('TeffC', 'TEFF')
    data.remove_column('logg')
    data.rename_column('loggC', 'LOGG')
    data.remove_column('[M/H]')
    data.rename_column('[M/H]C', 'FEH')
    data.rename_column('2MASS', 'ID')
    # Now match to allStar to get the location_ids, and abundances
    alldata = apread.allStar(raw=True)
    # NOTE(review): numpy.empty leaves entries uninitialized, so skipped
    # Pleiades rows carry garbage values in these columns -- confirm that
    # downstream consumers never read them.
    locids = numpy.empty(len(data), dtype='int')
    fehs = numpy.empty(len(data), dtype='float')
    afes = numpy.empty(len(data), dtype='float')
    for ii in range(len(data)):
        if 'Pleiades' in data['CLUSTER'][ii]:
            continue
        indx = alldata['APOGEE_ID'] == data['ID'][ii]
        if numpy.sum(indx) == 0:
            raise ValueError('allStar match for %s not found ...'
                             % (data['ID'][ii]))
        if len(list(set(alldata['LOCATION_ID'][indx]))) > 1:
            # BUGFIX: removed duplicated word ("for for") in the message.
            raise ValueError('Multiple matches found for %s ...'
                             % (data['ID'][ii]))
        locids[ii] = alldata['LOCATION_ID'][indx][0]
        fehs[ii] = alldata['FE_H'][indx][0]
        afes[ii] = define_rcsample.avg_alphafe(alldata[indx])[0]
    data['LOCATION_ID'] = locids
    data['FE_H'] = fehs
    data[define_rcsample._AFETAG] = afes
    return data
def rgsample(dr='13'):
    """Select a red-giant sample from the APOGEE allStar catalog.

    Parameters
    ----------
    dr : str
        Data release label. NOTE(review): currently unused -- the allStar
        read relies on the package default; confirm whether it should be
        forwarded to apread.allStar (the second rgsample variant does).

    Returns
    -------
    allStar rows passing the giant cuts (deduplicated, main survey,
    no STAR_BAD/STAR_WARN).
    """
    stars = apread.allStar(main=True, exclude_star_bad=True,
                           exclude_star_warn=True, rmdups=True)
    color_jk = stars['J0'] - stars['K0']
    zmetal = isodist.FEH2Z(stars['METALS'], zsolar=0.017)
    # Cap Z at the model grid edge.
    zmetal[zmetal > 0.024] = 0.024
    surface_grav = stars['LOGG']
    # Giant if red enough OR above the logg/Teff boundary (boolean OR via +).
    # (A color-dependent Z cut, +(z < rcmodel.jkzcut(jk-0.1,upper=False)),
    # was present but commented out in the original.)
    giant = ((color_jk >= 0.8)
             + (surface_grav > rcmodel.loggteffcut(stars['TEFF'], zmetal,
                                                   upper=True)))
    keep = giant * (stars['METALS'] > -.8)
    return stars[keep]
def rgsample(dr="13"):
    """Select a red-giant sample from the APOGEE allStar catalog (DR-aware).

    NOTE(review): this redefines the rgsample defined earlier in this file;
    this variant forwards dr and has duplicate removal disabled
    (rmdups=True commented out in the original, presumably for speed).

    Parameters
    ----------
    dr : str
        SDSS data release passed through to apread.allStar.

    Returns
    -------
    allStar rows passing the giant cuts.
    """
    cat = apread.allStar(main=True, exclude_star_bad=True,
                         exclude_star_warn=True, dr=dr)  # rmdups=True omitted
    color = cat["J0"] - cat["K0"]
    zed = isodist.FEH2Z(cat["METALS"], zsolar=0.017)
    # Cap Z at the model grid edge.
    zed[zed > 0.024] = 0.024
    grav = cat["LOGG"]
    # Giant if red enough OR above the logg/Teff boundary (boolean OR via +).
    # (A color-dependent Z cut, +(z < rcmodel.jkzcut(jk-0.1,upper=False)),
    # was present but commented out in the original.)
    is_giant = ((color >= 0.8)
                + (grav > rcmodel.loggteffcut(cat["TEFF"], zed, upper=True)))
    selection = is_giant * (cat["METALS"] > -0.8)
    return cat[selection]
def readAndHackHoltz():
    """Load allStar with v402 Holtzman distances and attach Galactocentric coords.

    Selects red stars (J0 - K0 > 0.8) with a positive DISO_GAL distance,
    then adds RC_GALR / RC_GALPHI / RC_GALZ computed from (l, b, d) with
    the Sun at Xsun = 8.0 kpc, Zsun = 0.025 kpc.

    Returns
    -------
    The selected allStar rows with the three RC_GAL* fields filled in.
    """
    alldata = apread.allStar(adddist=True, distredux="v402")
    color = alldata["J0"] - alldata["K0"]
    sample = alldata[(color > 0.8) * (alldata["DISO_GAL"] > 0.0)]
    # To allow for XY pixelization, we will hack these
    sample = esutil.numpy_util.add_fields(
        sample,
        [("RC_GALR", float), ("RC_GALPHI", float), ("RC_GALZ", float)])
    xyz = bovy_coords.lbd_to_XYZ(sample["GLON"], sample["GLAT"],
                                 sample["DISO_GAL"], degree=True)
    galr, galphi, galz = bovy_coords.XYZ_to_galcencyl(
        xyz[:, 0], xyz[:, 1], xyz[:, 2], Xsun=8.0, Zsun=0.025)
    sample["RC_GALR"] = galr
    sample["RC_GALPHI"] = galphi
    sample["RC_GALZ"] = galz
    return sample
def read_meszarosgcdata(filename=os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        '..', 'data', 'clusterdata', 'aj509073t2_mrt.txt')):
    """
    NAME:
       read_meszarosgcdata
    PURPOSE:
       Read the data on globular clusters from Meszaros et al. (2015)
    INPUT:
       filename= Name of the file that has the ApJ machine-readable table
    OUTPUT:
       data structure with the data (LOCATION_ID, H, FIBERID, APOGEE_ID and
       FE_H columns added from allStar/allVisit matches; Pleiades rows keep
       the -1 sentinel values)
    RAISES:
       ValueError if a star has no allStar match or matches several locations
    HISTORY:
       2015-02-11 - Started - Bovy (IAS@KITP)
       2015-08-13 - Re-written for new data format - Bovy (UofT)
    """
    data = astropy.io.ascii.read(filename)
    data.rename_column('Clust', 'CLUSTER')
    data.rename_column('Teff', 'TEFF')
    data.rename_column('log(g)', 'LOGG')
    data.rename_column('[Fe/H]', 'FEH')
    data.rename_column('2MASS', 'ID')
    # Now match to allStar to get the location_ids and H magnitudes
    alldata = apread.allStar(raw=True)
    locids = numpy.zeros(len(data), dtype='int') - 1
    hmags = numpy.zeros(len(data), dtype='float') - 1
    # and match to allVisit for the fibers that each star was observed in
    allvdata = apread.allVisit(raw=True)
    fibers = numpy.zeros(
        (len(data), numpy.nanmax(alldata['NVISITS'])), dtype='int') - 1
    for ii in range(len(data)):
        if 'Pleiades' in data['CLUSTER'][ii]:
            continue
        indx = alldata['APOGEE_ID'] == data['ID'][ii]
        if numpy.sum(indx) == 0:
            raise ValueError('allStar match for %s not found ...'
                             % (data['ID'][ii]))
        if len(list(set(alldata['LOCATION_ID'][indx]))) > 1:
            # BUGFIX: removed duplicated word ("for for") in the message.
            raise ValueError('Multiple matches found for %s ...'
                             % (data['ID'][ii]))
        locids[ii] = alldata['LOCATION_ID'][indx][0]
        hmags[ii] = alldata['H'][indx][0]
        for jj in range(alldata['NVISITS'][indx][0]):
            fibers[ii, jj] = allvdata[alldata['VISIT_PK'][indx][0, jj]]['FIBERID']
    data['LOCATION_ID'] = locids
    data['H'] = hmags
    data['FIBERID'] = fibers
    data['APOGEE_ID'] = data['ID']
    data['FE_H'] = data['FEH']
    return data
def apogee(xmatch=None, **kwargs):
    """
    PURPOSE:
       read the APOGEE allStar file
    INPUT:
       IF the apogee package is not installed:
          dr= (14) SDSS data release
       ELSE you can use the same keywords as apogee.tools.read.allstar:
          rmcommissioning= (default: True) only use post-commissioning data
          main= (default: False) only select stars in the main survey
          exclude_star_bad= (False) remove stars with STAR_BAD set in ASPCAPFLAG
          exclude_star_warn= (False) remove stars with STAR_WARN set in ASPCAPFLAG
          ak= (default: True) only use objects for which dereddened mags exist
          akvers= 'targ' (default) or 'wise': use AK_TARG or AK_WISE
          rmnovisits= (False) remove stars with no good visits
          adddist= (default: False) add distances
          distredux= (default: DR default) reduction the distances are based on
          rmdups= (False) remove duplicates (very slow)
          raw= (False) just return the raw file, read w/ fitsio
       ALWAYS ALSO
          xmatch= (None) if set, cross-match against a Vizier catalog
          (e.g., vizier:I/345/gaia2 for Gaia DR2) using gaia_tools.xmatch.cds
          and return the overlap; +gaia_tools.xmatch.cds keywords
    OUTPUT:
       allStar data[,xmatched table]
    HISTORY:
       2013-09-06 - Written - Bovy (IAS)
       2018-05-09 - Add xmatch - Bovy (UofT)
    """
    if not _APOGEE_LOADED:
        warnings.warn(
            "Falling back on simple APOGEE interface; for more functionality, install the jobovy/apogee package"
        )
        dr = kwargs.get('dr', 14)
        filePath = path.apogeePath(dr=dr)
        if not os.path.exists(filePath):
            download.apogee(dr=dr)
        data = fitsread(filePath, 1)
        # Idiom fix: 'is not None' instead of 'not ... is None'.
        if xmatch is not None:
            ma, mai = _xmatch_cds(data, xmatch, filePath, **kwargs)
            return (data[mai], ma)
        else:
            return data
    else:
        kwargs['xmatch'] = xmatch
        return apread.allStar(**kwargs)
def read_meszarosgcdata(filename='../clusterdata/aj509073t2_mrt.txt'):
    """
    NAME:
       read_meszarosgcdata
    PURPOSE:
       Read the data on globular clusters from Meszaros et al. (2015)
    INPUT:
       filename= Name of the file that has the ApJ machine-readable table
    OUTPUT:
       data structure with the data (LOCATION_ID, H, FIBERID, APOGEE_ID and
       FE_H columns added from allStar/allVisit matches; Pleiades rows keep
       the -1 sentinel values)
    RAISES:
       ValueError if a star has no allStar match or matches several locations
    HISTORY:
       2015-02-11 - Started - Bovy (IAS@KITP)
       2015-08-13 - Re-written for new data format - Bovy (UofT)
    """
    data= astropy.io.ascii.read(filename)
    data.rename_column('Clust','CLUSTER')
    data.rename_column('Teff','TEFF')
    data.rename_column('log(g)','LOGG')
    data.rename_column('[Fe/H]','FEH')
    data.rename_column('2MASS','ID')
    # Now match to allStar to get the location_ids and H magnitudes
    alldata= apread.allStar(raw=True)
    locids= numpy.zeros(len(data),dtype='int')-1
    hmags= numpy.zeros(len(data),dtype='float')-1
    # and match to allVisit for the fibers that each star was observed in
    allvdata= apread.allVisit(raw=True)
    fibers= numpy.zeros((len(data),numpy.nanmax(alldata['NVISITS'])),
                        dtype='int')-1
    for ii in range(len(data)):
        if 'Pleiades' in data['CLUSTER'][ii]:
            continue
        indx= alldata['APOGEE_ID'] == data['ID'][ii]
        if numpy.sum(indx) == 0:
            raise ValueError('allStar match for %s not found ...'
                             % (data['ID'][ii]))
        if len(list(set(alldata['LOCATION_ID'][indx]))) > 1:
            # BUGFIX: removed duplicated word ("for for") in the message.
            raise ValueError('Multiple matches found for %s ...'
                             % (data['ID'][ii]))
        locids[ii]= alldata['LOCATION_ID'][indx][0]
        hmags[ii]= alldata['H'][indx][0]
        for jj in range(alldata['NVISITS'][indx][0]):
            fibers[ii,jj]= allvdata[alldata['VISIT_PK'][indx][0,jj]]['FIBERID']
    data['LOCATION_ID']= locids
    data['H']= hmags
    data['FIBERID']= fibers
    data['APOGEE_ID'] = data['ID']
    data['FE_H'] = data['FEH']
    return data
def read_apogee_catalog():
    """Read the full allStar catalog plus the fiber used on every visit.

    Returns
    -------
    all_star_catalog : raw allStar structure (commissioning data and
        duplicates included, main-survey cut disabled).
    fibers : int array of shape (nstars, max NVISITS); entries for unused
        visit slots stay at the -1 sentinel.
    """
    all_star_catalog = apread.allStar(rmcommissioning=False, rmdups=False,
                                      main=False, raw=True)
    # Match to allVisit to recover the FIBERID for each star's visits.
    allvdata = apread.allVisit(raw=True)
    nstars = len(all_star_catalog)
    fibers = np.full((nstars, np.nanmax(all_star_catalog['NVISITS'])), -1,
                     dtype='int')
    for star in range(nstars):
        for visit in range(all_star_catalog['NVISITS'][star]):
            visit_pk = all_star_catalog['VISIT_PK'][star][visit]
            fibers[star, visit] = allvdata[visit_pk]['FIBERID']
    return all_star_catalog, fibers
def apogee(**kwargs):
    """
    PURPOSE:
       read the APOGEE allStar file
    INPUT:
       same keywords as apogee.tools.read.allstar:
          rmcommissioning= (default: True) only use post-commissioning data
          main= (default: False) only select stars in the main survey
          exclude_star_bad= (False) remove stars with STAR_BAD set in ASPCAPFLAG
          exclude_star_warn= (False) remove stars with STAR_WARN set in ASPCAPFLAG
          ak= (default: True) only use objects for which dereddened mags exist
          akvers= 'targ' (default) or 'wise': use AK_TARG or AK_WISE
          rmnovisits= (False) remove stars with no good visits
          adddist= (default: False) add distances
          distredux= (default: DR default) reduction the distances are based on
          rmdups= (False) remove duplicates (very slow)
          raw= (False) just return the raw file, read w/ fitsio
    OUTPUT:
       allStar data
    RAISES:
       ImportError when the jobovy/apogee package is not available
    HISTORY:
       2013-09-06 - Written - Bovy (IAS)
    """
    if _APOGEE_LOADED:
        return apread.allStar(**kwargs)
    # No fallback in this variant: the package is a hard requirement.
    raise ImportError("Loading the APOGEE data requires the jobovy/apogee module to be installed")
def make_rcsample(parser):
    """Build the APOGEE red-clump (RC) sample catalog and write it to FITS.

    Pipeline (driven by the OptionParser options):
      1. read the allStar base sample and drop unneeded columns,
      2. apply the RC selection in (J-Ks, Z, logg, Teff) space,
      3. attach RC distances and Galactocentric (R, phi, Z) coordinates,
      4. optionally (options.tyc2) cross-match to Tycho-2 via the CDS
         x-match service,
      5. optionally (not options.nostat) flag the statistical sample and
         store inverse selection-function weights,
      6. unless options.nopm, cross-match to UCAC4 and PPMXL for proper
         motions and compute Galactocentric velocities (assuming R0 = 8 kpc,
         z0 = 25 pc, and the Sgr A* proper motion for the solar motion).

    Parameters
    ----------
    parser : optparse.OptionParser
        Parser providing savefilename, rmdups, loggcut, distfac, tyc2,
        nostat and nopm options.

    Returns
    -------
    None; the catalog (and proper-motion side files) are written to disk.

    Fixes applied in this revision:
      * ``numpy.float`` (removed in NumPy >= 1.24) replaced by ``float``;
      * ``True - boolean_array`` (numpy boolean subtraction, a TypeError in
        modern NumPy) replaced by the equivalent logical NOT ``~``.
    """
    options, args = parser.parse_args()
    savefilename = options.savefilename
    if savefilename is None:
        # Create savefilename if not given
        savefilename = os.path.join(
            appath._APOGEE_DATA,
            'rcsample_' + appath._APOGEE_REDUX + '.fits')
        print("Saving to %s ..." % savefilename)
    # Read the base-sample
    data = apread.allStar(adddist=_ADDHAYDENDIST, rmdups=options.rmdups)
    # Remove a bunch of fields that we do not want to keep
    data = esutil.numpy_util.remove_fields(data, [
        'TARGET_ID', 'FILE', 'AK_WISE', 'SFD_EBV', 'SYNTHVHELIO_AVG',
        'SYNTHVSCATTER', 'SYNTHVERR', 'SYNTHVERR_MED', 'RV_TEFF', 'RV_LOGG',
        'RV_FEH', 'RV_ALPHA', 'RV_CARB', 'RV_CCFWHM', 'RV_AUTOFWHM',
        'SYNTHSCATTER', 'STABLERV_CHI2', 'STABLERV_RCHI2',
        'STABLERV_CHI2_PROB', 'CHI2_THRESHOLD', 'APSTAR_VERSION',
        'ASPCAP_VERSION', 'RESULTS_VERSION', 'WASH_M', 'WASH_M_ERR',
        'WASH_T2', 'WASH_T2_ERR', 'DDO51', 'DDO51_ERR', 'IRAC_3_6',
        'IRAC_3_6_ERR', 'IRAC_4_5', 'IRAC_4_5_ERR', 'IRAC_5_8',
        'IRAC_5_8_ERR', 'IRAC_8_0', 'IRAC_8_0_ERR', 'WISE_4_5',
        'WISE_4_5_ERR', 'TARG_4_5', 'TARG_4_5_ERR',
        'WASH_DDO51_GIANT_FLAG', 'WASH_DDO51_STAR_FLAG', 'REDUCTION_ID',
        'SRC_H', 'PM_SRC'
    ])
    # Old (pre-v500) reductions have no ELEM field to keep.
    if not appath._APOGEE_REDUX.lower() == 'current' \
            and not 'l30' in appath._APOGEE_REDUX \
            and int(appath._APOGEE_REDUX[1:]) < 500:
        data = esutil.numpy_util.remove_fields(data, ['ELEM'])
    # Select red-clump stars
    jk = data['J0'] - data['K0']
    z = isodist.FEH2Z(data['METALS'], zsolar=0.017)
    if 'l30' in appath._APOGEE_REDUX:
        logg = data['LOGG']
    elif appath._APOGEE_REDUX.lower() == 'current' \
            or int(appath._APOGEE_REDUX[1:]) > 600:
        from apogee.tools import paramIndx
        if False:
            # Use my custom logg calibration that's correct for the RC
            logg = (1. - 0.042) * data['FPARAM'][:, paramIndx('logg')] - 0.213
            lowloggindx = data['FPARAM'][:, paramIndx('logg')] < 1.
            logg[lowloggindx] = data['FPARAM'][lowloggindx,
                                               paramIndx('logg')] - 0.255
            hiloggindx = data['FPARAM'][:, paramIndx('logg')] > 3.8
            logg[hiloggindx] = data['FPARAM'][hiloggindx,
                                              paramIndx('logg')] - 0.3726
        else:
            # Use my custom logg calibration that's correct on average
            logg = (1. + 0.03) * data['FPARAM'][:, paramIndx('logg')] - 0.37
            lowloggindx = data['FPARAM'][:, paramIndx('logg')] < 1.
            logg[lowloggindx] = data['FPARAM'][lowloggindx,
                                               paramIndx('logg')] - 0.34
            hiloggindx = data['FPARAM'][:, paramIndx('logg')] > 3.8
            logg[hiloggindx] = data['FPARAM'][hiloggindx,
                                              paramIndx('logg')] - 0.256
    else:
        logg = data['LOGG']
    # RC box in color / metallicity / gravity (boolean AND via *).
    indx = (jk < 0.8)*(jk >= 0.5)\
        *(z <= 0.06)\
        *(z <= rcmodel.jkzcut(jk, upper=True))\
        *(z >= rcmodel.jkzcut(jk))\
        *(logg >= rcmodel.loggteffcut(data['TEFF'], z, upper=False))\
        *(logg <= rcmodel.loggteffcut(data['TEFF'], z, upper=True))
    data = data[indx]
    # Add more aggressive flag cut
    data = esutil.numpy_util.add_fields(data,
                                        [('ADDL_LOGG_CUT', numpy.int32)])
    data['ADDL_LOGG_CUT'] = (
        (data['TEFF'] - 4800.) / 1000. + 2.75) > data['LOGG']
    if options.loggcut:
        data = data[data['ADDL_LOGG_CUT'] == 1]
    print("Making catalog of %i objects ..." % len(data))
    # Add distances
    data = esutil.numpy_util.add_fields(data, [('RC_DIST', float),
                                               ('RC_DM', float),
                                               ('RC_GALR', float),
                                               ('RC_GALPHI', float),
                                               ('RC_GALZ', float)])
    rcd = rcmodel.rcdist()
    jk = data['J0'] - data['K0']
    z = isodist.FEH2Z(data['METALS'], zsolar=0.017)
    data['RC_DIST'] = rcd(jk, z, appmag=data['K0']) * options.distfac
    data['RC_DM'] = 5. * numpy.log10(data['RC_DIST']) + 10.
    XYZ = bovy_coords.lbd_to_XYZ(data['GLON'], data['GLAT'],
                                 data['RC_DIST'], degree=True)
    R, phi, Z = bovy_coords.XYZ_to_galcencyl(XYZ[:, 0], XYZ[:, 1], XYZ[:, 2],
                                             Xsun=8., Zsun=0.025)
    data['RC_GALR'] = R
    data['RC_GALPHI'] = phi
    data['RC_GALZ'] = Z
    # Save
    fitsio.write(savefilename, data, clobber=True)
    # Add Tycho-2 matches
    if options.tyc2:
        data = esutil.numpy_util.add_fields(data,
                                            [('TYC2MATCH', numpy.int32),
                                             ('TYC1', numpy.int32),
                                             ('TYC2', numpy.int32),
                                             ('TYC3', numpy.int32)])
        data['TYC2MATCH'] = 0
        data['TYC1'] = -1
        data['TYC2'] = -1
        data['TYC3'] = -1
        # Write positions
        posfilename = tempfile.mktemp('.csv', dir=os.getcwd())
        resultfilename = tempfile.mktemp('.csv', dir=os.getcwd())
        with open(posfilename, 'w') as csvfile:
            wr = csv.writer(csvfile, delimiter=',',
                            quoting=csv.QUOTE_MINIMAL)
            wr.writerow(['RA', 'DEC'])
            for ii in range(len(data)):
                wr.writerow([data[ii]['RA'], data[ii]['DEC']])
        # Send to CDS for matching
        result = open(resultfilename, 'w')
        try:
            subprocess.check_call([
                'curl', '-X', 'POST', '-F', 'request=xmatch',
                '-F', 'distMaxArcsec=2', '-F', 'RESPONSEFORMAT=csv',
                '-F', 'cat1=@%s' % os.path.basename(posfilename),
                '-F', 'colRA1=RA', '-F', 'colDec1=DEC',
                '-F', 'cat2=vizier:Tycho2',
                'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'
            ], stdout=result)
        except subprocess.CalledProcessError:
            os.remove(posfilename)
            if os.path.exists(resultfilename):
                result.close()
                os.remove(resultfilename)
        result.close()
        # Directly match on input RA
        ma = numpy.loadtxt(resultfilename, delimiter=',', skiprows=1,
                           usecols=(1, 2, 7, 8, 9))
        iis = numpy.arange(len(data))
        mai = [iis[data['RA'] == ma[ii, 0]][0] for ii in range(len(ma))]
        data['TYC2MATCH'][mai] = 1
        data['TYC1'][mai] = ma[:, 2]
        data['TYC2'][mai] = ma[:, 3]
        data['TYC3'][mai] = ma[:, 4]
        os.remove(posfilename)
        os.remove(resultfilename)
    if not options.nostat:
        # Determine statistical sample and add flag
        apo = apogee.select.apogeeSelect()
        statIndx = apo.determine_statistical(data)
        mainIndx = apread.mainIndx(data)
        data = esutil.numpy_util.add_fields(data, [('STAT', numpy.int32),
                                                   ('INVSF', float)])
        data['STAT'] = 0
        data['STAT'][statIndx * mainIndx] = 1
        for ii in range(len(data)):
            if (statIndx * mainIndx)[ii]:
                data['INVSF'][ii] = 1. / apo(data['LOCATION_ID'][ii],
                                             data['H'][ii])
            else:
                data['INVSF'][ii] = -1.
    if options.nopm:
        fitsio.write(savefilename, data, clobber=True)
        return None
    # Get proper motions, in a somewhat roundabout way
    pmfile = savefilename.split('.')[0] + '_pms.fits'
    if os.path.exists(pmfile):
        pmdata = fitsio.read(pmfile, 1)
    else:
        pmdata = numpy.recarray(
            len(data),
            formats=['f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'i4'],
            names=['RA', 'DEC', 'PMRA', 'PMDEC',
                   'PMRA_ERR', 'PMDEC_ERR', 'PMMATCH'])
        # Write positions, again ...
        posfilename = tempfile.mktemp('.csv', dir=os.getcwd())
        resultfilename = tempfile.mktemp('.csv', dir=os.getcwd())
        with open(posfilename, 'w') as csvfile:
            wr = csv.writer(csvfile, delimiter=',',
                            quoting=csv.QUOTE_MINIMAL)
            wr.writerow(['RA', 'DEC'])
            for ii in range(len(data)):
                wr.writerow([data[ii]['RA'], data[ii]['DEC']])
        # Send to CDS for matching
        result = open(resultfilename, 'w')
        try:
            subprocess.check_call([
                'curl', '-X', 'POST', '-F', 'request=xmatch',
                '-F', 'distMaxArcsec=4', '-F', 'RESPONSEFORMAT=csv',
                '-F', 'cat1=@%s' % os.path.basename(posfilename),
                '-F', 'colRA1=RA', '-F', 'colDec1=DEC',
                '-F', 'cat2=vizier:UCAC4',
                'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'
            ], stdout=result)
        except subprocess.CalledProcessError:
            os.remove(posfilename)
            if os.path.exists(resultfilename):
                result.close()
                os.remove(resultfilename)
        result.close()
        # Match back and only keep the closest one
        ma = numpy.loadtxt(resultfilename, delimiter=',', skiprows=1,
                           converters={
                               15: lambda s: float(s.strip() or -9999),
                               16: lambda s: float(s.strip() or -9999),
                               17: lambda s: float(s.strip() or -9999),
                               18: lambda s: float(s.strip() or -9999)
                           },
                           usecols=(4, 5, 15, 16, 17, 18))
        h = esutil.htm.HTM()
        m1, m2, d12 = h.match(data['RA'], data['DEC'], ma[:, 0], ma[:, 1],
                              4. / 3600., maxmatch=1)
        pmdata['PMMATCH'] = 0
        pmdata['RA'] = data['RA']
        pmdata['DEC'] = data['DEC']
        pmdata['PMMATCH'][m1] = 1
        pmdata['PMRA'][m1] = ma[m2, 2]
        pmdata['PMDEC'][m1] = ma[m2, 3]
        pmdata['PMRA_ERR'][m1] = ma[m2, 4]
        pmdata['PMDEC_ERR'][m1] = ma[m2, 5]
        # Unset matches where any PM column carries the -9999 fill value.
        pmdata['PMMATCH'][(pmdata['PMRA'] == -9999)
                          + (pmdata['PMDEC'] == -9999)
                          + (pmdata['PMRA_ERR'] == -9999)
                          + (pmdata['PMDEC_ERR'] == -9999)] = 0
        fitsio.write(pmfile, pmdata, clobber=True)
        # To make sure we're using the same format below
        pmdata = fitsio.read(pmfile, 1)
        os.remove(posfilename)
        os.remove(resultfilename)
    # Match proper motions
    try:  # These already exist currently, but may not always exist
        data = esutil.numpy_util.remove_fields(data, ['PMRA', 'PMDEC'])
    except ValueError:
        pass
    # BUGFIX: numpy.float was removed from NumPy; plain float is equivalent.
    data = esutil.numpy_util.add_fields(data, [('PMRA', float),
                                               ('PMDEC', float),
                                               ('PMRA_ERR', float),
                                               ('PMDEC_ERR', float),
                                               ('PMMATCH', numpy.int32)])
    data['PMMATCH'] = 0
    h = esutil.htm.HTM()
    m1, m2, d12 = h.match(pmdata['RA'], pmdata['DEC'],
                          data['RA'], data['DEC'],
                          2. / 3600., maxmatch=1)
    data['PMRA'][m2] = pmdata['PMRA'][m1]
    data['PMDEC'][m2] = pmdata['PMDEC'][m1]
    data['PMRA_ERR'][m2] = pmdata['PMRA_ERR'][m1]
    data['PMDEC_ERR'][m2] = pmdata['PMDEC_ERR'][m1]
    data['PMMATCH'][m2] = pmdata['PMMATCH'][m1].astype(numpy.int32)
    pmindx = data['PMMATCH'] == 1
    # BUGFIX: 'True - pmindx' (boolean subtraction) raises a TypeError in
    # modern NumPy; ~pmindx is the intended logical NOT.
    data['PMRA'][~pmindx] = -9999.99
    data['PMDEC'][~pmindx] = -9999.99
    data['PMRA_ERR'][~pmindx] = -9999.99
    data['PMDEC_ERR'][~pmindx] = -9999.99
    # Calculate Galactocentric velocities
    data = esutil.numpy_util.add_fields(data, [('GALVR', float),
                                               ('GALVT', float),
                                               ('GALVZ', float)])
    lb = bovy_coords.radec_to_lb(data['RA'], data['DEC'], degree=True)
    XYZ = bovy_coords.lbd_to_XYZ(lb[:, 0], lb[:, 1], data['RC_DIST'],
                                 degree=True)
    pmllpmbb = bovy_coords.pmrapmdec_to_pmllpmbb(data['PMRA'], data['PMDEC'],
                                                 data['RA'], data['DEC'],
                                                 degree=True)
    vxvyvz = bovy_coords.vrpmllpmbb_to_vxvyvz(data['VHELIO_AVG'],
                                              pmllpmbb[:, 0], pmllpmbb[:, 1],
                                              lb[:, 0], lb[:, 1],
                                              data['RC_DIST'], degree=True)
    vR, vT, vZ = bovy_coords.vxvyvz_to_galcencyl(
        vxvyvz[:, 0], vxvyvz[:, 1], vxvyvz[:, 2],
        8. - XYZ[:, 0], XYZ[:, 1], XYZ[:, 2] + 0.025,
        vsun=[-11.1, 30.24 * 8., 7.25])  # Assumes proper motion of Sgr A* and R0=8 kpc, zo= 25 pc
    data['GALVR'] = vR
    data['GALVT'] = vT
    data['GALVZ'] = vZ
    data['GALVR'][~pmindx] = -9999.99
    data['GALVT'][~pmindx] = -9999.99
    data['GALVZ'][~pmindx] = -9999.99
    # Get PPMXL proper motions, in a somewhat roundabout way
    pmfile = savefilename.split('.')[0] + '_pms_ppmxl.fits'
    if os.path.exists(pmfile):
        pmdata = fitsio.read(pmfile, 1)
    else:
        pmdata = numpy.recarray(
            len(data),
            formats=['f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'i4'],
            names=['RA', 'DEC', 'PMRA', 'PMDEC',
                   'PMRA_ERR', 'PMDEC_ERR', 'PMMATCH'])
        # Write positions, again ...
        posfilename = tempfile.mktemp('.csv', dir=os.getcwd())
        resultfilename = tempfile.mktemp('.csv', dir=os.getcwd())
        with open(posfilename, 'w') as csvfile:
            wr = csv.writer(csvfile, delimiter=',',
                            quoting=csv.QUOTE_MINIMAL)
            wr.writerow(['RA', 'DEC'])
            for ii in range(len(data)):
                wr.writerow([data[ii]['RA'], data[ii]['DEC']])
        # Send to CDS for matching
        result = open(resultfilename, 'w')
        try:
            subprocess.check_call([
                'curl', '-X', 'POST', '-F', 'request=xmatch',
                '-F', 'distMaxArcsec=4', '-F', 'RESPONSEFORMAT=csv',
                '-F', 'cat1=@%s' % os.path.basename(posfilename),
                '-F', 'colRA1=RA', '-F', 'colDec1=DEC',
                '-F', 'cat2=vizier:PPMXL',
                'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'
            ], stdout=result)
        except subprocess.CalledProcessError:
            os.remove(posfilename)
            if os.path.exists(resultfilename):
                result.close()
                os.remove(resultfilename)
        result.close()
        # Match back and only keep the closest one
        # NOTE(review): converters cover columns 15-18 but usecols reads
        # 19,20 for the PM errors -- confirm against the PPMXL x-match
        # CSV column layout.
        ma = numpy.loadtxt(resultfilename, delimiter=',', skiprows=1,
                           converters={
                               15: lambda s: float(s.strip() or -9999),
                               16: lambda s: float(s.strip() or -9999),
                               17: lambda s: float(s.strip() or -9999),
                               18: lambda s: float(s.strip() or -9999)
                           },
                           usecols=(4, 5, 15, 16, 19, 20))
        h = esutil.htm.HTM()
        m1, m2, d12 = h.match(data['RA'], data['DEC'], ma[:, 0], ma[:, 1],
                              4. / 3600., maxmatch=1)
        pmdata['PMMATCH'] = 0
        pmdata['RA'] = data['RA']
        pmdata['DEC'] = data['DEC']
        pmdata['PMMATCH'][m1] = 1
        pmdata['PMRA'][m1] = ma[m2, 2]
        pmdata['PMDEC'][m1] = ma[m2, 3]
        pmdata['PMRA_ERR'][m1] = ma[m2, 4]
        pmdata['PMDEC_ERR'][m1] = ma[m2, 5]
        # Unset matches where any PM column carries the -9999 fill value.
        pmdata['PMMATCH'][(pmdata['PMRA'] == -9999)
                          + (pmdata['PMDEC'] == -9999)
                          + (pmdata['PMRA_ERR'] == -9999)
                          + (pmdata['PMDEC_ERR'] == -9999)] = 0
        fitsio.write(pmfile, pmdata, clobber=True)
        # To make sure we're using the same format below
        pmdata = fitsio.read(pmfile, 1)
        os.remove(posfilename)
        os.remove(resultfilename)
    # Match proper motions to ppmxl
    data = esutil.numpy_util.add_fields(data, [('PMRA_PPMXL', float),
                                               ('PMDEC_PPMXL', float),
                                               ('PMRA_ERR_PPMXL', float),
                                               ('PMDEC_ERR_PPMXL', float),
                                               ('PMMATCH_PPMXL', numpy.int32)])
    data['PMMATCH_PPMXL'] = 0
    h = esutil.htm.HTM()
    m1, m2, d12 = h.match(pmdata['RA'], pmdata['DEC'],
                          data['RA'], data['DEC'],
                          2. / 3600., maxmatch=1)
    data['PMRA_PPMXL'][m2] = pmdata['PMRA'][m1]
    data['PMDEC_PPMXL'][m2] = pmdata['PMDEC'][m1]
    data['PMRA_ERR_PPMXL'][m2] = pmdata['PMRA_ERR'][m1]
    data['PMDEC_ERR_PPMXL'][m2] = pmdata['PMDEC_ERR'][m1]
    data['PMMATCH_PPMXL'][m2] = pmdata['PMMATCH'][m1].astype(numpy.int32)
    pmindx = data['PMMATCH_PPMXL'] == 1
    data['PMRA_PPMXL'][~pmindx] = -9999.99
    data['PMDEC_PPMXL'][~pmindx] = -9999.99
    data['PMRA_ERR_PPMXL'][~pmindx] = -9999.99
    data['PMDEC_ERR_PPMXL'][~pmindx] = -9999.99
    # Calculate Galactocentric velocities
    data = esutil.numpy_util.add_fields(data, [('GALVR_PPMXL', float),
                                               ('GALVT_PPMXL', float),
                                               ('GALVZ_PPMXL', float)])
    lb = bovy_coords.radec_to_lb(data['RA'], data['DEC'], degree=True)
    XYZ = bovy_coords.lbd_to_XYZ(lb[:, 0], lb[:, 1], data['RC_DIST'],
                                 degree=True)
    pmllpmbb = bovy_coords.pmrapmdec_to_pmllpmbb(data['PMRA_PPMXL'],
                                                 data['PMDEC_PPMXL'],
                                                 data['RA'], data['DEC'],
                                                 degree=True)
    vxvyvz = bovy_coords.vrpmllpmbb_to_vxvyvz(data['VHELIO_AVG'],
                                              pmllpmbb[:, 0], pmllpmbb[:, 1],
                                              lb[:, 0], lb[:, 1],
                                              data['RC_DIST'], degree=True)
    vR, vT, vZ = bovy_coords.vxvyvz_to_galcencyl(
        vxvyvz[:, 0], vxvyvz[:, 1], vxvyvz[:, 2],
        8. - XYZ[:, 0], XYZ[:, 1], XYZ[:, 2] + 0.025,
        vsun=[-11.1, 30.24 * 8., 7.25])  # Assumes proper motion of Sgr A* and R0=8 kpc, zo= 25 pc
    data['GALVR_PPMXL'] = vR
    data['GALVT_PPMXL'] = vT
    data['GALVZ_PPMXL'] = vZ
    data['GALVR_PPMXL'][~pmindx] = -9999.99
    data['GALVT_PPMXL'][~pmindx] = -9999.99
    data['GALVZ_PPMXL'][~pmindx] = -9999.99
    # Save
    fitsio.write(savefilename, data, clobber=True)
    return None
def make_rcsample(parser):
    """Build the APOGEE red-clump (RC) sample catalog and write it to FITS.

    Reads the allStar base sample, selects RC stars with (J-K)0 color,
    metallicity (Z), and log g / Teff cuts, attaches RC distances and
    Galactocentric coordinates, and optionally adds Tycho-2 matches, the
    statistical-sample flag, and UCAC4/PPMXL proper motions obtained through
    the CDS X-Match service (via curl).

    Parameters
    ----------
    parser : OptionParser
        Command-line parser whose options supply savefilename, rmdups,
        loggcut, distfac, tyc2, nostat, and nopm.

    Returns
    -------
    None
        The catalog is written to ``savefilename`` (FITS, clobbered).
    """
    options,args= parser.parse_args()
    savefilename= options.savefilename
    if savefilename is None:
        #Create savefilename if not given
        savefilename= os.path.join(appath._APOGEE_DATA,
                                   'rcsample_'+appath._APOGEE_REDUX+'.fits')
        print("Saving to %s ..." % savefilename)
    #Read the base-sample
    data= apread.allStar(adddist=_ADDHAYDENDIST,rmdups=options.rmdups)
    #Remove a bunch of fields that we do not want to keep
    data= esutil.numpy_util.remove_fields(data,
                                          ['TARGET_ID',
                                           'FILE',
                                           'AK_WISE',
                                           'SFD_EBV',
                                           'SYNTHVHELIO_AVG',
                                           'SYNTHVSCATTER',
                                           'SYNTHVERR',
                                           'SYNTHVERR_MED',
                                           'RV_TEFF',
                                           'RV_LOGG',
                                           'RV_FEH',
                                           'RV_ALPHA',
                                           'RV_CARB',
                                           'RV_CCFWHM',
                                           'RV_AUTOFWHM',
                                           'SYNTHSCATTER',
                                           'STABLERV_CHI2',
                                           'STABLERV_RCHI2',
                                           'STABLERV_CHI2_PROB',
                                           'CHI2_THRESHOLD',
                                           'APSTAR_VERSION',
                                           'ASPCAP_VERSION',
                                           'RESULTS_VERSION',
                                           'WASH_M',
                                           'WASH_M_ERR',
                                           'WASH_T2',
                                           'WASH_T2_ERR',
                                           'DDO51',
                                           'DDO51_ERR',
                                           'IRAC_3_6',
                                           'IRAC_3_6_ERR',
                                           'IRAC_4_5',
                                           'IRAC_4_5_ERR',
                                           'IRAC_5_8',
                                           'IRAC_5_8_ERR',
                                           'IRAC_8_0',
                                           'IRAC_8_0_ERR',
                                           'WISE_4_5',
                                           'WISE_4_5_ERR',
                                           'TARG_4_5',
                                           'TARG_4_5_ERR',
                                           'WASH_DDO51_GIANT_FLAG',
                                           'WASH_DDO51_STAR_FLAG',
                                           'REDUCTION_ID',
                                           'SRC_H',
                                           'PM_SRC'])
    # ELEM only exists for newer reductions; drop it for older ones
    if not appath._APOGEE_REDUX.lower() == 'current' \
            and not 'l30' in appath._APOGEE_REDUX \
            and int(appath._APOGEE_REDUX[1:]) < 500:
        data= esutil.numpy_util.remove_fields(data,
                                              ['ELEM'])
    #Select red-clump stars
    jk= data['J0']-data['K0']
    z= isodist.FEH2Z(data['METALS'],zsolar=0.017)
    if 'l30' in appath._APOGEE_REDUX:
        logg= data['LOGG']
    elif appath._APOGEE_REDUX.lower() == 'current' \
            or int(appath._APOGEE_REDUX[1:]) > 600:
        from apogee.tools import paramIndx
        if False:
            #Use my custom logg calibration that's correct for the RC
            logg= (1.-0.042)*data['FPARAM'][:,paramIndx('logg')]-0.213
            lowloggindx= data['FPARAM'][:,paramIndx('logg')] < 1.
            logg[lowloggindx]= data['FPARAM'][lowloggindx,paramIndx('logg')]-0.255
            hiloggindx= data['FPARAM'][:,paramIndx('logg')] > 3.8
            logg[hiloggindx]= data['FPARAM'][hiloggindx,paramIndx('logg')]-0.3726
        else:
            #Use my custom logg calibration that's correct on average
            logg= (1.+0.03)*data['FPARAM'][:,paramIndx('logg')]-0.37
            lowloggindx= data['FPARAM'][:,paramIndx('logg')] < 1.
            logg[lowloggindx]= data['FPARAM'][lowloggindx,paramIndx('logg')]-0.34
            hiloggindx= data['FPARAM'][:,paramIndx('logg')] > 3.8
            logg[hiloggindx]= data['FPARAM'][hiloggindx,paramIndx('logg')]-0.256
    else:
        logg= data['LOGG']
    # RC selection box in (J-K)0, Z, and logg-Teff space
    indx= (jk < 0.8)*(jk >= 0.5)\
        *(z <= 0.06)\
        *(z <= rcmodel.jkzcut(jk,upper=True))\
        *(z >= rcmodel.jkzcut(jk))\
        *(logg >= rcmodel.loggteffcut(data['TEFF'],z,upper=False))\
        *(logg <= rcmodel.loggteffcut(data['TEFF'],z,upper=True))
    data= data[indx]
    #Add more aggressive flag cut
    data= esutil.numpy_util.add_fields(data,[('ADDL_LOGG_CUT',numpy.int32)])
    data['ADDL_LOGG_CUT']= ((data['TEFF']-4800.)/1000.+2.75) > data['LOGG']
    if options.loggcut:
        data= data[data['ADDL_LOGG_CUT'] == 1]
    print("Making catalog of %i objects ..." % len(data))
    #Add distances
    data= esutil.numpy_util.add_fields(data,[('RC_DIST', float),
                                             ('RC_DM', float),
                                             ('RC_GALR', float),
                                             ('RC_GALPHI', float),
                                             ('RC_GALZ', float)])
    rcd= rcmodel.rcdist()
    jk= data['J0']-data['K0']
    z= isodist.FEH2Z(data['METALS'],zsolar=0.017)
    data['RC_DIST']= rcd(jk,z,appmag=data['K0'])*options.distfac
    data['RC_DM']= 5.*numpy.log10(data['RC_DIST'])+10.
    XYZ= bovy_coords.lbd_to_XYZ(data['GLON'],
                                data['GLAT'],
                                data['RC_DIST'],
                                degree=True)
    R,phi,Z= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],
                                          XYZ[:,1],
                                          XYZ[:,2],
                                          Xsun=8.,Zsun=0.025)
    data['RC_GALR']= R
    data['RC_GALPHI']= phi
    data['RC_GALZ']= Z
    #Save
    fitsio.write(savefilename,data,clobber=True)
    # Add Tycho-2 matches
    if options.tyc2:
        data= esutil.numpy_util.add_fields(data,[('TYC2MATCH',numpy.int32),
                                                 ('TYC1',numpy.int32),
                                                 ('TYC2',numpy.int32),
                                                 ('TYC3',numpy.int32)])
        data['TYC2MATCH']= 0
        data['TYC1']= -1
        data['TYC2']= -1
        data['TYC3']= -1
        # Write positions
        posfilename= tempfile.mktemp('.csv',dir=os.getcwd())
        resultfilename= tempfile.mktemp('.csv',dir=os.getcwd())
        with open(posfilename,'w') as csvfile:
            wr= csv.writer(csvfile,delimiter=',',quoting=csv.QUOTE_MINIMAL)
            wr.writerow(['RA','DEC'])
            for ii in range(len(data)):
                wr.writerow([data[ii]['RA'],data[ii]['DEC']])
        # Send to CDS for matching
        result= open(resultfilename,'w')
        try:
            subprocess.check_call(['curl',
                                   '-X','POST',
                                   '-F','request=xmatch',
                                   '-F','distMaxArcsec=2',
                                   '-F','RESPONSEFORMAT=csv',
                                   '-F','cat1=@%s' % os.path.basename(posfilename),
                                   '-F','colRA1=RA',
                                   '-F','colDec1=DEC',
                                   '-F','cat2=vizier:Tycho2',
                                   'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'],
                                  stdout=result)
        except subprocess.CalledProcessError:
            os.remove(posfilename)
            if os.path.exists(resultfilename):
                result.close()
                os.remove(resultfilename)
        result.close()
        # Directly match on input RA
        ma= numpy.loadtxt(resultfilename,delimiter=',',skiprows=1,
                          usecols=(1,2,7,8,9))
        iis= numpy.arange(len(data))
        mai= [iis[data['RA'] == ma[ii,0]][0] for ii in range(len(ma))]
        data['TYC2MATCH'][mai]= 1
        data['TYC1'][mai]= ma[:,2]
        data['TYC2'][mai]= ma[:,3]
        data['TYC3'][mai]= ma[:,4]
        os.remove(posfilename)
        os.remove(resultfilename)
    if not options.nostat:
        #Determine statistical sample and add flag
        apo= apogee.select.apogeeSelect()
        statIndx= apo.determine_statistical(data)
        mainIndx= apread.mainIndx(data)
        data= esutil.numpy_util.add_fields(data,[('STAT',numpy.int32),
                                                 ('INVSF',float)])
        data['STAT']= 0
        data['STAT'][statIndx*mainIndx]= 1
        for ii in range(len(data)):
            if (statIndx*mainIndx)[ii]:
                # INVSF = 1/selection fraction for stars in the statistical sample
                data['INVSF'][ii]= 1./apo(data['LOCATION_ID'][ii],
                                          data['H'][ii])
            else:
                data['INVSF'][ii]= -1.
    if options.nopm:
        fitsio.write(savefilename,data,clobber=True)
        return None
    #Get proper motions, in a somewhat roundabout way
    pmfile= savefilename.split('.')[0]+'_pms.fits'
    if os.path.exists(pmfile):
        pmdata= fitsio.read(pmfile,1)
    else:
        pmdata= numpy.recarray(len(data),
                               formats=['f8','f8','f8','f8','f8','f8','i4'],
                               names=['RA','DEC','PMRA','PMDEC',
                                      'PMRA_ERR','PMDEC_ERR','PMMATCH'])
        # Write positions, again ...
        posfilename= tempfile.mktemp('.csv',dir=os.getcwd())
        resultfilename= tempfile.mktemp('.csv',dir=os.getcwd())
        with open(posfilename,'w') as csvfile:
            wr= csv.writer(csvfile,delimiter=',',quoting=csv.QUOTE_MINIMAL)
            wr.writerow(['RA','DEC'])
            for ii in range(len(data)):
                wr.writerow([data[ii]['RA'],data[ii]['DEC']])
        # Send to CDS for matching
        result= open(resultfilename,'w')
        try:
            subprocess.check_call(['curl',
                                   '-X','POST',
                                   '-F','request=xmatch',
                                   '-F','distMaxArcsec=4',
                                   '-F','RESPONSEFORMAT=csv',
                                   '-F','cat1=@%s' % os.path.basename(posfilename),
                                   '-F','colRA1=RA',
                                   '-F','colDec1=DEC',
                                   '-F','cat2=vizier:UCAC4',
                                   'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'],
                                  stdout=result)
        except subprocess.CalledProcessError:
            os.remove(posfilename)
            if os.path.exists(resultfilename):
                result.close()
                os.remove(resultfilename)
        result.close()
        # Match back and only keep the closest one
        ma= numpy.loadtxt(resultfilename,delimiter=',',skiprows=1,
                          converters={15: lambda s: float(s.strip() or -9999),
                                      16: lambda s: float(s.strip() or -9999),
                                      17: lambda s: float(s.strip() or -9999),
                                      18: lambda s: float(s.strip() or -9999)},
                          usecols=(4,5,15,16,17,18))
        h=esutil.htm.HTM()
        m1,m2,d12 = h.match(data['RA'],data['DEC'],
                            ma[:,0],ma[:,1],4./3600.,maxmatch=1)
        pmdata['PMMATCH']= 0
        pmdata['RA']= data['RA']
        pmdata['DEC']= data['DEC']
        pmdata['PMMATCH'][m1]= 1
        pmdata['PMRA'][m1]= ma[m2,2]
        pmdata['PMDEC'][m1]= ma[m2,3]
        pmdata['PMRA_ERR'][m1]= ma[m2,4]
        pmdata['PMDEC_ERR'][m1]= ma[m2,5]
        # Boolean '+' acts as logical OR here: unset matches with any sentinel
        pmdata['PMMATCH'][(pmdata['PMRA'] == -9999) \
                          +(pmdata['PMDEC'] == -9999) \
                          +(pmdata['PMRA_ERR'] == -9999) \
                          +(pmdata['PMDEC_ERR'] == -9999)]= 0
        fitsio.write(pmfile,pmdata,clobber=True)
        #To make sure we're using the same format below
        pmdata= fitsio.read(pmfile,1)
        os.remove(posfilename)
        os.remove(resultfilename)
    #Match proper motions
    try: #These already exist currently, but may not always exist
        data= esutil.numpy_util.remove_fields(data,['PMRA','PMDEC'])
    except ValueError:
        pass
    # numpy.float64 instead of the removed numpy.float alias (NumPy >= 1.24)
    data= esutil.numpy_util.add_fields(data,[('PMRA', numpy.float64),
                                             ('PMDEC', numpy.float64),
                                             ('PMRA_ERR', numpy.float64),
                                             ('PMDEC_ERR', numpy.float64),
                                             ('PMMATCH',numpy.int32)])
    data['PMMATCH']= 0
    h=esutil.htm.HTM()
    m1,m2,d12 = h.match(pmdata['RA'],pmdata['DEC'],
                        data['RA'],data['DEC'],
                        2./3600.,maxmatch=1)
    data['PMRA'][m2]= pmdata['PMRA'][m1]
    data['PMDEC'][m2]= pmdata['PMDEC'][m1]
    data['PMRA_ERR'][m2]= pmdata['PMRA_ERR'][m1]
    data['PMDEC_ERR'][m2]= pmdata['PMDEC_ERR'][m1]
    data['PMMATCH'][m2]= pmdata['PMMATCH'][m1].astype(numpy.int32)
    pmindx= data['PMMATCH'] == 1
    # ~pmindx rather than 'True-pmindx': boolean '-' is a TypeError in modern numpy
    data['PMRA'][~pmindx]= -9999.99
    data['PMDEC'][~pmindx]= -9999.99
    data['PMRA_ERR'][~pmindx]= -9999.99
    data['PMDEC_ERR'][~pmindx]= -9999.99
    #Calculate Galactocentric velocities
    data= esutil.numpy_util.add_fields(data,[('GALVR', numpy.float64),
                                             ('GALVT', numpy.float64),
                                             ('GALVZ', numpy.float64)])
    lb= bovy_coords.radec_to_lb(data['RA'],data['DEC'],degree=True)
    XYZ= bovy_coords.lbd_to_XYZ(lb[:,0],lb[:,1],data['RC_DIST'],degree=True)
    pmllpmbb= bovy_coords.pmrapmdec_to_pmllpmbb(data['PMRA'],data['PMDEC'],
                                                data['RA'],data['DEC'],
                                                degree=True)
    vxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(data['VHELIO_AVG'],
                                             pmllpmbb[:,0],
                                             pmllpmbb[:,1],
                                             lb[:,0],lb[:,1],data['RC_DIST'],
                                             degree=True)
    vR, vT, vZ= bovy_coords.vxvyvz_to_galcencyl(vxvyvz[:,0],
                                                vxvyvz[:,1],
                                                vxvyvz[:,2],
                                                8.-XYZ[:,0],
                                                XYZ[:,1],
                                                XYZ[:,2]+0.025,
                                                vsun=[-11.1,30.24*8.,7.25])#Assumes proper motion of Sgr A* and R0=8 kpc, zo= 25 pc
    data['GALVR']= vR
    data['GALVT']= vT
    data['GALVZ']= vZ
    data['GALVR'][~pmindx]= -9999.99
    data['GALVT'][~pmindx]= -9999.99
    data['GALVZ'][~pmindx]= -9999.99
    #Get PPMXL proper motions, in a somewhat roundabout way
    pmfile= savefilename.split('.')[0]+'_pms_ppmxl.fits'
    if os.path.exists(pmfile):
        pmdata= fitsio.read(pmfile,1)
    else:
        pmdata= numpy.recarray(len(data),
                               formats=['f8','f8','f8','f8','f8','f8','i4'],
                               names=['RA','DEC','PMRA','PMDEC',
                                      'PMRA_ERR','PMDEC_ERR','PMMATCH'])
        # Write positions, again ...
        posfilename= tempfile.mktemp('.csv',dir=os.getcwd())
        resultfilename= tempfile.mktemp('.csv',dir=os.getcwd())
        with open(posfilename,'w') as csvfile:
            wr= csv.writer(csvfile,delimiter=',',quoting=csv.QUOTE_MINIMAL)
            wr.writerow(['RA','DEC'])
            for ii in range(len(data)):
                wr.writerow([data[ii]['RA'],data[ii]['DEC']])
        # Send to CDS for matching
        result= open(resultfilename,'w')
        try:
            subprocess.check_call(['curl',
                                   '-X','POST',
                                   '-F','request=xmatch',
                                   '-F','distMaxArcsec=4',
                                   '-F','RESPONSEFORMAT=csv',
                                   '-F','cat1=@%s' % os.path.basename(posfilename),
                                   '-F','colRA1=RA',
                                   '-F','colDec1=DEC',
                                   '-F','cat2=vizier:PPMXL',
                                   'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'],
                                  stdout=result)
        except subprocess.CalledProcessError:
            os.remove(posfilename)
            if os.path.exists(resultfilename):
                result.close()
                os.remove(resultfilename)
        result.close()
        # Match back and only keep the closest one
        ma= numpy.loadtxt(resultfilename,delimiter=',',skiprows=1,
                          converters={15: lambda s: float(s.strip() or -9999),
                                      16: lambda s: float(s.strip() or -9999),
                                      17: lambda s: float(s.strip() or -9999),
                                      18: lambda s: float(s.strip() or -9999)},
                          usecols=(4,5,15,16,19,20))
        h=esutil.htm.HTM()
        m1,m2,d12 = h.match(data['RA'],data['DEC'],
                            ma[:,0],ma[:,1],4./3600.,maxmatch=1)
        pmdata['PMMATCH']= 0
        pmdata['RA']= data['RA']
        pmdata['DEC']= data['DEC']
        pmdata['PMMATCH'][m1]= 1
        pmdata['PMRA'][m1]= ma[m2,2]
        pmdata['PMDEC'][m1]= ma[m2,3]
        pmdata['PMRA_ERR'][m1]= ma[m2,4]
        pmdata['PMDEC_ERR'][m1]= ma[m2,5]
        pmdata['PMMATCH'][(pmdata['PMRA'] == -9999) \
                          +(pmdata['PMDEC'] == -9999) \
                          +(pmdata['PMRA_ERR'] == -9999) \
                          +(pmdata['PMDEC_ERR'] == -9999)]= 0
        fitsio.write(pmfile,pmdata,clobber=True)
        #To make sure we're using the same format below
        pmdata= fitsio.read(pmfile,1)
        os.remove(posfilename)
        os.remove(resultfilename)
    #Match proper motions to ppmxl
    data= esutil.numpy_util.add_fields(data,[('PMRA_PPMXL', numpy.float64),
                                             ('PMDEC_PPMXL', numpy.float64),
                                             ('PMRA_ERR_PPMXL', numpy.float64),
                                             ('PMDEC_ERR_PPMXL', numpy.float64),
                                             ('PMMATCH_PPMXL',numpy.int32)])
    data['PMMATCH_PPMXL']= 0
    h=esutil.htm.HTM()
    m1,m2,d12 = h.match(pmdata['RA'],pmdata['DEC'],
                        data['RA'],data['DEC'],
                        2./3600.,maxmatch=1)
    data['PMRA_PPMXL'][m2]= pmdata['PMRA'][m1]
    data['PMDEC_PPMXL'][m2]= pmdata['PMDEC'][m1]
    data['PMRA_ERR_PPMXL'][m2]= pmdata['PMRA_ERR'][m1]
    data['PMDEC_ERR_PPMXL'][m2]= pmdata['PMDEC_ERR'][m1]
    data['PMMATCH_PPMXL'][m2]= pmdata['PMMATCH'][m1].astype(numpy.int32)
    pmindx= data['PMMATCH_PPMXL'] == 1
    data['PMRA_PPMXL'][~pmindx]= -9999.99
    data['PMDEC_PPMXL'][~pmindx]= -9999.99
    data['PMRA_ERR_PPMXL'][~pmindx]= -9999.99
    data['PMDEC_ERR_PPMXL'][~pmindx]= -9999.99
    #Calculate Galactocentric velocities
    data= esutil.numpy_util.add_fields(data,[('GALVR_PPMXL', numpy.float64),
                                             ('GALVT_PPMXL', numpy.float64),
                                             ('GALVZ_PPMXL', numpy.float64)])
    lb= bovy_coords.radec_to_lb(data['RA'],data['DEC'],degree=True)
    XYZ= bovy_coords.lbd_to_XYZ(lb[:,0],lb[:,1],data['RC_DIST'],degree=True)
    pmllpmbb= bovy_coords.pmrapmdec_to_pmllpmbb(data['PMRA_PPMXL'],
                                                data['PMDEC_PPMXL'],
                                                data['RA'],data['DEC'],
                                                degree=True)
    vxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(data['VHELIO_AVG'],
                                             pmllpmbb[:,0],
                                             pmllpmbb[:,1],
                                             lb[:,0],lb[:,1],data['RC_DIST'],
                                             degree=True)
    vR, vT, vZ= bovy_coords.vxvyvz_to_galcencyl(vxvyvz[:,0],
                                                vxvyvz[:,1],
                                                vxvyvz[:,2],
                                                8.-XYZ[:,0],
                                                XYZ[:,1],
                                                XYZ[:,2]+0.025,
                                                vsun=[-11.1,30.24*8.,7.25])#Assumes proper motion of Sgr A* and R0=8 kpc, zo= 25 pc
    data['GALVR_PPMXL']= vR
    data['GALVT_PPMXL']= vT
    data['GALVZ_PPMXL']= vZ
    data['GALVR_PPMXL'][~pmindx]= -9999.99
    data['GALVT_PPMXL'][~pmindx]= -9999.99
    data['GALVZ_PPMXL'][~pmindx]= -9999.99
    #Save
    fitsio.write(savefilename,data,clobber=True)
    return None
def read_caldata(filename='../clusterdata/aj485195t4_mrt.txt',dr='13'):
    """
    NAME:
       read_caldata
    PURPOSE:
       Read the data on calibration clusters from Meszaros et al. (2013)
    INPUT:
       filename= Name of the file that has the ApJ machine-readable table
       dr= data release to match against (abundance columns are relative
           to Fe for DR13 and relative to H otherwise)
    OUTPUT:
       data structure with the data, augmented with allStar positions,
       LOCATION_IDs, S/N, fibers, and abundances
    HISTORY:
       2015-02-11 - Written - Bovy (IAS@KITP)
    """
    data= astropy.io.ascii.read(filename)
    data.rename_column('Cluster','CLUSTER')
    # Keep the corrected ('C') columns under the standard APOGEE names
    data.remove_column('Teff')
    data.rename_column('TeffC','TEFF')
    data.remove_column('logg')
    data.rename_column('loggC','LOGG')
    data.remove_column('[M/H]')
    data.rename_column('[M/H]C','FEH')
    data.rename_column('2MASS','ID')
    # Now match to allStar to get the location_ids
    alldata= apread.allStar(raw=True,dr=dr)
    # -1 sentinels for stars that are never filled in
    locids= numpy.zeros(len(data),dtype='int')-1
    hmags= numpy.zeros(len(data),dtype='float')-1
    snrs = numpy.zeros(len(data),dtype='float')-1
    ras= numpy.zeros(len(data),dtype='float')-1
    decs= numpy.zeros(len(data),dtype='float')-1
    # and match to allVisit for the fibers that each star was observed in
    allvdata= apread.allVisit(raw=True,dr=dr)
    fibers= numpy.zeros((len(data),numpy.nanmax(alldata['NVISITS'])),
                        dtype='int')-1
    inds = []
    for ii in range(len(data)):
        # Pleiades stars are skipped entirely (index 0 placeholder)
        if 'Pleiades' in data['CLUSTER'][ii]:
            inds.append(0)
            continue
        indx= alldata['APOGEE_ID'] == data['ID'][ii]
        success = numpy.where(indx==True)[0]
        # index 0 is also used as a placeholder for ambiguous/missing matches
        if success.size==0 or success.size>1:
            inds.append(0)
        elif success.size==1:
            inds.append(success[0])
        if numpy.sum(indx) == 0:
            raise ValueError('allStar match for %s not found ...' % (data['ID'][ii]))
        if len(list(set(alldata['LOCATION_ID'][indx]))) > 1:
            # fixed typo in message ('for for' -> 'for')
            raise ValueError('Multiple matches found for %s ...' % (data['ID'][ii]))
        locids[ii]= alldata['LOCATION_ID'][indx][0]
        hmags[ii]= alldata['H'][indx][0]
        snrs[ii] = alldata['SNR'][indx][0]
        ras[ii] = alldata['RA'][indx][0]
        decs[ii] = alldata['DEC'][indx][0]
        for jj in range(alldata['NVISITS'][indx][0]):
            fibers[ii,jj]= allvdata[alldata['VISIT_PK'][indx][0,jj]]['FIBERID']
    inds = (numpy.array(inds),)
    data['LOCATION_ID']= locids
    data['H']= hmags
    data['FIBERID']= fibers
    data['SNR'] = snrs
    data['APOGEE_ID'] = data['ID']
    data['RA'] = ras
    data['DEC'] = decs
    data['index'] = inds[0]
    data['M_H'] = data['FEH']
    data['FE_H'] = alldata['FE_H'][inds]
    # DR13 tabulates element abundances relative to Fe; later DRs relative to H
    if dr == '13':
        rel = 'FE'
    else:
        rel = 'H'
    data['C_{0}'.format(rel)] = alldata['C_{0}'.format(rel)][inds]
    data['N_{0}'.format(rel)] = alldata['N_{0}'.format(rel)][inds]
    data['O_{0}'.format(rel)] = alldata['O_{0}'.format(rel)][inds]
    data['NA_{0}'.format(rel)] = alldata['NA_{0}'.format(rel)][inds]
    data['MG_{0}'.format(rel)] = alldata['MG_{0}'.format(rel)][inds]
    data['AL_{0}'.format(rel)] = alldata['AL_{0}'.format(rel)][inds]
    data['SI_{0}'.format(rel)] = alldata['SI_{0}'.format(rel)][inds]
    data['S_{0}'.format(rel)] = alldata['S_{0}'.format(rel)][inds]
    data['K_{0}'.format(rel)] = alldata['K_{0}'.format(rel)][inds]
    data['CA_{0}'.format(rel)] = alldata['CA_{0}'.format(rel)][inds]
    data['TI_{0}'.format(rel)] = alldata['TI_{0}'.format(rel)][inds]
    data['V_{0}'.format(rel)] = alldata['V_{0}'.format(rel)][inds]
    data['MN_{0}'.format(rel)] = alldata['MN_{0}'.format(rel)][inds]
    data['NI_{0}'.format(rel)] = alldata['NI_{0}'.format(rel)][inds]
    return data
def make_rcsample(parser):
    """Build the APOGEE red-clump (RC) sample catalog (astroquery variant).

    NOTE(review): this is a second, older definition of make_rcsample in the
    same module; being defined later, it shadows the earlier (CDS/curl)
    version at import time. Confirm which one is intended to be live.

    Same overall pipeline as the other version: select RC stars from allStar,
    attach RC distances and Galactocentric coordinates, flag the statistical
    sample, and add UCAC4 and PPMXL proper motions — here queried star by star
    through astroquery's Vizier interface instead of the CDS X-Match service.

    Parameters
    ----------
    parser : OptionParser
        Command-line parser whose options supply savefilename, rmdups,
        loggcut, distfac, nostat, and nopm.

    Returns
    -------
    None
        The catalog is written to ``savefilename`` (FITS, clobbered).
    """
    options,args= parser.parse_args()
    savefilename= options.savefilename
    if savefilename is None:
        #Create savefilename if not given
        savefilename= os.path.join(appath._APOGEE_DATA,
                                   'rcsample_'+appath._APOGEE_REDUX+'.fits')
        print("Saving to %s ..." % savefilename)
    #Read the base-sample
    data= apread.allStar(adddist=_ADDHAYDENDIST,rmdups=options.rmdups)
    #Remove a bunch of fields that we do not want to keep
    data= esutil.numpy_util.remove_fields(data,
                                          ['TARGET_ID',
                                           'FILE',
                                           'AK_WISE',
                                           'SFD_EBV',
                                           'SYNTHVHELIO_AVG',
                                           'SYNTHVSCATTER',
                                           'SYNTHVERR',
                                           'SYNTHVERR_MED',
                                           'RV_TEFF',
                                           'RV_LOGG',
                                           'RV_FEH',
                                           'RV_CCFWHM',
                                           'RV_AUTOFWHM',
                                           'SYNTHSCATTER',
                                           'CHI2_THRESHOLD',
                                           'APSTAR_VERSION',
                                           'ASPCAP_VERSION',
                                           'RESULTS_VERSION',
                                           'REDUCTION_ID',
                                           'SRC_H',
                                           'PM_SRC'])
    # ELEM only exists for newer reductions; drop it for older ones
    if not appath._APOGEE_REDUX.lower() == 'current' \
            and int(appath._APOGEE_REDUX[1:]) < 500:
        data= esutil.numpy_util.remove_fields(data,
                                              ['ELEM'])
    #Select red-clump stars
    jk= data['J0']-data['K0']
    z= isodist.FEH2Z(data['METALS'],zsolar=0.017)
    if appath._APOGEE_REDUX.lower() == 'current' \
            or int(appath._APOGEE_REDUX[1:]) > 600:
        from apogee.tools import paramIndx
        if False:
            #Use my custom logg calibration that's correct for the RC
            logg= (1.-0.042)*data['FPARAM'][:,paramIndx('logg')]-0.213
            lowloggindx= data['FPARAM'][:,paramIndx('logg')] < 1.
            logg[lowloggindx]= data['FPARAM'][lowloggindx,paramIndx('logg')]-0.255
            hiloggindx= data['FPARAM'][:,paramIndx('logg')] > 3.8
            logg[hiloggindx]= data['FPARAM'][hiloggindx,paramIndx('logg')]-0.3726
        else:
            #Use my custom logg calibration that's correct on average
            logg= (1.+0.03)*data['FPARAM'][:,paramIndx('logg')]-0.37
            lowloggindx= data['FPARAM'][:,paramIndx('logg')] < 1.
            logg[lowloggindx]= data['FPARAM'][lowloggindx,paramIndx('logg')]-0.34
            hiloggindx= data['FPARAM'][:,paramIndx('logg')] > 3.8
            logg[hiloggindx]= data['FPARAM'][hiloggindx,paramIndx('logg')]-0.256
    else:
        logg= data['LOGG']
    # RC selection box in (J-K)0, Z, and logg-Teff space
    indx= (jk < 0.8)*(jk >= 0.5)\
        *(z <= 0.06)\
        *(z <= rcmodel.jkzcut(jk,upper=True))\
        *(z >= rcmodel.jkzcut(jk))\
        *(logg >= rcmodel.loggteffcut(data['TEFF'],z,upper=False))\
        *(logg <= rcmodel.loggteffcut(data['TEFF'],z,upper=True))
    data= data[indx]
    #Add more aggressive flag cut
    data= esutil.numpy_util.add_fields(data,[('ADDL_LOGG_CUT',numpy.int32)])
    data['ADDL_LOGG_CUT']= ((data['TEFF']-4800.)/1000.+2.75) > data['LOGG']
    if options.loggcut:
        data= data[data['ADDL_LOGG_CUT'] == 1]
    print("Making catalog of %i objects ..." % len(data))
    #Add distances
    data= esutil.numpy_util.add_fields(data,[('RC_DIST', float),
                                             ('RC_DM', float),
                                             ('RC_GALR', float),
                                             ('RC_GALPHI', float),
                                             ('RC_GALZ', float)])
    rcd= rcmodel.rcdist()
    jk= data['J0']-data['K0']
    z= isodist.FEH2Z(data['METALS'],zsolar=0.017)
    data['RC_DIST']= rcd(jk,z,appmag=data['K0'])*options.distfac
    data['RC_DM']= 5.*numpy.log10(data['RC_DIST'])+10.
    XYZ= bovy_coords.lbd_to_XYZ(data['GLON'],
                                data['GLAT'],
                                data['RC_DIST'],
                                degree=True)
    R,phi,Z= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],
                                          XYZ[:,1],
                                          XYZ[:,2],
                                          Xsun=8.,Zsun=0.025)
    data['RC_GALR']= R
    data['RC_GALPHI']= phi
    data['RC_GALZ']= Z
    #Save
    fitsio.write(savefilename,data,clobber=True)
    if not options.nostat:
        #Determine statistical sample and add flag
        apo= apogee.select.apogeeSelect()
        statIndx= apo.determine_statistical(data)
        mainIndx= apread.mainIndx(data)
        data= esutil.numpy_util.add_fields(data,[('STAT',numpy.int32),
                                                 ('INVSF',float)])
        data['STAT']= 0
        data['STAT'][statIndx*mainIndx]= 1
        for ii in range(len(data)):
            if (statIndx*mainIndx)[ii]:
                data['INVSF'][ii]= 1./apo(data['LOCATION_ID'][ii],
                                          data['H'][ii])
            else:
                data['INVSF'][ii]= -1.
    if options.nopm:
        fitsio.write(savefilename,data,clobber=True)
        return None
    #Get proper motions
    from astroquery.vizier import Vizier
    import astroquery
    from astropy import units as u
    import astropy.coordinates as coord
    pmfile= savefilename.split('.')[0]+'_pms.fits'
    if os.path.exists(pmfile):
        pmdata= fitsio.read(pmfile,1)
    else:
        pmdata= numpy.recarray(len(data),
                               formats=['f8','f8','f8','f8','f8','f8','i4'],
                               names=['RA','DEC','PMRA','PMDEC',
                                      'PMRA_ERR','PMDEC_ERR','PMMATCH'])
        rad= u.Quantity(4./3600.,u.degree)
        v= Vizier(columns=['RAJ2000','DEJ2000','pmRA','pmDE','e_pmRA','e_pmDE'])
        for ii in range(len(data)):
            #if ii > 100: break
            sys.stdout.write('\r'+"Getting pm data for point %i / %i" % (ii+1,len(data)))
            sys.stdout.flush()
            pmdata.RA[ii]= data['RA'][ii]
            pmdata.DEC[ii]= data['DEC'][ii]
            co= coord.ICRS(ra=data['RA'][ii],
                           dec=data['DEC'][ii],
                           unit=(u.degree, u.degree))
            # Retry on timeouts until the query succeeds
            trying= True
            while trying:
                try:
                    tab= v.query_region(co,rad,catalog='I/322') #UCAC-4 catalog
                except astroquery.exceptions.TimeoutError:
                    pass
                else:
                    trying= False
            if len(tab) == 0:
                pmdata.PMMATCH[ii]= 0
                print("Didn't find a match for %i ..." % ii)
                continue
            else:
                pmdata.PMMATCH[ii]= len(tab)
                if len(tab[0]['pmRA']) > 1:
                    print("Found more than 1 match for %i ..." % ii)
            try:
                pmdata.PMRA[ii]= float(tab[0]['pmRA'])
            except TypeError:
                # More than one match: shrink the search radius until unique
                jj= 1
                while len(tab[0]['pmRA']) > 1 and jj < 4:
                    trad= u.Quantity((4.-jj)/3600.,u.degree)
                    trying= True
                    while trying:
                        try:
                            tab= v.query_region(co,trad,catalog='I/322') #UCAC-4 catalog
                        except astroquery.exceptions.TimeoutError:
                            pass
                        else:
                            trying= False
                    jj+= 1
                if len(tab) == 0:
                    pmdata.PMMATCH[ii]= 0
                    print("Didn't find a unambiguous match for %i ..." % ii)
                    continue
                pmdata.PMRA[ii]= float(tab[0]['pmRA'])
            pmdata.PMDEC[ii]= float(tab[0]['pmDE'])
            pmdata.PMRA_ERR[ii]= float(tab[0]['e_pmRA'])
            pmdata.PMDEC_ERR[ii]= float(tab[0]['e_pmDE'])
            if numpy.isnan(float(tab[0]['pmRA'])): pmdata.PMMATCH[ii]= 0
        sys.stdout.write('\r'+_ERASESTR+'\r')
        sys.stdout.flush()
        fitsio.write(pmfile,pmdata,clobber=True)
        #To make sure we're using the same format below
        pmdata= fitsio.read(pmfile,1)
    #Match proper motions
    try: #These already exist currently, but may not always exist
        data= esutil.numpy_util.remove_fields(data,['PMRA','PMDEC'])
    except ValueError:
        pass
    # numpy.float64 instead of the removed numpy.float alias (NumPy >= 1.24)
    data= esutil.numpy_util.add_fields(data,[('PMRA', numpy.float64),
                                             ('PMDEC', numpy.float64),
                                             ('PMRA_ERR', numpy.float64),
                                             ('PMDEC_ERR', numpy.float64),
                                             ('PMMATCH',numpy.int32)])
    data['PMMATCH']= 0
    h=esutil.htm.HTM()
    m1,m2,d12 = h.match(pmdata['RA'],pmdata['DEC'],
                        data['RA'],data['DEC'],
                        2./3600.,maxmatch=1)
    data['PMRA'][m2]= pmdata['PMRA'][m1]
    data['PMDEC'][m2]= pmdata['PMDEC'][m1]
    data['PMRA_ERR'][m2]= pmdata['PMRA_ERR'][m1]
    data['PMDEC_ERR'][m2]= pmdata['PMDEC_ERR'][m1]
    data['PMMATCH'][m2]= pmdata['PMMATCH'][m1].astype(numpy.int32)
    pmindx= data['PMMATCH'] == 1
    # ~pmindx rather than 'True-pmindx': boolean '-' is a TypeError in modern numpy
    data['PMRA'][~pmindx]= -9999.99
    data['PMDEC'][~pmindx]= -9999.99
    data['PMRA_ERR'][~pmindx]= -9999.99
    data['PMDEC_ERR'][~pmindx]= -9999.99
    #Calculate Galactocentric velocities
    data= esutil.numpy_util.add_fields(data,[('GALVR', numpy.float64),
                                             ('GALVT', numpy.float64),
                                             ('GALVZ', numpy.float64)])
    lb= bovy_coords.radec_to_lb(data['RA'],data['DEC'],degree=True)
    XYZ= bovy_coords.lbd_to_XYZ(lb[:,0],lb[:,1],data['RC_DIST'],degree=True)
    pmllpmbb= bovy_coords.pmrapmdec_to_pmllpmbb(data['PMRA'],data['PMDEC'],
                                                data['RA'],data['DEC'],
                                                degree=True)
    vxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(data['VHELIO_AVG'],
                                             pmllpmbb[:,0],
                                             pmllpmbb[:,1],
                                             lb[:,0],lb[:,1],data['RC_DIST'],
                                             degree=True)
    vR, vT, vZ= bovy_coords.vxvyvz_to_galcencyl(vxvyvz[:,0],
                                                vxvyvz[:,1],
                                                vxvyvz[:,2],
                                                8.-XYZ[:,0],
                                                XYZ[:,1],
                                                XYZ[:,2]+0.025,
                                                vsun=[-11.1,30.24*8.,7.25])#Assumes proper motion of Sgr A* and R0=8 kpc, zo= 25 pc
    data['GALVR']= vR
    data['GALVT']= vT
    data['GALVZ']= vZ
    data['GALVR'][~pmindx]= -9999.99
    data['GALVT'][~pmindx]= -9999.99
    data['GALVZ'][~pmindx]= -9999.99
    #Get proper motions
    pmfile= savefilename.split('.')[0]+'_pms_ppmxl.fits'
    if os.path.exists(pmfile):
        pmdata= fitsio.read(pmfile,1)
    else:
        pmdata= numpy.recarray(len(data),
                               formats=['f8','f8','f8','f8','f8','f8','i4'],
                               names=['RA','DEC','PMRA','PMDEC',
                                      'PMRA_ERR','PMDEC_ERR','PMMATCH'])
        rad= u.Quantity(4./3600.,u.degree)
        v= Vizier(columns=['RAJ2000','DEJ2000','pmRA','pmDE','e_pmRA','e_pmDE'])
        for ii in range(len(data)):
            #if ii > 100: break
            sys.stdout.write('\r'+"Getting pm data for point %i / %i" % (ii+1,len(data)))
            sys.stdout.flush()
            pmdata.RA[ii]= data['RA'][ii]
            pmdata.DEC[ii]= data['DEC'][ii]
            co= coord.ICRS(ra=data['RA'][ii],
                           dec=data['DEC'][ii],
                           unit=(u.degree, u.degree))
            trying= True
            while trying:
                try:
                    tab= v.query_region(co,rad,catalog='I/317') #PPMXL catalog
                except astroquery.exceptions.TimeoutError:
                    pass
                else:
                    trying= False
            if len(tab) == 0:
                pmdata.PMMATCH[ii]= 0
                print("Didn't find a match for %i ..." % ii)
                continue
            else:
                pmdata.PMMATCH[ii]= len(tab)
                if len(tab[0]['pmRA']) > 1:
                    pass
                    #print "Found more than 1 match for %i ..." % ii
            try:
                pmdata.PMRA[ii]= float(tab[0]['pmRA'])
            except TypeError:
                #Find nearest
                cosdists= numpy.zeros(len(tab[0]['pmRA']))
                for jj in range(len(tab[0]['pmRA'])):
                    cosdists[jj]= cos_sphere_dist(tab[0]['RAJ2000'][jj],
                                                  tab[0]['DEJ2000'][jj],
                                                  data['RA'][ii],
                                                  data['DEC'][ii])
                closest= numpy.argmax(cosdists)
                pmdata.PMRA[ii]= float(tab[0]['pmRA'][closest])
                pmdata.PMDEC[ii]= float(tab[0]['pmDE'][closest])
                pmdata.PMRA_ERR[ii]= float(tab[0]['e_pmRA'][closest])
                pmdata.PMDEC_ERR[ii]= float(tab[0]['e_pmDE'][closest])
                if numpy.isnan(float(tab[0]['pmRA'][closest])): pmdata.PMMATCH[ii]= 0
            else:
                pmdata.PMDEC[ii]= float(tab[0]['pmDE'])
                pmdata.PMRA_ERR[ii]= float(tab[0]['e_pmRA'])
                pmdata.PMDEC_ERR[ii]= float(tab[0]['e_pmDE'])
                if numpy.isnan(float(tab[0]['pmRA'])): pmdata.PMMATCH[ii]= 0
        sys.stdout.write('\r'+_ERASESTR+'\r')
        sys.stdout.flush()
        fitsio.write(pmfile,pmdata,clobber=True)
        #To make sure we're using the same format below
        pmdata= fitsio.read(pmfile,1)
    #Match proper motions to ppmxl
    data= esutil.numpy_util.add_fields(data,[('PMRA_PPMXL', numpy.float64),
                                             ('PMDEC_PPMXL', numpy.float64),
                                             ('PMRA_ERR_PPMXL', numpy.float64),
                                             ('PMDEC_ERR_PPMXL', numpy.float64),
                                             ('PMMATCH_PPMXL',numpy.int32)])
    data['PMMATCH_PPMXL']= 0
    h=esutil.htm.HTM()
    m1,m2,d12 = h.match(pmdata['RA'],pmdata['DEC'],
                        data['RA'],data['DEC'],
                        2./3600.,maxmatch=1)
    data['PMRA_PPMXL'][m2]= pmdata['PMRA'][m1]
    data['PMDEC_PPMXL'][m2]= pmdata['PMDEC'][m1]
    data['PMRA_ERR_PPMXL'][m2]= pmdata['PMRA_ERR'][m1]
    data['PMDEC_ERR_PPMXL'][m2]= pmdata['PMDEC_ERR'][m1]
    data['PMMATCH_PPMXL'][m2]= pmdata['PMMATCH'][m1].astype(numpy.int32)
    pmindx= data['PMMATCH_PPMXL'] == 1
    data['PMRA_PPMXL'][~pmindx]= -9999.99
    data['PMDEC_PPMXL'][~pmindx]= -9999.99
    data['PMRA_ERR_PPMXL'][~pmindx]= -9999.99
    data['PMDEC_ERR_PPMXL'][~pmindx]= -9999.99
    #Calculate Galactocentric velocities
    data= esutil.numpy_util.add_fields(data,[('GALVR_PPMXL', numpy.float64),
                                             ('GALVT_PPMXL', numpy.float64),
                                             ('GALVZ_PPMXL', numpy.float64)])
    lb= bovy_coords.radec_to_lb(data['RA'],data['DEC'],degree=True)
    XYZ= bovy_coords.lbd_to_XYZ(lb[:,0],lb[:,1],data['RC_DIST'],degree=True)
    pmllpmbb= bovy_coords.pmrapmdec_to_pmllpmbb(data['PMRA_PPMXL'],
                                                data['PMDEC_PPMXL'],
                                                data['RA'],data['DEC'],
                                                degree=True)
    vxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(data['VHELIO_AVG'],
                                             pmllpmbb[:,0],
                                             pmllpmbb[:,1],
                                             lb[:,0],lb[:,1],data['RC_DIST'],
                                             degree=True)
    vR, vT, vZ= bovy_coords.vxvyvz_to_galcencyl(vxvyvz[:,0],
                                                vxvyvz[:,1],
                                                vxvyvz[:,2],
                                                8.-XYZ[:,0],
                                                XYZ[:,1],
                                                XYZ[:,2]+0.025,
                                                vsun=[-11.1,30.24*8.,7.25])#Assumes proper motion of Sgr A* and R0=8 kpc, zo= 25 pc
    data['GALVR_PPMXL']= vR
    data['GALVT_PPMXL']= vT
    data['GALVZ_PPMXL']= vZ
    data['GALVR_PPMXL'][~pmindx]= -9999.99
    data['GALVT_PPMXL'][~pmindx]= -9999.99
    data['GALVZ_PPMXL'][~pmindx]= -9999.99
    #Save
    fitsio.write(savefilename,data,clobber=True)
    return None
def get_rgbsample(loggcut=[1.8, 3.0],
                  teffcut=[0, 10000],
                  add_ages=False,
                  agetype='Martig',
                  apply_corrections=False,
                  distance_correction=False,
                  verbose=False):
    """
    Get a clean sample of dr12 APOGEE data with Michael Haydens distances
    ---
    INPUT:
        loggcut - [min, max] log(g) range to keep (giants)
        teffcut - [min, max] Teff range to keep
        add_ages - join an age catalogue ('Martig' or 'Cannon', see agetype)
        agetype - which age catalogue to use when add_ages is True
        apply_corrections - apply the Martig et al. (2016) age correction
        distance_correction - multiply Hayden distances by 1.05
        verbose - print sample sizes at each step
    OUTPUT:
        Clean rgb sample with added distances
    HISTORY:
        Started - Mackereth 02/06/16
    """
    #get the allStar catalogue using apogee python (exlude all bad flags etc)
    allStar = apread.allStar(rmcommissioning=True,
                             exclude_star_bad=True,
                             exclude_star_warn=True,
                             main=True,
                             ak=True,
                             adddist=False)
    #cut to a 'sensible' logg range (giants which are not too high on the RGB)
    allStar = allStar[(allStar['LOGG'] > loggcut[0]) &
                      (allStar['LOGG'] < loggcut[1]) &
                      (allStar['TEFF'] > teffcut[0]) &
                      (allStar['TEFF'] < teffcut[1])]
    if verbose == True:
        print(str(
            len(allStar
                )) + ' Stars before Distance catalogue join (after Log(g) cut)')
    #load the distance VAC
    dists = fits.open(catpath + 'DR12_DIST_R-GC.fits')[1].data
    #convert to astropy Table
    allStar_tab = Table(data=allStar)
    dists_tab = Table(data=dists)
    #join table
    tab = join(allStar_tab,
               dists_tab,
               keys='APOGEE_ID',
               uniq_col_name='{col_name}{table_name}',
               table_names=['', '2'])
    data = tab.as_array()
    data = esutil.numpy_util.add_fields(data, [('M_J', float),
                                               ('M_H', float),
                                               ('M_K', float),
                                               ('MH50_DIST', float),
                                               ('MH50_GALR', float),
                                               ('MH50_GALZ', float),
                                               ('MH50_GALPHI', float),
                                               ('AVG_ALPHAFE', float)])
    # Distance modulus -> distance in kpc
    data['MH50_DIST'] = (10**((data['HAYDEN_DISTMOD_50'] + 5) / 5)) / 1e3
    if distance_correction == True:
        data['MH50_DIST'] *= 1.05
    XYZ = bovy_coords.lbd_to_XYZ(data['GLON'],
                                 data['GLAT'],
                                 data['MH50_DIST'],
                                 degree=True)
    RphiZ = bovy_coords.XYZ_to_galcencyl(XYZ[:, 0],
                                         XYZ[:, 1],
                                         XYZ[:, 2],
                                         Xsun=8.,
                                         Zsun=0.025)
    data['MH50_GALR'] = RphiZ[:, 0]
    data['MH50_GALPHI'] = RphiZ[:, 1]
    data['MH50_GALZ'] = RphiZ[:, 2]
    # Absolute magnitudes from the distance modulus
    data['M_J'] = data['J0'] - data['HAYDEN_DISTMOD_50']
    data['M_H'] = data['H0'] - data['HAYDEN_DISTMOD_50']
    data['M_K'] = data['K0'] - data['HAYDEN_DISTMOD_50']
    data['AVG_ALPHAFE'] = avg_alphafe_dr12(data)
    data[_FEHTAG] += -0.1
    #remove locations not in the apogee selection function (FIND OUT WHATS UP HERE)
    data = data[np.in1d(data['LOCATION_ID'], apo.list_fields())]
    # Remove locations outside of the Pan-STARRS dust map
    # In the Southern hemisphere
    data = data[data['LOCATION_ID'] != 4266]  #240,-18
    data = data[data['LOCATION_ID'] != 4331]  #5.5,-14.2
    data = data[data['LOCATION_ID'] != 4381]  #5.2,-12.2
    data = data[data['LOCATION_ID'] != 4332]  #1,-4
    data = data[data['LOCATION_ID'] != 4329]  #0,-5
    data = data[data['LOCATION_ID'] != 4351]  #0,-2
    data = data[data['LOCATION_ID'] != 4353]  #358,0
    data = data[data['LOCATION_ID'] != 4385]  #358.6,1.4
    # Close to the ecliptic pole where there's no data (is it the ecliptic pole?
    data = data[data['LOCATION_ID'] != 4528]  #120,30
    data = data[data['LOCATION_ID'] != 4217]  #123,22.4
    #remove any non-finite magnitudes
    data = data[np.isfinite(data['M_H'])]
    if verbose == True:
        print(str(len(
            data)) + ' Stars with distance measures (and in good fields...)')
    if add_ages == True:
        if agetype == 'Martig':
            ages = fits.open(catpath + 'DR12_martigages_vizier.fits')[1].data
            idtag = '2MASS_ID'
        if agetype == 'Cannon':
            ages = fits.open(catpath + 'RGB_Cannon_Ages.fits')[1].data
            ages = esutil.numpy_util.add_fields(ages, [('Age', float)])
            ages['Age'] = np.exp(ages['ln_age'])
            idtag = 'ID'
        ages_tab = Table(data=ages)
        ages_tab.rename_column(idtag, 'APOGEE_ID')
        tab = join(ages_tab,
                   data,
                   keys='APOGEE_ID',
                   uniq_col_name='{col_name}{table_name}',
                   table_names=['', '2'])
        allStar_full = tab.as_array()
        data = allStar_full
        if verbose == True:
            print(str(len(data)) + ' Stars with ages')
    if apply_corrections == True:
        #martig1 = np.genfromtxt(catpath+'martig2016_table1.txt', dtype=None, names=True, skip_header=2)
        # FIX: take the table HDU's data; fits.open returns an HDUList, which
        # cannot be indexed by column name ('Age_out') directly
        martig1 = fits.open(catpath + 'martig_table1.fits')[1].data
        fit = lowess(np.log10(martig1['Age_out']),
                     np.log10(martig1['Age_in']))
        xs = np.linspace(-0.3, 1.2, 100)
        xsinterpolate = interp1d(xs, xs)
        fys = fit[:, 0] - xsinterpolate(fit[:, 1])
        interp = UnivariateSpline(fit[:, 1], fys)
        # Apply the correction in log(age) space
        corr_age = np.log10(data['Age']) + (interp(np.log10(data['Age'])))
        corr_age = 10**corr_age
        data['Age'] = corr_age
    return data
def get_rgbsample(cuts=True, add_dist=False, astronn_dist=True, add_ages=False,
                  rm_bad_dist=True, no_gaia=False, distkey='BPG_meandist',
                  verbose=True, alternate_ages=False, rmdups=True):
    """
    Assemble a clean DR14 RGB sample, optionally cross-matched to Gaia DR2
    (parallaxes + proper motions), distances, and ages.
    ---
    INPUT:
        None
    OUTPUT:
        Clean rgb sample with added parallaxes and PMs
    HISTORY:
        Started - Mackereth 24/04/18
    """
    # Base allStar read; the quality-cut branch also applies an RGB logg window.
    if cuts:
        stars = apread.allStar(rmcommissioning=True,
                               exclude_star_bad=True,
                               exclude_star_warn=True,
                               main=False,
                               ak=True,
                               rmdups=True,
                               adddist=False)
        rgb_window = (stars['LOGG'] > 1.8) & (stars['LOGG'] < 3.0)
        stars = stars[rgb_window]
    else:
        stars = apread.allStar(rmcommissioning=True,
                               main=False,
                               ak=True,
                               rmdups=True,
                               adddist=False)
    if verbose:
        print('%i Objects meeting quality cuts in APOGEE DR14' % len(stars))
    # Optional cross-match against the precomputed Gaia DR2 table.
    if no_gaia:
        sample = stars
    else:
        xmatch_hdu = fits.open(
            '../sav/allStar_l31c2_GaiaDR2_crossmatch_withpms.fits')
        xmatch_tab = Table(data=xmatch_hdu[1].data)
        joined = join(Table(data=stars), xmatch_tab,
                      keys='APOGEE_ID',
                      uniq_col_name='{col_name}{table_name}',
                      table_names=['', '_xmatch'])
        sample = joined.as_array()
        if verbose:
            print('%i Matched Objects in Gaia DR2' % len(sample))
    # Attach the mean [alpha/Fe] column.
    sample = esutil.numpy_util.add_fields(sample, [('AVG_ALPHAFE', float)])
    sample['AVG_ALPHAFE'] = avg_alphafe(sample)
    # Optional distance catalogue join (astroNN or the DR14 distance VAC).
    if add_dist:
        if astronn_dist:
            dist_rows = fits.open(astronn_dists)[1].data
            distkey = 'pc'  # astroNN catalogue stores distance under 'pc'
        else:
            dist_rows = fits.open(
                '/gal/astjmack/apogee/catalogues/apogee_distances-DR14.fits'
            )[1].data
        joined = join(Table(data=sample), Table(data=dist_rows),
                      keys='APOGEE_ID',
                      uniq_col_name='{col_name}{table_name}',
                      table_names=['', '_dist_table'])
        sample = joined.as_array()
        if rm_bad_dist:
            # Drop rows whose distance entry is NaN/inf.
            sample = sample[np.isfinite(sample[distkey])]
        if verbose:
            print('%i Matched Objects in APOGEE distance VAC' % len(sample))
    # Optional age catalogue join (astroNN ages or the genfromtxt table).
    if add_ages:
        base_tab = Table(data=sample)
        if alternate_ages:
            age_rows = np.load(corr_agecat)
            age_tab = Table(data=age_rows)
            age_tab.rename_column('astroNN_age', 'Age')
        else:
            age_rows = np.genfromtxt(agecat, names=True, dtype=None)
            age_tab = Table(data=age_rows)
            age_tab.rename_column('2MASS_ID', 'APOGEE_ID')
        joined = join(base_tab, age_tab,
                      keys='APOGEE_ID',
                      uniq_col_name='{col_name}{table_name}',
                      table_names=['', '_ages'])
        sample = joined.as_array()
        if rmdups:
            print('removing duplicates...')
            sample = remove_duplicates(sample)
        if verbose:
            print('%i Matched Objects in Age Catalogue' % len(sample))
    return sample
import vsEnvironSetup vsEnvironSetup.setVariables() import apogee.tools.read as apread data = apread.allStar(dr='13') apogeeIDs = data['APOGEE_ID'] locationIDs = data['LOCATION_ID'] targetCount = len(apogeeIDs) filename = 'lists/all.csv' f = open(filename, 'w') for i in range(targetCount): f.write(str(locationIDs[i]) + ',' + str(apogeeIDs[i]) + '\n') f.close()
import copy import numpy import apogee.tools.read as apread from apogee.tools import bitmask, paramIndx, elemIndx _DATA= apread.allStar(raw=True) #such that we can re-use it in different tests from _util import known_failure def test_telescope(): #Test the telescope tag against the APSTAR_ID onemIndx= numpy.array(['apogee.apo1m' in s for s in _DATA['APSTAR_ID']]) telescopeIndx= numpy.array(['apo1m' in d for d in _DATA['TELESCOPE']], dtype='bool') assert numpy.sum(onemIndx*(True-telescopeIndx)) == 0,\ 'TELESCOPE tag does not correspond to APSTAR_ID for 1m data' return None def test_targflags_apogee_target1(): # Test that TARGFLAGS corresponds to the bits in APOGEE_TARGET targ1bits= range(31) #don't check 31, bc always set targ1bits.pop(14) #14 not populated for targbit in targ1bits: name= bitmask.apogee_target1_string(targbit) targindx= numpy.array([name in s for s in _DATA['TARGFLAGS']], dtype='bool') if targbit == 0: targindx*= \ numpy.array([not 'APOGEE_FAINT_EXTRA' in s for s in _DATA['TARGFLAGS']], dtype='bool') badindx= ((_DATA['APOGEE_TARGET1'] & 2**targbit) != 0)*(True-targindx) assert numpy.sum(badindx) == 0, 'Some objects with bit %i set in apogee_target1 do not have the corresponding flag name in TARGFLAGS set' % targbit return None
def read_caldata(filename=os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'aj485195t4_mrt.txt'),
                 dr='12'):
    """
    NAME:
       read_caldata
    PURPOSE:
       Read the data on calibration clusters from Meszaros et al. (2013)
    INPUT:
       filename= Name of the file that has the ApJ machine-readable table
       dr= data release string; >12 switches abundance columns from X_H to X_FE
    OUTPUT:
       data structure with the data
    HISTORY:
       2015-02-11 - Written - Bovy (IAS@KITP)
    """
    data = astropy.io.ascii.read(filename)
    # Normalize column names: keep the *corrected* Teff/logg/[M/H] columns
    # and rename them to the allStar-style names.
    data.rename_column('Cluster', 'CLUSTER')
    data.remove_column('Teff')
    data.rename_column('TeffC', 'TEFF')
    data.remove_column('logg')
    data.rename_column('loggC', 'LOGG')
    data.remove_column('[M/H]')
    data.rename_column('[M/H]C', 'FEH')
    data.rename_column('2MASS', 'ID')
    # Now match to allStar to get the location_ids
    alldata = apread.allStar(raw=True)
    # -1 sentinels mark rows that never get a match below.
    locids = numpy.zeros(len(data), dtype='int') - 1
    hmags = numpy.zeros(len(data), dtype='float') - 1
    snrs = numpy.zeros(len(data), dtype='float') - 1
    ras = numpy.zeros(len(data), dtype='float') - 1
    decs = numpy.zeros(len(data), dtype='float') - 1
    # and match to allVisit for the fibers that each star was observed in
    allvdata = apread.allVisit(raw=True)
    fibers = numpy.zeros(
        (len(data), numpy.nanmax(alldata['NVISITS'])), dtype='int') - 1
    inds = []
    for ii in range(len(data)):
        # Pleiades stars are skipped entirely (index sentinel 0).
        if 'Pleiades' in data['CLUSTER'][ii]:
            inds.append(0)
            continue
        indx = alldata['APOGEE_ID'] == data['ID'][ii]
        success = numpy.where(indx == True)[0]
        # NOTE(review): 0 is used both as "no/ambiguous match" sentinel and as
        # a valid allStar row index; downstream indexing with inds cannot tell
        # these apart — confirm intended behavior.
        if success.size == 0 or success.size > 1:
            inds.append(0)
        elif success.size == 1:
            inds.append(success[0])
        print(indx)
        # if numpy.sum(indx) == 0:
        #     raise ValueError('allStar match for %s not found ...' % (data['ID'][ii]))
        # if len(list(set(alldata['LOCATION_ID'][indx]))) > 1:
        #     raise ValueError('Multiple matches found for for %s ...' % (data['ID'][ii]))
        # NOTE(review): if indx selected zero rows, the [0] below raises
        # IndexError; presumably every non-Pleiades ID is expected to match —
        # verify against the input table.
        locids[ii] = alldata['LOCATION_ID'][indx][0]
        hmags[ii] = alldata['H'][indx][0]
        snrs[ii] = alldata['SNR'][indx][0]
        ras[ii] = alldata['RA'][indx][0]
        decs[ii] = alldata['DEC'][indx][0]
        # Record the fiber ID of every visit of this star via VISIT_PK.
        for jj in range(alldata['NVISITS'][indx][0]):
            fibers[ii, jj] = allvdata[alldata['VISIT_PK'][indx][0,
                                                               jj]]['FIBERID']
    # Wrap as a tuple so `inds` fancy-indexes alldata row-wise below.
    inds = (numpy.array(inds), )
    data['LOCATION_ID'] = locids
    data['H'] = hmags
    data['FIBERID'] = fibers
    data['SNR'] = snrs
    data['APOGEE_ID'] = data['ID']
    data['RA'] = ras
    data['DEC'] = decs
    data['index'] = inds[0]
    data['M_H'] = data['FEH']
    data['FE_H'] = alldata['FE_H'][inds]
    # DR>12 tabulates abundances relative to Fe; DR<=12 relative to H.
    if int(dr) > 12:
        rel = 'FE'
    if int(dr) <= 12:
        rel = 'H'
    data['C_{0}'.format(rel)] = alldata['C_{0}'.format(rel)][inds]
    data['N_{0}'.format(rel)] = alldata['N_{0}'.format(rel)][inds]
    data['O_{0}'.format(rel)] = alldata['O_{0}'.format(rel)][inds]
    data['NA_{0}'.format(rel)] = alldata['NA_{0}'.format(rel)][inds]
    data['MG_{0}'.format(rel)] = alldata['MG_{0}'.format(rel)][inds]
    data['AL_{0}'.format(rel)] = alldata['AL_{0}'.format(rel)][inds]
    data['SI_{0}'.format(rel)] = alldata['SI_{0}'.format(rel)][inds]
    data['S_{0}'.format(rel)] = alldata['S_{0}'.format(rel)][inds]
    data['K_{0}'.format(rel)] = alldata['K_{0}'.format(rel)][inds]
    data['CA_{0}'.format(rel)] = alldata['CA_{0}'.format(rel)][inds]
    data['TI_{0}'.format(rel)] = alldata['TI_{0}'.format(rel)][inds]
    data['V_{0}'.format(rel)] = alldata['V_{0}'.format(rel)][inds]
    data['MN_{0}'.format(rel)] = alldata['MN_{0}'.format(rel)][inds]
    data['NI_{0}'.format(rel)] = alldata['NI_{0}'.format(rel)][inds]
    return numpy.array(data)
from astropy.io import fits import apogee.tools.read as apread import numpy as np allStarDR14 = apread.allStar(rmcommissioning=True, main=False, ak=True, akvers='targ', adddist=False) locationIDs = allStarDR14['LOCATION_ID'] apogeeIDs = allStarDR14['APOGEE_ID'] apogeeIDs = [s.decode('utf-8') for s in apogeeIDs] #remove bit from string # R calculation def calcR401(x, pos1=0, pos2=401, peakLoc=201): ''' Calculates the value of R with the given array x Returns: The value of R for whole CCF Assupmtion: the center peak lies in CCF lag space 201 ''' # if peakLoc is near the edges just skip it if (peakLoc <= 10) or (peakLoc >= 390): return np.nan Mirror = (x[peakLoc:pos2])[::-1] sigmaA = np.sqrt(1.0 / (2.0 * len(Mirror)) * np.sum( (x[pos1:peakLoc] - Mirror)**2)) r401 = np.max(x) / (np.sqrt(2.0) * sigmaA) return r401
def make_rcsample(parser):
    """Build the APOGEE red-clump (RC) catalog and write it to a FITS file.

    Parameters
    ----------
    parser : OptionParser
        Supplies: savefilename, rmdups, loggcut, distfac, tyc2, nostat, nopm
        (option definitions live elsewhere in this module).

    Side effects: writes the progressively enriched catalog to savefilename
    (possibly multiple times) and may POST positions to the CDS X-match
    service when --tyc2 is set.  Returns None.
    """
    options, args = parser.parse_args()
    savefilename = options.savefilename
    if savefilename is None:
        #Create savefilename if not given
        savefilename = os.path.join(
            appath._APOGEE_DATA,
            'rcsample_' + appath._APOGEE_REDUX + '.fits')
        print("Saving to %s ..." % savefilename)
    #Read the base-sample
    data = apread.allStar(adddist=_ADDHAYDENDIST, rmdups=options.rmdups)
    #Remove a bunch of fields that we do not want to keep
    data = esutil.numpy_util.remove_fields(data, [
        'TARGET_ID', 'FILE', 'AK_WISE', 'SFD_EBV', 'SYNTHVHELIO_AVG',
        'SYNTHVSCATTER', 'SYNTHVERR', 'SYNTHVERR_MED', 'RV_TEFF', 'RV_LOGG',
        'RV_FEH', 'RV_ALPHA', 'RV_CARB', 'RV_CCFWHM', 'RV_AUTOFWHM',
        'SYNTHSCATTER', 'STABLERV_CHI2', 'STABLERV_RCHI2',
        'STABLERV_CHI2_PROB', 'CHI2_THRESHOLD', 'APSTAR_VERSION',
        'ASPCAP_VERSION', 'RESULTS_VERSION', 'WASH_M', 'WASH_M_ERR',
        'WASH_T2', 'WASH_T2_ERR', 'DDO51', 'DDO51_ERR', 'IRAC_3_6',
        'IRAC_3_6_ERR', 'IRAC_4_5', 'IRAC_4_5_ERR', 'IRAC_5_8',
        'IRAC_5_8_ERR', 'IRAC_8_0', 'IRAC_8_0_ERR', 'WISE_4_5',
        'WISE_4_5_ERR', 'TARG_4_5', 'TARG_4_5_ERR',
        'WASH_DDO51_GIANT_FLAG', 'WASH_DDO51_STAR_FLAG', 'REDUCTION_ID',
        'SRC_H', 'PM_SRC'
    ])
    # More: the l33 reduction carries its own Gaia columns; drop them too.
    if appath._APOGEE_REDUX.lower() == 'l33':
        data = esutil.numpy_util.remove_fields(data, [
            'GAIA_SOURCE_ID', 'GAIA_PARALLAX', 'GAIA_PARALLAX_ERROR',
            'GAIA_PMRA', 'GAIA_PMRA_ERROR', 'GAIA_PMDEC',
            'GAIA_PMDEC_ERROR', 'GAIA_PHOT_G_MEAN_MAG',
            'GAIA_PHOT_BP_MEAN_MAG', 'GAIA_PHOT_RP_MEAN_MAG',
            'GAIA_RADIAL_VELOCITY', 'GAIA_RADIAL_VELOCITY_ERROR',
            'GAIA_R_EST', 'GAIA_R_LO', 'GAIA_R_HI', 'TEFF_SPEC', 'LOGG_SPEC'
        ])
    # Old (pre-r500, non-l3x, non-current) reductions have an ELEM column.
    if not appath._APOGEE_REDUX.lower() == 'current' \
            and not 'l3' in appath._APOGEE_REDUX \
            and int(appath._APOGEE_REDUX[1:]) < 500:
        data = esutil.numpy_util.remove_fields(data, ['ELEM'])
    #Select red-clump stars
    jk = data['J0'] - data['K0']
    z = isodist.FEH2Z(data['METALS'], zsolar=0.017)
    # Choice of logg depends on the reduction version; older/newer reductions
    # needed a custom calibration of the raw FPARAM logg.
    if 'l31' in appath._APOGEE_REDUX:
        logg = data['LOGG']
    elif 'l30' in appath._APOGEE_REDUX:
        logg = data['LOGG']
    elif appath._APOGEE_REDUX.lower() == 'current' \
            or int(appath._APOGEE_REDUX[1:]) > 600:
        # NOTE(review): `if False` deliberately disables the RC-specific
        # calibration in favor of the on-average one below.
        if False:
            #Use my custom logg calibration that's correct for the RC
            logg = (1. - 0.042) * data['FPARAM'][:, paramIndx('logg')] - 0.213
            lowloggindx = data['FPARAM'][:, paramIndx('logg')] < 1.
            logg[lowloggindx] = data['FPARAM'][lowloggindx,
                                               paramIndx('logg')] - 0.255
            hiloggindx = data['FPARAM'][:, paramIndx('logg')] > 3.8
            logg[hiloggindx] = data['FPARAM'][hiloggindx,
                                              paramIndx('logg')] - 0.3726
        else:
            #Use my custom logg calibration that's correct on average
            logg = (1. + 0.03) * data['FPARAM'][:, paramIndx('logg')] - 0.37
            lowloggindx = data['FPARAM'][:, paramIndx('logg')] < 1.
            logg[lowloggindx] = data['FPARAM'][lowloggindx,
                                               paramIndx('logg')] - 0.34
            hiloggindx = data['FPARAM'][:, paramIndx('logg')] > 3.8
            logg[hiloggindx] = data['FPARAM'][hiloggindx,
                                              paramIndx('logg')] - 0.256
    else:
        logg = data['LOGG']
    # (J-K)0 color, metallicity-Z, and logg(Teff) box isolating the red clump;
    # boolean masks multiplied together act as logical AND.
    indx= (jk < 0.8)*(jk >= 0.5)\
        *(z <= 0.06)\
        *(z <= rcmodel.jkzcut(jk,upper=True))\
        *(z >= rcmodel.jkzcut(jk))\
        *(logg >= rcmodel.loggteffcut(data['TEFF'],z,upper=False))\
        *(logg+0.1*('l31' in appath._APOGEE_REDUX or 'l33' in appath._APOGEE_REDUX) \
              <= rcmodel.loggteffcut(data['TEFF'],z,upper=True))
    data = data[indx]
    #Add more aggressive flag cut (Teff-dependent upper logg bound)
    data = esutil.numpy_util.add_fields(data,
                                        [('ADDL_LOGG_CUT', numpy.int32)])
    data['ADDL_LOGG_CUT'] = (
        (data['TEFF'] - 4800.) / 1000. + 2.75) > data['LOGG']
    if options.loggcut:
        data = data[data['ADDL_LOGG_CUT'] == 1]
    print("Making catalog of %i objects ..." % len(data))
    #Add distances (RC standard-candle distance + Galactocentric coords)
    data = esutil.numpy_util.add_fields(data, [('RC_DIST', float),
                                               ('RC_DM', float),
                                               ('RC_GALR', float),
                                               ('RC_GALPHI', float),
                                               ('RC_GALZ', float)])
    rcd = rcmodel.rcdist()
    jk = data['J0'] - data['K0']
    z = isodist.FEH2Z(data['METALS'], zsolar=0.017)
    data['RC_DIST'] = rcd(jk, z, appmag=data['K0']) * options.distfac
    data['RC_DM'] = 5. * numpy.log10(data['RC_DIST']) + 10.
    XYZ = bovy_coords.lbd_to_XYZ(data['GLON'], data['GLAT'],
                                 data['RC_DIST'], degree=True)
    RphiZ = bovy_coords.XYZ_to_galcencyl(XYZ[:, 0], XYZ[:, 1], XYZ[:, 2],
                                         Xsun=8.15, Zsun=0.0208)
    R = RphiZ[:, 0]
    phi = RphiZ[:, 1]
    Z = RphiZ[:, 2]
    data['RC_GALR'] = R
    data['RC_GALPHI'] = phi
    data['RC_GALZ'] = Z
    #Save
    fitswrite(savefilename, data, clobber=True)
    # Add Tycho-2 matches
    if options.tyc2:
        data = esutil.numpy_util.add_fields(data,
                                            [('TYC2MATCH', numpy.int32),
                                             ('TYC1', numpy.int32),
                                             ('TYC2', numpy.int32),
                                             ('TYC3', numpy.int32)])
        data['TYC2MATCH'] = 0
        data['TYC1'] = -1
        data['TYC2'] = -1
        data['TYC3'] = -1
        # Write positions
        posfilename = tempfile.mktemp('.csv', dir=os.getcwd())
        resultfilename = tempfile.mktemp('.csv', dir=os.getcwd())
        with open(posfilename, 'w') as csvfile:
            wr = csv.writer(csvfile, delimiter=',',
                            quoting=csv.QUOTE_MINIMAL)
            wr.writerow(['RA', 'DEC'])
            for ii in range(len(data)):
                wr.writerow([data[ii]['RA'], data[ii]['DEC']])
        # Send to CDS for matching
        result = open(resultfilename, 'w')
        try:
            subprocess.check_call([
                'curl', '-X', 'POST', '-F', 'request=xmatch', '-F',
                'distMaxArcsec=2', '-F', 'RESPONSEFORMAT=csv', '-F',
                'cat1=@%s' % os.path.basename(posfilename), '-F',
                'colRA1=RA', '-F', 'colDec1=DEC', '-F',
                'cat2=vizier:Tycho2',
                'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'
            ], stdout=result)
        except subprocess.CalledProcessError:
            # NOTE(review): cleanup runs, but control then falls through to
            # numpy.loadtxt on the just-removed result file — presumably the
            # intent was to abort/return here; confirm before relying on this
            # error path.
            os.remove(posfilename)
            if os.path.exists(resultfilename):
                result.close()
                os.remove(resultfilename)
        result.close()
        # Directly match on input RA
        ma = numpy.loadtxt(resultfilename, delimiter=',', skiprows=1,
                           usecols=(1, 2, 7, 8, 9))
        iis = numpy.arange(len(data))
        mai = [iis[data['RA'] == ma[ii, 0]][0] for ii in range(len(ma))]
        data['TYC2MATCH'][mai] = 1
        data['TYC1'][mai] = ma[:, 2]
        data['TYC2'][mai] = ma[:, 3]
        data['TYC3'][mai] = ma[:, 4]
        os.remove(posfilename)
        os.remove(resultfilename)
    if not options.nostat:
        #Determine statistical sample and add flag + inverse selection weight
        apo = apogee.select.apogeeSelect()
        statIndx = apo.determine_statistical(data)
        mainIndx = apread.mainIndx(data)
        data = esutil.numpy_util.add_fields(data, [('STAT', numpy.int32),
                                                   ('INVSF', float)])
        data['STAT'] = 0
        data['STAT'][statIndx * mainIndx] = 1
        for ii in range(len(data)):
            if (statIndx * mainIndx)[ii]:
                # INVSF = 1 / selection fraction at this field and H mag
                data['INVSF'][ii] = 1. / apo(data['LOCATION_ID'][ii],
                                             data['H'][ii])
            else:
                data['INVSF'][ii] = -1.
    if options.nopm:
        fitswrite(savefilename, data, clobber=True)
        return None
    data = _add_proper_motions(data, savefilename)
    # Save
    fitswrite(savefilename, data, clobber=True)
    return None
def plot_distcomparisons(plotfilename):
    """Plot distance-modulus offsets of several APOGEE distance catalogues,
    measured both with respect to APOKASC direct distances and with respect
    to the RC standard-candle distances, and save the figure to
    plotfilename."""
    data= apread.allStar(adddist=True)
    kascDiffs= []
    kascDiffErrs= []
    rcDiffs= []
    rcDiffErrs= []
    apokascMinDist= 1.
    # First point: the RC catalogue itself — measured against APOKASC,
    # identically zero against itself.
    d= distDiff(data['APOKASC_DIST_DIRECT'],data['RC_DIST'])
    kascDiffs.append(d[0])
    kascDiffErrs.append(d[1])
    rcDiffs.append(0.)
    rcDiffErrs.append(0.)
    # Remaining catalogues: each compared to APOKASC (with the minimum
    # distance cut) and to the RC distances.
    for distcol in ['BPG_DIST1_MEAN','HAYDEN_DIST_PEAK','SCHULTHEIS_DIST']:
        d= distDiff(data['APOKASC_DIST_DIRECT'],data[distcol],
                    minDist=apokascMinDist)
        kascDiffs.append(d[0])
        kascDiffErrs.append(d[1])
        d= distDiff(data['RC_DIST'],data[distcol])
        rcDiffs.append(d[0])
        rcDiffErrs.append(d[1])
    # plot
    bovy_plot.bovy_print(fig_width=7., text_fontsize=20.,
                         legend_fontsize=24., xtick_labelsize=18.,
                         ytick_labelsize=18., axes_labelsize=24.)
    ms= 8.
    line1= bovy_plot.bovy_plot([1,2,3,4],kascDiffs,'bo',ms=ms,
                               xrange=[0,5],
                               yrange=[-0.11,0.11],
                               ylabel=r'$\mathrm{distance\ modulus\ offset}$')
    pyplot.errorbar([1,2,3,4],kascDiffs,yerr=kascDiffErrs,
                    ls='none',marker='o',color='b',ms=ms)
    line2= bovy_plot.bovy_plot([1,2,3,4],rcDiffs,'ro',ms=ms,
                               overplot=True)
    pyplot.errorbar([1,2,3,4],rcDiffs,yerr=rcDiffErrs,
                    ls='none',marker='o',color='r',ms=ms)
    pyplot.legend([line1[0],line2[0]],
                  [r'$\mathrm{wrt\ APOKASC}$',
                   r'$\mathrm{wrt\ RC}$'],
                  loc='lower left',fontsize=14.,frameon=False,numpoints=1)
    #Put labels and rotate them
    pyplot.xticks([1,2,3,4],
                  [r'$\mathrm{RC}$',
                   r"$\mathrm{BPG\ dist1}$",
                   r"$\mathrm{Hayden\ peak}$",
                   r"$\mathrm{Schultheis}$"],size=16.,
                  rotation=45.)
    bovy_plot.bovy_end_print(plotfilename)
    return None
def get_spectra( name, red_clump, location
                 ):  #Function to read the allStar file and get the spectra,
    # correct spectra for small and large uncertainties, remove red clump stars
    """Return cluster data, spectra, spectral errors, photometric Teffs, and
	bitmask from APOGEE.

	If the data file for the specified cluster already exists locally,
	import the data from the file (cluster data, spectra, spectral errors, bitmask).
	If the data file does not exist, obtain the APOGEE spectra from a specified cluster
	from the allStar catalogue, replacing ASPCAP abundances with astroNN abundances.

	Parameters
	----------
	name : str
		Name of desired cluster (i.e. 'NGC 2682')
	red_clump : str
		If the red clump stars in rcsample are to be removed, set to 'True'.  If all stars are to be used,
		set to 'False'.
	location : str
		If running locally, set to 'personal'.  If running on the server, set to 'server'.

	Returns
	-------
	apogee_cluster_data (all stars) or apogee_cluster_data_final (red clumps removed) : structured array
		All cluster data from APOGEE
	spectra_50 (all stars) or spectra_final (red clumps removed) : tuple
		Array of floats representing the cleaned-up fluxes in the APOGEE spectra with red clump stars
		removed
	spectra_err_50 (all stars) or spectra_err_final (red clumps removed) : tuple
		Array of floats representing the cleaned-up spectral errors from the APOGEE spectra with red
		clump stars removed
	good_T (all stars) or T_final (red clumps removed) : tuple
		Array of floats representing the effective temperatures of the stars in the cluster
		between 4000K and 5000K
	full_bitmask (all stars) or bitmask_final (red clumps removed) : tuple
		Array of ints (1 or 0), cleaned in the same way as the spectra, representing the bad pixels
		in the APOGEE_PIXMASK bitmask
	"""
    # Build the per-cluster HDF5 cache path, stripping spaces from the name.
    if location == 'personal':
        path = '/Users/chloecheng/Personal/' + str(name).replace(
            ' ', '') + '.hdf5'
    elif location == 'server':
        path = '/geir_data/scr/ccheng/AST425/Personal/' + str(name).replace(
            ' ', '') + '.hdf5'

    # Cache hit: read the previously saved arrays and return them directly.
    if glob.glob(path):
        if red_clump == 'False':
            # All stars (red clumps kept).
            file = h5py.File(path, 'r')
            apogee_cluster_data = file['apogee_cluster_data'][()]
            spectra_50 = file['spectra'][()]
            spectra_err_50 = file['spectra_errs'][()]
            good_T = file['T'][()]
            full_bitmask = file['bitmask'][()]
            file.close()
            print(name, ' complete.')
            return apogee_cluster_data, spectra_50, spectra_err_50, good_T, full_bitmask

        elif red_clump == 'True':
            # Red-clump-removed version of the same cache file.
            file = h5py.File(path, 'r')
            apogee_cluster_data_final = file['apogee_cluster_data'][()]
            spectra_final = file['spectra'][()]
            spectra_err_final = file['spectra_errs'][()]
            T_final = file['T'][()]
            bitmask_final = file['bitmask'][()]
            file.close()
            print(name, ' complete.')
            return apogee_cluster_data_final, spectra_final, spectra_err_final, T_final, bitmask_final

    # Cache miss: build everything from the APOGEE/OCCAM catalogues.
    else:
        # Red clump star names from rcsample (DR14); on the server the IDs
        # come back as bytes and must be decoded.
        rc_data = rcsample(dr='14')
        rc_stars = []
        for i in range(len(rc_data)):
            if location == 'personal':
                rc_stars.append(rc_data[i][2])
            elif location == 'server':
                rc_stars.append(rc_data[i][2].decode('UTF-8'))
        rc_stars = np.array(rc_stars)

        # allStar with astroNN abundances; keep first occurrence of each
        # APOGEE_ID to drop duplicates.
        apogee_cat = apread.allStar(use_astroNN_abundances=True)
        unique_apoids, unique_inds = np.unique(apogee_cat['APOGEE_ID'],
                                               return_index=True)
        apogee_cat = apogee_cat[unique_inds]

        # OCCAM cluster-level and member-level tables.
        cls = afits.open('occam_cluster-DR14.fits')
        cls = cls[1].data
        members = afits.open('occam_member-DR14.fits')
        members = members[1].data

        # Select members of this cluster; 'GM' flag restricts to giant members.
        cluster_members = (members['CLUSTER'] == name) & (
            members['MEMBER_FLAG'] == 'GM')
        member_list = members[cluster_members]

        # Match cluster members into the allStar catalogue by APOGEE_ID
        # (np.in1d indexes the first argument, i.e. the apogee catalogue).
        cluster_inds = np.in1d((apogee_cat['APOGEE_ID']).astype('U100'),
                               member_list['APOGEE_ID'])
        apogee_cluster_data = apogee_cat[cluster_inds]
        T = photometric_Teff(apogee_cluster_data)

        # Mark members that are also red clump stars.
        # NOTE(review): assigning np.nan into a string array stores the string
        # 'nan', which is what the `!= 'nan'` test below relies on; the inner
        # `in` is a substring test — confirm IDs can't collide as substrings.
        cluster_stars = member_list['APOGEE_ID']
        cluster_marked = np.copy(cluster_stars)
        for i in range(len(cluster_stars)):
            for j in range(len(rc_stars)):
                if cluster_stars[i] in rc_stars[j]:
                    cluster_marked[i] = np.nan

        # Read the combined ASPCAP spectrum (7514 pixels), its errors, and
        # the apStar pixel bitmask for every member.
        number_of_members = len(member_list)
        spectra = np.zeros((number_of_members, 7514))
        spectra_errs = np.zeros((number_of_members, 7514))
        bitmask = np.zeros((number_of_members, 7514))
        for s, star in enumerate(apogee_cluster_data):
            spectra[s] = apread.aspcapStar(star['LOCATION_ID'],
                                           star['APOGEE_ID'],
                                           ext=1,
                                           header=False,
                                           dr='14',
                                           aspcapWavegrid=True)
            spectra_errs[s] = apread.aspcapStar(star['LOCATION_ID'],
                                                star['APOGEE_ID'],
                                                ext=2,
                                                header=False,
                                                dr='14',
                                                aspcapWavegrid=True)
            bitmask[s] = apread.apStar(star['LOCATION_ID'],
                                       star['APOGEE_ID'],
                                       ext=3,
                                       header=False,
                                       dr='14',
                                       aspcapWavegrid=True)[1]

        # Flip the mask convention: 1 = unmasked (bitmask 0), 0 = masked.
        bitmask = bitmask.astype(int)
        bitmask_flip = np.zeros_like(bitmask)
        for i in range(len(spectra)):
            for j in range(7514):
                if bitmask[i][j] == 0:
                    bitmask_flip[i][j] = 1
                else:
                    bitmask_flip[i][j] = 0

        # Drop stars whose spectrum is entirely zero (no data).
        full_spectra = []
        full_spectra_errs = []
        full_bitmask = []
        full_T = []
        full_stars = []
        for i in range(len(spectra)):
            if any(spectra[i, :] != 0):
                full_spectra.append(spectra[i])
                full_spectra_errs.append(spectra_errs[i])
                full_bitmask.append(bitmask_flip[i])
                full_T.append(T[i])
                full_stars.append(i)
        full_spectra = np.array(full_spectra)
        full_spectra_errs = np.array(full_spectra_errs)
        full_bitmask = np.array(full_bitmask)
        full_T = np.array(full_T)
        full_stars = np.array(full_stars)
        # Keep the red-clump markers only for the surviving stars.
        full_marked_stars = cluster_marked[full_stars]

        # NaN-fill, then copy back only the unmasked pixels; masked pixels
        # stay NaN and are ignored downstream.
        masked_spectra = np.empty_like(full_spectra)
        masked_spectra_errs = np.empty_like(full_spectra_errs)
        masked_spectra[:] = np.nan
        masked_spectra_errs[:] = np.nan
        for i in range(len(full_spectra)):
            for j in range(7514):
                if full_bitmask[i][j] != 0:
                    masked_spectra[i][j] = full_spectra[i][j]
                    masked_spectra_errs[i][j] = full_spectra_errs[i][j]

        # Keep only stars with 4000 K < Teff < 5000 K.
        good_T_inds = (full_T > 4000) & (full_T < 5000)
        final_spectra = masked_spectra[good_T_inds]
        final_spectra_errs = masked_spectra_errs[good_T_inds]
        good_T = full_T[good_T_inds]
        apogee_cluster_data = apogee_cluster_data[good_T_inds]
        full_bitmask = full_bitmask[good_T_inds]
        final_stars = full_marked_stars[good_T_inds]
        # Red giants are the members NOT marked as red clumps above.
        rgs = (final_stars != 'nan')

        # Cap the per-pixel SNR at 200 by inflating too-small errors.
        spectra_err_200 = np.zeros_like(final_spectra_errs)
        for i in range(len(final_spectra)):
            for j in range(7514):
                if final_spectra[i][j] / final_spectra_errs[i][j] <= 200:
                    spectra_err_200[i][j] = final_spectra_errs[i][j]
                else:
                    spectra_err_200[i][j] = final_spectra[i][j] / 200

        # NaN out pixels whose SNR falls below 50.
        spectra_50 = np.copy(final_spectra)
        spectra_err_50 = np.copy(spectra_err_200)
        for i in range(len(final_spectra)):
            for j in range(7514):
                if final_spectra[i][j] / spectra_err_200[i][j] <= 50:
                    spectra_50[i][j] = np.nan
                    spectra_err_50[i][j] = np.nan

        # Red-clump-removed view of every product (rgs indexes RGB stars).
        logg = apogee_cluster_data['LOGG']
        apogee_cluster_data_final = apogee_cluster_data[rgs]
        spectra_final = spectra_50[rgs]
        spectra_err_final = spectra_err_50[rgs]
        T_final = good_T[rgs]
        bitmask_final = full_bitmask[rgs]

        if red_clump == 'False':
            # Cache and return the all-stars products.
            file = h5py.File(path, 'w')
            file['apogee_cluster_data'] = apogee_cluster_data
            file['spectra'] = spectra_50
            file['spectra_errs'] = spectra_err_50
            file['T'] = good_T
            file['bitmask'] = full_bitmask
            file.close()
            print(name, 'complete')
            return apogee_cluster_data, spectra_50, spectra_err_50, good_T, full_bitmask

        elif red_clump == 'True':
            # Cache and return the red-clump-removed products.
            file = h5py.File(path, 'w')
            file['apogee_cluster_data'] = apogee_cluster_data_final
            file['spectra'] = spectra_final
            file['spectra_errs'] = spectra_err_final
            file['T'] = T_final
            file['bitmask'] = bitmask_final
            file.close()
            print(name, 'complete')
            return apogee_cluster_data_final, spectra_final, spectra_err_final, T_final, bitmask_final
def _cluster_filepath(name, location):
	"""Return the local HDF5 cache path for cluster *name*.

	location == 'personal' maps to the local machine, 'server' to the group server.
	Spaces are stripped from the cluster name to build the filename.
	"""
	cluster = str(name).replace(' ', '')
	if location == 'personal':
		return '/Users/chloecheng/Personal/' + cluster + '.hdf5'
	elif location == 'server':
		return '/geir_data/scr/ccheng/AST425/Personal/' + cluster + '.hdf5'

def _read_cluster_file(path):
	"""Read (cluster data, spectra, spectral errors, Teffs, bitmask) from the HDF5 cache at *path*."""
	with h5py.File(path, 'r') as cache:
		return (cache['apogee_cluster_data'][()], cache['spectra'][()],
		        cache['spectra_errs'][()], cache['T'][()], cache['bitmask'][()])

def _write_cluster_file(path, cluster_data, spectra, spectra_errs, T, bitmask):
	"""Write the five cluster arrays to a new HDF5 cache file at *path*."""
	with h5py.File(path, 'w') as cache:
		cache['apogee_cluster_data'] = cluster_data
		cache['spectra'] = spectra
		cache['spectra_errs'] = spectra_errs
		cache['T'] = T
		cache['bitmask'] = bitmask

def get_spectra(name, red_clump, location):
	"""Return cluster data, spectra, spectral errors, photometric Teffs, and bitmask from APOGEE.

	If the data file for the specified cluster already exists locally, import the data from
	the file (cluster data, spectra, spectral errors, photometric Teffs, bitmask).  If the
	data file does not exist, obtain the APOGEE spectra for the cluster from the allStar
	catalogue (ASPCAP abundances replaced with astroNN abundances), clean them up, and
	cache the result to an HDF5 file.

	Parameters
	----------
	name : str
		Name of desired cluster (i.e. 'NGC 2682')
	red_clump : str
		If the red clump stars in rcsample are to be removed, set to 'True'.
		If all stars are to be used, set to 'False'.
	location : str
		If running locally, set to 'personal'.  If running on the server, set to 'server'.

	Returns
	-------
	apogee_cluster_data (all stars) or apogee_cluster_data_final (red clumps removed) : structured array
		All cluster data from APOGEE
	spectra_50 (all stars) or spectra_final (red clumps removed) : ndarray
		Cleaned-up fluxes in the APOGEE spectra
	spectra_err_50 (all stars) or spectra_err_final (red clumps removed) : ndarray
		Cleaned-up spectral errors from the APOGEE spectra
	good_T (all stars) or T_final (red clumps removed) : ndarray
		Effective temperatures of the cluster stars between 4000K and 5000K
	full_bitmask (all stars) or bitmask_final (red clumps removed) : ndarray
		Ints (1 or 0), cleaned in the same way as the spectra, representing the
		bad pixels in the APOGEE_PIXMASK bitmask (1 = clean pixel)
	"""
	path = _cluster_filepath(name, location)

	#If the data file for this cluster exists, return the cached arrays
	if glob.glob(path):
		if red_clump == 'False' or red_clump == 'True':
			cached = _read_cluster_file(path)
			print(name, ' complete.')
			return cached
	#If the file does not exist, get the data from APOGEE
	else:
		#Get red clump star IDs from rcsample (decoded from bytes to str)
		rc_data = rcsample(dr='14')
		rc_stars = np.array([entry[2].decode('UTF-8') for entry in rc_data])

		#Read in APOGEE catalogue data, removing duplicated stars and replacing ASPCAP with astroNN abundances
		apogee_cat = apread.allStar(use_astroNN_abundances=True)
		_, unique_inds = np.unique(apogee_cat['APOGEE_ID'], return_index=True)
		apogee_cat = apogee_cat[unique_inds]

		#Read in overall cluster information (opened as in the original; its contents are not used below)
		cls = afits.open('occam_cluster-DR14.fits')
		cls = cls[1].data

		#Read in information about cluster members; keep only giant members of the requested cluster
		members = afits.open('occam_member-DR14.fits')[1].data
		cluster_members = (members['CLUSTER'] == name) & (members['MEMBER_FLAG'] == 'GM')
		member_list = members[cluster_members]

		#Find APOGEE entries for that cluster by intersecting on the unique APOGEE ID
		cluster_inds = np.in1d((apogee_cat['APOGEE_ID']).astype('U100'), member_list['APOGEE_ID'])
		apogee_cluster_data = apogee_cat[cluster_inds]
		T = photometric_Teff(apogee_cluster_data)

		#Mark red clump members as NaN.  Assigning np.nan into a string array stores the
		#string 'nan', which is what the rgs mask below tests for.
		cluster_stars = member_list['APOGEE_ID']
		cluster_marked = np.copy(cluster_stars)
		cluster_marked[np.isin(cluster_stars, rc_stars)] = np.nan

		#Get spectra, spectral errors, and bitmask for each star - ASPCAP spectra have the
		#visits combined and the gaps between detectors removed (7514 pixels)
		#NOTE(review): rows of `spectra` follow apogee_cluster_data order while
		#`cluster_marked` follows member_list order; these agree only when the two lists
		#line up one-to-one — kept as in the original, confirm upstream.
		number_of_members = len(member_list)
		spectra = np.zeros((number_of_members, 7514))
		spectra_errs = np.zeros((number_of_members, 7514))
		bitmask = np.zeros((number_of_members, 7514))
		for s, star in enumerate(apogee_cluster_data):
			spectra[s] = apread.aspcapStar(star['LOCATION_ID'], star['APOGEE_ID'], ext=1, header=False, dr='14', aspcapWavegrid=True)
			spectra_errs[s] = apread.aspcapStar(star['LOCATION_ID'], star['APOGEE_ID'], ext=2, header=False, dr='14', aspcapWavegrid=True)
			bitmask[s] = apread.apStar(star['LOCATION_ID'], star['APOGEE_ID'], ext=3, header=False, dr='14', aspcapWavegrid=True)[1]

		#Flip the bitmask so that 1 marks an unflagged (clean) pixel and 0 a flagged one
		bitmask = bitmask.astype(int)
		bitmask_flip = np.where(bitmask == 0, 1, 0)

		#Remove empty spectra (rows that are all zero, i.e. no data was read)
		full_stars = np.where(np.any(spectra != 0, axis=1))[0]
		full_spectra = spectra[full_stars]
		full_spectra_errs = spectra_errs[full_stars]
		full_bitmask = bitmask_flip[full_stars]
		full_T = np.asarray(T)[full_stars]
		full_marked_stars = cluster_marked[full_stars]

		#Mask the spectra: flagged pixels (bitmask 0 after the flip) become NaN
		masked_spectra = np.where(full_bitmask != 0, full_spectra, np.nan)
		masked_spectra_errs = np.where(full_bitmask != 0, full_spectra_errs, np.nan)

		#Cut stars that are outside of the temperature limits (4000K < Teff < 5000K)
		good_T_inds = (full_T > 4000) & (full_T < 5000)
		final_spectra = masked_spectra[good_T_inds]
		final_spectra_errs = masked_spectra_errs[good_T_inds]
		good_T = full_T[good_T_inds]
		apogee_cluster_data = apogee_cluster_data[good_T_inds]
		full_bitmask = full_bitmask[good_T_inds]
		final_stars = full_marked_stars[good_T_inds]
		rgs = (final_stars != 'nan')  #Indices of the final red giant stars (red clumps were marked 'nan')

		#Want an SNR of 200: errors implying a larger SNR are inflated to spectrum/200.
		#NaN pixels fail the comparison and propagate as NaN through both branches.
		spectra_err_200 = np.where(final_spectra / final_spectra_errs <= 200,
		                           final_spectra_errs,
		                           final_spectra / 200)

		#Cut (NaN out) pixels with an SNR of less than 50
		spectra_50 = np.copy(final_spectra)
		spectra_err_50 = np.copy(spectra_err_200)
		low_snr = (final_spectra / spectra_err_200) <= 50
		spectra_50[low_snr] = np.nan
		spectra_err_50[low_snr] = np.nan

		#Cut red clumps
		apogee_cluster_data_final = apogee_cluster_data[rgs]
		spectra_final = spectra_50[rgs]
		spectra_err_final = spectra_err_50[rgs]
		T_final = good_T[rgs]
		bitmask_final = full_bitmask[rgs]

		if red_clump == 'False':
			#Keeping all stars: cache and return the pre-red-clump-cut arrays
			_write_cluster_file(path, apogee_cluster_data, spectra_50, spectra_err_50, good_T, full_bitmask)
			print(name, 'complete')
			return apogee_cluster_data, spectra_50, spectra_err_50, good_T, full_bitmask
		elif red_clump == 'True':
			#Removing red clump stars: cache and return the post-cut arrays
			_write_cluster_file(path, apogee_cluster_data_final, spectra_final, spectra_err_final, T_final, bitmask_final)
			print(name, 'complete')
			return apogee_cluster_data_final, spectra_final, spectra_err_final, T_final, bitmask_final