Example #1
def good4CN(cluster,data):
    """
    NAME:
       good4CN
    PURPOSE:
       return the indices of stars that can be used to determine the spread in C/N
    INPUT:
       cluster - the cluster name
       data - the data for this cluster
    OUTPUT:
       index
    HISTORY:
       2015-09-04 - Written - Bovy (UofT)
    """
    if cluster.lower() == 'm67':
        indx= (data['LOGG'] > _m67rccut(data['TEFF']))\
            *(numpy.fabs(data['TEFF']-4600.)>3.)
    elif cluster.lower() == 'n6819':
        apokasc= apread.apokasc()
        ma= numpy.zeros(len(data),dtype='int')-1
        for ii in range(len(data)):
            try:
                ma[ii]= (numpy.arange(len(apokasc))[apokasc['APOGEE_ID'] == data['ID'][ii]])[0]
            except IndexError: pass
        indx= numpy.ones(len(data),dtype='bool')
        for ii in range(len(data)):
            if ma[ii] >= 0 \
                    and apokasc[ma[ii]]['SEISMO EVOL'] == 'CLUMP          ':
                indx[ii]= False
        # Also exclude the clump stars' neighbours in effective temperature; they are all suspicious
        indx[numpy.fabs(data['TEFF']-4765.) < 60.]= False
    else:
        indx= numpy.ones(len(data),dtype='bool')
    return indx
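A minimal usage sketch (not part of the original snippet): it assumes good4CN as defined above, a cluster-member array with APOGEE-style columns, and hypothetical column names 'C_FE' and 'N_FE' for [C/Fe] and [N/Fe].
import numpy

def cn_spread(cluster, data):
    # Sketch: apply the good4CN mask and measure the scatter in [C/N]
    # 'C_FE' and 'N_FE' are assumed (hypothetical) column names
    good= good4CN(cluster,data)
    return numpy.std(data['C_FE'][good]-data['N_FE'][good])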
Example #2
def match_apokasc_saga():
    sagadata= read_saga()
    ids= sagadata['KICID'].data
    apokasc= apread.apokasc()
    dists= numpy.zeros(len(apokasc))-1
    edists= numpy.zeros(len(apokasc))-1
    for ii in range(len(apokasc)):
        indx= ids == apokasc['KEPLER ID'][ii]
        if numpy.sum(indx) == 0: continue
        dists[ii]= sagadata['Dis'][indx]
        edists[ii]= 0.5*(sagadata['E_Dis'][indx]+sagadata['e_Dis'][indx])
    apokasc= esutil.numpy_util.add_fields(apokasc,[('DIST_SEISMO', float),
                                                   ('E_DIST_SEISMO', float),
                                                   ])
    apokasc['DIST_SEISMO']= dists/1000.
    apokasc['E_DIST_SEISMO']= edists/1000.
    return apokasc
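A usage sketch (assuming read_saga and the apread module are available as in the snippet above): select the stars that actually received a SAGA distance; unmatched entries keep the -1 sentinel, so they stay negative after the kpc conversion.
import numpy

apokasc= match_apokasc_saga()
has_dist= apokasc['DIST_SEISMO'] > 0.
print("%i of %i APOKASC stars have a SAGA distance" % (numpy.sum(has_dist),len(apokasc)))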
Example #3
def match_apokasc_rc(rcfile=None):
    #First read apokasc
    kascdata= apread.apokasc()
    #Then read rc
    if rcfile is None:
        rcdata= apread.rcsample()
    else:
        rcdata= fitsio.read(rcfile)
        print len(rcdata), numpy.sum(rcdata['ADDL_LOGG_CUT'])
        rcdata= rcdata[rcdata['ADDL_LOGG_CUT'] == 1]
    #Match
    h=esutil.htm.HTM()
    m1,m2,d12 = h.match(kascdata['RA'],kascdata['DEC'],
                        rcdata['RA'],rcdata['DEC'],
                        2./3600.,maxmatch=1)
    kascdata= esutil.numpy_util.add_fields(kascdata,[('RC', int)])
    kascdata['RC']= 0
    kascdata['RC'][m1]= 1
    return kascdata
Example #4
def match_apokasc_rc(rcfile=None,addl_logg_cut=False):
    #First read apokasc
    kascdata= apread.apokasc()
    #Then read rc
    if rcfile is None:
        rcdata= apread.rcsample()
    else:
        rcdata= fitsio.read(rcfile)
    if addl_logg_cut:
        rcdata= rcdata[rcdata['ADDL_LOGG_CUT'] == 1]
    print "RC catalog has %i entries ..." % len(rcdata)
    #Match
    h=esutil.htm.HTM()
    m1,m2,d12 = h.match(kascdata['RA'],kascdata['DEC'],
                        rcdata['RA'],rcdata['DEC'],
                        2./3600.,maxmatch=1)
    kascdata= esutil.numpy_util.add_fields(kascdata,[('RC', int)])
    kascdata['RC']= 0
    kascdata['RC'][m1]= 1
    return kascdata
Example #5
def match_apokasc_rc(rcfile=None,addl_logg_cut=False):
    #First read apokasc
    kascdata= apread.apokasc()
    #Then read rc
    if rcfile is None:
        rcdata= apread.rcsample()
    else:
        rcdata= fitsio.read(rcfile)
    if addl_logg_cut:
        rcdata= rcdata[rcdata['ADDL_LOGG_CUT'] == 1]
    print("RC catalog has %i entries ..." % len(rcdata))
    #Match
    h=esutil.htm.HTM()
    m1,m2,d12 = h.match(kascdata['RA'],kascdata['DEC'],
                        rcdata['RA'],rcdata['DEC'],
                        2./3600.,maxmatch=1)
    kascdata= esutil.numpy_util.add_fields(kascdata,[('RC', int)])
    kascdata['RC']= 0
    kascdata['RC'][m1]= 1
    return kascdata
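A usage sketch for the variants above (assuming apread.apokasc and apread.rcsample are available): run the match and report the fraction of APOKASC stars flagged as red-clump members via the added RC column.
import numpy

kascdata= match_apokasc_rc()
rc_frac= numpy.mean(kascdata['RC'])
print("%.1f%% of APOKASC stars match the RC catalog" % (100.*rc_frac))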
Example #6
def match_apokasc_rodrigues(median=False,dteff=False,scale=False):
    rdata= read_rodrigues(median=median,dteff=dteff,scale=scale)
    ids= numpy.array([str(int(id)) for id in rdata[:,0]])
    apokasc= apread.apokasc()
    dists= numpy.zeros(len(apokasc))-1
    edists= numpy.zeros(len(apokasc))-1
    ages= numpy.zeros(len(apokasc))-1
    eages= numpy.zeros(len(apokasc))-1
    llages= numpy.zeros(len(apokasc))-1
    ulages= numpy.zeros(len(apokasc))-1
    for ii in range(len(apokasc)):
        indx= ids == apokasc['KEPLER ID'][ii]
        if numpy.sum(indx) == 0: continue
        if median:
            dists[ii]= rdata[indx,107]
            edists[ii]= 0.5*(rdata[indx,109]-rdata[indx,108])
        else:
            dists[ii]= rdata[indx,109]
            edists[ii]= 0.5*(rdata[indx,111]-rdata[indx,110])
        ages[ii]= rdata[indx,1]
        eages[ii]= 0.5*(rdata[indx,3]-rdata[indx,2])
        llages[ii]= rdata[indx,4]
        ulages[ii]= rdata[indx,5]
    apokasc= esutil.numpy_util.add_fields(apokasc,[('DIST_SEISMO', float),
                                                   ('E_DIST_SEISMO', float),
                                                   ('AGE_SEISMO', float),
                                                   ('E_AGE_SEISMO', float),
                                                   ('LL_AGE_SEISMO', float),
                                                   ('UL_AGE_SEISMO', float),
                                                   ])
    apokasc['DIST_SEISMO']= dists/1000.
    apokasc['E_DIST_SEISMO']= edists/1000.
    apokasc['AGE_SEISMO']= ages
    apokasc['E_AGE_SEISMO']= eages
    apokasc['LL_AGE_SEISMO']= llages
    apokasc['UL_AGE_SEISMO']= ulages
    return apokasc
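A usage sketch (assuming read_rodrigues and the Rodrigues catalog are available as above): keep only stars with a seismic age and turn the lower/upper age limits added by the function into an asymmetric error bar.
import numpy

apokasc= match_apokasc_rodrigues(median=True)
has_age= apokasc['AGE_SEISMO'] > 0.
# Asymmetric age uncertainties from the lower/upper limits filled in above
age_err_lo= apokasc['AGE_SEISMO'][has_age]-apokasc['LL_AGE_SEISMO'][has_age]
age_err_up= apokasc['UL_AGE_SEISMO'][has_age]-apokasc['AGE_SEISMO'][has_age]
print("%i stars with seismic ages" % numpy.sum(has_age))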
Example #7
def good4CN(cluster, data):
    """
    NAME:
       good4CN
    PURPOSE:
       return the indices of stars that can be used to determine the spread in C/N
    INPUT:
       cluster - the cluster name
       data - the data for this cluster
    OUTPUT:
       index
    HISTORY:
       2015-09-04 - Written - Bovy (UofT)
    """
    if cluster.lower() == 'm67':
        indx= (data['LOGG'] > _m67rccut(data['TEFF']))\
            *(numpy.fabs(data['TEFF']-4600.)>3.)
    elif cluster.lower() == 'n6819':
        apokasc = apread.apokasc()
        ma = numpy.zeros(len(data), dtype='int') - 1
        for ii in range(len(data)):
            try:
                ma[ii] = (numpy.arange(
                    len(apokasc))[apokasc['APOGEE_ID'] == data['ID'][ii]])[0]
            except IndexError:
                pass
        indx = numpy.ones(len(data), dtype='bool')
        for ii in range(len(data)):
            if ma[ii] >= 0 \
                    and apokasc[ma[ii]]['SEISMO EVOL'] == 'CLUMP          ':
                indx[ii] = False
        # Also exclude the clump stars' neighbours in effective temperature; they are all suspicious
        indx[numpy.fabs(data['TEFF'] - 4765.) < 60.] = False
    else:
        indx = numpy.ones(len(data), dtype='bool')
    return indx
Example #8
def calibrate_logg_dr12(rgb=False):
    """Calibrate, using RC or RGV when rgb=True (the latter should reproduce the official calibration"""
    #Read the calibration file, APOKASC, and match them
    caldata= fitsio.read(os.path.join(os.getenv('APOGEE_DATA'),'cal_%s.fits' % os.getenv('APOGEE_REDUX')),1)
    apokasc= apread.apokasc()
    h=esutil.htm.HTM()
    m1,m2,d12 = h.match(caldata['RA'],caldata['DEC'],
                        apokasc['RA'],apokasc['DEC'],
                        2./3600.,maxmatch=1)
    caldata= caldata[m1]
    apokasc= apokasc[m2]
    #Select a decent calibration sample
    # Use stars that are definitely RGB or RC
    seismoState= numpy.char.strip(apokasc['SEISMO EVOL'])
    if rgb:
        indx= (seismoState == 'RGB') \
            + (seismoState == 'DWARF/SUBGIANT')
    else:
        indx= (seismoState == 'CLUMP')
    #Add low-logg giants of any kind
    indx+= (apokasc['KASC_RG_LOGG_SCALE_2'] < 2.)
    #Remove bad data
    indx*= (caldata['FPARAM'][:,paramIndx('logg')] > -1000.)
    indx*= (apokasc['KASC_RG_LOGG_SCALE_2'] > -1000.)
    #Apply limits
    indx*= (caldata['FPARAM'][:,paramIndx('logg')] > 1.)\
        *(caldata['FPARAM'][:,paramIndx('logg')] < 3.8)
    print "Using %i stars to calibrate logg ..." % numpy.sum(indx)
    #Now fit the difference
    fitOut= numpy.polyfit(caldata['FPARAM'][indx,paramIndx('logg')],
                          caldata['FPARAM'][indx,paramIndx('logg')]\
                              -apokasc['KASC_RG_LOGG_SCALE_2'][indx],
                          1)
    print fitOut
    if True:
        bovy_plot.bovy_print()
        bovy_plot.bovy_plot(caldata['FPARAM'][indx,paramIndx('logg')],
                            caldata['FPARAM'][indx,paramIndx('logg')]\
                                -apokasc['KASC_RG_LOGG_SCALE_2'][indx],
                            'k.',
                            xrange=[0.,5.],yrange=[-0.5,1.],
                            xlabel=r'$\log g_{\mathrm{ASPCAP}}$',
                            ylabel=r'$\log g_{\mathrm{ASPCAP}}-\log g_{\mathrm{seismo}}$')
        if not rgb:
            plotindx= (seismoState == 'RGB') \
                + (seismoState == 'DWARF/SUBGIANT')
        else:
            plotindx= (seismoState == 'CLUMP')
        plotindx*= (caldata['FPARAM'][:,paramIndx('logg')] > -1000.)
        plotindx*= (apokasc['KASC_RG_LOGG_SCALE_2'] > -1000.)
        bovy_plot.bovy_plot(caldata['FPARAM'][plotindx,paramIndx('logg')],
                            caldata['FPARAM'][plotindx,paramIndx('logg')]\
                                -apokasc['KASC_RG_LOGG_SCALE_2'][plotindx],
                            '.',color='0.65',overplot=True)
        xs= numpy.linspace(1.,3.8,1001)
        bovy_plot.bovy_plot(xs,fitOut[0]*xs+fitOut[1],'k-',overplot=True)
        bovy_plot.bovy_plot(xs,-0.14*xs+0.588,'k--',overplot=True)
        print numpy.amax(numpy.fabs(fitOut[0]*xs+fitOut[1]-(-0.14*xs+0.588)))
        print numpy.amax(numpy.fabs(fitOut[0]*xs+fitOut[1]-(-0.14*xs+0.588))[xs < 2.])
        bovy_plot.bovy_text(r'$\mathrm{diff} = %.3f \log g_{\mathrm{ASPCAP}} + %.3f$' % (fitOut[0],fitOut[1]),
                            bottom_left=True,fontsize=14.)
        bovy_plot.bovy_end_print('/Users/bovy/Desktop/test.png')
    return None
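A sketch of how the fitted correction could be applied (not part of the original snippet): numpy.polyfit above returns the slope and intercept of (ASPCAP minus seismic) logg as a function of ASPCAP logg, which can be subtracted from the raw ASPCAP value; the function name and the example coefficients are taken from the dashed DR12 reference line plotted above.
def apply_logg_calibration(logg_aspcap, slope, intercept):
    # Sketch: subtract the fitted linear offset (ASPCAP - seismic) from
    # the raw ASPCAP logg; slope/intercept are the polyfit output printed
    # by calibrate_logg_dr12 (roughly -0.14 and 0.588 for the DR12 line)
    return logg_aspcap-(slope*logg_aspcap+intercept)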
Example #9
def plot_logg_jk_apokasc(parser):
    options,args= parser.parse_args()
    #Setup Zs
    if os.path.exists(args[0]):
        savefile= open(args[0],'rb')
        outhist= pickle.load(savefile)
        hists= pickle.load(savefile)
        edgess= pickle.load(savefile)
        data= pickle.load(savefile)
        savefile.close()
    else:
        zs= numpy.arange(0.0005,0.03005,0.0005)
        if _DEBUG:
            zs= numpy.arange(0.0005,0.03005,0.005)
        fehs= isodist.Z2FEH(zs,zsolar=0.017)#0.017 for APOGEE analysis
        zs= zs[numpy.fabs(fehs-options.feh) < 0.2]
        if _DEBUG:
            zs= [zs[numpy.fabs(fehs-options.feh) < 0.2][0]]
        fehs= isodist.Z2FEH(zs,zsolar=0.017)   
        #Load the RC models for each feh individually
        rcms= []
        hists= []
        edgess= []
        for z in zs:
            print z
            trc= rcmodel.rcmodel(Z=z,loggmin=1.,loggmax=3.5,
                                 band=options.band,basti=options.basti,
                                 imfmodel=options.imfmodel,
                                 parsec=options.parsec)
            rcms.append(trc)
            sample= numpy.vstack([trc._sample[:,0],trc._loggs[:,0]]).T
            weights= trc._weights
            hist, edges= numpy.histogramdd(sample,weights=weights,bins=31,
                                           range=[[0.5,0.8],[1.,3.5]])
            hists.append(hist)
            edgess.append(edges)
        #Load APOKASC data
        data= apread.apokasc()
        indx= (data['KASC_RG_LOGG_SCALE_2'] > 1.)\
            *(data['KASC_RG_LOGG_SCALE_2'] < 3.5)\
            *(data['METALS'] > options.feh-0.2)\
            *(data['METALS'] <= options.feh+0.2)
        print "Using %i APOKASC objects ..." % (numpy.sum(indx))
        data= data[indx]
        ndata= numpy.sum(indx)
        #Stack predictions
        outhist= numpy.zeros_like(hists[0])
        for ii in range(ndata):
            zindx= numpy.argmin(numpy.fabs(fehs-data['METALS'][ii]))
            outhist+= hists[zindx]
        save_pickles(args[0],outhist,hists,edgess,data)
    #Normalize each J-K
    for ii in range(len(outhist[:,0])):
        outhist[ii,:]/= numpy.nanmax(outhist[ii,:])/numpy.nanmax(outhist)
        rev= copy.copy(outhist[ii,::-1]) #Reverse; doing this in one step does not always work
        outhist[ii,:]= rev
    if True:
        #Reload apokasc data
        data= apread.apokasc()
        indx= (data['KASC_RG_LOGG_SCALE_2'] > 1.)\
            *(data['KASC_RG_LOGG_SCALE_2'] < 3.5)\
            *(data['METALS'] > options.feh-0.2)\
            *(data['METALS'] <= options.feh+0.2)
        data= data[indx]
    #Plot everything
    bovy_plot.bovy_print()
    bovy_plot.bovy_dens2d(outhist.T,origin='lower',cmap='gist_yarg',
                          xrange=[edgess[0][0][0],edgess[0][0][-1]],
                          yrange=[edgess[0][1][-1],edgess[0][1][0]],
                          aspect=(edgess[0][0][-1]-edgess[0][0][0])/float(edgess[0][1][-1]-edgess[0][1][0]),
                          xlabel=r'$(J-K_s)_0\ [\mathrm{mag}]$',
                          ylabel=r'$\mathrm{Seismic}\ \log g$',
                          shrink=0.78,
                          interpolation='nearest')
    #Overplot APOKASC data
    noseismo= data['SEISMO EVOL'] == 'UNKNOWN'
    if numpy.sum(noseismo) > 0:
        bovy_plot.bovy_plot(data['J0'][noseismo]-data['K0'][noseismo],
                            data['KASC_RG_LOGG_SCALE_2'][noseismo],'bo',
                            overplot=True,
                            mec='none',ms=3.)
    clumpseismo= data['SEISMO EVOL'] == 'CLUMP'
    if numpy.sum(clumpseismo) > 0:
        bovy_plot.bovy_plot(data['J0'][clumpseismo]-data['K0'][clumpseismo],
                            data['KASC_RG_LOGG_SCALE_2'][clumpseismo],'yo',
                            overplot=True,
                            mec='none',ms=4.5)
    noclumpseismo= (data['SEISMO EVOL'] == 'RGB') \
        + (data['SEISMO EVOL'] == 'DWARF/SUBGIANT')
    if numpy.sum(noclumpseismo) > 0:
        bovy_plot.bovy_plot(data['J0'][noclumpseismo]-data['K0'][noclumpseismo],
                            data['KASC_RG_LOGG_SCALE_2'][noclumpseismo],'ro',
                            overplot=True,
                            mec='none',ms=3.)
    bovy_plot.bovy_text(r'$%.1f < [\mathrm{M/H}] \leq %.1f$' % (options.feh-0.2,options.feh+0.2),
                        top_left=True,size=14.)
    bovy_plot.bovy_end_print(options.outfilename)
    return None
Example #10
def plot_Z_jk_apokasc(parser):
    options, args = parser.parse_args()
    # Setup Zs
    if os.path.exists(args[0]):
        savefile = open(args[0], "rb")
        outhist = pickle.load(savefile)
        hists = pickle.load(savefile)
        edgess = pickle.load(savefile)
        data = pickle.load(savefile)
        savefile.close()
    else:
        if _PREDICT:
            zs = numpy.arange(0.0005, 0.06005, 0.0005)
            if _DEBUG:
                zs = numpy.arange(0.0005, 0.06005, 0.005)
            # Load the RC models for each feh individually
            rcms = []
            hists = []
            edgess = []
            for z in zs:
                print z
                trc = rcmodel.rcmodel(
                    Z=z,
                    loggmin=1.8,
                    loggmax=2.8,
                    band=options.band,
                    basti=options.basti,
                    imfmodel=options.imfmodel,
                    parsec=options.parsec,
                )
                rcms.append(trc)
                sample = numpy.vstack([trc._sample[:, 0], z * numpy.ones(trc._sample.shape[0])]).T
                weights = trc._weights
                hist, edges = numpy.histogramdd(
                    sample, weights=weights, bins=12, range=[[0.5, 0.8], [0.0, 0.06]]  # 12*(10-_DEBUG*9),
                )
                hists.append(hist)
                edgess.append(edges)
        # Load APOKASC data
        data = apread.apokasc()
        indx = (data["KASC_RG_LOGG_SCALE_2"] > 2.25) * (data["KASC_RG_LOGG_SCALE_2"] < 2.65)
        print "Using %i APOKASC objects ..." % (numpy.sum(indx))
        data = data[indx]
        if _PREDICT:
            # Stack predictions
            outhist = numpy.zeros_like(hists[0])
            for ii in range(len(hists)):
                outhist += hists[ii]
            save_pickles(args[0], outhist, hists, edgess, data)
    if _PREDICT:
        # Normalize each color
        pass
        #        for ii in range(len(outhist[:,0])):
    #            outhist[ii,:]/= numpy.nanmax(outhist[ii,:])/numpy.nanmax(outhist)
    # for ii in range(len(outhist[0,:])):
    #    outhist[:,ii]/= numpy.nanmax(outhist[:,ii])/numpy.nanmax(outhist)
    # Plot everything
    bovy_plot.bovy_print()
    if _PREDICT:
        bovy_plot.bovy_dens2d(
            outhist.T,
            origin="lower",
            cmap="gist_yarg",
            xrange=[edgess[0][0][0], edgess[0][0][-1]],
            yrange=[edgess[0][1][0], edgess[0][1][-1]],
            aspect=(edgess[0][0][-1] - edgess[0][0][0]) / float(edgess[0][1][-1] - edgess[0][1][0]),
            xlabel=r"$(J-K_s)_0\ [\mathrm{mag}]$",
            ylabel=r"$Z$",
            shrink=0.78,
            interpolation="nearest",
        )
        # Overplot APOKASC data
        # Load APOKASC data
        data = apread.apokasc()
        indx = (data["KASC_RG_LOGG_SCALE_2"] > 1.8) * (data["KASC_RG_LOGG_SCALE_2"] < 2.8)
        print "Using %i APOKASC objects ..." % (numpy.sum(indx))
        data = data[indx]
    bovy_plot.bovy_plot(
        data["J0"] - data["K0"],
        options.zsolar * 10.0 ** data["METALS"],
        c=data["KASC_RG_LOGG_SCALE_2"] - 2.45,
        s=20.0,
        edgecolors="none",
        scatter=True,
        colorbar=True,
        overplot=True,
    )
    #                        mec='none',ms=3.)
    # Overplot cuts
    jks = numpy.linspace(0.5, 0.8, 201)
    bovy_plot.bovy_plot(jks, rcmodel.jkzcut(jks), "k--", lw=2.0, overplot=True)
    bovy_plot.bovy_plot(jks, rcmodel.jkzcut(jks, upper=True), "k--", lw=2.0, overplot=True)
    bovy_plot.bovy_end_print(options.outfilename)
    return None