Example #1
def calc_aA_sim(RvR,filename,snap_gc):
    # Calculate the action angle variables for a simulation and store
    if not os.path.exists(filename):
        aAI= actionAngleIsochroneApprox(pot=lp,b=0.8)
        nbatch= 20
        multiOut= multi.parallel_map(\
            lambda x: aAI.actionsFreqsAngles(RvR[x*nbatch:(x+1)*nbatch,0]/R0,
                                             RvR[x*nbatch:(x+1)*nbatch,1]/V0,
                                             RvR[x*nbatch:(x+1)*nbatch,2]/V0,
                                             RvR[x*nbatch:(x+1)*nbatch,3]/R0,
                                             RvR[x*nbatch:(x+1)*nbatch,4]/V0,
                                             RvR[x*nbatch:(x+1)*nbatch,5]),
            range(len(snap_gc)//nbatch),
            numcores=25)
        acfs= numpy.reshape(numpy.swapaxes(numpy.array(multiOut),0,1),
                            (9,numpy.prod(numpy.array(multiOut).shape)//9))
        # Write to file
        csvfile= open(filename,'w')
        writer= csv.writer(csvfile,delimiter=',')
        for jj in range(len(acfs[0])):
            writer.writerow([acfs[0][jj],acfs[1][jj],acfs[2][jj],
                                     acfs[3][jj],acfs[4][jj],acfs[5][jj],
                                     acfs[6][jj],acfs[7][jj],acfs[8][jj]])
            csvfile.flush()
        csvfile.close()
    else:
        acfs= numpy.loadtxt(filename,delimiter=',').T
    return acfs
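A minimal driver sketch for calc_aA_sim, assuming the module-level names it relies on (lp, R0, V0 and the galpy imports) are already in scope; the snapshot file name, unit scales, and potential below are illustrative placeholders, not from the original source.

import numpy
from galpy.potential import LogarithmicHaloPotential

# Illustrative unit scales and potential assumed by calc_aA_sim above
R0, V0 = 8., 220.                    # kpc, km/s
lp = LogarithmicHaloPotential(normalize=1., q=0.9)

# RvR: one row per particle, columns [R, vR, vT, z, vz, phi] in physical units
RvR = numpy.loadtxt('snap_rvr.dat', delimiter=',')   # hypothetical snapshot file
snap_gc = RvR                        # only len(snap_gc) is used, to set the number of batches
acfs = calc_aA_sim(RvR, 'snap_actions.csv', snap_gc)
print(acfs.shape)                    # (9, N): actions, frequencies, and angles per particle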
Example #2
def calc_aA_sim(RvR, filename, snap_gc):
    # Calculate the action angle variables for a simulation and store
    if not os.path.exists(filename):
        aAI = actionAngleIsochroneApprox(pot=lp, b=0.8)
        nbatch = 20
        multiOut= multi.parallel_map(\
            lambda x: aAI.actionsFreqsAngles(RvR[x*nbatch:(x+1)*nbatch,0]/R0,
                                             RvR[x*nbatch:(x+1)*nbatch,1]/V0,
                                             RvR[x*nbatch:(x+1)*nbatch,2]/V0,
                                             RvR[x*nbatch:(x+1)*nbatch,3]/R0,
                                             RvR[x*nbatch:(x+1)*nbatch,4]/V0,
                                             RvR[x*nbatch:(x+1)*nbatch,5]),
            range(len(snap_gc)//nbatch),
            numcores=25)
        acfs = numpy.reshape(numpy.swapaxes(numpy.array(multiOut), 0, 1),
                             (9, numpy.prod(numpy.array(multiOut).shape) // 9))
        # Write to file
        csvfile = open(filename, 'w')
        writer = csv.writer(csvfile, delimiter=',')
        for jj in range(len(acfs[0])):
            writer.writerow([
                acfs[0][jj], acfs[1][jj], acfs[2][jj], acfs[3][jj],
                acfs[4][jj], acfs[5][jj], acfs[6][jj], acfs[7][jj], acfs[8][jj]
            ])
            csvfile.flush()
        csvfile.close()
    else:
        acfs = numpy.loadtxt(filename, delimiter=',').T
    return acfs
Example #3
def plotCombinedPDF(options,args):
    if options.sample.lower() == 'g':
        npops= 62
    elif options.sample.lower() == 'k':
        npops= 54
    if options.sample.lower() == 'g':
        savefile= open('binmapping_g.sav','rb')
    elif options.sample.lower() == 'k':
        savefile= open('binmapping_k.sav','rb')
    fehs= pickle.load(savefile)
    afes= pickle.load(savefile)
    savefile.close()
    #First calculate the derivative properties
    if not options.group is None:
        gafes, gfehs, legend= getMultiComparisonBins(options)
    else:
        legend= None
    if not options.multi is None:
        PDFs= multi.parallel_map((lambda x: calcAllPDFs(x,options,args)),
                                  range(npops),
                                  numcores=numpy.amin([options.multi,
                                                       npops,
                                                       multiprocessing.cpu_count()]))
    else:
        PDFs= []
        for ii in range(npops):
            PDFs.append(calcAllPDFs(ii,options,args))
    #Go through and combine
    combined_lnpdf= numpy.zeros((options.nrds,options.nfhs))
    for ii in range(npops):
        if not options.group is None:
            if numpy.amin((gfehs-fehs[ii])**2./0.1+(gafes-afes[ii])**2./0.0025) > 0.001:
                continue
        combined_lnpdf+= PDFs[ii]
    alogl= combined_lnpdf-numpy.nanmax(combined_lnpdf)
    #Now plot
    bovy_plot.bovy_print()
    bovy_plot.bovy_dens2d(numpy.exp(alogl).T,
                          origin='lower',cmap='gist_yarg',
                          interpolation='nearest',
                          xrange=[1.9,3.5],yrange=[-1./32.,1.+1./32.],
                          xlabel=r'$R_d\ (\mathrm{kpc})$',ylabel=r'$f_h$')
    if not legend is None:
        bovy_plot.bovy_text(legend,top_left=True,
                            size=14.)
    bovy_plot.bovy_end_print(options.outfilename)
    #Calculate and print derived properties
    derivProps= rawDerived(alogl,options,
                           vo=options.fixvc/_REFV0,zh=options.fixzh,
                           dlnvcdlnr=options.dlnvcdlnr)
    for key in derivProps.keys():
        if '_err' not in key:
            print(key, derivProps[key], derivProps[key+'_err'],
                  derivProps[key]/derivProps[key+'_err'])
    return None
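The combination loop above simply adds the per-population log-PDFs on a shared (R_d, f_h) grid and shifts by the maximum before exponentiating; a compact numpy-only sketch of that step, with made-up grid and population sizes:

import numpy

nrds, nfhs, npops = 101, 101, 62                               # illustrative sizes
PDFs = [numpy.random.randn(nrds, nfhs) for _ in range(npops)]  # stand-in log-PDFs

combined_lnpdf = numpy.zeros((nrds, nfhs))
for lnpdf in PDFs:
    combined_lnpdf += lnpdf                                # independent bins: log-PDFs add
alogl = combined_lnpdf - numpy.nanmax(combined_lnpdf)      # shift so the peak is at 0
pdf = numpy.exp(alogl)                                     # relative PDF, peak normalized to 1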
Example #4
def calc_avg_rcmks(parser):
    options,args= parser.parse_args()
    njks= 101
    nmks= 101
    jks= numpy.linspace(0.5,0.8,njks)
    mks= numpy.linspace(-0.5,-3.,nmks)
    if options.basti:
        zs= numpy.array([0.004,0.008,0.01,0.0198,0.03,0.04])
        zsolar= 0.019
    elif options.parsec:
        zs= numpy.arange(0.0005,0.06005,0.0005)
#        zs= numpy.array([0.01,0.02])
        zsolar= 0.019
    else:
        zs= numpy.arange(0.0005,0.03005,0.0005)
#        zs= numpy.array([0.01,0.02])
        zsolar= 0.019
    if not os.path.exists(options.outfilename):
        logpz= localzdist(zs,zsolar=zsolar)
        logmkp= numpy.zeros((len(zs),njks,nmks))
        logp= numpy.zeros((len(zs),njks,nmks))      
        funcargs= (zs,options,njks,jks,nmks,mks,logpz)
        multOut= multi.parallel_map((lambda x: indiv_calc(x,
                                                          *funcargs)),
                                    range(len(zs)),
                                    numcores=numpy.amin([64,len(zs),
                                                         multiprocessing.cpu_count()]))
        for ii in range(len(zs)):
            logmkp[ii,:,:]= multOut[ii][0,:,:]
            logp[ii,:,:]= multOut[ii][1,:,:]
        save_pickles(options.outfilename,logmkp,logp)
    else:
        savefile= open(options.outfilename,'rb')
        logmkp= pickle.load(savefile)
        logp= pickle.load(savefile)
        savefile.close()
    indx= numpy.isnan(logp)
    logp[indx]= -numpy.finfo(numpy.dtype(numpy.float64)).max
    logmkp[indx]= -numpy.finfo(numpy.dtype(numpy.float64)).max
    #Average the peak, so calculate the peak
    for ii in range(len(zs)):
        for jj in range(njks):
            maxmkindx= numpy.argmax(logp[ii,jj,:])
            totlogp= maxentropy.logsumexp(logp[ii,jj,:])
            logmkp[ii,jj,:]= logmkp[ii,jj,maxmkindx]-logp[ii,jj,maxmkindx]+totlogp
            logp[ii,jj,:]= totlogp
    avgmk= numpy.exp(maxentropy.logsumexp(logmkp.flatten())\
                         -maxentropy.logsumexp(logp.flatten()))
    solindx= numpy.argmin(numpy.fabs(zs-0.017))
    avgmksolar= numpy.exp(maxentropy.logsumexp(logmkp[solindx,:,:].flatten())\
                              -maxentropy.logsumexp(logp[solindx,:,:].flatten()))
    print "Average mk: %f" % (-avgmk)
    print "Average mk if solar: %f" % (-avgmksolar)
    return -avgmk
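scipy.maxentropy was removed from SciPy long ago, so the logsumexp calls above need scipy.special.logsumexp in any current environment; a sketch of the same log-space weighted average with the modern import (the arrays are stand-ins):

import numpy
from scipy.special import logsumexp

logmkp = numpy.random.randn(10, 101, 101)     # stand-in for log(|M_K| * p)
logp = numpy.random.randn(10, 101, 101)       # stand-in for log(p)
# <M_K> = sum(M_K p) / sum(p), evaluated entirely in log space for numerical stability
avgmk = numpy.exp(logsumexp(logmkp) - logsumexp(logp))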
Example #5
def calc_effsel(args,options,sample=None):
    # Work-horse function to compute the effective selection function, 
    # sample is a data sample of stars to consider for the (JK,Z) sampling
    # Setup selection function
    selectFile= '../savs/selfunc-nospdata.sav'
    if os.path.exists(selectFile):
        with open(selectFile,'rb') as savefile:
            apo= pickle.load(savefile)
    else:
        # Setup selection function
        apo= apogee.select.apogeeSelect()
        # Delete these because they're big and we don't need them
        del apo._specdata
        del apo._photdata
        save_pickles(selectFile,apo)
    # Get the full data sample for the locations (need all locations where 
    # stars could be observed, so the whole sample, not just the subsample
    # being analyzed)
    data= get_rcsample()
    locations= list(set(list(data['LOCATION_ID'])))
    # Load the dust map and setup the effective selection function
    if options.dmap.lower() == 'green15':
        dmap3d= mwdust.Green15(filter='2MASS H')
    elif options.dmap.lower() == 'marshall06':
        dmap3d= mwdust.Marshall06(filter='2MASS H')
    elif options.dmap.lower() == 'drimmel03':
        dmap3d= mwdust.Drimmel03(filter='2MASS H')
    elif options.dmap.lower() == 'sale14':
        dmap3d= mwdust.Sale14(filter='2MASS H')
    elif options.dmap.lower() == 'zero':
        dmap3d= mwdust.Zero(filter='2MASS H')
    # Sample the M_H distribution
    if options.samplemh:
        if sample is None: sample= data
        MH= sample['H0']-sample['RC_DM']
        MH= numpy.random.permutation(MH)[:1000] # do 1,000 max
    else:
        MH= -1.49
    apof= apogee.select.apogeeEffectiveSelect(apo,dmap3d=dmap3d,MH=MH)
    # Distances at which to calculate the effective selection function
    distmods= numpy.linspace(options.dm_min,options.dm_max,options.ndm)
    ds= 10.**(distmods/5-2.)
    # Now compute all selection functions
    out= multi.parallel_map((lambda x: _calc_effsel_onelocation(\
                locations[x],apof,apo,ds)),
                            range(len(locations)),
                            numcores=numpy.amin([len(locations),
                                                 multiprocessing.cpu_count(),options.multi]))
    # Save out
    out= numpy.array(out)
    save_pickles(args[0],locations,out,distmods,ds)
    return None
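The distance grid follows directly from the distance-modulus definition mu = 5 log10(d/kpc) + 10, i.e. d = 10^(mu/5 - 2) kpc; a two-line check with illustrative bounds in place of options.dm_min, options.dm_max, and options.ndm:

import numpy

distmods = numpy.linspace(7., 15.5, 301)           # illustrative dm_min, dm_max, ndm
ds = 10.**(distmods / 5. - 2.)                      # distances in kpc
assert numpy.isclose(ds[0], 10.**(7. / 5. - 2.))    # mu = 7 -> about 0.25 kpc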
Example #6
def calc_effsel(args, options, sample=None):
    # Work-horse function to compute the effective selection function,
    # sample is a data sample of stars to consider for the (JK,Z) sampling
    # Setup selection function
    selectFile = '../savs/selfunc-nospdata.sav'
    if os.path.exists(selectFile):
        with open(selectFile, 'rb') as savefile:
            apo = pickle.load(savefile)
    else:
        # Setup selection function
        apo = apogee.select.apogeeSelect()
        # Delete these because they're big and we don't need them
        del apo._specdata
        del apo._photdata
        save_pickles(selectFile, apo)
    # Get the full data sample for the locations (need all locations where
    # stars could be observed, so the whole sample, not just the subsample
    # being analyzed)
    data = get_rcsample()
    locations = list(set(list(data['LOCATION_ID'])))
    # Load the dust map and setup the effective selection function
    if options.dmap.lower() == 'green15':
        dmap3d = mwdust.Green15(filter='2MASS H')
    elif options.dmap.lower() == 'marshall06':
        dmap3d = mwdust.Marshall06(filter='2MASS H')
    elif options.dmap.lower() == 'drimmel03':
        dmap3d = mwdust.Drimmel03(filter='2MASS H')
    elif options.dmap.lower() == 'sale14':
        dmap3d = mwdust.Sale14(filter='2MASS H')
    elif options.dmap.lower() == 'zero':
        dmap3d = mwdust.Zero(filter='2MASS H')
    # Sample the M_H distribution
    if options.samplemh:
        if sample is None: sample = data
        MH = sample['H0'] - sample['RC_DM']
        MH = numpy.random.permutation(MH)[:1000]  # do 1,000 max
    else:
        MH = -1.49
    apof = apogee.select.apogeeEffectiveSelect(apo, dmap3d=dmap3d, MH=MH)
    # Distances at which to calculate the effective selection function
    distmods = numpy.linspace(options.dm_min, options.dm_max, options.ndm)
    ds = 10.**(distmods / 5 - 2.)
    # Now compute all selection functions
    out= multi.parallel_map((lambda x: _calc_effsel_onelocation(\
                locations[x],apof,apo,ds)),
                            range(len(locations)),
                            numcores=numpy.amin([len(locations),
                                                 multiprocessing.cpu_count(),options.multi]))
    # Save out
    out = numpy.array(out)
    save_pickles(args[0], locations, out, distmods, ds)
    return None
def plot_distanceintegral(savename,plotname,rmcenter=False,
                          onlygreen=False):
    if os.path.exists(savename):
        with open(savename,'rb') as savefile:
            area= pickle.load(savefile)
    else:
        # For sampling over the absolute magnitude distribution
        iso= gaia_rc.load_iso()
        Gsamples= gaia_rc.sample_Gdist(iso,n=_NGSAMPLES)
        # l and b of the pixels
        theta, phi= healpy.pixelfunc.pix2ang(_NSIDE,
                                             numpy.arange(healpy.pixelfunc.nside2npix(_NSIDE)),
                                             nest=False)
        cosb= numpy.sin(theta)
        area= multi.parallel_map(lambda x: distanceIntegrand(\
                dust._GREEN15DISTS[x],cosb,Gsamples,rmcenter,onlygreen),
                                 range(len(dust._GREEN15DISTS)),
                                 numcores=numpy.amin([16,
                                                      len(dust._GREEN15DISTS),
                                                      multiprocessing.cpu_count()]))

        save_pickles(savename,area)
    # Plot the power spectrum
    if True:
        psdx, psd= signal.periodogram(area*dust._GREEN15DISTS**3./numpy.sum(area*dust._GREEN15DISTS**3.),
                                      fs=1./(dust._GREEN15DISTMODS[1]-dust._GREEN15DISTMODS[0]),
                                      detrend=lambda x: x,scaling='spectrum')
        bovy_plot.bovy_print(fig_height=3.)
        matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{yfonts}"]
        bovy_plot.bovy_plot(psdx[1:],psd[1:],
                            'k-',loglog=True,
                            xlabel=r'$2\pi\,k_\mu\,(\mathrm{mag}^{-1})$',
                            ylabel=r'$P_k$',
                            xrange=[0.04,4.])
        bovy_plot.bovy_text(r'$\mathrm{normalized}\ D^3\,\nu_*(\mu|\theta)\,\textswab{S}(\mu)$',
                            bottom_left=True,size=16.)
        bovy_plot.bovy_end_print(plotname)               
    else:
        bovy_plot.bovy_print(fig_height=3.)
        matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{yfonts}"]
        bovy_plot.bovy_plot(dust._GREEN15DISTMODS,
                            area*dust._GREEN15DISTS**3.,
                            'k-',
                            xlabel=r'$\mu\,(\mathrm{mag}^{-1})$',
                            ylabel=r'$D^3\,\nu_*(\mu|\theta)\,\textswab{S}(\mu)$')
        bovy_plot.bovy_end_print(plotname)
    spl= interpolate.InterpolatedUnivariateSpline(dust._GREEN15DISTMODS,
                                                  area*dust._GREEN15DISTS**3.,
                                                  k=5)
    fthder= [spl.derivatives(dm)[4] for dm in dust._GREEN15DISTMODS]
    print "Simpson error= %g, volume= %g" % (0.5**4./180.*numpy.mean(numpy.fabs(fthder))/integrate.simps(area*dust._GREEN15DISTS**3.,dx=0.5),numpy.sum(area*dust._GREEN15DISTS**3.))
    return None
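A self-contained sketch of the power-spectrum step above, with a synthetic signal standing in for the normalized area x D^3 curve; the sampling frequency is just the inverse of the distance-modulus spacing, and detrending is disabled exactly as in the call above:

import numpy
from scipy import signal

dmods = numpy.arange(4., 19., 0.125)                      # illustrative distance-modulus grid
y = 1. + 0.1 * numpy.sin(2. * numpy.pi * dmods / 0.5)     # stand-in for area * D^3
psdx, psd = signal.periodogram(y / numpy.sum(y),
                               fs=1. / (dmods[1] - dmods[0]),
                               detrend=lambda x: x, scaling='spectrum')
# psdx[1:], psd[1:] would then be plotted on log-log axes as in the function above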
Example #8
def plotRdsz(options,args):
    #Go through all of the bins
    if options.sample.lower() == 'g':
        npops= 62
    elif options.sample.lower() == 'k':
        npops= 30
    if not options.multi is None:
        dummy= multi.parallel_map((lambda x: plotRdsz_single(x,options,args)),
                                  range(npops),
                                  numcores=numpy.amin([options.multi,
                                                       npops,
                                                       multiprocessing.cpu_count()]))
    else:
        for ii in range(npops):
            plotRdsz_single(ii,options,args)
Example #9
def plot_distanceareaintegral(savename,
                              plotname,
                              rmcenter=False,
                              onlygreen=False):
    if os.path.exists(savename):
        with open(savename, 'rb') as savefile:
            area = pickle.load(savefile)
    else:
        # For sampling over the absolute magnitude distribution
        iso = gaia_rc.load_iso()
        Gsamples = gaia_rc.sample_Gdist(iso, n=_NGSAMPLES)
        # l and b of the pixels
        theta, phi = healpy.pixelfunc.pix2ang(
            _NSIDE,
            numpy.arange(healpy.pixelfunc.nside2npix(_NSIDE)),
            nest=False)
        cosb = numpy.sin(theta)
        area= multi.parallel_map(lambda x: distanceAreaIntegrand(\
                dust._GREEN15DISTS[x],cosb,Gsamples,rmcenter,onlygreen),
                                 range(len(dust._GREEN15DISTS)),
                                 numcores=numpy.amin([16,
                                                      len(dust._GREEN15DISTS),
                                                      multiprocessing.cpu_count()]))

        save_pickles(savename, area)
    # Plot the power spectrum
    area = numpy.array(area)
    if True:
        psdthis = ((area.T * dust._GREEN15DISTS**3.).T / numpy.sum(
            (area.T * dust._GREEN15DISTS**3.), axis=1))
        psdx, psd = signal.periodogram(
            psdthis,
            fs=1. / (dust._GREEN15DISTMODS[1] - dust._GREEN15DISTMODS[0]),
            detrend=lambda x: x,
            scaling='spectrum',
            axis=0)
        bovy_plot.bovy_print(fig_height=3.)
        matplotlib.rcParams['text.latex.preamble'] = [r"\usepackage{yfonts}"]
        healpy.visufunc.mollview(numpy.log10(psd[-2]),
                                 nest=False,
                                 xsize=4000,
                                 min=-10.,
                                 max=-3.,
                                 cmap='gist_yarg',
                                 title="")
        bovy_plot.bovy_end_print(plotname)
    return None
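Here the periodogram is taken along the distance axis separately for every HEALPix pixel (axis=0), and one frequency bin (psd[-2]) is then shown as a sky map; a toy-sized numpy/scipy sketch of that per-pixel spectrum, leaving out the healpy plotting:

import numpy
from scipy import signal

ndists, npix = 31, 48                                     # toy distance grid and pixel count
area = numpy.random.uniform(0.5, 1.5, (ndists, npix))     # stand-in per-pixel integrand
dists = numpy.linspace(2., 17., ndists)
norm = (area.T * dists**3.).T / numpy.sum(area.T * dists**3., axis=1)
psdx, psd = signal.periodogram(norm, fs=1. / 0.5,         # 0.5 mag spacing assumed
                               detrend=lambda x: x, scaling='spectrum', axis=0)
# psd has shape (nfreq, npix); psd[-2] is the per-pixel power near the Nyquist frequency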
def plot_pdfs_x(plotfilename):
    lp= potential.LogarithmicHaloPotential(q=0.9,normalize=1.)
    aAI= actionAngleIsochroneApprox(b=0.8,pot=lp)
    obs= numpy.array([1.56148083,0.35081535,-1.15481504,
                      0.88719443,-0.47713334,0.12019596])
    sdft= streamdf(_SIGV/220.,progenitor=Orbit(obs),pot=lp,aA=aAI,
                   leading=False,nTrackChunks=_NTRACKCHUNKS,
                   vsun=[0.,30.24*8.,0.],
                   tdisrupt=4.5/bovy_conversion.time_in_Gyr(220.,8.),
                   multi=_NTRACKCHUNKS)
    #Calculate the density as a function of l, p(l)
    txs= numpy.linspace(3.,12.4,_NLS)
    tlogps= multi.parallel_map((lambda x: sdft.callMarg([txs[x]/8.,None,None,None,None,None],
                                                        interp=True,ngl=_NGL,
                                                        nsigma=3)),
                              range(_NLS),
                              numcores=numpy.amin([_NLS,
                                                   multiprocessing.cpu_count()]))
    tlogps= numpy.array(tlogps)
    tlogps[numpy.isnan(tlogps)]= -100000000000000000.
    tps= numpy.exp(tlogps-logsumexp(tlogps))
    tps/= numpy.nansum(tps)*(txs[1]-txs[0])
    bovy_plot.bovy_print()
    bovy_plot.bovy_plot(txs,tps,'k-',lw=1.5,
                        xlabel=r'$X\,(\mathrm{kpc})$',
                        ylabel=r'$p(X)$',
                        xrange=[3.,12.4],
                        yrange=[0.,1.2*numpy.nanmax(tps)])
    bovy_plot.bovy_plot(txs,tps,'k-',lw=1.5,overplot=True)
    #Also plot the stream histogram
    #Read stream
    data= numpy.loadtxt(os.path.join(_STREAMSNAPDIR,'gd1_evol_hitres_01312.dat'),
                        delimiter=',')
    aadata= numpy.loadtxt(os.path.join(_STREAMSNAPAADIR,
                                       'gd1_evol_hitres_aa_01312.dat'),
                          delimiter=',')
    thetar= aadata[:,6]
    thetar= (numpy.pi+(thetar-numpy.median(thetar))) % (2.*numpy.pi)
    indx= thetar-numpy.pi < -(5.*numpy.median(numpy.fabs(thetar-numpy.median(thetar))))
    data= data[indx,:]
    bovy_plot.bovy_hist(data[:,1],bins=20,range=[3.,12.4],
                        histtype='step',normed=True,
                        overplot=True,
                        lw=1.5,color='k')
    bovy_plot.bovy_end_print(plotfilename)
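The histogram at the end only uses stars well away from the progenitor, selected by a cut on the radial angle theta_r at five times its median absolute deviation; that cut in isolation, on a synthetic angle array:

import numpy

thetar = numpy.random.uniform(0., 2. * numpy.pi, 10000)                   # stand-in angles
thetar = (numpy.pi + (thetar - numpy.median(thetar))) % (2. * numpy.pi)   # recenter on pi
mad = numpy.median(numpy.fabs(thetar - numpy.median(thetar)))
indx = thetar - numpy.pi < -5. * mad       # keep only stars far below the progenitor clump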
Example #11
def calculate_bin_error(samples,
                        fehbin,
                        agebin,
                        nbin,
                        iso_grid,
                        type='brokenexpflare',
                        loggcut=[1.8, 3.0],
                        teffcut=[4000, 5000],
                        n_sampling=1000,
                        progress=True,
                        mp=True,
                        fitIndx=None,
                        weights='padova',
                        distance_cut=False,
                        lowermass=None):
    randsamples = np.random.permutation(samples.T)[:n_sampling]
    m_sample = np.zeros(np.shape(randsamples)[0])
    if not mp:
        for ii, params in enumerate(randsamples):
            if progress:
                print(str(round(float(ii) / float(n_sampling) * 100, 2)) +
                      '% complete!')
            m = calc_normalisation(params,
                                   nbin,
                                   iso_grid,
                                   fehbin=fehbin,
                                   agebin=agebin,
                                   loggcut=loggcut,
                                   teffcut=teffcut,
                                   type=type,
                                   verbose=False,
                                   fitIndx=fitIndx,
                                   gridfile=gridfile,
                                   weights=weights,
                                   distance_cut=distance_cut,
                                   lowermass=lowermass)[0]
            m_sample[ii] = m
    if mp == True:
        m_sample= multi.parallel_map((lambda x: calc_normalisation(randsamples[x], nbin, iso_grid, fehbin=fehbin, agebin=agebin,loggcut=loggcut, teffcut=teffcut, type=type, verbose=False, fitIndx=fitIndx, distance_cut=distance_cut, lowermass=lowermass)[0]),\
                range(np.shape(randsamples)[0]),numcores=numpy.amin([np.shape(randsamples)[0], multiprocessing.cpu_count()/2]))
    median = np.percentile(m_sample, 50)
    lowerr = np.percentile(m_sample, 16)
    uperr = np.percentile(m_sample, 84)
    return m_sample, median, lowerr, uperr
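The returned quantities are percentile summaries of the Monte-Carlo normalisations; note that lowerr and uperr are the 16th and 84th percentiles themselves, not offsets from the median. A tiny sketch of that summary on stand-in samples:

import numpy as np

m_sample = np.random.lognormal(mean=0., sigma=0.2, size=1000)   # stand-in normalisations
median = np.percentile(m_sample, 50)
lowerr = np.percentile(m_sample, 16)    # lower 1-sigma-equivalent bound
uperr = np.percentile(m_sample, 84)     # upper 1-sigma-equivalent bound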
def calc_actions(snapfile=None):
    #Directories
    snapdir= 'snaps/'
    basefilename= snapfile.split('.')[0]
    nsnap= len(glob.glob(os.path.join(snapdir,basefilename+'_*.dat')))
    print "Processing %i snapshots ..." % nsnap
    #Setup potential
    lp= potential.LogarithmicHaloPotential(normalize=1.,q=0.9)
    if False:
        aA= actionAngleStaeckel(pot=lp,delta=1.20,c=True)
        snapaadir= 'snaps_aas/'
    else:
        aA= actionAngleIsochroneApprox(pot=lp,b=0.8)
        snapaadir= 'snaps_aai/'
    #Run each snapshot
    if True:
        calcThese= []
        for ii in range(nsnap):
            csvfilename= os.path.join(snapaadir,basefilename+'_aa_%s.dat' % str(ii).zfill(5))
            if os.path.exists(csvfilename):
                #Don't recalculate those that have already been calculated
                nstart= int(subprocess.check_output(['wc','-l',csvfilename]).split(' ')[0])
                if nstart < 10000:
                    calcThese.append(ii)
            else:
                calcThese.append(ii)
        nsnap= len(calcThese)
    if len(calcThese) == 0:
        print "All done with everything ..."
        return None
    args= (aA,snapdir,basefilename,snapaadir)
    print "Using %i cpus ..." % (numpy.amin([64,nsnap,
                                             multiprocessing.cpu_count()]))
    dummy= multi.parallel_map((lambda x: indiv_calc_actions(x,
                                                            *args)),
                              calcThese,
#                              range(nsnap),
                              numcores=numpy.amin([64,nsnap,
                                                      multiprocessing.cpu_count()]))
    return None
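The restart logic above counts lines by shelling out to wc -l, which is Linux-specific and fragile to parse; a portable pure-Python equivalent (the helper name is mine, not from the source) that could replace the subprocess call:

def count_lines(filename):
    """Portable replacement for `wc -l`: number of lines in filename."""
    with open(filename) as f:
        return sum(1 for _ in f)

# inside the loop above one could then use
#     nstart = count_lines(csvfilename)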
def plot_distanceareaintegral(savename,plotname,rmcenter=False,
                          onlygreen=False):
    if os.path.exists(savename):
        with open(savename,'rb') as savefile:
            area= pickle.load(savefile)
    else:
        # For sampling over the absolute magnitude distribution
        iso= gaia_rc.load_iso()
        Gsamples= gaia_rc.sample_Gdist(iso,n=_NGSAMPLES)
        # l and b of the pixels
        theta, phi= healpy.pixelfunc.pix2ang(_NSIDE,
                                             numpy.arange(healpy.pixelfunc.nside2npix(_NSIDE)),
                                             nest=False)
        cosb= numpy.sin(theta)
        area= multi.parallel_map(lambda x: distanceAreaIntegrand(\
                dust._GREEN15DISTS[x],cosb,Gsamples,rmcenter,onlygreen),
                                 range(len(dust._GREEN15DISTS)),
                                 numcores=numpy.amin([16,
                                                      len(dust._GREEN15DISTS),
                                                      multiprocessing.cpu_count()]))

        save_pickles(savename,area)
    # Plot the power spectrum
    area= numpy.array(area)
    if True:       
        psdthis= ((area.T*dust._GREEN15DISTS**3.).T/numpy.sum((area.T*dust._GREEN15DISTS**3.),axis=1))
        psdx, psd= signal.periodogram(psdthis,
                                      fs=1./(dust._GREEN15DISTMODS[1]-dust._GREEN15DISTMODS[0]),
                                      detrend=lambda x: x,scaling='spectrum',
                                      axis=0)
        bovy_plot.bovy_print(fig_height=3.)
        matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{yfonts}"]
        healpy.visufunc.mollview(numpy.log10(psd[-2]),
                                 nest=False,
                                 xsize=4000,min=-10.,max=-3.,
                                 cmap='gist_yarg',
                                 title="")
        bovy_plot.bovy_end_print(plotname)               
    return None
Example #14
def plot1d(options,args):
    """Make a plot of a quantity's best-fit vs. FeH and aFe"""
    if options.sample.lower() == 'g':
        npops= 62
    elif options.sample.lower() == 'k':
        npops= 54
    if options.sample.lower() == 'g':
        savefile= open('binmapping_g.sav','rb')
    elif options.sample.lower() == 'k':
        savefile= open('binmapping_k.sav','rb')
    fehs= pickle.load(savefile)
    afes= pickle.load(savefile)
    savefile.close()
    #First calculate the derivative properties
    if not options.multi is None:
        derivProps= multi.parallel_map((lambda x: calcAllDerivProps(x,options,args)),
                                  range(npops),
                                  numcores=numpy.amin([options.multi,
                                                       npops,
                                                       multiprocessing.cpu_count()]))
    else:
        derivProps= []
        for ii in range(npops):
            derivProps.append(calcAllDerivProps(ii,options,args))
    #Load into plotthis
    plotthis= numpy.zeros(npops)+numpy.nan
    for ii in range(npops):
        if numpy.log(monoAbundanceMW.hr(fehs[ii],afes[ii],
                                         k=(options.sample.lower() == 'k')) /8.) > -0.5 \
                or (options.sample.lower() == 'g' and ii < 6) \
                or (options.sample.lower() == 'k' and ii < 7):
            continue
        plotthis[ii]= derivProps[ii][options.subtype.lower()]
    #Now plot
    bovy_plot.bovy_print()
    monoAbundanceMW.plotPixelFunc(fehs,afes,plotthis,
                                  zlabel=labels[options.subtype.lower()])
    bovy_plot.bovy_end_print(options.outfilename)
    return None        
Example #15
def create_frames(options,args):
    # First reload the model
    with open('gd1pepper%isampling.pkl' % options.nsnap,'rb') as savefile:
        sdf_pepper_leading= pickle.load(savefile)
    with open('gd1pepper%isampling_trailing.pkl' % options.nsnap,'rb') as savefile:
        sdf_pepper_trailing= pickle.load(savefile)
    # Output times
    timpacts= sdf_pepper_leading._uniq_timpact
    # Sample unperturbed aAt
    numpy.random.seed(1)
    Oml,anglel,dtl= super(streampepperdf,sdf_pepper_leading)._sample_aAt(\
        options.nparticles)
    Omt,anglet,dtt= super(streampepperdf,sdf_pepper_trailing)._sample_aAt(\
        options.nparticles)
    # Setup progenitor
    prog= sdf_pepper_leading._progenitor().flip()
    prog.integrate(numpy.linspace(0.,9./bovy_conversion.time_in_Gyr(V0,R0),
                                  10001),sdf_pepper_leading._pot)
    prog.flip()
    # Setup impacts
    if options.single:
        # Hit the leading arm and the trailing arm 1 Gyr later
        m= options.singlemimpact/bovy_conversion.mass_in_1010msol(V0,R0)/1000.
        t= timpacts[\
            numpy.argmin(\
                numpy.fabs(\
                    numpy.array(timpacts)\
                        -options.singletimpact\
                        /bovy_conversion.time_in_Gyr(V0,R0)))]
        sdf_pepper_leading.set_impacts(\
            impactb=[0.5*simulate_streampepper.rs(options.singlemimpact*10.**7.)],
            subhalovel=numpy.array([[-25.,155.,30.]])/V0,
            impact_angle=[0.2],
            timpact=[t],
            GM=[m],rs=[simulate_streampepper.rs(options.singlemimpact*10.**7.)])
        # Trailing
        m= options.singlemimpact/bovy_conversion.mass_in_1010msol(V0,R0)/1000.
        t= timpacts[\
            numpy.argmin(\
                numpy.fabs(\
                    numpy.array(timpacts)\
                        -(options.singletimpact+1.)\
                        /bovy_conversion.time_in_Gyr(V0,R0)))]
        sdf_pepper_trailing.set_impacts(\
            impactb=[1.*simulate_streampepper.rs(options.singlemimpact*10.**7.)],
            subhalovel=numpy.array([[-25.,155.,30.]])/V0,
            impact_angle=[-0.3],
            timpact=[t],
            GM=[m],rs=[simulate_streampepper.rs(options.singlemimpact*10.**7.)])
    elif options.pepper:
        # Sampling functions
        massrange=[options.Mmin,options.Mmax]
        plummer= False
        Xrs= 5.
        nsubhalo= simulate_streampepper.nsubhalo
        rs= simulate_streampepper.rs
        dNencdm= simulate_streampepper.dNencdm
        sample_GM= lambda: (10.**((-0.5)*massrange[0])\
                            +(10.**((-0.5)*massrange[1])\
                              -10.**((-0.5)*massrange[0]))\
                            *numpy.random.uniform())**(1./(-0.5))\
            /bovy_conversion.mass_in_msol(V0,R0)
        rate_range= numpy.arange(massrange[0]+0.5,massrange[1]+0.5,1)
        rate= numpy.sum([dNencdm(sdf_pepper_leading,10.**r,Xrs=Xrs,
                                 plummer=plummer)
                         for r in rate_range])
        rate= options.timescdm*rate
        sample_rs= lambda x: rs(x*bovy_conversion.mass_in_1010msol(V0,R0)*10.**10.,
                                plummer=plummer)
        # Pepper both
        sdf_pepper_leading.simulate(rate=rate,sample_GM=sample_GM,
                                    sample_rs=sample_rs,Xrs=Xrs)
        print(numpy.amax(sdf_pepper_leading._GM)*bovy_conversion.mass_in_1010msol(V0,R0))
        sdf_pepper_trailing.simulate(rate=rate,sample_GM=sample_GM,
                                    sample_rs=sample_rs,Xrs=Xrs)
        print(numpy.amax(sdf_pepper_trailing._GM)*bovy_conversion.mass_in_1010msol(V0,R0))
    else:
        # Hit both with zero
        sdf_pepper_leading.set_impacts(\
            impactb=[0.],
            subhalovel=numpy.array([[-25.,155.,30.]])/V0,
            impact_angle=[0.2],
            timpact=[timpacts[0]],
            GM=[0.],rs=[1.])
        sdf_pepper_trailing.set_impacts(\
            impactb=[0.],
            subhalovel=numpy.array([[-25.,155.,30.]])/V0,
            impact_angle=[-0.2],
            timpact=[timpacts[0]],
            GM=[0.],rs=[1.])
    # Now make all frames
    dum= multi.parallel_map(
        (lambda x: _plot_one_frame(x,options,prog,timpacts,
                                   sdf_pepper_leading,sdf_pepper_trailing,
                                   Oml,Omt,anglel,anglet,dtl,dtt)),
         range(len(timpacts)),
         numcores=numpy.amin([len(timpacts),30]))
    return None
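The sample_GM lambda in the pepper branch is an inverse-transform sampler for a dN/dM proportional to M^(-1.5) subhalo mass function: M^(-0.5) is drawn uniformly between its endpoint values and then inverted. The same draw as a standalone function in solar masses (the 10^6.5 to 10^9 Msun range is illustrative):

import numpy

def sample_mass(logMmin=6.5, logMmax=9., size=1):
    """Draw masses [Msun] from dN/dM ~ M^-1.5 by inverse transform (M^-0.5 is uniform)."""
    a, b = 10.**(-0.5 * logMmin), 10.**(-0.5 * logMmax)
    u = a + (b - a) * numpy.random.uniform(size=size)
    return u**(1. / -0.5)                 # i.e. u**-2

masses = sample_mass(size=10000)          # heavily weighted toward the low-mass end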
Example #16
 def volume(self,
            vol_func,
            xyz=False,
            MJ=None,
            JK=None,
            ndists=101,
            linearDist=False,
            relative=False,
            ncpu=None):
     """
     NAME:
        volume
     PURPOSE:
        Compute the effective volume of a spatial volume under this effective selection function
     INPUT:
        vol_func - function of 
                      (a) (ra/deg,dec/deg,dist/kpc)
                      (b) heliocentric Galactic X,Y,Z if xyz
                   that returns 1. inside the spatial volume under consideration and 0. outside of it, should be able to take array input of a certain shape and return an array with the same shape
        xyz= (False) if True, vol_func is a function of X,Y,Z (see above)
        MJ= (object-wide default) absolute magnitude in J or an array of samples of absolute  magnitudes in J for the tracer population
        JK= (object-wide default) J-Ks color or an array of samples of the J-Ks color 
        relative= (False) if True, compute the effective volume completeness = effective volume / true volume; computed using the same integration grid, so will be more robust against integration errors (especially due to the finite HEALPix grid for the angular integration). For simple volumes, a more precise effective volume can be computed by using relative=True and multiplying in the correct true volume
        ndists= (101) number of distances to use in the distance integration
        linearDist= (False) if True, integrate in distance rather than distance modulus
        ncpu= (None) if set to an integer, use this many CPUs to compute the effective selection function (only for non-zero extinction)
      OUTPUT:
        effective volume
     HISTORY:
        2017-01-18 - Written - Bovy (UofT/CCA)
     """
     # Pre-compute coordinates for integrand evaluation
     if not hasattr(self,'_ra_cen_4vol') or \
             (hasattr(self,'_ndists_4vol') and
              (ndists != self._ndists_4vol or
               linearDist != self._linearDist_4vol)):
         theta,phi= healpy.pix2ang(\
             _BASE_NSIDE,numpy.arange(_BASE_NPIX)\
                 [True^self._tgasSel._exclude_mask_skyonly],nest=True)
         self._ra_cen_4vol = 180. / numpy.pi * phi
         self._dec_cen_4vol = 90. - 180. / numpy.pi * theta
         if linearDist:
             dists = numpy.linspace(0.001, 10., ndists)
             dms = 5. * numpy.log10(dists) + 10.
             self._deltadm_4vol = dists[1] - dists[0]
         else:
             dms = numpy.linspace(0., 18., ndists)
             self._deltadm_4vol = (dms[1] - dms[0]) * numpy.log(10.) / 5.
         self._dists_4vol = 10.**(0.2 * dms - 2.)
         self._tiled_dists3_4vol= numpy.tile(\
             self._dists_4vol**(3.-linearDist),(len(self._ra_cen_4vol),1))
         self._tiled_ra_cen_4vol = numpy.tile(self._ra_cen_4vol,
                                              (len(self._dists_4vol), 1)).T
         self._tiled_dec_cen_4vol = numpy.tile(self._dec_cen_4vol,
                                               (len(self._dists_4vol), 1)).T
         lb = bovy_coords.radec_to_lb(phi, numpy.pi / 2. - theta)
         l = numpy.tile(lb[:, 0], (len(self._dists_4vol), 1)).T.flatten()
         b = numpy.tile(lb[:, 1], (len(self._dists_4vol), 1)).T.flatten()
         XYZ_4vol= \
             bovy_coords.lbd_to_XYZ(l,b,
                numpy.tile(self._dists_4vol,
                           (len(self._ra_cen_4vol),1)).flatten())
         self._X_4vol = numpy.reshape(
             XYZ_4vol[:,
                      0], (len(self._ra_cen_4vol), len(self._dists_4vol)))
         self._Y_4vol = numpy.reshape(
             XYZ_4vol[:,
                      1], (len(self._ra_cen_4vol), len(self._dists_4vol)))
         self._Z_4vol = numpy.reshape(
             XYZ_4vol[:,
                      2], (len(self._ra_cen_4vol), len(self._dists_4vol)))
     # Cache effective-selection function
     MJ, JK = self._parse_mj_jk(MJ, JK)
     new_hash = hashlib.md5(numpy.array([MJ, JK])).hexdigest()
     if not hasattr(self,'_vol_MJ_hash') or new_hash != self._vol_MJ_hash \
          or (hasattr(self,'_ndists_4vol') and
              (ndists != self._ndists_4vol or
               linearDist != self._linearDist_4vol)):
         # Need to update the effective-selection function
         if isinstance(self._dmap3d, mwdust.Zero):  #easy bc same everywhere
             effsel_4vol = self(self._dists_4vol,
                                self._ra_cen_4vol[0],
                                self._dec_cen_4vol[0],
                                MJ=MJ,
                                JK=JK)
             self._effsel_4vol = numpy.tile(effsel_4vol,
                                            (len(self._ra_cen_4vol), 1))
         else:  # Need to treat each los separately
             if ncpu is None:
                 self._effsel_4vol = numpy.empty(
                     (len(self._ra_cen_4vol), len(self._dists_4vol)))
                 for ii,(ra_cen, dec_cen) \
                         in enumerate(tqdm.tqdm(zip(self._ra_cen_4vol,
                                                    self._dec_cen_4vol))):
                     self._effsel_4vol[ii] = self(self._dists_4vol,
                                                  ra_cen,
                                                  dec_cen,
                                                  MJ=MJ,
                                                  JK=JK)
             else:
                 multiOut= multi.parallel_map(\
                     lambda x: self(self._dists_4vol,
                                    self._ra_cen_4vol[x],
                                    self._dec_cen_4vol[x],MJ=MJ,JK=JK),
                     range(len(self._ra_cen_4vol)),
                     numcores=ncpu)
                 self._effsel_4vol = numpy.array(multiOut)
         self._vol_MJ_hash = new_hash
         self._ndists_4vol = ndists
         self._linearDist_4vol = linearDist
     out = 0.
     if xyz:
         out= numpy.sum(\
             self._effsel_4vol\
                 *vol_func(self._X_4vol,self._Y_4vol,self._Z_4vol)\
                 *self._tiled_dists3_4vol)
     else:
         out= numpy.sum(\
             self._effsel_4vol\
                 *vol_func(self._ra_cen_4vol,self._dec_cen_4vol,
                           self._dists_4vol)\
                 *self._tiled_dists3_4vol)
     if relative:
         if not hasattr(self, '_tgasEffSelUniform'):
             tgasSelUniform = tgasSelectUniform(comp=1.)
             self._tgasEffSelUniform = tgasEffectiveSelect(tgasSelUniform)
         true_volume = self._tgasEffSelUniform.volume(vol_func,
                                                      xyz=xyz,
                                                      ndists=ndists,
                                                      linearDist=linearDist,
                                                      relative=False)
     else:
         true_volume = 1.
     return out*healpy.nside2pixarea(_BASE_NSIDE)*self._deltadm_4vol\
         /true_volume
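The final sum is a discretized integral over HEALPix sightlines and distance: effective volume ~ pixel_area * sum over pixels and distances of S_eff * vol_func * d^3 * ddm * ln(10)/5 (with d^2 * dd instead when integrating linearly in distance). A stripped-down quadrature sketch with toy arrays in place of the cached selection function:

import numpy

npix, ndists = 48, 101                         # toy angular and distance grids
pixarea = 4. * numpy.pi / npix                 # steradians per pixel on a uniform toy grid
dms = numpy.linspace(0., 18., ndists)
dists = 10.**(0.2 * dms - 2.)                  # kpc
deltadm = (dms[1] - dms[0]) * numpy.log(10.) / 5.
effsel = numpy.random.uniform(0., 1., (npix, ndists))   # stand-in effective selection
invol = numpy.ones((npix, ndists))             # vol_func == 1 everywhere (whole sky)
effvol = numpy.sum(effsel * invol * dists**3.) * pixarea * deltadm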
def plot_distanceintegral_smallpatch(savename, plotname):
    if os.path.exists(savename):
        with open(savename, 'rb') as savefile:
            area = pickle.load(savefile)
    else:
        # Load the patch
        hpIndx, dmap = read_patch()
        # For sampling over the absolute magnitude distribution
        iso = gaia_rc.load_iso()
        Gsamples = gaia_rc.sample_Gdist(iso, n=_NGSAMPLES)
        # l and b of the pixels
        theta, phi = healpy.pixelfunc.pix2ang(_NSIDE_HIRES, hpIndx, nest=False)
        cosb = numpy.sin(theta)
        area= multi.parallel_map(lambda x: distanceIntegrandHires(\
                _HIRESGREEN15DISTS[x],theta,phi,cosb,Gsamples,dmap),
                                 range(len(_HIRESGREEN15DISTS)),
                                 numcores=numpy.amin([16,
                                                      len(_HIRESGREEN15DISTS),
                                                      multiprocessing.cpu_count()]))

        save_pickles(savename, area)
    # Plot the power spectrum
    if True:
        psdx, psd = signal.periodogram(
            area * _HIRESGREEN15DISTS**3. /
            numpy.sum(area * _HIRESGREEN15DISTS**3.),
            fs=1. / (_HIRESGREEN15DISTMODS[1] - _HIRESGREEN15DISTMODS[0]),
            detrend=lambda x: x,
            scaling='spectrum')
        bovy_plot.bovy_print(fig_height=3.)
        matplotlib.rcParams['text.latex.preamble'] = [r"\usepackage{yfonts}"]
        bovy_plot.bovy_plot(psdx[1:],
                            psd[1:],
                            'k-',
                            loglog=True,
                            xlabel=r'$2\pi\,k_\mu\,(\mathrm{mag}^{-1})$',
                            ylabel=r'$P_k$',
                            xrange=[0.04, 4.])
        bovy_plot.bovy_text(
            r'$\mathrm{normalized}\ D^3\,\nu_*(\mu|\theta)\,\textswab{S}(\mu)$',
            bottom_left=True,
            size=16.)
        bovy_plot.bovy_end_print(plotname)
    else:
        bovy_plot.bovy_print(fig_height=3.)
        matplotlib.rcParams['text.latex.preamble'] = [r"\usepackage{yfonts}"]
        bovy_plot.bovy_plot(
            _HIRESGREEN15DISTMODS,
            area * _HIRESGREEN15DISTS**3.,
            'k-',
            xlabel=r'$\mu\,(\mathrm{mag}^{-1})$',
            ylabel=r'$D^3\,\nu_*(\mu|\theta)\,\textswab{S}(\mu)$')
        bovy_plot.bovy_end_print(plotname)
    spl = interpolate.InterpolatedUnivariateSpline(_HIRESGREEN15DISTMODS,
                                                   area *
                                                   _HIRESGREEN15DISTS**3.,
                                                   k=5)
    fthder = [spl.derivatives(dm)[4] for dm in _HIRESGREEN15DISTMODS]
    print "Simpson error= ", 0.5**4. / 180. * numpy.mean(
        numpy.fabs(fthder)) / integrate.simps(area * _HIRESGREEN15DISTS**3.,
                                              dx=0.5)
    return None
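The printed "Simpson error" is the composite-Simpson estimate (dx)^4/180 x mean|f''''|, with the fourth derivative read off a quintic spline and the result quoted relative to the integral; a check of the same construction on a test function (scipy.integrate.simps has since been renamed scipy.integrate.simpson):

import numpy
from scipy import interpolate, integrate

x = numpy.arange(4., 19.001, 0.5)                   # same 0.5-mag spacing as above
y = 2. + numpy.sin(x)                               # test integrand with |f''''| = |sin|
spl = interpolate.InterpolatedUnivariateSpline(x, y, k=5)
fthder = [spl.derivatives(xx)[4] for xx in x]       # 4th derivative from the quintic spline
relerr = 0.5**4. / 180. * numpy.mean(numpy.fabs(fthder)) \
    / integrate.simpson(y, dx=0.5)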
Example #18
 def __init__(self,
              pot=None,
              zmax=3. / 8.,
              gamma=1.,
              Rmax=3.,
              nR=25,
              nEz=25,
              nEr=25,
              nLz=25,
              numcores=1,
              **kwargs):
     """
     NAME:
        __init__
     PURPOSE:
        initialize an actionAngleAdiabaticGrid object
     INPUT:
        pot= potential or list of potentials (planarPotentials)
        zmax= zmax for building Ez grid
        Rmax = Rmax for building grids
        gamma= (default=1.) replace Lz by Lz+gamma Jz in effective potential
         nEz=, nEr=, nLz=, nR= grid sizes
        numcores= number of cpus to use to parallellize
        +scipy.integrate.quad keywords
     OUTPUT:
     HISTORY:
         2012-07-27 - Written - Bovy (IAS@MPIA)
     """
     if pot is None:
         raise IOError("Must specify pot= for actionAngleAxi")
     self._gamma = gamma
     self._pot = pot
     self._zmax = zmax
     self._Rmax = Rmax
     self._Rmin = 0.01
     #Set up the actionAngleAdiabatic object that we will use to interpolate
     self._aA = actionAngleAdiabatic(pot=self._pot, gamma=self._gamma)
     #Build grid for Ez, first calculate Ez(zmax;R) function
     self._Rs = numpy.linspace(self._Rmin, self._Rmax, nR)
     self._EzZmaxs = numpy.array([
         galpy.potential.evaluatePotentials(r, self._zmax, self._pot) -
         galpy.potential.evaluatePotentials(r, 0., self._pot)
         for r in self._Rs
     ])
     self._EzZmaxsInterp = interpolate.InterpolatedUnivariateSpline(
         self._Rs, numpy.log(self._EzZmaxs), k=3)
     y = numpy.linspace(0., 1., nEz)
     jz = numpy.zeros((nR, nEz))
     jzEzzmax = numpy.zeros(nR)
     if numcores > 1:
         thisRs = (numpy.tile(self._Rs, (nEz, 1)).T).flatten()
         thisEzZmaxs = (numpy.tile(self._EzZmaxs, (nEz, 1)).T).flatten()
         thisy = (numpy.tile(y, (nR, 1))).flatten()
         jz = multi.parallel_map(
             (
                 lambda x: self._aA.Jz(
                     thisRs[x],
                     0.,
                     1.,  #these two r dummies
                     0.,
                     math.sqrt(2. * thisy[x] * thisEzZmaxs[x]),
                     **kwargs)[0]),
             range(nR * nEz),
             numcores=numcores)
         jz = numpy.reshape(jz, (nR, nEz))
         jzEzzmax[0:nR] = jz[:, nEz - 1]
     else:
         for ii in range(nR):
             for jj in range(nEz):
                 #Calculate Jz
                 jz[ii, jj] = self._aA.Jz(
                     self._Rs[ii],
                     0.,
                     1.,  #these two r dummies
                     0.,
                     math.sqrt(2. * y[jj] * self._EzZmaxs[ii]),
                     **kwargs)[0]
                 if jj == nEz - 1:
                     jzEzzmax[ii] = jz[ii, jj]
     for ii in range(nR):
         jz[ii, :] /= jzEzzmax[ii]
     #First interpolate Ez=Ezmax
     self._jzEzmaxInterp = interpolate.InterpolatedUnivariateSpline(
         self._Rs, numpy.log(jzEzzmax + 10.**-5.), k=3)
     self._jz = jz
     self._jzInterp = interpolate.RectBivariateSpline(self._Rs,
                                                      y,
                                                      jz,
                                                      kx=3,
                                                      ky=3,
                                                      s=0.)
     #JR grid
     self._Lzmin = 0.01
     self._Lzs= numpy.linspace(self._Lzmin,
                               self._Rmax\
                                   *galpy.potential.vcirc(self._pot,
                                                          self._Rmax),
                               nLz)
     self._Lzmax = self._Lzs[-1]
     #Calculate ER(vr=0,R=RL)
     self._RL = numpy.array(
         [galpy.potential.rl(self._pot, l) for l in self._Lzs])
     self._RLInterp = interpolate.InterpolatedUnivariateSpline(self._Lzs,
                                                               self._RL,
                                                               k=3)
     self._ERRL = numpy.array([
         galpy.potential.evaluatePotentials(self._RL[ii], 0., self._pot) +
         self._Lzs[ii]**2. / 2. / self._RL[ii]**2. for ii in range(nLz)
     ])
     self._ERRLmax = numpy.amax(self._ERRL) + 1.
     self._ERRLInterp = interpolate.InterpolatedUnivariateSpline(
         self._Lzs, numpy.log(-(self._ERRL - self._ERRLmax)), k=3)
     self._Ramax = 99.
     self._ERRa = numpy.array([
         galpy.potential.evaluatePotentials(self._Ramax, 0., self._pot) +
         self._Lzs[ii]**2. / 2. / self._Ramax**2. for ii in range(nLz)
     ])
     self._ERRamax = numpy.amax(self._ERRa) + 1.
     self._ERRaInterp = interpolate.InterpolatedUnivariateSpline(
         self._Lzs, numpy.log(-(self._ERRa - self._ERRamax)), k=3)
     y = numpy.linspace(0., 1., nEr)
     jr = numpy.zeros((nLz, nEr))
     jrERRa = numpy.zeros(nLz)
     if numcores > 1:
         thisRL = (numpy.tile(self._RL, (nEr - 1, 1)).T).flatten()
         thisLzs = (numpy.tile(self._Lzs, (nEr - 1, 1)).T).flatten()
         thisERRL = (numpy.tile(self._ERRL, (nEr - 1, 1)).T).flatten()
         thisERRa = (numpy.tile(self._ERRa, (nEr - 1, 1)).T).flatten()
         thisy = (numpy.tile(y[0:-1], (nLz, 1))).flatten()
         mjr = multi.parallel_map((lambda x: self._aA.JR(
             thisRL[x],
             numpy.sqrt(2. * (thisERRa[x] + thisy[x] *
                              (thisERRL[x] - thisERRa[x]) - galpy.potential.
                              evaluatePotentials(thisRL[x], 0., self._pot))
                        - thisLzs[x]**2. / thisRL[x]**2.), thisLzs[x] /
             thisRL[x], 0., 0., **kwargs)[0]),
                                  range((nEr - 1) * nLz),
                                  numcores=numcores)
         jr[:, 0:-1] = numpy.reshape(mjr, (nLz, nEr - 1))
         jrERRa[0:nLz] = jr[:, 0]
     else:
         for ii in range(nLz):
             for jj in range(nEr - 1):  #Last one is zero by construction
                 try:
                     jr[ii, jj] = self._aA.JR(
                         self._RL[ii],
                         numpy.sqrt(2. *
                                    (self._ERRa[ii] + y[jj] *
                                     (self._ERRL[ii] - self._ERRa[ii]) -
                                     galpy.potential.evaluatePotentials(
                                         self._RL[ii], 0., self._pot)) -
                                    self._Lzs[ii]**2. / self._RL[ii]**2.),
                         self._Lzs[ii] / self._RL[ii], 0., 0., **kwargs)[0]
                 except UnboundError:
                     raise
                 if jj == 0:
                     jrERRa[ii] = jr[ii, jj]
     for ii in range(nLz):
         jr[ii, :] /= jrERRa[ii]
     #First interpolate Ez=Ezmax
     self._jr = jr
     self._jrERRaInterp = interpolate.InterpolatedUnivariateSpline(
         self._Lzs, numpy.log(jrERRa + 10.**-5.), k=3)
     self._jrInterp = interpolate.RectBivariateSpline(self._Lzs,
                                                      y,
                                                      jr,
                                                      kx=3,
                                                      ky=3,
                                                      s=0.)
     return None
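The parallel branches above vectorize a two-dimensional (R, E) grid for parallel_map by tiling and flattening the axes into one task index and reshaping the result back; the bookkeeping in isolation, with a trivial stand-in for the action integral:

import numpy

nR, nEz = 4, 3
Rs = numpy.linspace(0.1, 3., nR)
ys = numpy.linspace(0., 1., nEz)
thisRs = (numpy.tile(Rs, (nEz, 1)).T).flatten()   # R varies slowest: R0 repeated nEz times, ...
thisys = (numpy.tile(ys, (nR, 1))).flatten()      # y cycles fastest within each R
flat = [thisRs[x] * thisys[x] for x in range(nR * nEz)]   # stand-in for self._aA.Jz(...)
grid = numpy.reshape(flat, (nR, nEz))             # back onto the (R, E) grid
assert numpy.allclose(grid, numpy.outer(Rs, ys))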
def plotVelComparisonDFMulti(options,args):
    #Read data etc.
    print "Reading the data ..."
    raw= read_rawdata(options)
    #Bin the data
    binned= pixelAfeFeh(raw,dfeh=options.dfeh,dafe=options.dafe)
    #Map the bins with ndata > minndata in 1D
    fehs, afes= [], []
    for ii in range(len(binned.fehedges)-1):
        for jj in range(len(binned.afeedges)-1):
            data= binned(binned.feh(ii),binned.afe(jj))
            if len(data) < options.minndata:
                continue
            #print binned.feh(ii), binned.afe(jj), len(data)
            fehs.append(binned.feh(ii))
            afes.append(binned.afe(jj))
    nabundancebins= len(fehs)
    fehs= numpy.array(fehs)
    afes= numpy.array(afes)
    gafes, gfehs, left_legend= getMultiComparisonBins(options)
    M= len(gfehs)
    if options.andistances:
        distancefacs= numpy.zeros_like(fehs)
        gdistancefacs= numpy.zeros_like(gfehs)
        for jj in range(M):
            #Find pop corresponding to this bin
            ii= numpy.argmin((gfehs[jj]-fehs)**2./0.1+(gafes[jj]-afes)**2./0.0025)
            print(ii)
            #Get the relevant data
            data= binned(fehs[ii],afes[ii])
            distancefacs[ii]= AnDistance.AnDistance(data.dered_g-data.dered_r,
                                                    data.feh)
            gdistancefacs[jj]= distancefacs[ii]
            options.fixdm= numpy.log10(distancefacs[ii])*5.
            #Apply distance factor to the data
            newraw= read_rawdata(options)
            newbinned= pixelAfeFeh(newraw,dfeh=options.dfeh,dafe=options.dafe)
            thisdataIndx= binned.callIndx(fehs[ii],afes[ii])
            binned.data.xc[thisdataIndx]= newbinned.data.xc[thisdataIndx]
            binned.data.yc[thisdataIndx]= newbinned.data.yc[thisdataIndx]
            binned.data.zc[thisdataIndx]= newbinned.data.zc[thisdataIndx]
            binned.data.plate[thisdataIndx]= newbinned.data.plate[thisdataIndx]
            binned.data.dered_r[thisdataIndx]= newbinned.data.dered_r[thisdataIndx]
    else:
        distancefacs=numpy.ones_like(fehs)
        gdistancefacs=numpy.ones_like(gfehs)
    ##########POTENTIAL PARAMETERS####################
    potparams1= numpy.array([numpy.log(2.5/8.),options.fixvc/220.,
                             numpy.log(400./8000.),0.2,0.])
    if options.group.lower() == 'aenhanced':
        potparams2= numpy.array([numpy.log(2.8/8.),options.fixvc/220.,
                                 numpy.log(400./8000.),0.266666666,0.])
        potparams3= numpy.array([numpy.log(2.8/8.),options.fixvc/220.,
                                 numpy.log(400./8000.),0.8,0.])
    elif options.group.lower() == 'aintermediate':
        potparams2= numpy.array([numpy.log(3.0/8.),options.fixvc/220.,
                                 numpy.log(400./8000.),0.3333333333333,0.])
        potparams3= numpy.array([numpy.log(3.0/8.),options.fixvc/220.,
                                 numpy.log(400./8000.),0.933333333333,0.])
    elif options.group.lower() == 'apoor':
        potparams2= numpy.array([numpy.log(2.6/8.),options.fixvc/220.,
                                 numpy.log(400./8000.),0.4,0.])
        potparams3= numpy.array([numpy.log(2.6/8.),options.fixvc/220.,
                                 numpy.log(400./8000.),1.0,0.])
    options.potential=  'dpdiskplhalofixbulgeflatwgasalt'
    #Check whether fits exist, if not, pop
    removeBins= numpy.ones(M,dtype='bool')
    for jj in range(M):
        #Find pop corresponding to this bin
        pop= numpy.argmin((gfehs[jj]-fehs)**2./0.1+(gafes[jj]-afes)**2./0.0025)
        #Load savefile
        if not options.init is None:
            #Load initial parameters from file
            savename= options.init
            spl= savename.split('.')
            newname= ''
            for ll in range(len(spl)-1):
                newname+= spl[ll]
                if not ll == len(spl)-2: newname+= '.'
            newname+= '_%i.' % pop
            newname+= spl[-1]
            if not os.path.exists(newname):
                removeBins[jj]= False
        else:
            raise IOError("base filename not specified ...")
    if numpy.sum(removeBins) == 0:
        raise IOError("None of the group bins have been fit ...")
    elif numpy.sum(removeBins) < M:
        #Some bins have not been fit yet, and have to be removed
        gfehs= list((numpy.array(gfehs))[removeBins])
        gafes= list((numpy.array(gafes))[removeBins])
        print "Only using %i bins out of %i ..." % (numpy.sum(removeBins),M)
        M= len(gfehs)
    data= []
    zs= []
    velps= numpy.zeros((len(binned.data),options.nv))
    velps[:,:]= numpy.nan
    velps2= numpy.zeros((len(binned.data),options.nv))
    velps2[:,:]= numpy.nan
    velps3= numpy.zeros((len(binned.data),options.nv))
    velps3[:,:]= numpy.nan
    cumulndata= 0
    if options.type.lower() == 'vz':
        if options.group == 'aenhanced':
            vs= numpy.linspace(-180.,180.,options.nv)
            xrange=[-180.,180.]
            bins= 39
        elif options.group == 'aintermediate':
            vs= numpy.linspace(-150.,150.,options.nv)
            xrange=[-150.,150.]
            bins= 33
        else: # options.group == 'apoor'
            vs= numpy.linspace(-120.,120.,options.nv)
            xrange=[-140.,140.]
            bins= 26
        xlabel=r'$V_Z\ (\mathrm{km\,s}^{-1})$'
    elif options.type.lower() == 'vr':
        if options.group == 'aenhanced':
            vs= numpy.linspace(-220.,220.,options.nv)
            xrange=[-220.,220.]
            bins= 39
        else: # options.group == 'aintermediate' or 'apoor'
            vs= numpy.linspace(-150.,150.,options.nv)
            xrange=[-150.,150.]
            bins= 26
        xlabel=r'$V_R\ (\mathrm{km\,s}^{-1})$'
    elif options.type.lower() == 'vt':
        if options.group == 'aenhanced':
            vs= numpy.linspace(0.01,350.,options.nv)
            xrange=[0.,350.]
            bins= 39
        else: # options.group == 'aintermediate' or 'apoor'
            vs= numpy.linspace(0.01,350.,options.nv)
            xrange=[0.,350.]
            bins= 39
        xlabel=r'$V_T\ (\mathrm{km\,s}^{-1})$'
    alts= True
    if not options.multi is None:
        #Generate list of temporary files
        tmpfiles= []
        for jj in range(M): 
            tfile, tmpfile= tempfile.mkstemp()
            os.close(tfile) #Close because it's open
            tmpfiles.append(tmpfile)
        try:
            dummy= multi.parallel_map((lambda x: run_calc_model_multi(x,M,gfehs,gafes,fehs,afes,options,
                                                                      vs,
                                                                      potparams1,potparams2,potparams3,
                                                                      distancefacs,
                                                                      binned,alts,True,tmpfiles)),
                                      range(M),
                                      numcores=numpy.amin([M,
                                                           multiprocessing.cpu_count(),
                                                           options.multi]))
            #Now read all of the temporary files
            for jj in range(M):
                tmpfile= open(tmpfiles[jj],'rb')
                tvelps= pickle.load(tmpfile)
                if tvelps is None:
                    continue
                tvelps2= pickle.load(tmpfile)
                tvelps3= pickle.load(tmpfile)
                data.extend(pickle.load(tmpfile))
                zs.extend(pickle.load(tmpfile))
                tndata= pickle.load(tmpfile)
                velps[cumulndata:cumulndata+tndata,:]= tvelps
                velps2[cumulndata:cumulndata+tndata,:]= tvelps2
                velps3[cumulndata:cumulndata+tndata,:]= tvelps3
                cumulndata+= tndata
                tmpfile.close()
        finally:
            for jj in range(M):
                os.remove(tmpfiles[jj])
    else:
        for jj in range(M):
            try:
                tvelps, tvelps2, tvelps3, tdata, tzs, tndata= \
                    run_calc_model_multi(jj,M,gfehs,gafes,fehs,afes,options,
                                         vs,
                                         potparams1,potparams2,potparams3,
                                         distancefacs,
                                         binned,alts,
                                         False,None)
            except TypeError:
                continue
            velps[cumulndata:cumulndata+tndata,:]= tvelps
            velps2[cumulndata:cumulndata+tndata,:]= tvelps2
            velps3[cumulndata:cumulndata+tndata,:]= tvelps3
            data.extend(tdata)
            zs.extend(tzs)
            cumulndata+= tndata
    bovy_plot.bovy_print()
    bovy_plot.bovy_hist(data,bins=26,normed=True,color='k',
                        histtype='step',
                        xrange=xrange,xlabel=xlabel)
    plotp= numpy.nansum(velps[:cumulndata,:],axis=0)/cumulndata
    print(numpy.sum(plotp)*(vs[1]-vs[0]))
    bovy_plot.bovy_plot(vs,plotp,'k-',overplot=True)
    if alts:
        plotp= numpy.nansum(velps2,axis=0)/cumulndata
        bovy_plot.bovy_plot(vs,plotp,'k--',overplot=True)
        plotp= numpy.nansum(velps3,axis=0)/cumulndata
        bovy_plot.bovy_plot(vs,plotp,'k:',overplot=True)
    if not left_legend is None:
        bovy_plot.bovy_text(left_legend,top_left=True,size=_legendsize)
    bovy_plot.bovy_text(r'$\mathrm{full\ subsample}$'
                        +'\n'+
                        '$%i \ \ \mathrm{stars}$' % 
                        len(data),top_right=True,
                        size=_legendsize)
    bovy_plot.bovy_end_print(args[0]+'model_data_g_'+options.group+'_'+options.type+'dist_all.'+options.ext)
    if options.all: return None
    #Plot zranges
    #First determine the ranges that have nstars in them
    rranges_nstars= 1000
    zs= numpy.array(zs)
    data= numpy.array(data)
    tdata_z= sorted(numpy.fabs(zs))
    nbins= numpy.ceil(len(tdata_z)/float(rranges_nstars))
    rranges_nstars= int(numpy.floor(float(len(tdata_z))/nbins))
    accum= rranges_nstars
    zranges= [0.0]
    while accum < len(tdata_z):
        zranges.append(tdata_z[accum])
        accum+= rranges_nstars
    zranges.append(5.0)
    print(zranges)
    #zranges= [0.5,1.,1.5,2.,3.,4.]
    nzranges= len(zranges)-1
    sigzsd= numpy.empty(nzranges)
    esigzsd= numpy.empty(nzranges)
    sigzs1= numpy.empty(nzranges)
    sigzs2= numpy.empty(nzranges)
    sigzs3= numpy.empty(nzranges)
    for ii in range(nzranges):
        indx= (numpy.fabs(zs) >= zranges[ii])*(numpy.fabs(zs) < zranges[ii+1])
        plotp= numpy.nansum(velps[indx,:],axis=0)/numpy.sum(indx)
        yrange= [0.,1.35*numpy.nanmax(plotp)]
        bovy_plot.bovy_print()
        bovy_plot.bovy_hist(data[indx],bins=26,normed=True,color='k',
                            histtype='step',
                            yrange=yrange,
                            xrange=xrange,xlabel=xlabel)
        sigzsd[ii]= numpy.std(data[indx][(numpy.fabs(data[indx]) < 100.)])
        esigzsd[ii]= sigzsd[ii]/numpy.sqrt(float(len(data[indx][(numpy.fabs(data[indx]) < 100.)])))
        sigzs1[ii]= numpy.sqrt(numpy.sum(vs**2.*plotp)/numpy.sum(plotp)-(numpy.sum(vs*plotp)/numpy.sum(plotp))**2.)
        bovy_plot.bovy_plot(vs,plotp,'k-',overplot=True)
        if alts:
            plotp= numpy.nansum(velps2[indx,:],axis=0)/numpy.sum(indx)
            sigzs2[ii]= numpy.sqrt(numpy.sum(vs**2.*plotp)/numpy.sum(plotp)-(numpy.sum(vs*plotp)/numpy.sum(plotp))**2.)
            bovy_plot.bovy_plot(vs,plotp,'k--',overplot=True)
            plotp= numpy.nansum(velps3[indx,:],axis=0)/numpy.sum(indx)
            sigzs3[ii]= numpy.sqrt(numpy.sum(vs**2.*plotp)/numpy.sum(plotp)-(numpy.sum(vs*plotp)/numpy.sum(plotp))**2.)
            bovy_plot.bovy_plot(vs,plotp,'k:',overplot=True)
        bovy_plot.bovy_text(r'$ %i\ \mathrm{pc} \leq |Z| < %i\ \mathrm{pc}$' % (int(1000*zranges[ii]),int(1000*zranges[ii+1])),
#                            +'\n'+
#                            '$%i \ \ \mathrm{stars}$' % (numpy.sum(indx)),
                            top_right=True,
                            size=_legendsize)
        bovy_plot.bovy_end_print(args[0]+'model_data_g_'+options.group+'_'+options.type+'dist_z%.1f_z%.1f.' % (zranges[ii],zranges[ii+1])+options.ext)
    #Plot velocity dispersion as a function of |Z|
    bovy_plot.bovy_print()
    bovy_plot.bovy_plot((((numpy.roll(zranges,-1)+zranges)/2.)[:-1]),sigzsd,
                        'ko',
                        xlabel=r'$|Z|\ (\mathrm{kpc})$',
                        ylabel=r'$\sigma_z\ (\mathrm{km\,s}^{-1})$',
                        xrange=[0.,4.],
                        yrange=[0.,60.])
    pyplot.errorbar(((numpy.roll(zranges,-1)+zranges)/2.)[:-1],sigzsd,
                    yerr=esigzsd,
                    marker='o',color='k',linestyle='none')
    bovy_plot.bovy_plot((((numpy.roll(zranges,-1)+zranges)/2.)[:-1]),sigzs1,
                        'r+',overplot=True,ms=10.)
    bovy_plot.bovy_plot((((numpy.roll(zranges,-1)+zranges)/2.)[:-1]),sigzs2,
                        'cx',overplot=True,ms=10.)
    bovy_plot.bovy_plot((((numpy.roll(zranges,-1)+zranges)/2.)[:-1]),sigzs3,
                        'gd',overplot=True,ms=10.)
    bovy_plot.bovy_end_print(args[0]+'model_data_g_'+options.group+'_'+options.type+'dist_szvsz.' +options.ext)
    return None
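The dispersions sigzs1, sigzs2, and sigzs3 above are second moments of the tabulated model PDFs. A minimal standalone sketch of that moment calculation, assuming only a velocity grid vs and a tabulated (not necessarily normalized) PDF p:

import numpy

def pdf_moments(vs, p):
    # Mean and dispersion of a PDF tabulated as p on the grid vs;
    # p need not be normalized because the sums divide out the norm.
    norm = numpy.sum(p)
    mean = numpy.sum(vs * p) / norm
    disp = numpy.sqrt(numpy.sum(vs**2. * p) / norm - mean**2.)
    return mean, disp

# Example: a Gaussian with a 20 km/s dispersion recovers ~20 km/s
vs = numpy.linspace(-120., 120., 201)
p = numpy.exp(-0.5 * vs**2. / 20.**2.)
print(pdf_moments(vs, p))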
예제 #20
0
def astro_sampling(parser):
    options, args = parser.parse_args()
    if options.basti:
        zs = numpy.array([0.004, 0.008, 0.01, 0.0198, 0.03, 0.04])
    elif options.parsec:
        zs = numpy.arange(0.0005, 0.06005, 0.0005)
        # zs= numpy.arange(0.0005,0.06005,0.005)
    else:
        zs = numpy.arange(0.0005, 0.03005, 0.0005)
        # zs= numpy.arange(0.0005,0.03005,0.005)
    if os.path.exists(args[0]):
        savefile = open(args[0], "rb")
        plotthis = pickle.load(savefile)
        zs = pickle.load(savefile)
        lages = pickle.load(savefile)
        savefile.close()
        dlages = lages[1] - lages[0]
        if options.type == "massperrc":
            savefile = open(args[1], "rb")
            plotthis /= pickle.load(savefile)
            savefile.close()
        if options.type == "mass" and len(args) == 3:
            # Also load mass_coarseage and omega
            savefile = open(args[1], "rb")
            masscoarse = pickle.load(savefile)
            savefile.close()
            savefile = open(args[2], "rb")
            omega = pickle.load(savefile)
            savefile.close()
    else:
        nages = 31
        if options.type == "omega" or options.type == "numfrac" or options.coarseage:
            nages = 16
        lages = numpy.linspace(-1.0, 1.0, nages)
        dlages = lages[1] - lages[0]
        plotthis = numpy.zeros((len(zs), nages))
        multOut = multi.parallel_map(
            lambda x: _calc_one(zs[x], options, nages, lages, dlages), range(len(zs)), numcores=32
        )
        for ii in range(len(zs)):
            plotthis[ii, :] = multOut[ii]
        # Save
        savefile = open(args[0], "wb")
        pickle.dump(plotthis, savefile)
        pickle.dump(zs, savefile)
        pickle.dump(lages, savefile)
        savefile.close()
    # Plot
    # First cut out youngest ages, since they are irrelevant
    if _CUTLOWAGE:
        aindx = lages > numpy.log10(0.8)
        lages = lages[aindx]
        plotthis = plotthis[:, aindx]
    if options.type == "mass":
        vmin, vmax = 0.5, 2.3
        vmin2, vmax2 = 0.5, 2.0
        zlabel = r"$\langle M_{\mathrm{RC}} \rangle \,(M_\odot)$"
        # cmap= 'gist_yarg'
        cmap = "jet"
    elif options.type == "omega":
        vmin, vmax = 0.0, 0.03
        vmin2, vmax2 = 0.0, 0.015
        if options.allapogee:
            vmin, vmax = 0.0, 0.035
            zlabel = r"$\mathrm{Mass\ fraction\ in}\ (J-K_s)_0 > 0.5\ \mathrm{giants\ (\%)}$"
        elif options.redapogee:
            vmin, vmax = 0.0, 0.005
            vmin2, vmax2 = 0.0, 0.003
            zlabel = r"$\mathrm{Mass\ fraction\ in}\ (J-K_s)_0 > 0.8\ \mathrm{giants\ (\%)}$"
        else:
            zlabel = r"$\mathrm{Mass\ fraction\ in\ RC\ stars\ (\%)}$"
        # cmap= 'gist_yarg'
        cmap = "jet"
        plotthis *= 100.0
    elif options.type == "numfrac":
        vmin, vmax = 0.0, 0.005
        vmin2, vmax2 = 0.0, 0.004
        zlabel = r"$\mathrm{Number\ fraction\ in\ RC\ stars\ (\%)}$"
        # cmap= 'gist_yarg'
        cmap = "jet"
        plotthis *= 100.0
    elif options.type == "massperrc":
        vmin, vmax = 0.0, 50000.0
        vmin2, vmax2 = 0.0, 25000.0
        zlabel = r"$\mathrm{Stellar\ population\ mass\ per\ RC\ star}\,(M_\odot)$"
        # cmap= 'gist_yarg'
        cmap = "jet"
        if options.redapogee:
            vmin, vmax = 0.0, 100000.0
            vmin2, vmax2 = 0.0, 200000.0
            zlabel = r"$\mathrm{Mass\ fraction\ in}\ (J-K_s)_0 > 0.8\ \mathrm{giants\ (\%)}$"
    print(numpy.nanmin(plotthis), numpy.nanmax(plotthis))
    if options.basti:  # Remap the Zs
        zs = numpy.array([0.004, 0.008, 0.01, 0.0198, 0.03, 0.04])
        regularzs = numpy.arange(0.0005, 0.03005, 0.0005) / 0.019 * 0.0198
        regularplotthis = numpy.zeros((nages, len(regularzs)))
        for jj in range(len(regularzs)):
            # Find z
            thisindx = numpy.argmin(numpy.fabs(regularzs[jj] - zs))
            for ii in range(nages):
                regularplotthis[ii, jj] = plotthis[ii, thisindx]
        zs = regularzs
        plotthis = regularplotthis
    if options.remapz:
        zs = zs[:-1]
        plotthis = plotthis[:-1, :]
        fehs = numpy.linspace(-1.05, isodist.Z2FEH(zs[-1], zsolar=0.017), len(zs))
        fehzs = isodist.FEH2Z(fehs, zsolar=0.017)
        new_plotthis = numpy.empty_like(plotthis)
        for ii in range(plotthis.shape[1]):
            goodz = ~numpy.isnan(plotthis[:, ii])
            tip = interpolate.InterpolatedUnivariateSpline(zs[goodz], plotthis[goodz, ii], k=3)
            new_plotthis[:, ii] = tip(fehzs)
            try:
                new_plotthis[fehs < numpy.nanmax(isodist.Z2FEH(zs[~goodz], zsolar=0.017)), ii] = numpy.nan
            except ValueError:
                continue
        plotthis = new_plotthis
        xlabel = r"$[\mathrm{Fe/H}]\,(\mathrm{dex})$"
    else:
        xlabel = r"$Z$"
    bovy_plot.bovy_print(fig_height=7.0, fig_width=6.0)
    fig = pyplot.gcf()
    left, bottom, width, height = 0.1, 0.1, 0.8, 0.6
    axBottom = pyplot.axes([left, bottom, width, height])
    fig.sca(axBottom)
    if options.remapz:
        xlimits = [fehs[0], fehs[-1]]
    else:
        xlimits = [zs[0], zs[-1]]
    ylimits = [lages[0] - dlages, lages[-1] + dlages]
    bovy_plot.bovy_dens2d(
        plotthis.T,
        origin="lower",
        cmap=cmap,
        xrange=xlimits,
        yrange=ylimits,
        vmin=vmin,
        vmax=vmax,
        interpolation="nearest",
        colorbar=True,
        shrink=0.9,
        zlabel=zlabel,
        overplot=True,
    )
    extent = xlimits + ylimits
    pyplot.axis(extent)
    bovy_plot._add_axislabels(xlabel, r"$\log_{10}\,\mathrm{Age} / 1\,\mathrm{Gyr}$")
    bovy_plot._add_ticks()
    left, bottom, width, height = 0.1, 0.68, 0.64, 0.2
    axTop = pyplot.axes([left, bottom, width, height])
    fig.sca(axTop)
    # Plot the average over SFH
    lages = numpy.linspace(-1.0, 1.0, 16)
    if _CUTLOWAGE:
        aindx = lages > numpy.log10(0.8)
        lages = lages[aindx]
        if options.type == "mass":
            omega = omega[:, aindx]
            masscoarse = masscoarse[:, aindx]
    mtrend = numpy.zeros(len(zs))
    exppage = 10.0 ** lages * numpy.exp((10.0 ** (lages + 2.0)) / 800.0)  # e.g., Binney (2010)
    exexppage = 10.0 ** lages * numpy.exp((10.0 ** (lages + 2.0)) / 100.0)  # e.g., Binney (2010)
    page = 10.0 ** lages
    if options.type == "massperrc":
        mtrend = 1.0 / (numpy.sum(page * 1.0 / plotthis, axis=1) / numpy.sum(page))
        expmtrend = 1.0 / (numpy.sum(exppage * 1.0 / plotthis, axis=1) / numpy.sum(exppage))
        exexpmtrend = 1.0 / (numpy.sum(exexppage * 1.0 / plotthis, axis=1) / numpy.sum(exexppage))
    elif options.type == "mass" and len(args) == 3:
        if options.remapz:
            omega = omega[:-1, :]
            masscoarse = masscoarse[:-1, :]
        mtrend = numpy.nansum(page * omega, axis=1) / numpy.nansum(page * omega / masscoarse, axis=1)
        expmtrend = numpy.nansum(exppage * omega, axis=1) / numpy.nansum(exppage * omega / masscoarse, axis=1)
        exexpmtrend = numpy.nansum(exexppage * omega, axis=1) / numpy.nansum(exexppage * omega / masscoarse, axis=1)
    else:
        mtrend = numpy.sum(page * plotthis, axis=1) / numpy.sum(page)
        expmtrend = numpy.sum(exppage * plotthis, axis=1) / numpy.sum(exppage)
        exexpmtrend = numpy.sum(exexppage * plotthis, axis=1) / numpy.sum(exexppage)
    if options.remapz:
        zs = fehs
    pyplot.plot(zs, mtrend, "k-")
    pyplot.plot(zs, expmtrend, "k--")
    pyplot.plot(zs, exexpmtrend, "k-.")
    pyplot.ylim(vmin2, vmax2)
    pyplot.xlim(xlimits[0], xlimits[1])
    nullfmt = NullFormatter()  # no labels
    thisax = pyplot.gca()
    thisax.xaxis.set_major_formatter(nullfmt)
    bovy_plot._add_ticks()
    if options.type == "mass":
        pyplot.ylabel(zlabel)
    if options.basti:
        pyplot.annotate(
            r"$\mathrm{BaSTI}$",
            (0.5, 1.08),
            xycoords="axes fraction",
            horizontalalignment="center",
            verticalalignment="top",
            size=16.0,
        )
    elif options.imfmodel == "kroupa2003":
        pyplot.annotate(
            r"$\mathrm{Padova, Kroupa\ (2003)\ IMF}$",
            (0.5, 1.08),
            xycoords="axes fraction",
            horizontalalignment="center",
            verticalalignment="top",
            size=16.0,
        )
    elif "expsfh" in args[0]:
        pyplot.annotate(
            r"$\mathrm{Padova, p(\mathrm{Age}) \propto e^{\mathrm{Age}/(8\,\mathrm{Gyr})}}$",
            (0.5, 1.08),
            xycoords="axes fraction",
            horizontalalignment="center",
            verticalalignment="top",
            size=16.0,
        )
    elif options.parsec:
        pass
        # pyplot.annotate(r'$\mathrm{PARSEC}$',
        #                (0.5,1.08),xycoords='axes fraction',
        #                horizontalalignment='center',
        #                verticalalignment='top',size=16.)
    else:
        pyplot.annotate(
            r"$\mathrm{Padova}$",
            (0.5, 1.08),
            xycoords="axes fraction",
            horizontalalignment="center",
            verticalalignment="top",
            size=16.0,
        )
    bovy_plot.bovy_end_print(options.outfilename)
    return None
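The mtrend/expmtrend/exexpmtrend curves above are averages over an assumed star-formation history, with page acting as the weight for a flat SFH on the log-age grid and exppage/exexppage adding exponentially varying star-formation rates. A minimal sketch of that weighting, using a made-up placeholder quantity on the same kind of log10(age) grid:

import numpy

lages = numpy.linspace(-1., 1., 16)        # log10(age / Gyr) grid
quantity = numpy.linspace(0.8, 1.4, 16)    # placeholder for, e.g., <M_RC>(age)

page = 10.**lages                                           # flat-SFH weight on a log-age grid
exppage = 10.**lages * numpy.exp(10.**(lages + 2.) / 800.)  # mirrors the exppage weight above

flat_avg = numpy.sum(page * quantity) / numpy.sum(page)
exp_avg = numpy.sum(exppage * quantity) / numpy.sum(exppage)
print(flat_avg, exp_avg)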
예제 #21
0
 def __init__(self,
              RZPot=None,rgrid=(0.01,2.,101),zgrid=(0.,0.2,101),logR=False,
              interpPot=False,interpRforce=False,interpzforce=False,
              interpDens=False,
              interpvcirc=False,
              interpdvcircdr=False,
              interpepifreq=False,interpverticalfreq=False,
              use_c=False,enable_c=False,zsym=True,
              numcores=None):
     """
     NAME:
        __init__
     PURPOSE:
        Initialize an interpRZPotential instance
     INPUT:
        RZPot - RZPotential to be interpolated
        rgrid - R grid to be given to linspace
        zgrid - z grid to be given to linspace
        logR - if True, rgrid is in the log of R
        interpPot, interpRforce, interpzforce, interpDens, interpvcirc, interpepifreq, interpverticalfreq, interpdvcircdr= if True, interpolate these functions
        use_c= use C to speed up the calculation
        enable_c= enable use of C for interpolations
        zsym= if True (default), the potential is assumed to be symmetric around z=0 (so you can use, e.g.,  zgrid=(0.,1.,101)).
        numcores= if set to an integer, use this many cores (only used for vcirc, dvcircdR, epifreq, and verticalfreq; NOT NECESSARILY FASTER, TIME TO MAKE SURE)
     OUTPUT:
        instance
     HISTORY:
        2010-07-21 - Written - Bovy (NYU)
        2013-01-24 - Started with new implementation - Bovy (IAS)
     """
     Potential.__init__(self,amp=1.)
     self.hasC= True
     self._origPot= RZPot
     self._rgrid= numpy.linspace(*rgrid)
     self._logR= logR
     if self._logR:
         self._rgrid= numpy.exp(self._rgrid)
         self._logrgrid= numpy.log(self._rgrid)
     self._zgrid= numpy.linspace(*zgrid)
     self._interpPot= interpPot
     self._interpRforce= interpRforce
     self._interpzforce= interpzforce
     self._interpDens= interpDens
     self._interpvcirc= interpvcirc
     self._interpdvcircdr= interpdvcircdr
     self._interpepifreq= interpepifreq
     self._interpverticalfreq= interpverticalfreq
     self._enable_c= enable_c*ext_loaded
     self._zsym= zsym
     if interpPot:
         if use_c*ext_loaded:
             self._potGrid, err= calc_potential_c(self._origPot,self._rgrid,self._zgrid)
         else:
             from galpy.potential import evaluatePotentials
             potGrid= numpy.zeros((len(self._rgrid),len(self._zgrid)))
             for ii in range(len(self._rgrid)):
                 for jj in range(len(self._zgrid)):
                     potGrid[ii,jj]= evaluatePotentials(self._rgrid[ii],self._zgrid[jj],self._origPot)
             self._potGrid= potGrid
         if self._logR:
             self._potInterp= interpolate.RectBivariateSpline(self._logrgrid,
                                                              self._zgrid,
                                                              self._potGrid,
                                                              kx=3,ky=3,s=0.)
         else:
             self._potInterp= interpolate.RectBivariateSpline(self._rgrid,
                                                              self._zgrid,
                                                              self._potGrid,
                                                              kx=3,ky=3,s=0.)
         if enable_c*ext_loaded:
             self._potGrid_splinecoeffs= calc_2dsplinecoeffs_c(self._potGrid)
     if interpRforce:
         if use_c*ext_loaded:
             self._rforceGrid, err= calc_potential_c(self._origPot,self._rgrid,self._zgrid,rforce=True)
         else:
             from galpy.potential import evaluateRforces
             rforceGrid= numpy.zeros((len(self._rgrid),len(self._zgrid)))
             for ii in range(len(self._rgrid)):
                 for jj in range(len(self._zgrid)):
                     rforceGrid[ii,jj]= evaluateRforces(self._rgrid[ii],self._zgrid[jj],self._origPot)
             self._rforceGrid= rforceGrid
         if self._logR:
             self._rforceInterp= interpolate.RectBivariateSpline(self._logrgrid,
                                                                 self._zgrid,
                                                                 self._rforceGrid,
                                                                 kx=3,ky=3,s=0.)
         else:
             self._rforceInterp= interpolate.RectBivariateSpline(self._rgrid,
                                                                 self._zgrid,
                                                                 self._rforceGrid,
                                                                 kx=3,ky=3,s=0.)
         if enable_c*ext_loaded:
             self._rforceGrid_splinecoeffs= calc_2dsplinecoeffs_c(self._rforceGrid)
     if interpzforce:
         if use_c*ext_loaded:
             self._zforceGrid, err= calc_potential_c(self._origPot,self._rgrid,self._zgrid,zforce=True)
         else:
             from galpy.potential import evaluatezforces
             zforceGrid= numpy.zeros((len(self._rgrid),len(self._zgrid)))
             for ii in range(len(self._rgrid)):
                 for jj in range(len(self._zgrid)):
                     zforceGrid[ii,jj]= evaluatezforces(self._rgrid[ii],self._zgrid[jj],self._origPot)
             self._zforceGrid= zforceGrid
         if self._logR:
             self._zforceInterp= interpolate.RectBivariateSpline(self._logrgrid,
                                                                 self._zgrid,
                                                                 self._zforceGrid,
                                                                 kx=3,ky=3,s=0.)
         else:
             self._zforceInterp= interpolate.RectBivariateSpline(self._rgrid,
                                                                 self._zgrid,
                                                                 self._zforceGrid,
                                                                 kx=3,ky=3,s=0.)
         if enable_c*ext_loaded:
             self._zforceGrid_splinecoeffs= calc_2dsplinecoeffs_c(self._zforceGrid)
     if interpDens:
         if False:
             raise NotImplementedError("Using C to calculate an interpolation grid for the density is not supported currently")
             self._densGrid, err= calc_dens_c(self._origPot,self._rgrid,self._zgrid)
         else:
             from galpy.potential import evaluateDensities
             densGrid= numpy.zeros((len(self._rgrid),len(self._zgrid)))
             for ii in range(len(self._rgrid)):
                 for jj in range(len(self._zgrid)):
                     densGrid[ii,jj]= evaluateDensities(self._rgrid[ii],self._zgrid[jj],self._origPot)
             self._densGrid= densGrid
         if self._logR:
             self._densInterp= interpolate.RectBivariateSpline(self._logrgrid,
                                                               self._zgrid,
                                                               numpy.log(self._densGrid+10.**-10.),
                                                               kx=3,ky=3,s=0.)
         else:
             self._densInterp= interpolate.RectBivariateSpline(self._rgrid,
                                                               self._zgrid,
                                                               numpy.log(self._densGrid+10.**-10.),
                                                               kx=3,ky=3,s=0.)
         if False:
             self._densGrid_splinecoeffs= calc_2dsplinecoeffs_c(self._densGrid)
     if interpvcirc:
         from galpy.potential import vcirc
         if not numcores is None:
             self._vcircGrid= multi.parallel_map((lambda x: vcirc(self._origPot,self._rgrid[x])),
                                                 range(len(self._rgrid)),numcores=numcores)
         else:
             self._vcircGrid= numpy.array([vcirc(self._origPot,r) for r in self._rgrid])
         if self._logR:
             self._vcircInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid,self._vcircGrid,k=3)
         else:
             self._vcircInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid,self._vcircGrid,k=3)
     if interpdvcircdr:
         from galpy.potential import dvcircdR
         if not numcores is None:
             self._dvcircdrGrid= multi.parallel_map((lambda x: dvcircdR(self._origPot,self._rgrid[x])),
                                                    range(len(self._rgrid)),numcores=numcores)
         else:
             self._dvcircdrGrid= numpy.array([dvcircdR(self._origPot,r) for r in self._rgrid])
         if self._logR:
             self._dvcircdrInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid,self._dvcircdrGrid,k=3)
         else:
             self._dvcircdrInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid,self._dvcircdrGrid,k=3)
     if interpepifreq:
         from galpy.potential import epifreq
         if not numcores is None:
             self._epifreqGrid= multi.parallel_map((lambda x: epifreq(self._origPot,self._rgrid[x])),
                                                   range(len(self._rgrid)),numcores=numcores)
         else:
             self._epifreqGrid= numpy.array([epifreq(self._origPot,r) for r in self._rgrid])
          indx= ~numpy.isnan(self._epifreqGrid)
         if numpy.sum(indx) < 4:
             if self._logR:
                 self._epifreqInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid[indx],self._epifreqGrid[indx],k=1)
             else:
                 self._epifreqInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid[indx],self._epifreqGrid[indx],k=1)
         else:
             if self._logR:
                 self._epifreqInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid[indx],self._epifreqGrid[indx],k=3)
             else:
                 self._epifreqInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid[indx],self._epifreqGrid[indx],k=3)
     if interpverticalfreq:
         from galpy.potential import verticalfreq
         if not numcores is None:
             self._verticalfreqGrid= multi.parallel_map((lambda x: verticalfreq(self._origPot,self._rgrid[x])),
                                                    range(len(self._rgrid)),numcores=numcores)
         else:
             self._verticalfreqGrid= numpy.array([verticalfreq(self._origPot,r) for r in self._rgrid])
         if self._logR:
             self._verticalfreqInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid,self._verticalfreqGrid,k=3)
         else:
             self._verticalfreqInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid,self._verticalfreqGrid,k=3)
     return None
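A hedged usage sketch for the constructor above: building an interpolated version of a galpy potential and evaluating it on the grid-covered region. The potential name MWPotential2014 and the keyword names follow standard galpy conventions, but should be checked against the galpy version this snippet comes from.

from galpy.potential import MWPotential2014, interpRZPotential

ip = interpRZPotential(RZPot=MWPotential2014,
                       rgrid=(0.01, 2., 101), zgrid=(0., 0.2, 101),
                       logR=False, interpPot=True, interpRforce=True,
                       zsym=True)
# The instance behaves like any other galpy potential; evaluating it inside
# the grid, e.g. ip(0.9, 0.05), uses the RectBivariateSpline built in __init__.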
예제 #22
0
def calc_model(params,options,data,vs,normintstuff=None):
    out= numpy.zeros((len(data),options.nv))
    #Model
    vo= get_vo(params,options,1)
    ro= get_ro(params,options)
    pot= setup_potential(params,options,1)
    aA= setup_aA(pot,options)
    dfparams= get_dfparams(params,0,options,log=False)
    if options.dfmodel.lower() == 'qdf':
        #Normalize
        hr= dfparams[0]/ro
        sr= dfparams[1]/vo
        sz= dfparams[2]/vo
        hsr= dfparams[3]/ro
        hsz= dfparams[4]/ro
        #Setup
        qdf= quasiisothermaldf(hr,sr,sz,hsr,hsz,pot=pot,aA=aA,cutcounter=True)
    if not params[6] == 0.:
        print "Calculating normalization of qdf ..."
        normalization_qdf= calc_normint(qdf,0,normintstuff,params,1,options,
                                        -numpy.finfo(numpy.dtype(numpy.float64)).max)
        print "Calculating normalization of outliers ..."
        normalization_out= calc_normint(qdf,0,normintstuff,params,1,options,
                                        0.,fqdf=0.)   
    else:
        normalization_qdf= 0.
        normalization_out= 1.
    #Get coordinates
    R= ((ro*_REFR0-data.xc)**2.+data.yc**2.)**(0.5)/ro/_REFR0
    z= (data.zc+_ZSUN)/ro/_REFR0
    if 'vr' in options.type.lower() or 'vt' in options.type.lower():
        cov_vxvyvz= numpy.zeros((len(data),3,3))
        cov_vxvyvz[:,0,0]= data.vxc_err**2.
        cov_vxvyvz[:,1,1]= data.vyc_err**2.
        cov_vxvyvz[:,2,2]= data.vzc_err**2.
        cov_vxvyvz[:,0,1]= data.vxvyc_rho*data.vxc_err*data.vyc_err
        cov_vxvyvz[:,0,2]= data.vxvzc_rho*data.vxc_err*data.vzc_err
        cov_vxvyvz[:,1,2]= data.vyvzc_rho*data.vyc_err*data.vzc_err
        #Rotate vxvyvz to vRvTvz
        cosphi= (_REFR0*ro-data.xc)/R/ro/_REFR0
        sinphi= data.yc/R/ro/_REFR0
        for rr in range(len(data.xc)):
            rot= numpy.array([[cosphi[rr],sinphi[rr]],
                              [-sinphi[rr],cosphi[rr]]])
            sxy= cov_vxvyvz[rr,0:2,0:2]
            sRT= numpy.dot(rot,numpy.dot(sxy,rot.T))
            cov_vxvyvz[rr,0:2,0:2]= sRT
    else:
        cov_vxvyvz= None # FOR MULTI
    if not options.multi is None:
        multOut= multi.parallel_map((lambda x: _calc_model_one(x,R,z,vs,qdf,options,data,params,cov_vxvyvz,vo,norm=normalization_qdf/normalization_out/12.)),
                                    range(len(data)),
                                    numcores=numpy.amin([len(data),
                                                         multiprocessing.cpu_count(),
                                                         options.multi]))
        for ii in range(len(data)):
            out[ii,:]= multOut[ii]
    else:
        for ii in range(len(data)):
            if options.type.lower() == 'vz':
                #thisp= numpy.array([qdf.pvz(v/_REFV0/vo,R[ii],z[ii],ngl=options.ngl,gl=True) for v in vs])
                thisp= qdf.pvz(vs/_REFV0/vo,R[ii]+numpy.zeros(len(vs)),z[ii]+numpy.zeros(len(vs)),ngl=options.ngl,gl=True)
                if not params[6] == 0.:
                    thisp+= params[6]*normalization_qdf/normalization_out/12./_SZHALO*_REFV0*vo*numpy.exp(-0.5*vs**2./_SZHALO**2.)*vo**2./numpy.sqrt(2.*math.pi)
                ndimage.filters.gaussian_filter1d(thisp,
                                                  data.vzc_err[ii]/(vs[1]-vs[0]),
                                                  output=thisp)
            elif options.type.lower() == 'vr':
                thisp= numpy.array([qdf.pvR(v/_REFV0/vo,R[ii],z[ii],ngl=options.ngl,gl=True) for v in vs])
                ndimage.filters.gaussian_filter1d(thisp,
                                                  numpy.sqrt(cov_vxvyvz[ii,0,0])/(vs[1]-vs[0]),
                                                  output=thisp)
            elif options.type.lower() == 'vt':
                if options.fitdvt:
                    dvt= get_dvt(params,options)
                else:
                    dvt= 0.
                thisp= numpy.array([qdf.pvT(v/_REFV0/vo+dvt/vo,R[ii],z[ii],ngl=options.ngl,gl=True) for v in vs])
                ndimage.filters.gaussian_filter1d(thisp,
                                                  numpy.sqrt(cov_vxvyvz[ii,1,1])/(vs[1]-vs[0]),
                                                  output=thisp)
            out[ii,:]= thisp/numpy.sum(thisp)/(vs[1]-vs[0])
    return out
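In the serial branch above, the model PDF is convolved with each star's velocity uncertainty by passing gaussian_filter1d a sigma expressed in grid pixels. A minimal standalone sketch of that convolution step (the grid, PDF, and error values are made up):

import numpy
from scipy import ndimage

vs = numpy.linspace(-120., 120., 201)      # velocity grid in km/s
pdf = numpy.exp(-0.5 * vs**2. / 15.**2.)   # model PDF before convolution
verr = 5.                                  # assumed measurement error in km/s

# sigma is given in pixels, i.e. the error divided by the grid spacing
smoothed = ndimage.gaussian_filter1d(pdf, verr / (vs[1] - vs[0]))
smoothed /= numpy.sum(smoothed) * (vs[1] - vs[0])   # renormalize to unit area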
예제 #23
0
def generate(locations,
             type='exp',
             sample='lowlow',
             extmap='green15',
             nls=101,
             nmock=1000,
             H0=-1.49,
             _dmapg15=None,
             ncpu=1):
    """
    NAME:
       generate
    PURPOSE:
       generate mock data following a given density
    INPUT:
       locations - locations to be included in the sample
       type= ('exp') type of density profile to sample from
       sample= ('lowlow') for selecting mock parameters
       extmap= ('green15') extinction map to use ('marshall06' and others use Green15 to fill in unobserved regions)
       nls= (101) number of longitude bins to use for each field
       nmock= (1000) number of mock data points to generate
       H0= (-1.49) absolute magnitude (can be array w/ sampling spread)
       ncpu= (1) number of cpus to use to compute the probability
    OUTPUT:
       mockdata recarray with tags 'RC_GALR_H', 'RC_GALPHI_H', 'RC_GALZ_H'
    HISTORY:
       2015-04-03 - Written - Bovy (IAS)
    """
    if isinstance(H0, float): H0 = [H0]
    # Setup the density function and its initial parameters
    rdensfunc = fitDens._setup_densfunc(type)
    mockparams = _setup_mockparams_densfunc(type, sample)
    densfunc = lambda x, y, z: rdensfunc(x, y, z, params=mockparams)
    # Setup the extinction map
    global dmap
    global dmapg15
    if _dmapg15 is None: dmapg15 = mwdust.Green15(filter='2MASS H')
    else: dmapg15 = _dmapg15
    if isinstance(extmap, mwdust.DustMap3D.DustMap3D):
        dmap = extmap
    elif extmap.lower() == 'green15':
        dmap = dmapg15
    elif extmap.lower() == 'marshall06':
        dmap = mwdust.Marshall06(filter='2MASS H')
    elif extmap.lower() == 'sale14':
        dmap = mwdust.Sale14(filter='2MASS H')
    elif extmap.lower() == 'drimmel03':
        dmap = mwdust.Drimmel03(filter='2MASS H')
    # Use brute-force rejection sampling to make no approximations
    # First need to estimate the max probability to use in rejection;
    # Loop through all locations and compute sampling probability on grid in
    # (l,b,D)
    # First restore the APOGEE selection function (assumed pre-computed)
    global apo
    selectFile = '../savs/selfunc-nospdata.sav'
    if os.path.exists(selectFile):
        with open(selectFile, 'rb') as savefile:
            apo = pickle.load(savefile)
    # Now compute the necessary coordinate transformations and evaluate the
    # maximum probability
    distmods = numpy.linspace(7., 15.5, 301)
    ds = 10.**(distmods / 5 - 2.)
    nbs = nls
    lnprobs = numpy.empty((len(locations), len(distmods), nbs, nls))
    radii = []
    lcens, bcens = [], []
    lnprobs = multi.parallel_map(lambda x: _calc_lnprob(
        locations[x], nls, nbs, ds, distmods, H0, densfunc),
                                 range(len(locations)),
                                 numcores=numpy.amin([
                                     len(locations),
                                     multiprocessing.cpu_count(), ncpu
                                 ]))
    lnprobs = numpy.array(lnprobs)
    for ll, loc in enumerate(locations):
        lcen, bcen = apo.glonGlat(loc)
        rad = apo.radius(loc)
        radii.append(rad)  # save for later
        lcens.append(lcen[0])
        bcens.append(bcen[0])
    maxp = (numpy.exp(numpy.nanmax(lnprobs)) -
            10.**-8.) * 1.1  # Just to be sure
    # Now generate mock data using rejection sampling
    nout = 0
    arlocations = numpy.array(locations)
    arradii = numpy.array(radii)
    arlcens = numpy.array(lcens)
    arbcens = numpy.array(bcens)
    out = numpy.recarray((nmock, ),
                         dtype=[('RC_DIST_H', 'f8'), ('RC_DM_H', 'f8'),
                                ('RC_GALR_H', 'f8'), ('RC_GALPHI_H', 'f8'),
                                ('RC_GALZ_H', 'f8')])
    while nout < nmock:
        nnew = 2 * (nmock - nout)
        # nnew new locations
        locIndx = numpy.floor(
            numpy.random.uniform(size=nnew) * len(locations)).astype('int')
        newlocations = arlocations[locIndx]
        # Point within these locations
        newds_coord = numpy.random.uniform(size=nnew)
        newds= 10.**((newds_coord*(numpy.amax(distmods)-numpy.amin(distmods))\
            +numpy.amin(distmods))/5.-2.)
        newdls_coord = numpy.random.uniform(size=nnew)
        newdls= newdls_coord*2.*arradii[locIndx]\
            -arradii[locIndx]
        newdbs_coord = numpy.random.uniform(size=nnew)
        newdbs= newdbs_coord*2.*arradii[locIndx]\
            -arradii[locIndx]
        newr2s = newdls**2. + newdbs**2.
        keepIndx = newr2s < arradii[locIndx]**2.
        newlocations = newlocations[keepIndx]
        newds_coord = newds_coord[keepIndx]
        newdls_coord = newdls_coord[keepIndx]
        newdbs_coord = newdbs_coord[keepIndx]
        newds = newds[keepIndx]
        newdls = newdls[keepIndx]
        newdbs = newdbs[keepIndx]
        newls = newdls + arlcens[locIndx][keepIndx]
        newbs = newdbs + arbcens[locIndx][keepIndx]
        # Reject?
        tps = numpy.zeros_like(newds)
        for nloc in list(set(newlocations)):
            lindx = newlocations == nloc
            pindx = arlocations == nloc
            coord = numpy.array([
                newds_coord[lindx] * (len(distmods) - 1.),
                newdbs_coord[lindx] * (nbs - 1.),
                newdls_coord[lindx] * (nls - 1.)
            ])
            tps[lindx]= \
                numpy.exp(ndimage.interpolation.map_coordinates(\
                    lnprobs[pindx][0],
                    coord,cval=-10.,
                    order=1))-10.**-8.
        XYZ = bovy_coords.lbd_to_XYZ(newls, newbs, newds, degree=True)
        Rphiz = bovy_coords.XYZ_to_galcencyl(XYZ[:, 0],
                                             XYZ[:, 1],
                                             XYZ[:, 2],
                                             Xsun=define_rcsample._R0,
                                             Ysun=0.,
                                             Zsun=define_rcsample._Z0)
        testp = numpy.random.uniform(size=len(newds)) * maxp
        keepIndx = tps > testp
        if numpy.sum(keepIndx) > nmock - nout:
            rangeIndx = numpy.zeros(len(keepIndx), dtype='int')
            rangeIndx[keepIndx] = numpy.arange(numpy.sum(keepIndx))
            keepIndx *= (rangeIndx < nmock - nout)
        out['RC_DIST_H'][nout:nout + numpy.sum(keepIndx)] = newds[keepIndx]
        out['RC_DM_H'][nout:nout+numpy.sum(keepIndx)]= newds_coord[keepIndx]*(numpy.amax(distmods)-numpy.amin(distmods))\
            +numpy.amin(distmods)
        out['RC_GALR_H'][nout:nout + numpy.sum(keepIndx)] = Rphiz[0][keepIndx]
        out['RC_GALPHI_H'][nout:nout +
                           numpy.sum(keepIndx)] = Rphiz[1][keepIndx]
        out['RC_GALZ_H'][nout:nout + numpy.sum(keepIndx)] = Rphiz[2][keepIndx]
        nout = nout + numpy.sum(keepIndx)
    return (out, lnprobs)
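generate above relies on brute-force rejection sampling: candidates are drawn uniformly in the sampling coordinates, the target probability is evaluated, and a candidate is kept when a uniform deviate below the probability ceiling maxp falls under it. A minimal one-dimensional sketch of that accept/reject loop (target density and bounds are arbitrary):

import numpy

def rejection_sample(target, xmin, xmax, pmax, nsample):
    # Keep drawing batches of candidates until nsample points are accepted.
    out = []
    while len(out) < nsample:
        x = numpy.random.uniform(xmin, xmax, size=2 * (nsample - len(out)))
        keep = numpy.random.uniform(0., pmax, size=len(x)) < target(x)
        out.extend(x[keep][:nsample - len(out)])
    return numpy.array(out)

samples = rejection_sample(lambda x: numpy.exp(-0.5 * x**2.), -5., 5., 1., 1000)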
def create_frames(options, args):
    # First reload the model
    with open('gd1pepper%isampling.pkl' % options.nsnap, 'rb') as savefile:
        sdf_pepper_leading = pickle.load(savefile)
    with open('gd1pepper%isampling_trailing.pkl' % options.nsnap,
              'rb') as savefile:
        sdf_pepper_trailing = pickle.load(savefile)
    # Output times
    timpacts = sdf_pepper_leading._uniq_timpact
    # Sample unperturbed aAt
    numpy.random.seed(1)
    Oml,anglel,dtl= super(streampepperdf,sdf_pepper_leading)._sample_aAt(\
        options.nparticles)
    Omt,anglet,dtt= super(streampepperdf,sdf_pepper_trailing)._sample_aAt(\
        options.nparticles)
    # Setup progenitor
    prog = sdf_pepper_leading._progenitor().flip()
    prog.integrate(
        numpy.linspace(0., 9. / bovy_conversion.time_in_Gyr(V0, R0), 10001),
        sdf_pepper_leading._pot)
    prog.flip()
    # Setup impacts
    if options.single:
        # Hit the leading arm and the trailing arm 1 Gyr later
        m = options.singlemimpact / bovy_conversion.mass_in_1010msol(
            V0, R0) / 1000.
        t= timpacts[\
            numpy.argmin(\
                numpy.fabs(\
                    numpy.array(timpacts)\
                        -options.singletimpact\
                        /bovy_conversion.time_in_Gyr(V0,R0)))]
        sdf_pepper_leading.set_impacts(\
            impactb=[0.5*simulate_streampepper.rs(options.singlemimpact*10.**7.)],
            subhalovel=numpy.array([[-25.,155.,30.]])/V0,
            impact_angle=[0.2],
            timpact=[t],
            GM=[m],rs=[simulate_streampepper.rs(options.singlemimpact*10.**7.)])
        # Trailing
        m = options.singlemimpact / bovy_conversion.mass_in_1010msol(
            V0, R0) / 1000.
        t= timpacts[\
            numpy.argmin(\
                numpy.fabs(\
                    numpy.array(timpacts)\
                        -(options.singletimpact+1.)\
                        /bovy_conversion.time_in_Gyr(V0,R0)))]
        sdf_pepper_trailing.set_impacts(\
            impactb=[1.*simulate_streampepper.rs(options.singlemimpact*10.**7.)],
            subhalovel=numpy.array([[-25.,155.,30.]])/V0,
            impact_angle=[-0.3],
            timpact=[t],
            GM=[m],rs=[simulate_streampepper.rs(options.singlemimpact*10.**7.)])
    elif options.pepper:
        # Sampling functions
        massrange = [options.Mmin, options.Mmax]
        plummer = False
        Xrs = 5.
        nsubhalo = simulate_streampepper.nsubhalo
        rs = simulate_streampepper.rs
        dNencdm = simulate_streampepper.dNencdm
        sample_GM= lambda: (10.**((-0.5)*massrange[0])\
                            +(10.**((-0.5)*massrange[1])\
                              -10.**((-0.5)*massrange[0]))\
                            *numpy.random.uniform())**(1./(-0.5))\
            /bovy_conversion.mass_in_msol(V0,R0)
        rate_range = numpy.arange(massrange[0] + 0.5, massrange[1] + 0.5, 1)
        rate = numpy.sum([
            dNencdm(sdf_pepper_leading, 10.**r, Xrs=Xrs, plummer=plummer)
            for r in rate_range
        ])
        rate = options.timescdm * rate
        sample_rs = lambda x: rs(x * bovy_conversion.mass_in_1010msol(V0, R0) *
                                 10.**10.,
                                 plummer=plummer)
        # Pepper both
        sdf_pepper_leading.simulate(rate=rate,
                                    sample_GM=sample_GM,
                                    sample_rs=sample_rs,
                                    Xrs=Xrs)
        print(numpy.amax(sdf_pepper_leading._GM)
              * bovy_conversion.mass_in_1010msol(V0, R0))
        sdf_pepper_trailing.simulate(rate=rate,
                                     sample_GM=sample_GM,
                                     sample_rs=sample_rs,
                                     Xrs=Xrs)
        print(numpy.amax(sdf_pepper_trailing._GM)
              * bovy_conversion.mass_in_1010msol(V0, R0))
    else:
        # Hit both with zero
        sdf_pepper_leading.set_impacts(\
            impactb=[0.],
            subhalovel=numpy.array([[-25.,155.,30.]])/V0,
            impact_angle=[0.2],
            timpact=[timpacts[0]],
            GM=[0.],rs=[1.])
        sdf_pepper_trailing.set_impacts(\
            impactb=[0.],
            subhalovel=numpy.array([[-25.,155.,30.]])/V0,
            impact_angle=[-0.2],
            timpact=[timpacts[0]],
            GM=[0.],rs=[1.])
    # Now make all frames
    dum = multi.parallel_map((lambda x: _plot_one_frame(
        x, options, prog, timpacts, sdf_pepper_leading, sdf_pepper_trailing,
        Oml, Omt, anglel, anglet, dtl, dtt)),
                             range(len(timpacts)),
                             numcores=numpy.amin([len(timpacts), 30]))
    return None
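The sample_GM lambda in the pepper branch above appears to be an inverse-transform draw from a subhalo mass function dN/dM proportional to M**-1.5 between 10**Mmin and 10**Mmax solar masses, before the conversion to galpy mass units. A sketch of that draw in plain solar masses, assuming that reading is correct:

import numpy

def sample_mass(Mmin, Mmax, size=1):
    # Mmin, Mmax are log10(M/Msun); returns masses in Msun drawn from
    # dN/dM proportional to M**-1.5 via the inverse CDF.
    u = numpy.random.uniform(size=size)
    lower = 10.**(-0.5 * Mmin)
    upper = 10.**(-0.5 * Mmax)
    return (lower + (upper - lower) * u)**(1. / -0.5)

masses = sample_mass(5., 9., size=10000)   # 10^5 to 10^9 Msun subhaloes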
예제 #25
0
    def __init__(self,pot=None,delta=None,Rmax=5.,
                 nE=25,npsi=25,nLz=30,numcores=1,
                 **kwargs):
        """
        NAME:
           __init__
        PURPOSE:
           initialize an actionAngleStaeckelGrid object
        INPUT:
           pot= potential or list of potentials

           delta= focus of prolate confocal coordinate system (can be Quantity)

           Rmax = Rmax for building grids (natural units)

           nE=, npsi=, nLz= grid size

           numcores= number of cpus to use to parallellize

           ro= distance from vantage point to GC (kpc; can be Quantity)

           vo= circular velocity at ro (km/s; can be Quantity)

        OUTPUT:
         
           instance

        HISTORY:

            2012-11-29 - Written - Bovy (IAS)

        """
        actionAngle.__init__(self,
                             ro=kwargs.get('ro',None),vo=kwargs.get('vo',None))
        if pot is None:
            raise IOError("Must specify pot= for actionAngleStaeckelGrid")
        self._pot= pot
        if delta is None:
            raise IOError("Must specify delta= for actionAngleStaeckelGrid")
        if ext_loaded and 'c' in kwargs and kwargs['c']:
            self._c= True
        else:
            self._c= False
        self._delta= delta
        if _APY_LOADED and isinstance(self._delta,units.Quantity):
            self._delta= self._delta.to(units.kpc).value/self._ro
        self._Rmax= Rmax
        self._Rmin= 0.01
        #Set up the actionAngleStaeckel object that we will use to interpolate
        self._aA= actionAngleStaeckel.actionAngleStaeckel(pot=self._pot,delta=self._delta,c=self._c)
        #Build grid
        self._Lzmin= 0.01
        self._Lzs= numpy.linspace(self._Lzmin,
                                  self._Rmax\
                                      *galpy.potential.vcirc(self._pot,
                                                             self._Rmax),
                                  nLz)
        self._Lzmax= self._Lzs[-1]
        self._nLz= nLz
        #Calculate E_c(R=RL), energy of circular orbit
        self._RL= numpy.array([galpy.potential.rl(self._pot,l) for l in self._Lzs])
        self._RLInterp= interpolate.InterpolatedUnivariateSpline(self._Lzs,
                                                                 self._RL,k=3)
        self._ERL= _evaluatePotentials(self._pot,self._RL,
                                       numpy.zeros(self._nLz))\
                                       +self._Lzs**2./2./self._RL**2.
        self._ERLmax= numpy.amax(self._ERL)+1.
        self._ERLInterp= interpolate.InterpolatedUnivariateSpline(self._Lzs,
                                                                  numpy.log(-(self._ERL-self._ERLmax)),k=3)
        self._Ramax= 200./8.
        self._ERa= _evaluatePotentials(self._pot,self._Ramax,0.) +self._Lzs**2./2./self._Ramax**2.
        #self._EEsc= numpy.array([self._ERL[ii]+galpy.potential.vesc(self._pot,self._RL[ii])**2./4. for ii in range(nLz)])
        self._ERamax= numpy.amax(self._ERa)+1.
        self._ERaInterp= interpolate.InterpolatedUnivariateSpline(self._Lzs,
                                                                  numpy.log(-(self._ERa-self._ERamax)),k=3)
        y= numpy.linspace(0.,1.,nE)
        self._nE= nE
        psis= numpy.linspace(0.,1.,npsi)*numpy.pi/2.
        self._npsi= npsi
        jr= numpy.zeros((nLz,nE,npsi))
        jz= numpy.zeros((nLz,nE,npsi))
        u0= numpy.zeros((nLz,nE))
        jrLzE= numpy.zeros((nLz))
        jzLzE= numpy.zeros((nLz))
        #First calculate u0
        thisLzs= (numpy.tile(self._Lzs,(nE,1)).T).flatten()
        thisERL= (numpy.tile(self._ERL,(nE,1)).T).flatten()
        thisERa= (numpy.tile(self._ERa,(nE,1)).T).flatten()
        thisy= (numpy.tile(y,(nLz,1))).flatten()
        thisE= _invEfunc(_Efunc(thisERa,thisERL)+thisy*(_Efunc(thisERL,thisERL)-_Efunc(thisERa,thisERL)),thisERL)
        if isinstance(self._pot,galpy.potential.interpRZPotential) and hasattr(self._pot,'_origPot'):
            u0pot= self._pot._origPot
        else:
            u0pot= self._pot
        if self._c:
            mu0= actionAngleStaeckel_c.actionAngleStaeckel_calcu0(thisE,thisLzs,
                                                                  u0pot,
                                                                  self._delta)[0]
        else:
            if numcores > 1:
                mu0= multi.parallel_map((lambda x: self.calcu0(thisE[x],
                                                               thisLzs[x])),
                                        range(nE*nLz),
                                        numcores=numcores)
            else:
                mu0= list(map((lambda x: self.calcu0(thisE[x],
                                                     thisLzs[x])),
                              range(nE*nLz)))
        u0= numpy.reshape(mu0,(nLz,nE))
        thisR= self._delta*numpy.sinh(u0)
        thisv= numpy.reshape(self.vatu0(thisE.flatten(),thisLzs.flatten(),
                                        u0.flatten(),
                                        thisR.flatten()),(nLz,nE))
        self.thisv= thisv
        #reshape
        thisLzs= numpy.reshape(thisLzs,(nLz,nE))
        thispsi= numpy.tile(psis,(nLz,nE,1)).flatten()
        thisLzs= numpy.tile(thisLzs.T,(npsi,1,1)).T.flatten()
        thisR= numpy.tile(thisR.T,(npsi,1,1)).T.flatten()
        thisv= numpy.tile(thisv.T,(npsi,1,1)).T.flatten()
        mjr, mlz, mjz= self._aA(thisR, #R
                                thisv*numpy.cos(thispsi), #vR
                                thisLzs/thisR, #vT
                                numpy.zeros(len(thisR)), #z
                                thisv*numpy.sin(thispsi), #vz
                                fixed_quad=True) 
        if isinstance(self._pot,galpy.potential.interpRZPotential) and hasattr(self._pot,'_origPot'):
            #Interpolated potentials have problems with extreme orbits
            indx= (mjr == 9999.99)
            indx+= (mjz == 9999.99)
            #Re-calculate these using the original potential, hopefully not too slow
            tmpaA= actionAngleStaeckel.actionAngleStaeckel(pot=self._pot._origPot,delta=self._delta,c=self._c)
            mjr[indx], dum, mjz[indx]= tmpaA(thisR[indx], #R
                                             thisv[indx]*numpy.cos(thispsi[indx]), #vR
                                             thisLzs[indx]/thisR[indx], #vT
                                             numpy.zeros(numpy.sum(indx)), #z
                                             thisv[indx]*numpy.sin(thispsi[indx]), #vz
                                             fixed_quad=True)
        jr= numpy.reshape(mjr,(nLz,nE,npsi))
        jz= numpy.reshape(mjz,(nLz,nE,npsi))
        for ii in range(nLz):
            jrLzE[ii]= numpy.nanmax(jr[ii,(jr[ii,:,:] != 9999.99)])
            jzLzE[ii]= numpy.nanmax(jz[ii,(jz[ii,:,:] != 9999.99)])
        jrLzE[(jrLzE == 0.)]= numpy.nanmin(jrLzE[(jrLzE > 0.)])
        jzLzE[(jzLzE == 0.)]= numpy.nanmin(jzLzE[(jzLzE > 0.)])
        for ii in range(nLz):
            jr[ii,:,:]/= jrLzE[ii]
            jz[ii,:,:]/= jzLzE[ii]
        #Deal w/ 9999.99
        jr[(jr > 1.)]= 1.
        jz[(jz > 1.)]= 1.
        #Deal w/ NaN
        jr[numpy.isnan(jr)]= 0.
        jz[numpy.isnan(jz)]= 0.
        #First interpolate the maxima
        self._jr= jr
        self._jz= jz
        self._u0= u0
        self._jrLzE= jrLzE
        self._jzLzE= jzLzE
        self._jrLzInterp= interpolate.InterpolatedUnivariateSpline(self._Lzs,
                                                                   numpy.log(jrLzE+10.**-5.),k=3)
        self._jzLzInterp= interpolate.InterpolatedUnivariateSpline(self._Lzs,
                                                                   numpy.log(jzLzE+10.**-5.),k=3)
        #Interpolate u0
        self._logu0Interp= interpolate.RectBivariateSpline(self._Lzs,
                                                           y,
                                                           numpy.log(u0),
                                                           kx=3,ky=3,s=0.)
        #spline filter jr and jz, such that they can be used with ndimage.map_coordinates
        self._jrFiltered= ndimage.spline_filter(numpy.log(self._jr+10.**-10.),order=3)
        self._jzFiltered= ndimage.spline_filter(numpy.log(self._jz+10.**-10.),order=3)
        # Check the units
        self._check_consistent_units()
        return None
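The action grids above are stored as spline-filtered log arrays (self._jrFiltered, self._jzFiltered) so that later lookups can call ndimage.map_coordinates with prefilter=False. A small self-contained illustration of that pattern on a toy 2D grid:

import numpy
from scipy import ndimage

grid = numpy.fromfunction(lambda i, j: numpy.sin(i / 10.) * numpy.cos(j / 10.),
                          (30, 40))
filtered = ndimage.spline_filter(grid, order=3)   # precompute spline coefficients once

# Evaluate at fractional (row, column) grid coordinates without re-filtering
rows = numpy.array([12.3, 5.7])
cols = numpy.array([20.1, 33.3])
vals = ndimage.map_coordinates(filtered, numpy.array([rows, cols]),
                               order=3, prefilter=False)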
예제 #26
0
def generate(locations,
             type='exp',
             sample='lowlow',
             extmap='green15',
             nls=101,
             nmock=1000,
             H0=-1.49,
             _dmapg15=None,             
             ncpu=1):
    """
    NAME:
       generate
    PURPOSE:
       generate mock data following a given density
    INPUT:
       locations - locations to be included in the sample
       type= ('exp') type of density profile to sample from
       sample= ('lowlow') for selecting mock parameters
       extmap= ('green15') extinction map to use ('marshall06' and others use Green15 to fill in unobserved regions)
       nls= (101) number of longitude bins to use for each field
       nmock= (1000) number of mock data points to generate
       H0= (-1.49) absolute magnitude (can be array w/ sampling spread)
       ncpu= (1) number of cpus to use to compute the probability
    OUTPUT:
       mockdata recarray with tags 'RC_GALR_H', 'RC_GALPHI_H', 'RC_GALZ_H'
    HISTORY:
       2015-04-03 - Written - Bovy (IAS)
    """
    if isinstance(H0,float): H0= [H0]
    # Setup the density function and its initial parameters
    rdensfunc= fitDens._setup_densfunc(type)
    mockparams= _setup_mockparams_densfunc(type,sample)
    densfunc= lambda x,y,z: rdensfunc(x,y,z,params=mockparams)   
    # Setup the extinction map
    global dmap
    global dmapg15
    if _dmapg15 is None: dmapg15= mwdust.Green15(filter='2MASS H')
    else: dmapg15= _dmapg15
    if isinstance(extmap,mwdust.DustMap3D.DustMap3D):
        dmap= extmap
    elif extmap.lower() == 'green15':
        dmap= dmapg15
    elif extmap.lower() == 'marshall06':
        dmap= mwdust.Marshall06(filter='2MASS H')
    elif extmap.lower() == 'sale14':
        dmap= mwdust.Sale14(filter='2MASS H')
    elif extmap.lower() == 'drimmel03':
        dmap= mwdust.Drimmel03(filter='2MASS H')
    # Use brute-force rejection sampling to make no approximations
    # First need to estimate the max probability to use in rejection;
    # Loop through all locations and compute sampling probability on grid in 
    # (l,b,D)
    # First restore the APOGEE selection function (assumed pre-computed)
    global apo
    selectFile= '../savs/selfunc-nospdata.sav'
    if os.path.exists(selectFile):
        with open(selectFile,'rb') as savefile:
            apo= pickle.load(savefile)
    # Now compute the necessary coordinate transformations and evaluate the 
    # maximum probability
    distmods= numpy.linspace(7.,15.5,301)
    ds= 10.**(distmods/5-2.)
    nbs= nls
    lnprobs= numpy.empty((len(locations),len(distmods),nbs,nls))
    radii= []
    lcens, bcens= [], []
    lnprobs= multi.parallel_map(lambda x: _calc_lnprob(locations[x],nls,nbs,
                                                       ds,distmods,
                                                       H0,
                                                       densfunc),
                                range(len(locations)),
                                numcores=numpy.amin([len(locations),
                                                     multiprocessing.cpu_count(),ncpu]))
    lnprobs= numpy.array(lnprobs)
    for ll, loc in enumerate(locations):
        lcen, bcen= apo.glonGlat(loc)
        rad= apo.radius(loc)
        radii.append(rad) # save for later
        lcens.append(lcen[0])
        bcens.append(bcen[0])
    maxp= (numpy.exp(numpy.nanmax(lnprobs))-10.**-8.)*1.1 # Just to be sure
    # Now generate mock data using rejection sampling
    nout= 0
    arlocations= numpy.array(locations)
    arradii= numpy.array(radii)
    arlcens= numpy.array(lcens)
    arbcens= numpy.array(bcens)
    out= numpy.recarray((nmock,),
                        dtype=[('RC_DIST_H','f8'),
                               ('RC_DM_H','f8'),
                               ('RC_GALR_H','f8'),
                               ('RC_GALPHI_H','f8'),
                               ('RC_GALZ_H','f8')])
    while nout < nmock:
        nnew= 2*(nmock-nout)
        # nnew new locations
        locIndx= numpy.floor(numpy.random.uniform(size=nnew)*len(locations)).astype('int')
        newlocations= arlocations[locIndx]
        # Point within these locations
        newds_coord= numpy.random.uniform(size=nnew)
        newds= 10.**((newds_coord*(numpy.amax(distmods)-numpy.amin(distmods))\
            +numpy.amin(distmods))/5.-2.)
        newdls_coord= numpy.random.uniform(size=nnew)
        newdls= newdls_coord*2.*arradii[locIndx]\
            -arradii[locIndx]
        newdbs_coord= numpy.random.uniform(size=nnew)
        newdbs= newdbs_coord*2.*arradii[locIndx]\
            -arradii[locIndx]
        newr2s= newdls**2.+newdbs**2.
        keepIndx= newr2s < arradii[locIndx]**2.
        newlocations= newlocations[keepIndx]
        newds_coord= newds_coord[keepIndx]
        newdls_coord= newdls_coord[keepIndx]
        newdbs_coord= newdbs_coord[keepIndx]
        newds= newds[keepIndx]
        newdls= newdls[keepIndx]
        newdbs= newdbs[keepIndx]
        newls= newdls+arlcens[locIndx][keepIndx]
        newbs= newdbs+arbcens[locIndx][keepIndx]
        # Reject?
        tps= numpy.zeros_like(newds)
        for nloc in list(set(newlocations)):
            lindx= newlocations == nloc
            pindx= arlocations == nloc
            coord= numpy.array([newds_coord[lindx]*(len(distmods)-1.),
                                newdbs_coord[lindx]*(nbs-1.),
                                newdls_coord[lindx]*(nls-1.)])
            tps[lindx]= \
                numpy.exp(ndimage.interpolation.map_coordinates(\
                    lnprobs[pindx][0],
                    coord,cval=-10.,
                    order=1))-10.**-8.
        XYZ= bovy_coords.lbd_to_XYZ(newls,newbs,newds,degree=True)
        Rphiz= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],XYZ[:,1],XYZ[:,2],
                                            Xsun=define_rcsample._R0,
                                            Ysun=0.,
                                            Zsun=define_rcsample._Z0)
        testp= numpy.random.uniform(size=len(newds))*maxp
        keepIndx= tps > testp 
        if numpy.sum(keepIndx) > nmock-nout:
            rangeIndx= numpy.zeros(len(keepIndx),dtype='int')
            rangeIndx[keepIndx]= numpy.arange(numpy.sum(keepIndx))
            keepIndx*= (rangeIndx < nmock-nout)
        out['RC_DIST_H'][nout:nout+numpy.sum(keepIndx)]= newds[keepIndx]
        out['RC_DM_H'][nout:nout+numpy.sum(keepIndx)]= newds_coord[keepIndx]*(numpy.amax(distmods)-numpy.amin(distmods))\
            +numpy.amin(distmods)
        out['RC_GALR_H'][nout:nout+numpy.sum(keepIndx)]= Rphiz[0][keepIndx]
        out['RC_GALPHI_H'][nout:nout+numpy.sum(keepIndx)]= Rphiz[1][keepIndx]
        out['RC_GALZ_H'][nout:nout+numpy.sum(keepIndx)]= Rphiz[2][keepIndx]
        nout= nout+numpy.sum(keepIndx)
    return (out,lnprobs)
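The sampling loop above is a brute-force rejection sampler: candidates are drawn uniformly in (location, l, b, D), their probability is looked up on the interpolated lnprobs grid, and a candidate is kept when a uniform deviate below the global maximum maxp falls under that probability. A minimal, self-contained sketch of the same idea in one dimension (target_pdf, pmax, and the interval are placeholders, not objects from the code above):

import numpy

def rejection_sample(target_pdf, pmax, nsamples, lo=0., hi=1.):
    # Minimal 1D rejection sampler: propose uniformly on [lo,hi] and accept a
    # candidate x with probability target_pdf(x)/pmax, where pmax bounds the
    # (unnormalized) density from above; target_pdf must accept numpy arrays
    out= []
    while len(out) < nsamples:
        x= numpy.random.uniform(lo, hi, size=2*(nsamples-len(out)))
        u= numpy.random.uniform(size=len(x))*pmax
        out.extend(x[target_pdf(x) > u])
    return numpy.array(out[:nsamples])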
Example #27
def plot_vs_jkz(parser):
    options,args= parser.parse_args()
    if options.basti:
        zs= numpy.array([0.004,0.008,0.01,0.0198,0.03,0.04])
    elif options.parsec:
        zs= numpy.arange(0.0005,0.06005,0.0005)
    else:
        zs= numpy.arange(0.0005,0.03005,0.0005)
    if os.path.exists(args[0]):
        savefile= open(args[0],'rb')
        plotthis= pickle.load(savefile)
        jks= pickle.load(savefile)
        zs= pickle.load(savefile)
        savefile.close()
    else:
        njks= 101
        jks= numpy.linspace(0.5,0.8,njks)
        plotthis= numpy.zeros((njks,len(zs)))
        funcargs= (zs,options,njks,jks)
        multOut= multi.parallel_map((lambda x: indiv_calc(x,
                                                          *funcargs)),
                                    range(len(zs)),
                                    numcores=numpy.amin([64,len(zs),
                                                         multiprocessing.cpu_count()]))
        for ii in range(len(zs)):
            plotthis[:,ii]= multOut[ii]
        #Save
        save_pickles(args[0],plotthis,jks,zs)
    #Plot
    if options.type == 'sig':
        if options.band.lower() == 'age':
            if options.relative:
                raise NotImplementedError("relative age not implemented yet")
            else:
                vmin, vmax= 0.,.5
                zlabel= r'$\mathrm{FWHM} / 2\sqrt{2\,\ln 2}$'
        else:
            if options.relative:
                vmin, vmax= 0.8,1.2
                zlabel= r'$\mathrm{FWHM}/\mathrm{FWHM}_{\mathrm{fiducial}}$'
            else:
                vmin, vmax= 0., 0.4
                zlabel= r'$\mathrm{FWHM} / 2\sqrt{2\,\ln 2}$'
    elif options.type == 'mode':
        if options.band.lower() == 'age':
            if options.relative:
                raise NotImplementedError("relative age not implemented yet")
            else:
                vmin, vmax= 0.,1.
                zlabel= r'$\Delta\displaystyle\arg\!\max_{\substack{\log_{10}\mathrm{Age}}}{p(\log_{10}\mathrm{Age}|[J-K_s]_0)}$'
        else:
            if options.relative:
                vmin, vmax= -0.05,0.05
                zlabel= r'$\Delta\displaystyle\arg\!\max_{\substack{K_s}}{p(M_{K_s}|[J-K_s]_0)}$'
            else:
                vmin, vmax= -1.8, -1.5
                if options.band.lower() == 'h':
                    zlabel= r'$\displaystyle\arg\!\max_{\substack{H}}{p(M_{H}|[J-K_s]_0)}$'
                else:
                    zlabel= r'$\displaystyle\arg\!\max_{\substack{K_s}}{p(M_{K_s}|[J-K_s]_0)}$'
    if options.basti:#Remap the Zs
        zs= numpy.array([0.004,0.008,0.01,0.0198,0.03,0.04])
        regularzs= numpy.arange(0.0005,0.04005,0.0005)
        njks= len(jks)
        regularplotthis= numpy.zeros((njks,len(regularzs)))
        for jj in range(len(regularzs)):
            #Find z
            thisindx= numpy.argmin(numpy.fabs(regularzs[jj]-zs))
            for ii in range(njks):
                regularplotthis[ii,jj]= plotthis[ii,thisindx]
        zs= regularzs
        plotthis= regularplotthis
    if options.relative and os.path.exists(options.infilename):
        savefile= open(options.infilename,'rb')
        plotthisrel= pickle.load(savefile)
        savefile.close()
        if options.basti:
            plotthisrel= plotthisrel[:,:80]
        elif not options.parsec:
            plotthisrel= plotthisrel[:,:60]
        if options.type == 'mode':
            plotthis-= plotthisrel
        elif options.type == 'sig':
            plotthis/= plotthisrel
    bovy_plot.bovy_print()
    if options.type == 'sig':
        plotthis[numpy.isnan(plotthis)]= vmax
    if options.relative:
        #Only plot between the cuts
        for ii in range(plotthis.shape[0]):
            indx= zs >= rcmodel.jkzcut(jks[ii],upper=True)
            indx+= zs <= rcmodel.jkzcut(jks[ii],upper=False)
            plotthis[ii,indx]= numpy.nan
    bovy_plot.bovy_dens2d(plotthis.T,origin='lower',cmap='jet',
                          xrange=[jks[0],jks[-1]],
                          yrange=[zs[0],zs[-1]],
                          vmin=vmin,vmax=vmax,
                          xlabel=r'$(J-K_s)_0$',
                          ylabel=r'$Z$',
                          interpolation='nearest',
                          colorbar=True,
                          shrink=0.78,
                          zlabel=zlabel)
    #Overplot cuts
    bovy_plot.bovy_plot(jks,rcmodel.jkzcut(jks),
                        'w--',lw=2.,overplot=True)
    bovy_plot.bovy_plot(jks,rcmodel.jkzcut(jks,upper=True),
                        'w--',lw=2.,overplot=True)
    if options.basti:
        pyplot.annotate(r'$\mathrm{BaSTI}$',
                        (0.5,1.08),xycoords='axes fraction',
                        horizontalalignment='center',
                        verticalalignment='top',size=16.)
    elif not options.parsec:
        pyplot.annotate(r'$\mathrm{Padova}$',
                        (0.5,1.08),xycoords='axes fraction',
                        horizontalalignment='center',
                        verticalalignment='top',size=16.)
    elif options.imfmodel == 'kroupa2003':
        pyplot.annotate(r'$\mathrm{Kroupa\ (2003)\ IMF}$',
                        (0.5,1.08),xycoords='axes fraction',
                        horizontalalignment='center',
                        verticalalignment='top',size=16.)
    elif 'expsfh' in args[0]:
        pyplot.annotate(r'$\mathrm{p(\mathrm{Age}) \propto e^{\mathrm{Age}/(8\,\mathrm{Gyr})}}$',
                        (0.5,1.08),xycoords='axes fraction',
                        horizontalalignment='center',
                        verticalalignment='top',size=16.)
    elif not options.eta is None:
        pyplot.annotate(r'$\eta_{\mathrm{Reimers}} = %.1f$' % options.eta,
                        (0.5,1.08),xycoords='axes fraction',
                        horizontalalignment='center',
                        verticalalignment='top',size=16.)
    elif False:
        pyplot.annotate(r'$\mathrm{Padova}$',
                        (0.5,1.08),xycoords='axes fraction',
                        horizontalalignment='center',
                        verticalalignment='top',size=16.)
    bovy_plot.bovy_end_print(options.outfilename)
    return None
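The FWHM / 2√(2 ln 2) label used above is just the FWHM re-expressed as the dispersion of the equivalent Gaussian. A one-line helper (a sketch, not part of the original module) makes the conversion explicit:

import numpy

def fwhm_to_sigma(fwhm):
    # For a Gaussian, FWHM = 2*sqrt(2*ln 2)*sigma (about 2.3548*sigma)
    return fwhm/(2.*numpy.sqrt(2.*numpy.log(2.)))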
Example #28
    def __init__(self,
                 RZPot=None,rgrid=(numpy.log(0.01),numpy.log(20.),101),
                 zgrid=(0.,1.,101),logR=True,
                 interpPot=False,interpRforce=False,interpzforce=False,
                 interpDens=False,
                 interpvcirc=False,
                 interpdvcircdr=False,
                 interpepifreq=False,interpverticalfreq=False,
                 ro=None,vo=None,
                 use_c=False,enable_c=False,zsym=True,
                 numcores=None):
        """
        NAME:

           __init__

        PURPOSE:

           Initialize an interpRZPotential instance

        INPUT:

           RZPot - RZPotential to be interpolated

           rgrid - R grid to be given to linspace as in rs= linspace(*rgrid)

           zgrid - z grid to be given to linspace as in zs= linspace(*zgrid)

           logR - if True, rgrid is in the log of R so logrs= linspace(*rgrid)

           interpPot, interpRforce, interpzforce, interpDens,interpvcirc, interpepifreq, interpverticalfreq, interpdvcircdr= if True, interpolate these functions

           use_c= use C to speed up the calculation of the grid

           enable_c= enable use of C for interpolations

           zsym= if True (default), the potential is assumed to be symmetric around z=0 (so you can use, e.g.,  zgrid=(0.,1.,101)).

           numcores= if set to an integer, use this many cores (only used for vcirc, dvcircdR, epifreq, and verticalfreq; NOT NECESSARILY FASTER, TIME TO MAKE SURE)

           ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)

        OUTPUT:

           instance

        HISTORY:

           2010-07-21 - Written - Bovy (NYU)

           2013-01-24 - Started with new implementation - Bovy (IAS)

        """
        if isinstance(RZPot,interpRZPotential):
            from galpy.potential import PotentialError
            raise PotentialError('Cannot setup interpRZPotential with another interpRZPotential')
        # Propagate ro and vo
        roSet= True
        voSet= True
        if ro is None:
            if isinstance(RZPot,list):
                ro= RZPot[0]._ro
                roSet= RZPot[0]._roSet
            else:
                ro= RZPot._ro
                roSet= RZPot._roSet
        if vo is None:
            if isinstance(RZPot,list):
                vo= RZPot[0]._vo
                voSet= RZPot[0]._voSet
            else:
                vo= RZPot._vo
                voSet= RZPot._voSet
        Potential.__init__(self,amp=1.,ro=ro,vo=vo)
        # Turn off physical if it hadn't been on
        if not roSet: self._roSet= False
        if not voSet: self._voSet= False
        self._origPot= RZPot
        self._rgrid= numpy.linspace(*rgrid)
        self._logR= logR
        if self._logR:
            self._rgrid= numpy.exp(self._rgrid)
            self._logrgrid= numpy.log(self._rgrid)
        self._zgrid= numpy.linspace(*zgrid)
        self._interpPot= interpPot
        self._interpRforce= interpRforce
        self._interpzforce= interpzforce
        self._interpDens= interpDens
        self._interpvcirc= interpvcirc
        self._interpdvcircdr= interpdvcircdr
        self._interpepifreq= interpepifreq
        self._interpverticalfreq= interpverticalfreq
        self._enable_c= enable_c*ext_loaded
        self.hasC= self._enable_c
        self._zsym= zsym
        if interpPot:
            if use_c*ext_loaded:
                self._potGrid, err= calc_potential_c(self._origPot,self._rgrid,self._zgrid)
            else:
                from galpy.potential import evaluatePotentials
                potGrid= numpy.zeros((len(self._rgrid),len(self._zgrid)))
                for ii in range(len(self._rgrid)):
                    for jj in range(len(self._zgrid)):
                        potGrid[ii,jj]= evaluatePotentials(self._origPot,self._rgrid[ii],self._zgrid[jj])
                self._potGrid= potGrid
            if self._logR:
                self._potInterp= interpolate.RectBivariateSpline(self._logrgrid,
                                                                 self._zgrid,
                                                                 self._potGrid,
                                                                 kx=3,ky=3,s=0.)
            else:
                self._potInterp= interpolate.RectBivariateSpline(self._rgrid,
                                                                 self._zgrid,
                                                                 self._potGrid,
                                                                 kx=3,ky=3,s=0.)
            if enable_c*ext_loaded:
                self._potGrid_splinecoeffs= calc_2dsplinecoeffs_c(self._potGrid)
        if interpRforce:
            if use_c*ext_loaded:
                self._rforceGrid, err= calc_potential_c(self._origPot,self._rgrid,self._zgrid,rforce=True)
            else:
                from galpy.potential import evaluateRforces
                rforceGrid= numpy.zeros((len(self._rgrid),len(self._zgrid)))
                for ii in range(len(self._rgrid)):
                    for jj in range(len(self._zgrid)):
                        rforceGrid[ii,jj]= evaluateRforces(self._origPot,self._rgrid[ii],self._zgrid[jj])
                self._rforceGrid= rforceGrid
            if self._logR:
                self._rforceInterp= interpolate.RectBivariateSpline(self._logrgrid,
                                                                    self._zgrid,
                                                                    self._rforceGrid,
                                                                    kx=3,ky=3,s=0.)
            else:
                self._rforceInterp= interpolate.RectBivariateSpline(self._rgrid,
                                                                    self._zgrid,
                                                                    self._rforceGrid,
                                                                    kx=3,ky=3,s=0.)
            if enable_c*ext_loaded:
                self._rforceGrid_splinecoeffs= calc_2dsplinecoeffs_c(self._rforceGrid)
        if interpzforce:
            if use_c*ext_loaded:
                self._zforceGrid, err= calc_potential_c(self._origPot,self._rgrid,self._zgrid,zforce=True)
            else:
                from galpy.potential import evaluatezforces
                zforceGrid= numpy.zeros((len(self._rgrid),len(self._zgrid)))
                for ii in range(len(self._rgrid)):
                    for jj in range(len(self._zgrid)):
                        zforceGrid[ii,jj]= evaluatezforces(self._origPot,self._rgrid[ii],self._zgrid[jj])
                self._zforceGrid= zforceGrid
            if self._logR:
                self._zforceInterp= interpolate.RectBivariateSpline(self._logrgrid,
                                                                    self._zgrid,
                                                                    self._zforceGrid,
                                                                    kx=3,ky=3,s=0.)
            else:
                self._zforceInterp= interpolate.RectBivariateSpline(self._rgrid,
                                                                    self._zgrid,
                                                                    self._zforceGrid,
                                                                    kx=3,ky=3,s=0.)
            if enable_c*ext_loaded:
                self._zforceGrid_splinecoeffs= calc_2dsplinecoeffs_c(self._zforceGrid)
        if interpDens:
            from galpy.potential import evaluateDensities
            densGrid= numpy.zeros((len(self._rgrid),len(self._zgrid)))
            for ii in range(len(self._rgrid)):
                for jj in range(len(self._zgrid)):
                    densGrid[ii,jj]= evaluateDensities(self._origPot,self._rgrid[ii],self._zgrid[jj])
            self._densGrid= densGrid
            if self._logR:
                self._densInterp= interpolate.RectBivariateSpline(self._logrgrid,
                                                                  self._zgrid,
                                                                  numpy.log(self._densGrid+10.**-10.),
                                                                  kx=3,ky=3,s=0.)
            else:
                self._densInterp= interpolate.RectBivariateSpline(self._rgrid,
                                                                  self._zgrid,
                                                                  numpy.log(self._densGrid+10.**-10.),
                                                                  kx=3,ky=3,s=0.)
        if interpvcirc:
            from galpy.potential import vcirc
            if not numcores is None:
                self._vcircGrid= multi.parallel_map((lambda x: vcirc(self._origPot,self._rgrid[x])),
                                                    list(range(len(self._rgrid))),numcores=numcores)
            else:
                self._vcircGrid= numpy.array([vcirc(self._origPot,r) for r in self._rgrid])
            if self._logR:
                self._vcircInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid,self._vcircGrid,k=3)
            else:
                self._vcircInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid,self._vcircGrid,k=3)
        if interpdvcircdr:
            from galpy.potential import dvcircdR
            if not numcores is None:
                self._dvcircdrGrid= multi.parallel_map((lambda x: dvcircdR(self._origPot,self._rgrid[x])),
                                                       list(range(len(self._rgrid))),numcores=numcores)
            else:
                self._dvcircdrGrid= numpy.array([dvcircdR(self._origPot,r) for r in self._rgrid])
            if self._logR:
                self._dvcircdrInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid,self._dvcircdrGrid,k=3)
            else:
                self._dvcircdrInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid,self._dvcircdrGrid,k=3)
        if interpepifreq:
            from galpy.potential import epifreq
            if not numcores is None:
                self._epifreqGrid= numpy.array(multi.parallel_map((lambda x: epifreq(self._origPot,self._rgrid[x])),
                                                      list(range(len(self._rgrid))),numcores=numcores))
            else:
                self._epifreqGrid= numpy.array([epifreq(self._origPot,r) for r in self._rgrid])
            indx= ~numpy.isnan(self._epifreqGrid)
            if numpy.sum(indx) < 4: # too few finite points for a cubic spline, fall back to linear
                if self._logR:
                    self._epifreqInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid[indx],self._epifreqGrid[indx],k=1)
                else:
                    self._epifreqInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid[indx],self._epifreqGrid[indx],k=1)
            else:
                if self._logR:
                    self._epifreqInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid[indx],self._epifreqGrid[indx],k=3)
                else:
                    self._epifreqInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid[indx],self._epifreqGrid[indx],k=3)
        if interpverticalfreq:
            from galpy.potential import verticalfreq
            if not numcores is None:
                self._verticalfreqGrid= multi.parallel_map((lambda x: verticalfreq(self._origPot,self._rgrid[x])),
                                                       list(range(len(self._rgrid))),numcores=numcores)
            else:
                self._verticalfreqGrid= numpy.array([verticalfreq(self._origPot,r) for r in self._rgrid])
            if self._logR:
                self._verticalfreqInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid,self._verticalfreqGrid,k=3)
            else:
                self._verticalfreqInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid,self._verticalfreqGrid,k=3)
        return None
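A minimal usage sketch for the interpolated potential above, assuming a standard galpy installation: interpolate a Miyamoto-Nagai disk on the default grid and compare the interpolated evaluation against the exact one.

from galpy.potential import MiyamotoNagaiPotential, interpRZPotential

mp= MiyamotoNagaiPotential(a=0.5, b=0.0375, normalize=1.)
ip= interpRZPotential(RZPot=mp, interpPot=True, interpvcirc=True,
                      zsym=True, enable_c=False)
# Interpolated vs. exact potential and circular velocity (internal units)
print(ip(1.1, 0.1), mp(1.1, 0.1))
print(ip.vcirc(1.1), mp.vcirc(1.1))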
Example #29
    def _determine_impact_coordtransform(self,deltaAngleTrackImpact,
                                         nTrackChunksImpact,
                                         timpact,impact_angle):
        """Function that sets up the transformation between (x,v) and (O,theta)"""
        # Integrate the progenitor backward to the time of impact
        self._gap_progenitor_setup()
        # Sign of delta angle tells us whether the impact happens to the
        # leading or trailing arm, self._sigMeanSign contains this info
        if impact_angle > 0.:
            self._gap_leading= True
        else:
            self._gap_leading= False
        if (self._gap_leading and not self._leading) \
                or (not self._gap_leading and self._leading):
            raise ValueError('Modeling leading (trailing) impact for trailing (leading) arm; this is not allowed because it is nonsensical in this framework')
        self._impact_angle= numpy.fabs(impact_angle)
        self._gap_sigMeanSign= 1.
        if (self._gap_leading and self._progenitor_Omega_along_dOmega/self._sigMeanSign < 0.) \
                or (not self._gap_leading and self._progenitor_Omega_along_dOmega/self._sigMeanSign > 0.):
            self._gap_sigMeanSign= -1.
        # Determine how much orbital time is necessary for the progenitor's orbit at the time of impact to cover the part of the stream near the impact; we cover the whole leading (or trailing) part of the stream
        if nTrackChunksImpact is None:
            #default is floor(self._deltaAngleTrackImpact/0.15)+1
            self._nTrackChunksImpact= int(numpy.floor(self._deltaAngleTrackImpact/0.15))+1
        else:
            self._nTrackChunksImpact= nTrackChunksImpact
        dt= self._deltaAngleTrackImpact\
            /self._progenitor_Omega_along_dOmega\
            /self._sigMeanSign*self._gap_sigMeanSign
        self._gap_trackts= numpy.linspace(0.,2*dt,2*self._nTrackChunksImpact-1) #to be sure that we cover it
        #Instantiate an auxiliaryTrack, which is an Orbit instance at the mean frequency of the stream, and zero angle separation wrt the progenitor; prog_stream_offset is the offset between this track and the progenitor at zero angle (same as in streamdf, but just done at the time of impact rather than the current time)
        prog_stream_offset=\
            _determine_stream_track_single(self._aA,
                                           self._gap_progenitor,
                                           self._timpact, # around the t of imp
                                           self._progenitor_angle-self._timpact*self._progenitor_Omega,
                                           self._gap_sigMeanSign,
                                           self._dsigomeanProgDirection,
                                           lambda da: super(streamgapdf,self).meanOmega(da,offset_sign=self._gap_sigMeanSign,tdisrupt=self._tdisrupt-self._timpact),
                                           0.) #angle = 0
        auxiliaryTrack= Orbit(prog_stream_offset[3])
        if dt < 0.:
            self._gap_trackts= numpy.linspace(0.,-2.*dt,2*self._nTrackChunksImpact-1)
            #Flip velocities before integrating
            auxiliaryTrack= auxiliaryTrack.flip()
        auxiliaryTrack.integrate(self._gap_trackts,self._pot)
        if dt < 0.:
            #Flip velocities again
            auxiliaryTrack._orb.orbit[:,1]= -auxiliaryTrack._orb.orbit[:,1]
            auxiliaryTrack._orb.orbit[:,2]= -auxiliaryTrack._orb.orbit[:,2]
            auxiliaryTrack._orb.orbit[:,4]= -auxiliaryTrack._orb.orbit[:,4]
        #Calculate the actions, frequencies, and angle for this auxiliary orbit
        acfs= self._aA.actionsFreqs(auxiliaryTrack(0.),maxn=3)
        auxiliary_Omega= numpy.array([acfs[3],acfs[4],acfs[5]]).reshape(3)
        auxiliary_Omega_along_dOmega= \
            numpy.dot(auxiliary_Omega,self._dsigomeanProgDirection)
        # compute the transformation using _determine_stream_track_single
        allAcfsTrack= numpy.empty((self._nTrackChunksImpact,9))
        alljacsTrack= numpy.empty((self._nTrackChunksImpact,6,6))
        allinvjacsTrack= numpy.empty((self._nTrackChunksImpact,6,6))
        thetasTrack= numpy.linspace(0.,self._deltaAngleTrackImpact,
                                    self._nTrackChunksImpact)
        ObsTrack= numpy.empty((self._nTrackChunksImpact,6))
        ObsTrackAA= numpy.empty((self._nTrackChunksImpact,6))
        detdOdJps= numpy.empty((self._nTrackChunksImpact))
        if self._multi is None:
            for ii in range(self._nTrackChunksImpact):
                multiOut= _determine_stream_track_single(self._aA,
                                           auxiliaryTrack,
                                           self._gap_trackts[ii]*numpy.fabs(self._progenitor_Omega_along_dOmega/auxiliary_Omega_along_dOmega), #this factor accounts for the difference in frequency between the progenitor and the auxiliary track, no timpact bc gap_tracks is relative to timpact
                                           self._progenitor_angle-self._timpact*self._progenitor_Omega,
                                           self._gap_sigMeanSign,
                                           self._dsigomeanProgDirection,
                                           lambda da: super(streamgapdf,self).meanOmega(da,offset_sign=self._gap_sigMeanSign,tdisrupt=self._tdisrupt-self._timpact),
                                           thetasTrack[ii])
                allAcfsTrack[ii,:]= multiOut[0]
                alljacsTrack[ii,:,:]= multiOut[1]
                allinvjacsTrack[ii,:,:]= multiOut[2]
                ObsTrack[ii,:]= multiOut[3]
                ObsTrackAA[ii,:]= multiOut[4]
                detdOdJps[ii]= multiOut[5]
        else:
            multiOut= multi.parallel_map(\
                (lambda x: _determine_stream_track_single(self._aA,
                                           auxiliaryTrack,
                                           self._gap_trackts[x]*numpy.fabs(self._progenitor_Omega_along_dOmega/auxiliary_Omega_along_dOmega), #this factor accounts for the difference in frequency between the progenitor and the auxiliary track, no timpact bc gap_tracks is relative to timpact
                                           self._progenitor_angle-self._timpact*self._progenitor_Omega,
                                           self._gap_sigMeanSign,
                                           self._dsigomeanProgDirection,
                                           lambda da: super(streamgapdf,self).meanOmega(da,offset_sign=self._gap_sigMeanSign,tdisrupt=self._tdisrupt-self._timpact),
                                           thetasTrack[x])),
                range(self._nTrackChunksImpact),
                numcores=numpy.amin([self._nTrackChunksImpact,
                                     multiprocessing.cpu_count(),
                                     self._multi]))
            for ii in range(self._nTrackChunksImpact):
                allAcfsTrack[ii,:]= multiOut[ii][0]
                alljacsTrack[ii,:,:]= multiOut[ii][1]
                allinvjacsTrack[ii,:,:]= multiOut[ii][2]
                ObsTrack[ii,:]= multiOut[ii][3]
                ObsTrackAA[ii,:]= multiOut[ii][4]
                detdOdJps[ii]= multiOut[ii][5]
        #Repeat the track calculation using the previous track, to get closer to it
        for nn in range(self.nTrackIterations):
            if self._multi is None:
                for ii in range(self._nTrackChunksImpact):
                    multiOut= _determine_stream_track_single(self._aA,
                                                             Orbit(ObsTrack[ii,:]),
                                                             0.,
                                                             self._progenitor_angle-self._timpact*self._progenitor_Omega,
                                                             self._gap_sigMeanSign,
                                                             self._dsigomeanProgDirection,
                                                             lambda da: super(streamgapdf,self).meanOmega(da,offset_sign=self._gap_sigMeanSign,tdisrupt=self._tdisrupt-self._timpact),
                                                             thetasTrack[ii])
                    allAcfsTrack[ii,:]= multiOut[0]
                    alljacsTrack[ii,:,:]= multiOut[1]
                    allinvjacsTrack[ii,:,:]= multiOut[2]
                    ObsTrack[ii,:]= multiOut[3]
                    ObsTrackAA[ii,:]= multiOut[4]
                    detdOdJps[ii]= multiOut[5]
            else:
                multiOut= multi.parallel_map(\
                    (lambda x: _determine_stream_track_single(self._aA,Orbit(ObsTrack[x,:]),0.,
                                                              self._progenitor_angle-self._timpact*self._progenitor_Omega,
                                                              self._gap_sigMeanSign,
                                                              self._dsigomeanProgDirection,
                                                              lambda da: super(streamgapdf,self).meanOmega(da,offset_sign=self._gap_sigMeanSign,tdisrupt=self._tdisrupt-self._timpact),
                                                              thetasTrack[x])),
                    range(self._nTrackChunksImpact),
                    numcores=numpy.amin([self._nTrackChunksImpact,
                                         multiprocessing.cpu_count(),
                                         self._multi]))
                for ii in range(self._nTrackChunksImpact):
                    allAcfsTrack[ii,:]= multiOut[ii][0]
                    alljacsTrack[ii,:,:]= multiOut[ii][1]
                    allinvjacsTrack[ii,:,:]= multiOut[ii][2]
                    ObsTrack[ii,:]= multiOut[ii][3]
                    ObsTrackAA[ii,:]= multiOut[ii][4]
                    detdOdJps[ii]= multiOut[ii][5]
        #Store the track
        self._gap_thetasTrack= thetasTrack
        self._gap_ObsTrack= ObsTrack
        self._gap_ObsTrackAA= ObsTrackAA
        self._gap_allAcfsTrack= allAcfsTrack
        self._gap_alljacsTrack= alljacsTrack
        self._gap_allinvjacsTrack= allinvjacsTrack
        self._gap_detdOdJps= detdOdJps
        self._gap_meandetdOdJp= numpy.mean(self._gap_detdOdJps)
        self._gap_logmeandetdOdJp= numpy.log(self._gap_meandetdOdJp)
        #Also calculate _ObsTrackXY in XYZ,vXYZ coordinates
        self._gap_ObsTrackXY= numpy.empty_like(self._gap_ObsTrack)
        TrackX= self._gap_ObsTrack[:,0]*numpy.cos(self._gap_ObsTrack[:,5])
        TrackY= self._gap_ObsTrack[:,0]*numpy.sin(self._gap_ObsTrack[:,5])
        TrackZ= self._gap_ObsTrack[:,3]
        TrackvX, TrackvY, TrackvZ=\
            bovy_coords.cyl_to_rect_vec(self._gap_ObsTrack[:,1],
                                        self._gap_ObsTrack[:,2],
                                        self._gap_ObsTrack[:,4],
                                        self._gap_ObsTrack[:,5])
        self._gap_ObsTrackXY[:,0]= TrackX
        self._gap_ObsTrackXY[:,1]= TrackY
        self._gap_ObsTrackXY[:,2]= TrackZ
        self._gap_ObsTrackXY[:,3]= TrackvX
        self._gap_ObsTrackXY[:,4]= TrackvY
        self._gap_ObsTrackXY[:,5]= TrackvZ
        return None
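The final block converts the track from Galactocentric cylindrical coordinates (R, vR, vT, z, vz, phi) to rectangular ones; the positions are simply X = R cos(phi), Y = R sin(phi), Z = z. A plain-numpy sketch of the velocity rotation (the same convention as bovy_coords.cyl_to_rect_vec) is:

import numpy

def cyl_to_rect_vec(vR, vT, vz, phi):
    # Rotate the in-plane velocity (vR,vT) into (vX,vY); vZ is unchanged
    vX= vR*numpy.cos(phi) - vT*numpy.sin(phi)
    vY= vR*numpy.sin(phi) + vT*numpy.cos(phi)
    return vX, vY, vz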
Example #30
def plotbestr(options,args):
    """Make a plot of a quantity's best-fit vs. FeH and aFe"""
    if options.sample.lower() == 'g':
        npops= 62
    elif options.sample.lower() == 'k':
        npops= 54
    if options.sample.lower() == 'g':
        savefile= open('binmapping_g.sav','rb')
    elif options.sample.lower() == 'k':
        savefile= open('binmapping_k.sav','rb')
    fehs= pickle.load(savefile)
    afes= pickle.load(savefile)
    savefile.close()
    #First calculate the derivative properties
    if not options.multi is None:
        derivProps= multi.parallel_map((lambda x: calcAllSurfErr(x,options,args)),
                                  range(npops),
                                  numcores=numpy.amin([options.multi,
                                                       npops,
                                                       multiprocessing.cpu_count()]))
    else:
        derivProps= []
        for ii in range(npops):
            derivProps.append(calcAllSurfErr(ii,options,args))
    #If a second argument is given, this gives a set of rs at which also to calculate the surface density
    if len(args) > 1:
        if os.path.exists(args[1]):
            surffile= open(args[1],'rb')
            altsurfrs= pickle.load(surffile)
            surffile.close()
            calcExtra= True
        else:
            raise IOError("extra savefilename with surface-densities has to exist when it is specified")
    else:
        calcExtra= False
    if not calcExtra: #Fiducial, for which we also calculate everything at the mean radius of each MAP
        #Load g orbits
        orbitsfile= 'gOrbitsNew.sav'
        savefile= open(orbitsfile,'rb')
        orbits= pickle.load(savefile)
        savefile.close()
        #Cut to S/N, logg, and EBV
        indx= (orbits.sna > 15.)*(orbits.logga > 4.2)*(orbits.ebv < 0.3)
        orbits= orbits[indx]
        #Load the orbits into the pixel structure
        pix= pixelAfeFeh(orbits,dfeh=0.1,dafe=0.05)
        #Now calculate meanr
        rmean= numpy.zeros(npops)
        for ii in range(npops):
            data= pix(fehs[ii],afes[ii])
            vals= data.densrmean*8.
            if False:#True:
                rmean[ii]= numpy.mean(vals)
            else:
                rmean[ii]= numpy.median(vals)
    #Load into plotthis
    plotthis= numpy.zeros(npops)+numpy.nan
    plotthis_y= numpy.zeros(npops)+numpy.nan
    plotthis_y_err= numpy.zeros(npops)+numpy.nan
    plotthiskz_y= numpy.zeros(npops)+numpy.nan
    plotthiskz_y_err= numpy.zeros(npops)+numpy.nan
    altplotthis= numpy.zeros(npops)+numpy.nan
    altplotthis_y= numpy.zeros(npops)+numpy.nan
    altplotthis_y_err= numpy.zeros(npops)+numpy.nan
    altplotthiskz_y= numpy.zeros(npops)+numpy.nan
    altplotthiskz_y_err= numpy.zeros(npops)+numpy.nan
    for ii in range(npops):
        if numpy.log(monoAbundanceMW.hr(fehs[ii],afes[ii],
                                         k=(options.sample.lower() == 'k')) /8.) > -0.5 \
                or (options.sample.lower() == 'g' and (ii < 0 or ii == 50)) \
                or (options.sample.lower() == 'k' and ii < 7):
            continue
        #Determine best-r
        #indx= numpy.argmin(derivProps[ii][:,2]/numpy.fabs(derivProps[ii][:,1]))
        indx= numpy.argmin(numpy.fabs(derivProps[ii][:,2]))
        if indx == 0: indx= int(numpy.floor(numpy.random.uniform()*10))
        plotthis[ii]= derivProps[ii][indx,0]
        plotthis_y[ii]= derivProps[ii][indx,1]
        plotthis_y_err[ii]= derivProps[ii][indx,3]
        plotthiskz_y[ii]= derivProps[ii][indx,4]
        plotthiskz_y_err[ii]= derivProps[ii][indx,5]
        if calcExtra:
            indx= numpy.argmin(numpy.fabs(derivProps[ii][:,0]-altsurfrs[ii]))
            altplotthis[ii]= derivProps[ii][indx,0]
            altplotthis_y[ii]= derivProps[ii][indx,1]
            altplotthis_y_err[ii]= derivProps[ii][indx,3]           
            altplotthiskz_y[ii]= derivProps[ii][indx,4]
            altplotthiskz_y_err[ii]= derivProps[ii][indx,5]           
        else:
            indx= numpy.argmin(numpy.fabs(derivProps[ii][:,0]-rmean[ii]))
            altplotthis[ii]= derivProps[ii][indx,0]
            altplotthis_y[ii]= derivProps[ii][indx,1]
            altplotthis_y_err[ii]= derivProps[ii][indx,3]           
            altplotthiskz_y[ii]= derivProps[ii][indx,4]
            altplotthiskz_y_err[ii]= derivProps[ii][indx,5]           
    #Now plot
    bovy_plot.bovy_print()
    monoAbundanceMW.plotPixelFunc(fehs,afes,plotthis,
                                  zlabel=r'$R_\Sigma\ (\mathrm{kpc})$')
    bovy_plot.bovy_end_print(options.outfilename)
    bovy_plot.bovy_print()
    print(plotthis, plotthis_y)
    bovy_plot.bovy_plot(plotthis,plotthis_y,'ko',
                        xlabel=r'$R\ (\mathrm{kpc})$',
                        ylabel=r'$\Sigma(R,|Z| \leq 1.1\,\mathrm{kpc})\ (M_\odot\,\mathrm{pc}^{-2})$',
                        xrange=[4.,10.],
                        yrange=[10,1050.],#,numpy.nanmin(plotthis_y)-10.,
#                                numpy.nanmax(plotthis_y)+10.],
                        semilogy=True)
    pyplot.errorbar(plotthis,
                    plotthis_y,
                    yerr=plotthis_y_err,
                    elinewidth=1.,capsize=3,zorder=0,
                    color='k',linestyle='none')  
    trs= numpy.linspace(4.3,9.,1001)
    pyplot.plot(trs,72.*numpy.exp(-(trs-8.)/3.),'k--')
    pyplot.plot(trs,72.*numpy.exp(-(trs-8.)/2.),'k-.')
    pyplot.plot(trs,72.*numpy.exp(-(trs-8.)/4.),'k:')
    #Fit exponential
    #indx= (plotthis < 8.)
    #plotthis= plotthis[indx]
    #plotthis_y= plotthis_y[indx]
    #plotthis_y_err= plotthis_y_err[indx]
    exp_params= optimize.fmin_powell(expcurve,
                                     numpy.log(numpy.array([72.,2.5])),
                                     args=(plotthis,plotthis_y,plotthis_y_err))
    pyplot.plot(trs,numpy.exp(exp_params[0]-(trs-8.)/numpy.exp(exp_params[1])),
                'k-',lw=2.)
    print(numpy.exp(exp_params))
    bovy_plot.bovy_end_print(options.outfilename.replace('.png','_rvssurf.png'))
    #Save
    if calcExtra:
        save_pickles(options.outfilename.replace('.png','_rvssurf.sav'),
                     plotthis,plotthis_y,plotthis_y_err,
                     plotthiskz_y,plotthiskz_y_err,
                     altplotthis,altplotthis_y,altplotthis_y_err,
                     altplotthiskz_y,altplotthiskz_y_err)
    else:
        save_pickles(options.outfilename.replace('.png','_rvssurf.sav'),
                     plotthis,plotthis_y,plotthis_y_err,
                     plotthiskz_y,plotthiskz_y_err,
                     altplotthis,altplotthis_y,altplotthis_y_err,
                     altplotthiskz_y,altplotthiskz_y_err)
    return None        
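The expcurve objective minimized above is not included in this excerpt. Given how it is called (the parameters are the log of the surface density at R = 8 kpc and the log of the scale length, and the extra arguments are radii, surface densities, and errors), a hypothetical chi-squared implementation consistent with the plotted model would look like:

import numpy

def expcurve(params, rs, surf, surf_err):
    # Hypothetical stand-in: params= [ln Sigma(R=8 kpc), ln h_R]
    model= numpy.exp(params[0]-(rs-8.)/numpy.exp(params[1]))
    indx= numpy.isfinite(surf)*numpy.isfinite(surf_err)
    return 0.5*numpy.sum(((surf[indx]-model[indx])/surf_err[indx])**2.)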
Example #31
def plotbestz(options,args):
    """Make a plot of a quantity's best-fit vs. FeH and aFe"""
    if options.sample.lower() == 'g':
        npops= 62
    elif options.sample.lower() == 'k':
        npops= 54
    if options.sample.lower() == 'g':
        savefile= open('binmapping_g.sav','rb')
    elif options.sample.lower() == 'k':
        savefile= open('binmapping_k.sav','rb')
    fehs= pickle.load(savefile)
    afes= pickle.load(savefile)
    savefile.close()
    #First calculate the derivative properties
    if not options.multi is None:
        derivProps= multi.parallel_map((lambda x: calcAllSurfErrZ(x,options,args)),
                                  range(npops),
                                  numcores=numpy.amin([options.multi,
                                                       npops,
                                                       multiprocessing.cpu_count()]))
    else:
        derivProps= []
        for ii in range(npops):
            derivProps.append(calcAllSurfErrZ(ii,options,args))
    #Load into plotthis
    plotthis= numpy.zeros(npops)+numpy.nan
    plotthis_y= numpy.zeros(npops)+numpy.nan
    plotthis_y_err= numpy.zeros(npops)+numpy.nan
    for ii in range(npops):
        if numpy.log(monoAbundanceMW.hr(fehs[ii],afes[ii],
                                         k=(options.sample.lower() == 'k')) /8.) > -0.5 \
                or (options.sample.lower() == 'g' and ii < 6) \
                or (options.sample.lower() == 'k' and ii < 7):
            continue
        #Determine best-r
        #indx= numpy.argmin(derivProps[ii][:,2]/numpy.fabs(derivProps[ii][:,1]))
        indx= numpy.argmin(numpy.fabs(derivProps[ii][:,2]))
        plotthis[ii]= derivProps[ii][indx,0]
        plotthis_y[ii]= derivProps[ii][indx,1]
        plotthis_y_err[ii]= derivProps[ii][indx,3]
    #Now plot
    bovy_plot.bovy_print()
    print(plotthis, plotthis_y)
    bovy_plot.bovy_plot(plotthis,plotthis_y,'ko',
                        xlabel=r'$Z\ (\mathrm{kpc})$',
                        ylabel=r'$\Sigma(R_0,|Z|)\ (M_\odot\,\mathrm{pc}^{-2})$',
                        xrange=[0.,5.],
                        yrange=[10.,1050.],#,numpy.nanmin(plotthis_y)-10.,
#                                numpy.nanmax(plotthis_y)+10.],
                        semilogy=True)
    pyplot.errorbar(plotthis,
                    plotthis_y,
                    yerr=plotthis_y_err,
                    elinewidth=1.,capsize=3,zorder=0,
                    color='k',linestyle='none')  
    #trs= numpy.linspace(4.3,9.,1001)
    #pyplot.plot(trs,72.*numpy.exp(-(trs-8.)/3.),'k--')
    #pyplot.plot(trs,72.*numpy.exp(-(trs-8.)/2.),'k-.')
    #pyplot.plot(trs,72.*numpy.exp(-(trs-8.)/4.),'k:')
    bovy_plot.bovy_end_print(options.outfilename.replace('.png','_zvssurf.png'))
    return None        
Example #32
    def __init__(self,pot=None,zmax=1.,gamma=1.,Rmax=5.,
                 nR=16,nEz=16,nEr=31,nLz=31,numcores=1,
                 **kwargs):
        """
        NAME:
           __init__
        PURPOSE:
           initialize an actionAngleAdiabaticGrid object
        INPUT:

           pot= potential or list of potentials

           zmax= zmax for building Ez grid

           Rmax = Rmax for building grids

           gamma= (default=1.) replace Lz by Lz+gamma Jz in effective potential

           nEz=, nEr=, nLz, nR= grid size

           numcores= number of cpus to use to parallelize

           c= if True, use C to calculate actions

           +scipy.integrate.quad keywords
        OUTPUT:
        HISTORY:
            2012-07-27 - Written - Bovy (IAS@MPIA)
        """
        if pot is None: #pragma: no cover
            raise IOError("Must specify pot= for actionAngleAdiabaticGrid")
        if 'c' in kwargs and kwargs['c']:
            self._c= True
            kwargs.pop('c')
        else:
            self._c= False
            if 'c' in kwargs: kwargs.pop('c')
        self._gamma= gamma
        self._pot= pot
        self._zmax= zmax
        self._Rmax= Rmax
        self._Rmin= 0.01
        #Set up the actionAngleAdiabatic object that we will use to interpolate
        self._aA= actionAngleAdiabatic(pot=self._pot,gamma=self._gamma,
                                       c=self._c)
        #Build grid for Ez, first calculate Ez(zmax;R) function
        self._Rs= numpy.linspace(self._Rmin,self._Rmax,nR)
        self._EzZmaxs= galpy.potential.evaluatePotentials(self._Rs,self._zmax*numpy.ones(nR),self._pot)\
            -galpy.potential.evaluatePotentials(self._Rs,numpy.zeros(nR),self._pot)
        self._EzZmaxsInterp= interpolate.InterpolatedUnivariateSpline(self._Rs,numpy.log(self._EzZmaxs),k=3)
        y= numpy.linspace(0.,1.,nEz)
        jz= numpy.zeros((nR,nEz))
        jzEzzmax= numpy.zeros(nR)
        thisRs= (numpy.tile(self._Rs,(nEz,1)).T).flatten()
        thisEzZmaxs= (numpy.tile(self._EzZmaxs,(nEz,1)).T).flatten()
        thisy= (numpy.tile(y,(nR,1))).flatten()
        if self._c:
            jz= self._aA(thisRs,
                         numpy.zeros(len(thisRs)),
                         numpy.ones(len(thisRs)),#these two r dummies
                         numpy.zeros(len(thisRs)),
                         numpy.sqrt(2.*thisy*thisEzZmaxs),
                         **kwargs)[2]
            jz= numpy.reshape(jz,(nR,nEz))
            jzEzzmax[0:nR]= jz[:,nEz-1]
        else:
            if numcores > 1:
                jz= multi.parallel_map((lambda x: self._aA(thisRs[x],0.,1.,#these two r dummies
                                                              0.,math.sqrt(2.*thisy[x]*thisEzZmaxs[x]),
                                                           _justjz=True,
                                                           **kwargs)[2]),
                                       range(nR*nEz),numcores=numcores)
                jz= numpy.reshape(jz,(nR,nEz))
                jzEzzmax[0:nR]= jz[:,nEz-1]
            else:
                for ii in range(nR):
                    for jj in range(nEz):
                        #Calculate Jz
                        jz[ii,jj]= self._aA(self._Rs[ii],0.,1.,#these two r dummies
                                            0.,numpy.sqrt(2.*y[jj]*self._EzZmaxs[ii]),
                                            _justjz=True,**kwargs)[2]
                        if jj == nEz-1: 
                            jzEzzmax[ii]= jz[ii,jj]
        for ii in range(nR): jz[ii,:]/= jzEzzmax[ii]
        #First interpolate Ez=Ezmax
        self._jzEzmaxInterp= interpolate.InterpolatedUnivariateSpline(self._Rs,numpy.log(jzEzzmax+10.**-5.),k=3)
        self._jz= jz
        self._jzInterp= interpolate.RectBivariateSpline(self._Rs,
                                                        y,
                                                        jz,
                                                        kx=3,ky=3,s=0.)
        #JR grid
        self._Lzmin= 0.01
        self._Lzs= numpy.linspace(self._Lzmin,
                                  self._Rmax\
                                      *galpy.potential.vcirc(self._pot,
                                                             self._Rmax),
                                  nLz)
        self._Lzmax= self._Lzs[-1]
        #Calculate ER(vr=0,R=RL)
        self._RL= numpy.array([galpy.potential.rl(self._pot,l) for l in self._Lzs])
        self._RLInterp= interpolate.InterpolatedUnivariateSpline(self._Lzs,
                                                                 self._RL,k=3)
        self._ERRL= galpy.potential.evaluatePotentials(self._RL,numpy.zeros(nLz),self._pot) +self._Lzs**2./2./self._RL**2.
        self._ERRLmax= numpy.amax(self._ERRL)+1.
        self._ERRLInterp= interpolate.InterpolatedUnivariateSpline(self._Lzs,
                                                                   numpy.log(-(self._ERRL-self._ERRLmax)),k=3)
        self._Ramax= 99.
        self._ERRa= galpy.potential.evaluatePotentials(self._Ramax,0.,self._pot) +self._Lzs**2./2./self._Ramax**2.
        self._ERRamax= numpy.amax(self._ERRa)+1.
        self._ERRaInterp= interpolate.InterpolatedUnivariateSpline(self._Lzs,
                                                                   numpy.log(-(self._ERRa-self._ERRamax)),k=3)
        y= numpy.linspace(0.,1.,nEr)
        jr= numpy.zeros((nLz,nEr))
        jrERRa= numpy.zeros(nLz)
        thisRL= (numpy.tile(self._RL,(nEr-1,1)).T).flatten()
        thisLzs= (numpy.tile(self._Lzs,(nEr-1,1)).T).flatten()
        thisERRL= (numpy.tile(self._ERRL,(nEr-1,1)).T).flatten()
        thisERRa= (numpy.tile(self._ERRa,(nEr-1,1)).T).flatten()
        thisy= (numpy.tile(y[0:-1],(nLz,1))).flatten()
        if self._c:
            mjr= self._aA(thisRL,
                          numpy.sqrt(2.*(thisERRa+thisy*(thisERRL-thisERRa)-galpy.potential.evaluatePotentials(thisRL,numpy.zeros((nEr-1)*nLz),self._pot))-thisLzs**2./thisRL**2.),
                          thisLzs/thisRL,
                          numpy.zeros(len(thisRL)),
                          numpy.zeros(len(thisRL)),
                          **kwargs)[0]
            jr[:,0:-1]= numpy.reshape(mjr,(nLz,nEr-1))
            jrERRa[0:nLz]= jr[:,0]
        else:
            if numcores > 1:
                mjr= multi.parallel_map((lambda x: self._aA(thisRL[x],
                                                          numpy.sqrt(2.*(thisERRa[x]+thisy[x]*(thisERRL[x]-thisERRa[x])-galpy.potential.evaluatePotentials(thisRL[x],0.,self._pot))-thisLzs[x]**2./thisRL[x]**2.),
                                                               thisLzs[x]/thisRL[x],
                                                               0.,0.,
                                                            _justjr=True,
                                                            **kwargs)[0]),
                                        range((nEr-1)*nLz),
                                        numcores=numcores)
                jr[:,0:-1]= numpy.reshape(mjr,(nLz,nEr-1))
                jrERRa[0:nLz]= jr[:,0]
            else:
                for ii in range(nLz):
                    for jj in range(nEr-1): #Last one is zero by construction
                        try:
                            jr[ii,jj]= self._aA(self._RL[ii],
                                                numpy.sqrt(2.*(self._ERRa[ii]+y[jj]*(self._ERRL[ii]-self._ERRa[ii])-galpy.potential.evaluatePotentials(self._RL[ii],0.,self._pot))-self._Lzs[ii]**2./self._RL[ii]**2.),
                                                self._Lzs[ii]/self._RL[ii],
                                                0.,0.,
                                                _justjr=True,
                                                   **kwargs)[0]
                        except UnboundError: #pragma: no cover
                            raise
                        if jj == 0: 
                            jrERRa[ii]= jr[ii,jj]
        for ii in range(nLz): jr[ii,:]/= jrERRa[ii]
        #First interpolate Ez=Ezmax
        self._jr= jr
        self._jrERRaInterp= interpolate.InterpolatedUnivariateSpline(self._Lzs,
                                                                     numpy.log(jrERRa+10.**-5.),k=3)
        self._jrInterp= interpolate.RectBivariateSpline(self._Lzs,
                                                        y,
                                                        jr,
                                                        kx=3,ky=3,s=0.)
        return None
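A minimal usage sketch for the grid-based adiabatic action calculation above, assuming a standard galpy installation; the call returns (J_R, L_z, J_z) in internal units:

from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleAdiabaticGrid

aAG= actionAngleAdiabaticGrid(pot=MWPotential2014, nR=16, nEz=16,
                              nEr=31, nLz=31, numcores=1)
# (R, vR, vT, z, vz) of a roughly solar-neighbourhood orbit, internal units
jr, lz, jz= aAG(1., 0.1, 1.1, 0., 0.05)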
Example #33
def plot2d(options,args):
    """Make a plot of a quantity's best-fit vs. FeH and aFe"""
    if options.sample.lower() == 'g':
        npops= 62
    elif options.sample.lower() == 'k':
        npops= 54
    if options.sample.lower() == 'g':
        savefile= open('binmapping_g.sav','rb')
    elif options.sample.lower() == 'k':
        savefile= open('binmapping_k.sav','rb')
    fehs= pickle.load(savefile)
    afes= pickle.load(savefile)
    savefile.close()
    #First calculate the derivative properties
    if not options.multi is None:
        derivProps= multi.parallel_map((lambda x: calcAllDerivProps(x,options,args)),
                                  range(npops),
                                  numcores=numpy.amin([options.multi,
                                                       npops,
                                                       multiprocessing.cpu_count()]))
    else:
        derivProps= []
        for ii in range(npops):
            derivProps.append(calcAllDerivProps(ii,options,args))
    xprop= options.subtype.split(',')[0]
    yprop= options.subtype.split(',')[1]
    if xprop == 'fracfaint' or yprop == 'fracfaint':
        #Read the data
        print "Reading the data ..."
        raw= read_rawdata(options)
        #Bin the data
        binned= pixelAfeFeh(raw,dfeh=0.1,dafe=0.05)
        for ii in range(npops):
            if numpy.log(monoAbundanceMW.hr(fehs[ii],afes[ii],
                                            k=(options.sample.lower() == 'k')) /8.) > -0.5 \
                    or (options.sample.lower() == 'g' and (ii == 50 or ii == 57)) \
                    or (options.sample.lower() == 'k' and ii < 7):
                continue
            data= binned(fehs[ii],afes[ii])
            indx= (data.dered_r > 17.8)
            derivProps[ii]['fracfaint']= numpy.sum(indx)/float(len(indx))
            derivProps[ii]['fracfaint_err']= 0.
    if xprop == 'nfaint' or yprop == 'nfaint':
        #Read the data
        print "Reading the data ..."
        raw= read_rawdata(options)
        #Bin the data
        binned= pixelAfeFeh(raw,dfeh=0.1,dafe=0.05)
        for ii in range(npops):
            if numpy.log(monoAbundanceMW.hr(fehs[ii],afes[ii],
                                            k=(options.sample.lower() == 'k')) /8.) > -0.5 \
                    or (options.sample.lower() == 'g' and ii < 6) \
                    or (options.sample.lower() == 'k' and ii < 7):
                continue
            data= binned(fehs[ii],afes[ii])
            indx= (data.dered_r > 17.8)
            derivProps[ii]['nfaint']= numpy.sum(indx)
            derivProps[ii]['nfaint_err']= 0.
    if xprop == 'hz' or yprop == 'hz':
        for ii in range(npops):
            if numpy.log(monoAbundanceMW.hr(fehs[ii],afes[ii],
                                            k=(options.sample.lower() == 'k')) /8.) > -0.5 \
                    or (options.sample.lower() == 'g' and ii < 6) \
                    or (options.sample.lower() == 'k' and ii < 7):
                continue
            hz, hzerr= monoAbundanceMW.hz(fehs[ii],afes[ii],
                                          k=(options.sample.lower() == 'k'),
                                          err=True)
            derivProps[ii]['hz']= hz
            derivProps[ii]['hz_err']= hzerr    
    if xprop == 'hr' or yprop == 'hr':
        for ii in range(npops):
            if numpy.log(monoAbundanceMW.hr(fehs[ii],afes[ii],
                                            k=(options.sample.lower() == 'k')) /8.) > -0.5 \
                    or (options.sample.lower() == 'g' and ii < 6) \
                    or (options.sample.lower() == 'k' and ii < 7):
                continue
            hr, hrerr= monoAbundanceMW.hr(fehs[ii],afes[ii],
                                          k=(options.sample.lower() == 'k'),
                                          err=True)
            derivProps[ii]['hr']= hr
            derivProps[ii]['hr_err']= hrerr    
    #Load into plotthis
    plotthis_x= numpy.zeros(npops)+numpy.nan
    plotthis_y= numpy.zeros(npops)+numpy.nan
    plotthis_x_err= numpy.zeros(npops)+numpy.nan
    plotthis_y_err= numpy.zeros(npops)+numpy.nan
    for ii in range(npops):
        if numpy.log(monoAbundanceMW.hr(fehs[ii],afes[ii],
                                         k=(options.sample.lower() == 'k')) /8.) > -0.5 \
                or (options.sample.lower() == 'g' and ii < 6) \
                or (options.sample.lower() == 'k' and ii < 7):
            continue
        plotthis_x[ii]= derivProps[ii][xprop]
        plotthis_y[ii]= derivProps[ii][yprop]
        plotthis_x_err[ii]= derivProps[ii][xprop+'_err']
        plotthis_y_err[ii]= derivProps[ii][yprop+'_err']
    #Now plot
    bovy_plot.bovy_print(fig_width=6.)
    bovy_plot.bovy_plot(plotthis_x,plotthis_y,
                        s=25.,c=afes,
                        cmap='jet',
                        xlabel=labels[xprop],ylabel=labels[yprop],
                        clabel=r'$[\alpha/\mathrm{Fe}]$',
                        xrange=ranges[xprop],yrange=ranges[yprop],
                        vmin=0.,vmax=0.5,
                        scatter=True,edgecolors='none',
                        colorbar=True)
    colormap = cm.jet
    for ii in range(npops):
        if numpy.isnan(plotthis_x[ii]): continue
        pyplot.errorbar(plotthis_x[ii],
                        plotthis_y[ii],
                        xerr=plotthis_x_err[ii],
                        yerr=plotthis_y_err[ii],
                        color=colormap(_squeeze(afes[ii],
                                                numpy.amax([numpy.amin(afes)]),
                                                numpy.amin([numpy.amax(afes)]))),
                        elinewidth=1.,capsize=3,zorder=0)  
    bovy_plot.bovy_end_print(options.outfilename)
    return None        
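The _squeeze helper used above to map [α/Fe] onto the colormap is not shown in this excerpt; a hypothetical stand-in that clips a value to the given range and rescales it to [0, 1] would be:

import numpy

def _squeeze(x, vmin, vmax):
    # Hypothetical helper: clip to [vmin, vmax], then rescale to [0, 1]
    return numpy.clip((x-vmin)/(vmax-vmin), 0., 1.)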
Example #34
    def __init__(self,
                 pot=None,
                 zmax=1.,
                 gamma=1.,
                 Rmax=5.,
                 nR=16,
                 nEz=16,
                 nEr=31,
                 nLz=31,
                 numcores=1,
                 **kwargs):
        """
        NAME:
           __init__
        PURPOSE:
           initialize an actionAngleAdiabaticGrid object
        INPUT:

           pot= potential or list of potentials

           zmax= zmax for building Ez grid

           Rmax = Rmax for building grids

           gamma= (default=1.) replace Lz by Lz+gamma Jz in effective potential

           nEz=, nEr=, nLz, nR= grid size

           numcores= number of cpus to use to parallelize

           c= if True, use C to calculate actions

           ro= distance from vantage point to GC (kpc; can be Quantity)

           vo= circular velocity at ro (km/s; can be Quantity)

           +scipy.integrate.quad keywords

        OUTPUT:

           instance

        HISTORY:

            2012-07-27 - Written - Bovy (IAS@MPIA)

        """
        actionAngle.__init__(self,
                             ro=kwargs.get('ro', None),
                             vo=kwargs.get('vo', None))
        if pot is None:  #pragma: no cover
            raise IOError("Must specify pot= for actionAngleAxi")
        self._c = kwargs.pop('c', False)
        self._gamma = gamma
        self._pot = pot
        self._zmax = zmax
        self._Rmax = Rmax
        self._Rmin = 0.01
        #Set up the actionAngleAdiabatic object that we will use to interpolate
        self._aA = actionAngleAdiabatic(pot=self._pot,
                                        gamma=self._gamma,
                                        c=self._c)
        #Build grid for Ez, first calculate Ez(zmax;R) function
        self._Rs = numpy.linspace(self._Rmin, self._Rmax, nR)
        self._EzZmaxs = (_evaluatePotentials(self._pot, self._Rs,
                                             self._zmax * numpy.ones(nR))
                         - _evaluatePotentials(self._pot, self._Rs,
                                               numpy.zeros(nR)))
        self._EzZmaxsInterp = interpolate.InterpolatedUnivariateSpline(
            self._Rs, numpy.log(self._EzZmaxs), k=3)
        y = numpy.linspace(0., 1., nEz)
        jz = numpy.zeros((nR, nEz))
        jzEzzmax = numpy.zeros(nR)
        thisRs = (numpy.tile(self._Rs, (nEz, 1)).T).flatten()
        thisEzZmaxs = (numpy.tile(self._EzZmaxs, (nEz, 1)).T).flatten()
        thisy = (numpy.tile(y, (nR, 1))).flatten()
        if self._c:
            jz = self._aA(
                thisRs,
                numpy.zeros(len(thisRs)),
                numpy.ones(len(thisRs)),  # these two are dummies
                numpy.zeros(len(thisRs)),
                numpy.sqrt(2. * thisy * thisEzZmaxs),
                **kwargs)[2]
            jz = numpy.reshape(jz, (nR, nEz))
            jzEzzmax[0:nR] = jz[:, nEz - 1]
        else:
            if numcores > 1:
                jz = multi.parallel_map(
                    (
                        lambda x: self._aA(
                            thisRs[x],
                            0.,
                            1.,  # these two are dummies
                            0.,
                            math.sqrt(2. * thisy[x] * thisEzZmaxs[x]),
                            _justjz=True,
                            **kwargs)[2]),
                    range(nR * nEz),
                    numcores=numcores)
                jz = numpy.reshape(jz, (nR, nEz))
                jzEzzmax[0:nR] = jz[:, nEz - 1]
            else:
                for ii in range(nR):
                    for jj in range(nEz):
                        #Calculate Jz
                        jz[ii, jj] = self._aA(
                            self._Rs[ii],
                            0.,
                            1.,  # these two are dummies
                            0.,
                            numpy.sqrt(2. * y[jj] * self._EzZmaxs[ii]),
                            _justjz=True,
                            **kwargs)[2]
                        if jj == nEz - 1:
                            jzEzzmax[ii] = jz[ii, jj]
        for ii in range(nR):
            jz[ii, :] /= jzEzzmax[ii]
        #First interpolate Ez=Ezmax
        self._jzEzmaxInterp = interpolate.InterpolatedUnivariateSpline(
            self._Rs, numpy.log(jzEzzmax + 10.**-5.), k=3)
        self._jz = jz
        self._jzInterp = interpolate.RectBivariateSpline(self._Rs,
                                                         y,
                                                         jz,
                                                         kx=3,
                                                         ky=3,
                                                         s=0.)
        #JR grid
        self._Lzmin = 0.01
        self._Lzs = numpy.linspace(
            self._Lzmin,
            self._Rmax * galpy.potential.vcirc(self._pot, self._Rmax),
            nLz)
        self._Lzmax = self._Lzs[-1]
        #Calculate ER(vr=0,R=RL)
        self._RL = numpy.array(
            [galpy.potential.rl(self._pot, l) for l in self._Lzs])
        self._RLInterp = interpolate.InterpolatedUnivariateSpline(self._Lzs,
                                                                  self._RL,
                                                                  k=3)
        self._ERRL = _evaluatePotentials(
            self._pot, self._RL,
            numpy.zeros(nLz)) + self._Lzs**2. / 2. / self._RL**2.
        self._ERRLmax = numpy.amax(self._ERRL) + 1.
        self._ERRLInterp = interpolate.InterpolatedUnivariateSpline(
            self._Lzs, numpy.log(-(self._ERRL - self._ERRLmax)), k=3)
        self._Ramax = 99.
        self._ERRa = _evaluatePotentials(
            self._pot, self._Ramax, 0.) + self._Lzs**2. / 2. / self._Ramax**2.
        self._ERRamax = numpy.amax(self._ERRa) + 1.
        self._ERRaInterp = interpolate.InterpolatedUnivariateSpline(
            self._Lzs, numpy.log(-(self._ERRa - self._ERRamax)), k=3)
        y = numpy.linspace(0., 1., nEr)
        jr = numpy.zeros((nLz, nEr))
        jrERRa = numpy.zeros(nLz)
        thisRL = (numpy.tile(self._RL, (nEr - 1, 1)).T).flatten()
        thisLzs = (numpy.tile(self._Lzs, (nEr - 1, 1)).T).flatten()
        thisERRL = (numpy.tile(self._ERRL, (nEr - 1, 1)).T).flatten()
        thisERRa = (numpy.tile(self._ERRa, (nEr - 1, 1)).T).flatten()
        thisy = (numpy.tile(y[0:-1], (nLz, 1))).flatten()
        if self._c:
            mjr = self._aA(
                thisRL,
                numpy.sqrt(2. *
                           (thisERRa + thisy *
                            (thisERRL - thisERRa) - _evaluatePotentials(
                                self._pot, thisRL, numpy.zeros(
                                    (nEr - 1) * nLz))) -
                           thisLzs**2. / thisRL**2.), thisLzs / thisRL,
                numpy.zeros(len(thisRL)), numpy.zeros(len(thisRL)),
                **kwargs)[0]
            jr[:, 0:-1] = numpy.reshape(mjr, (nLz, nEr - 1))
            jrERRa[0:nLz] = jr[:, 0]
        else:
            if numcores > 1:
                mjr = multi.parallel_map((lambda x: self._aA(
                    thisRL[x],
                    numpy.sqrt(2. *
                               (thisERRa[x] + thisy[x] *
                                (thisERRL[x] - thisERRa[x]) -
                                _evaluatePotentials(self._pot, thisRL[
                                    x], 0.)) - thisLzs[x]**2. / thisRL[x]**2.),
                    thisLzs[x] / thisRL[x],
                    0.,
                    0.,
                    _justjr=True,
                    **kwargs)[0]),
                                         range((nEr - 1) * nLz),
                                         numcores=numcores)
                jr[:, 0:-1] = numpy.reshape(mjr, (nLz, nEr - 1))
                jrERRa[0:nLz] = jr[:, 0]
            else:
                for ii in range(nLz):
                    for jj in range(nEr -
                                    1):  #Last one is zero by construction
                        try:
                            jr[ii, jj] = self._aA(
                                self._RL[ii],
                                numpy.sqrt(2. *
                                           (self._ERRa[ii] + y[jj] *
                                            (self._ERRL[ii] - self._ERRa[ii]) -
                                            _evaluatePotentials(
                                                self._pot, self._RL[ii], 0.)) -
                                           self._Lzs[ii]**2. /
                                           self._RL[ii]**2.),
                                self._Lzs[ii] / self._RL[ii],
                                0.,
                                0.,
                                _justjr=True,
                                **kwargs)[0]
                        except UnboundError:  #pragma: no cover
                            raise
                        if jj == 0:
                            jrERRa[ii] = jr[ii, jj]
        for ii in range(nLz):
            jr[ii, :] /= jrERRa[ii]
        #First interpolate jr at ER=ERRa
        self._jr = jr
        self._jrERRaInterp = interpolate.InterpolatedUnivariateSpline(
            self._Lzs, numpy.log(jrERRa + 10.**-5.), k=3)
        self._jrInterp = interpolate.RectBivariateSpline(self._Lzs,
                                                         y,
                                                         jr,
                                                         kx=3,
                                                         ky=3,
                                                         s=0.)
        # Check the units
        self._check_consistent_units()
        return None
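Once the grids above are built, the object is meant to be used like any other galpy actionAngle instance, with the grid interpolation replacing the expensive direct calculation. A minimal usage sketch, assuming a galpy version in which MWPotential2014 and the c= option are available; the phase-space point is arbitrary and in galpy's natural units (R0 = 1, vc(R0) = 1):

from galpy.actionAngle import actionAngleAdiabaticGrid
from galpy.potential import MWPotential2014

# Building the grids is the slow part; do it once and reuse the object.
aAG = actionAngleAdiabaticGrid(pot=MWPotential2014, nR=16, nEz=16,
                               nEr=31, nLz=31, c=True, numcores=2)

# Evaluate the actions at (R, vR, vT, z, vz); returns (JR, Lz, Jz).
jr, lz, jz = aAG(1.0, 0.1, 1.1, 0.05, 0.02)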
Example #35
 def __init__(
     self, pot=None, zmax=3.0 / 8.0, gamma=1.0, Rmax=3.0, nR=25, nEz=25, nEr=25, nLz=25, numcores=1, **kwargs
 ):
     """
     NAME:
        __init__
     PURPOSE:
        initialize an actionAngleAdiabaticGrid object
     INPUT:
        pot= potential or list of potentials (planarPotentials)
        zmax= zmax for building Ez grid
        Rmax = Rmax for building grids
        gamma= (default=1.) replace Lz by Lz+gamma Jz in effective potential
        nEz=, nEr=, nLz=, nR= grid size
        numcores= number of cpus to use to parallelize
        +scipy.integrate.quad keywords
     OUTPUT:
        instance
     HISTORY:
         2012-07-27 - Written - Bovy (IAS@MPIA)
     """
     if pot is None:
         raise IOError("Must specify pot= for actionAngleAxi")
     self._gamma = gamma
     self._pot = pot
     self._zmax = zmax
     self._Rmax = Rmax
     self._Rmin = 0.01
     # Set up the actionAngleAdiabatic object that we will use to interpolate
     self._aA = actionAngleAdiabatic(pot=self._pot, gamma=self._gamma)
     # Build grid for Ez, first calculate Ez(zmax;R) function
     self._Rs = numpy.linspace(self._Rmin, self._Rmax, nR)
     self._EzZmaxs = numpy.array(
         [
             galpy.potential.evaluatePotentials(r, self._zmax, self._pot)
             - galpy.potential.evaluatePotentials(r, 0.0, self._pot)
             for r in self._Rs
         ]
     )
     self._EzZmaxsInterp = interpolate.InterpolatedUnivariateSpline(self._Rs, numpy.log(self._EzZmaxs), k=3)
     y = numpy.linspace(0.0, 1.0, nEz)
     jz = numpy.zeros((nR, nEz))
     jzEzzmax = numpy.zeros(nR)
     if numcores > 1:
         thisRs = (numpy.tile(self._Rs, (nEz, 1)).T).flatten()
         thisEzZmaxs = (numpy.tile(self._EzZmaxs, (nEz, 1)).T).flatten()
         thisy = (numpy.tile(y, (nR, 1))).flatten()
         jz = multi.parallel_map(
             (
                 lambda x: self._aA.Jz(
                     thisRs[x],
                     0.0,
                     1.0,  # these two are dummies
                     0.0,
                     math.sqrt(2.0 * thisy[x] * thisEzZmaxs[x]),
                     **kwargs
                 )[0]
             ),
             range(nR * nEz),
             numcores=numcores,
         )
         jz = numpy.reshape(jz, (nR, nEz))
         jzEzzmax[0:nR] = jz[:, nEz - 1]
     else:
         for ii in range(nR):
             for jj in range(nEz):
                 # Calculate Jz
                 jz[ii, jj] = self._aA.Jz(
                     self._Rs[ii],
                     0.0,
                     1.0,  # these two are dummies
                     0.0,
                     math.sqrt(2.0 * y[jj] * self._EzZmaxs[ii]),
                     **kwargs
                 )[0]
                 if jj == nEz - 1:
                     jzEzzmax[ii] = jz[ii, jj]
     for ii in range(nR):
         jz[ii, :] /= jzEzzmax[ii]
     # First interpolate Ez=Ezmax
     self._jzEzmaxInterp = interpolate.InterpolatedUnivariateSpline(
         self._Rs, numpy.log(jzEzzmax + 10.0 ** -5.0), k=3
     )
     self._jz = jz
     self._jzInterp = interpolate.RectBivariateSpline(self._Rs, y, jz, kx=3, ky=3, s=0.0)
     # JR grid
     self._Lzmin = 0.01
     self._Lzs = numpy.linspace(self._Lzmin, self._Rmax * galpy.potential.vcirc(self._pot, self._Rmax), nLz)
     self._Lzmax = self._Lzs[-1]
     # Calculate ER(vr=0,R=RL)
     self._RL = numpy.array([galpy.potential.rl(self._pot, l) for l in self._Lzs])
     self._RLInterp = interpolate.InterpolatedUnivariateSpline(self._Lzs, self._RL, k=3)
     self._ERRL = numpy.array(
         [
             galpy.potential.evaluatePotentials(self._RL[ii], 0.0, self._pot)
             + self._Lzs[ii] ** 2.0 / 2.0 / self._RL[ii] ** 2.0
             for ii in range(nLz)
         ]
     )
     self._ERRLmax = numpy.amax(self._ERRL) + 1.0
     self._ERRLInterp = interpolate.InterpolatedUnivariateSpline(
         self._Lzs, numpy.log(-(self._ERRL - self._ERRLmax)), k=3
     )
     self._Ramax = 99.0
     self._ERRa = numpy.array(
         [
             galpy.potential.evaluatePotentials(self._Ramax, 0.0, self._pot)
             + self._Lzs[ii] ** 2.0 / 2.0 / self._Ramax ** 2.0
             for ii in range(nLz)
         ]
     )
     self._ERRamax = numpy.amax(self._ERRa) + 1.0
     self._ERRaInterp = interpolate.InterpolatedUnivariateSpline(
         self._Lzs, numpy.log(-(self._ERRa - self._ERRamax)), k=3
     )
     y = numpy.linspace(0.0, 1.0, nEr)
     jr = numpy.zeros((nLz, nEr))
     jrERRa = numpy.zeros(nLz)
     if numcores > 1:
         thisRL = (numpy.tile(self._RL, (nEr - 1, 1)).T).flatten()
         thisLzs = (numpy.tile(self._Lzs, (nEr - 1, 1)).T).flatten()
         thisERRL = (numpy.tile(self._ERRL, (nEr - 1, 1)).T).flatten()
         thisERRa = (numpy.tile(self._ERRa, (nEr - 1, 1)).T).flatten()
         thisy = (numpy.tile(y[0:-1], (nLz, 1))).flatten()
         mjr = multi.parallel_map(
             (
                 lambda x: self._aA.JR(
                     thisRL[x],
                     numpy.sqrt(
                         2.0
                         * (
                             thisERRa[x]
                             + thisy[x] * (thisERRL[x] - thisERRa[x])
                             - galpy.potential.evaluatePotentials(thisRL[x], 0.0, self._pot)
                         )
                         - thisLzs[x] ** 2.0 / thisRL[x] ** 2.0
                     ),
                     thisLzs[x] / thisRL[x],
                     0.0,
                     0.0,
                     **kwargs
                 )[0]
             ),
             range((nEr - 1) * nLz),
             numcores=numcores,
         )
         jr[:, 0:-1] = numpy.reshape(mjr, (nLz, nEr - 1))
         jrERRa[0:nLz] = jr[:, 0]
     else:
         for ii in range(nLz):
             for jj in range(nEr - 1):  # Last one is zero by construction
                 try:
                     jr[ii, jj] = self._aA.JR(
                         self._RL[ii],
                         numpy.sqrt(
                             2.0
                             * (
                                 self._ERRa[ii]
                                 + y[jj] * (self._ERRL[ii] - self._ERRa[ii])
                                 - galpy.potential.evaluatePotentials(self._RL[ii], 0.0, self._pot)
                             )
                             - self._Lzs[ii] ** 2.0 / self._RL[ii] ** 2.0
                         ),
                         self._Lzs[ii] / self._RL[ii],
                         0.0,
                         0.0,
                         **kwargs
                     )[0]
                 except UnboundError:
                     raise
                 if jj == 0:
                     jrERRa[ii] = jr[ii, jj]
     for ii in range(nLz):
         jr[ii, :] /= jrERRa[ii]
     # First interpolate jr at ER=ERRa
     self._jr = jr
     self._jrERRaInterp = interpolate.InterpolatedUnivariateSpline(self._Lzs, numpy.log(jrERRa + 10.0 ** -5.0), k=3)
     self._jrInterp = interpolate.RectBivariateSpline(self._Lzs, y, jr, kx=3, ky=3, s=0.0)
     return None
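Both this older constructor and the one in Example #34 store Jz on a grid in (R, y) with y = Ez/Ez(zmax; R), normalized by its value at y = 1, together with 1D splines of that maximum and of Ez(zmax; R) in log space; JR is gridded analogously over Lz and a normalized radial energy. The payoff is that the normalized action surfaces are smooth and cheap to interpolate. A sketch of the lookup these grids make possible, assuming the adiabatic vertical energy Ez = vz**2/2 + Phi(R, z) - Phi(R, 0); this is only an illustration of the idea, not the class's actual __call__, and pot_diff plus the function name are made up:

import numpy

def jz_from_grid(R, z, vz, pot_diff, jzInterp, jzEzmaxInterp, EzZmaxsInterp):
    # pot_diff(R, z) should return Phi(R, z) - Phi(R, 0) for the potential
    # the grids were built with.
    Ez = 0.5 * vz**2. + pot_diff(R, z)          # vertical energy
    # Fraction of the maximum vertical energy at this R; this is the 'y'
    # coordinate of the 2D grid (Ez(zmax; R) was stored in log space).
    y = Ez / float(numpy.exp(EzZmaxsInterp(R)))
    # Interpolate the normalized Jz and undo the normalization
    # (the maximum was stored as log(jzEzmax + 1e-5)).
    jzmax = float(numpy.exp(jzEzmaxInterp(R))) - 1e-5
    return float(jzInterp(R, y)[0, 0]) * jzmax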