def calc_effsel(args, options, sample=None):
    # Work-horse function to compute the effective selection function,
    # sample is a data sample of stars to consider for the (JK,Z) sampling
    # Setup selection function
    selectFile = '../savs/selfunc-nospdata.sav'
    if os.path.exists(selectFile):
        with open(selectFile, 'rb') as savefile:
            apo = pickle.load(savefile)
    else:
        # Setup selection function
        apo = apogee.select.apogeeSelect()
        # Delete these because they're big and we don't need them
        del apo._specdata
        del apo._photdata
        save_pickles(selectFile, apo)
    # Get the full data sample for the locations (need all locations where
    # stars could be observed, so the whole sample, not just the subsample
    # being analyzed)
    data = get_rcsample()
    locations = list(set(list(data['LOCATION_ID'])))
    # Load the dust map and setup the effective selection function
    if options.dmap.lower() == 'green15':
        dmap3d = mwdust.Green15(filter='2MASS H')
    elif options.dmap.lower() == 'marshall06':
        dmap3d = mwdust.Marshall06(filter='2MASS H')
    elif options.dmap.lower() == 'drimmel03':
        dmap3d = mwdust.Drimmel03(filter='2MASS H')
    elif options.dmap.lower() == 'sale14':
        dmap3d = mwdust.Sale14(filter='2MASS H')
    elif options.dmap.lower() == 'zero':
        dmap3d = mwdust.Zero(filter='2MASS H')
    # Sample the M_H distribution
    if options.samplemh:
        if sample is None:
            sample = data
        MH = sample['H0'] - sample['RC_DM']
        MH = numpy.random.permutation(MH)[:1000]  # do 1,000 max
    else:
        MH = -1.49
    apof = apogee.select.apogeeEffectiveSelect(apo, dmap3d=dmap3d, MH=MH)
    # Distances at which to calculate the effective selection function
    distmods = numpy.linspace(options.dm_min, options.dm_max, options.ndm)
    ds = 10.**(distmods / 5. - 2.)
    # Now compute all selection functions
    out = multi.parallel_map(
        (lambda x: _calc_effsel_onelocation(locations[x], apof, apo, ds)),
        range(len(locations)),
        numcores=numpy.amin([len(locations),
                             multiprocessing.cpu_count(),
                             options.multi]))
    # Save out
    out = numpy.array(out)
    save_pickles(args[0], locations, out, distmods, ds)
    return None
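# Illustrative sketch (not part of the original module): calc_effsel above is
# driven by an optparse-style options object and an output filename in
# args[0]. The attribute names below are exactly those read in the function
# body; the values and the output path are hypothetical.
def _example_calc_effsel():
    from argparse import Namespace
    opts = Namespace(dmap='green15', samplemh=True,
                     dm_min=7., dm_max=15.5, ndm=301,
                     multi=4)
    calc_effsel(['../savs/effsel_green15.sav'], opts)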
def stparas(input, dnumodel=0, bcmodel=0, dustmodel=0, dnucor=0, useav=0,
            plot=0):

    # IAU XXIX Resolution, Mamajek et al. (2015)
    r_sun = 6.957e10
    gconst = 6.67408e-8
    gm = 1.3271244e26
    m_sun = gm / gconst
    rho_sun = m_sun / (4. / 3. * np.pi * r_sun**3)
    g_sun = gconst * m_sun / r_sun**2.

    # solar constants
    numaxsun = 3090.
    dnusun = 135.1
    teffsun = 5777.
    Msun = 4.74  # NB this is fixed to MESA BCs!

    # assumed uncertainty in bolometric corrections
    err_bc = 0.02

    # assumed uncertainty in extinction
    err_ext = 0.02

    # load models if they're not passed on
    if (dnumodel == 0):
        dnumodel = asfgrid.Seism()
    if (bcmodel == 0):
        bcmodel = h5py.File('bcgrid.h5', 'r')
    if (dustmodel == 0.):
        dustmodel = mwdust.Green15()

    # object containing output values
    out = resdata()

    # extinction coefficients
    extfactors = extinction()

    ########################################################################
    # case 1: input is parallax + colors
    ########################################################################
    if (input.plx > 0.):
        # only K-band for now
        teffgrid = np.array(bcmodel['teffgrid'])
        avgrid = np.array(bcmodel['avgrid'])
        interp = RegularGridInterpolator(
            (np.array(bcmodel['teffgrid']),
             np.array(bcmodel['logggrid']),
             np.array(bcmodel['fehgrid']),
             np.array(bcmodel['avgrid'])),
            np.array(bcmodel['bc_k']))

        ### Monte Carlo starts here
        # number of samples
        nsample = int(1e5)

        # length scale for exp decreasing vol density prior in pc
        L = 1350.

        # get a rough maximum distance
        tempdis = 1. / input.plx
        tempdise = input.plxe / input.plx**2
        maxds = tempdis + 5. * tempdise

        ds = np.arange(1., 10000., 1.)
        lh = np.exp((-1. / (2. * input.plxe**2)) * (input.plx - 1. / ds)**2)
        prior = (ds**2 / (2. * L**3.)) * np.exp(-ds / L)
        dis = lh * prior
        dis2 = dis / np.sum(dis)
        norm = dis2 / np.max(dis2)
        um = np.where((ds > tempdis) & (norm < 0.001))[0]
        if (len(um) > 0):
            maxds = np.min(ds[um])
        else:
            maxds = 10000.

        print 'using max distance:', maxds
        ds = np.linspace(1., maxds, 10000)
        lh = (1. / (np.sqrt(2. * np.pi) * input.plxe)) * \
            np.exp((-1. / (2. * input.plxe**2)) * (input.plx - 1. / ds)**2)
        prior = (ds**2 / (2. * L**3.)) * np.exp(-ds / L)
        # NB: the exp-decreasing volume-density prior is overridden by a flat
        # prior on the next line
        prior = np.zeros(len(lh)) + 1.
        dis = lh * prior
        dis2 = dis / np.sum(dis)

        # sample distances following the discrete distance posterior
        np.random.seed(seed=10)
        dsamp = np.random.choice(ds, p=dis2, size=nsample)

        equ = ephem.Equatorial(input.ra * np.pi / 180.,
                               input.dec * np.pi / 180.,
                               epoch=ephem.J2000)
        gal = ephem.Galactic(equ)
        lon_deg = gal.lon * 180. / np.pi
        lat_deg = gal.lat * 180. / np.pi

        avs = 3.1 * dustmodel(lon_deg, lat_deg, dsamp / 1000.)
        # NB the next line means that useav is not actually working yet
        # avs = np.zeros(len(dsamp))+useav
        ext = avs * extfactors.ak
        ext = 0.  # already in BC

        if (input.teff == -99.):
            # NB: this fallback expects a J-K color (jkmag) defined upstream
            teff = casagrande(jkmag, 0.0)
            teffe = 100.
        else:
            teff = input.teff
            teffe = input.teffe

        np.random.seed(seed=11)
        teffsamp = teff + np.random.randn(nsample) * teffe

        map = input.kmag
        mape = input.kmage
        np.random.seed(seed=12)
        map_samp = map + np.random.randn(nsample) * mape
        absmag = -5. * np.log10(dsamp) - ext + map_samp + 5.

        if (input.teff < np.min(teffgrid)):
            return out
        if (input.teff > np.max(teffgrid)):
            return out
        #if (out.av > np.max(avgrid)):
        #    return out
        #if (out.av < np.min(avgrid)):
        #    return out

        if ((input.teff > -99.) & (input.logg > -99.) & (input.feh > -99.)):
            #bc = interp(np.array([input.teff,input.logg,input.feh,0.]))[0]
            arr = np.zeros((len(avs), 4))
            arr[:, 0] = np.zeros(len(avs)) + input.teff
            arr[:, 1] = np.zeros(len(avs)) + input.logg
            arr[:, 2] = np.zeros(len(avs)) + input.feh
            arr[:, 3] = np.zeros(len(avs)) + avs
            um = np.where(arr[:, 3] < 0.)[0]
            arr[um, 3] = 0.
            #pdb.set_trace()
            bc = interp(arr)

        Mvbol = absmag + bc
        lum = 10**((Mvbol - Msun) / (-2.5))
        t = teffsamp / teffsun
        rad = (lum * t**(-4.))**0.5
        #pdb.set_trace()

        out.teff = input.teff
        out.teffe = input.teffe
        '''
        out.lum=np.median(lum)
        out.lumep=np.percentile(lum,84.1)-out.lum
        out.lumem=out.lum-np.percentile(lum,15.9)

        out.rad=np.median(rad)
        out.radep=np.percentile(rad,84.1)-out.rad
        out.radem=out.rad-np.percentile(rad,15.9)

        out.dis=np.median(dsamp)
        out.disep=np.percentile(dsamp,84.1)-out.dis
        out.disem=out.dis-np.percentile(dsamp,15.9)
        '''
        out.avs = np.median(avs)
        out.avsep = np.percentile(avs, 84.1) - out.avs
        out.avsem = out.avs - np.percentile(avs, 15.9)
        #pdb.set_trace()

        out.rad, out.radep, out.radem, radbn = getstat(rad)
        out.lum, out.lumep, out.lumem, lumbn = getstat(lum)
        out.dis, out.disep, out.disem, disbn = getstat(dsamp)
        #out.avs,out.avsep,out.avsem=getstat(avs)
        #pdb.set_trace()

        out.teff = input.teff
        out.teffep = input.teffe
        out.teffem = input.teffe
        out.logg = input.logg
        out.loggep = input.logge
        out.loggem = input.logge
        out.feh = input.feh
        out.fehep = input.fehe
        out.fehem = input.fehe

        if (plot == 1):
            plt.ion()
            plt.clf()
            plt.subplot(3, 2, 1)
            plt.hist(teffsamp, bins=100)
            plt.title('Teff')
            plt.subplot(3, 2, 2)
            plt.hist(lum, bins=lumbn)
            plt.title('Lum')
            plt.subplot(3, 2, 3)
            plt.hist(rad, bins=radbn)
            plt.title('Rad')
            plt.subplot(3, 2, 4)
            plt.hist(absmag, bins=100)
            plt.title('absmag')
            plt.subplot(3, 2, 5)
            plt.hist(dsamp, bins=disbn)
            plt.title('distance')
            plt.subplot(3, 2, 6)
            plt.hist(avs, bins=100)
            plt.title('Av')
            #pdb.set_trace()

        print ' '
        print 'teff(K):', out.teff, '+/-', out.teffe
        print 'dis(pc):', out.dis, '+', out.disep, '-', out.disem
        print 'av(mag):', out.avs, '+', out.avsep, '-', out.avsem
        print 'rad(rsun):', out.rad, '+', out.radep, '-', out.radem
        print 'lum(lsun):', out.lum, '+', out.lumep, '-', out.lumem
        print '-----'
        #raw_input(':')
        #pdb.set_trace()

    ########################################################################
    # case 2: input is spectroscopy + seismology
    ########################################################################
    if ((input.dnu > -99.) & (input.teff > -99.)):
        # seismic logg, density, M and R from scaling relations; this is
        # iterated, since the Dnu scaling relation correction depends on M
        dmass = 1.
        fdnu = 1.
        dnuo = input.dnu
        oldmass = 1.0
        nit = 0.

        while (nit < 5):
            numaxn = input.numax / numaxsun
            numaxne = input.numaxe / numaxsun
            dnun = (dnuo / fdnu) / dnusun
            dnune = input.dnue / dnusun
            teffn = input.teff / teffsun
            teffne = input.teffe / teffsun

            out.rad = (numaxn) * (dnun)**(-2.) * np.sqrt(teffn)
            out.rade = np.sqrt((input.numaxe / input.numax)**2.
                               + 4. * (input.dnue / input.dnu)**2.
                               + 0.25 * (input.teffe / input.teff)**2.) \
                * out.rad

            out.mass = out.rad**3. * (dnun)**2.
            out.masse = np.sqrt(9. * (out.rade / out.rad)**2.
                                + 4. * (input.dnue / input.dnu)**2.) \
                * out.mass

            out.rho = rho_sun * (dnun**2.)
            out.rhoe = np.sqrt(4. * (input.dnue / input.dnu)**2.) * out.rho

            g = g_sun * numaxn * teffn**0.5
            ge = np.sqrt((input.numaxe / input.numax)**2.
                         + (0.5 * input.teffe / input.teff)**2.) * g

            out.logg = np.log10(g)
            out.logge = ge / (g * np.log(10.))

            # Dnu scaling relation correction from Sharma et al. 2016
            if (dnucor == 1):
                if (input.clump == 1):
                    evstate = 2
                else:
                    evstate = 1
                #pdb.set_trace()
                dnu, numax, fdnu = dnumodel.get_dnu_numax(evstate, input.feh,
                                                          input.teff,
                                                          out.mass, out.mass,
                                                          out.logg,
                                                          isfeh=True)
                #print out.mass,fdnu

            dmass = abs((oldmass - out.mass) / out.mass)
            oldmass = out.mass
            nit = nit + 1

        print fdnu
        #pdb.set_trace()
        out.lum = out.rad**2. * teffn**4.
        out.lume = np.sqrt((2. * out.rade / out.rad)**2.
                           + (4. * input.teffe / input.teff)**2.) * out.lum

        print ' '
        print 'teff(K):', input.teff, '+/-', input.teffe
        print 'feh(dex):', input.feh, '+/-', input.fehe
        print 'logg(dex):', out.logg, '+/-', out.logge
        print 'rho(cgs):', out.rho, '+/-', out.rhoe
        print 'rad(rsun):', out.rad, '+/-', out.rade
        print 'mass(msun):', out.mass, '+/-', out.masse
        print 'lum(lsun):', out.lum, '+/-', out.lume
        print '-----'

        out.teff = input.teff
        out.teffep = input.teffe
        out.teffem = input.teffe
        out.feh = input.feh
        out.fehep = input.fehe
        out.fehem = input.fehe
        out.loggep = out.logge
        out.loggem = out.logge
        out.radep = out.rade
        out.radem = out.rade
        out.rhoep = out.rhoe
        out.rhoem = out.rhoe
        out.massep = out.masse
        out.massem = out.masse
        out.lumep = out.lume
        out.lumem = out.lume

        ddis = 1.
        ext = 0.0
        err_ = 0.01
        olddis = 100.0

        # pick an apparent magnitude from the input
        map = -99.
        if (input.vmag > -99.):
            map = input.vmag
            mape = input.vmage
            str = 'bc_v'
            avtoext = extfactors.av
        if (input.vtmag > -99.):
            map = input.vtmag
            mape = input.vtmage
            str = 'bc_vt'
            avtoext = extfactors.avt
        if (input.jmag > -99.):
            map = input.jmag
            mape = input.jmage
            str = 'bc_j'
            avtoext = extfactors.aj
        if (input.kmag > -99.):
            map = input.kmag
            mape = input.kmage
            str = 'bc_k'
            avtoext = extfactors.ak
        if (input.gamag > -99.):
            map = input.gamag
            mape = input.gamage
            str = 'bc_ga'
            avtoext = extfactors.aga

        # if an apparent mag is given, calculate the distance
        if (map > -99.):
            print 'using ' + str
            print 'using coords: ', input.ra, input.dec

            equ = ephem.Equatorial(input.ra * np.pi / 180.,
                                   input.dec * np.pi / 180.,
                                   epoch=ephem.J2000)
            gal = ephem.Galactic(equ)
            lon_deg = gal.lon * 180. / np.pi
            lat_deg = gal.lat * 180. / np.pi

            # iterated since the BC depends on the extinction
            nit = 0
            while (nit < 5):
                if (nit == 0.):
                    out.avs = 0.0
                else:
                    out.avs = 3.1 * dustmodel(lon_deg, lat_deg,
                                              out.dis / 1000.)[0]
                #print lon_deg,lat_deg,out.dis
                if (useav != 0.):
                    out.avs = useav
                if (out.avs < 0.):
                    out.avs = 0.0
                ext = out.avs * avtoext

                # bolometric correction interpolated from MESA
                interp = RegularGridInterpolator(
                    (np.array(bcmodel['teffgrid']),
                     np.array(bcmodel['logggrid']),
                     np.array(bcmodel['fehgrid']),
                     np.array(bcmodel['avgrid'])),
                    np.array(bcmodel[str]))

                #pdb.set_trace()
                bc = interp(np.array([input.teff, out.logg, input.feh,
                                      out.avs]))[0]
                #bc = interp(np.array([input.teff,out.logg,input.feh,0.]))[0]

                Mvbol = -2.5 * (np.log10(out.lum)) + Msun
                Mvbole = np.sqrt((-2.5 / (out.lum * np.log(10.)))**2
                                 * out.lume**2)

                Mabs = Mvbol - bc
                Mabse = np.sqrt(Mvbole**2 + err_bc**2)

                ext = 0.  # ext already applied in BC
                logplx = (Mabs - 5. - map + ext) / 5.
                logplxe = np.sqrt((Mabse / 5.)**2. + (mape / 5.)**2.
                                  + (err_ext / 5.)**2.)

                out.plx = 10.**logplx
                out.plxe = np.log(10) * 10.**logplx * logplxe

                out.dis = 1. / out.plx
                out.dise = out.plxe / out.plx**2.

                ddis = abs((olddis - out.dis) / out.dis)
                #print olddis,out.dis,ddis,ext
                olddis = out.dis

                nit = nit + 1
                #print out.dis,out.avs

            #pdb.set_trace()
            print 'Av(mag):', out.avs
            print 'plx(mas):', out.plx * 1e3, '+/-', out.plxe * 1e3
            print 'dis(pc):', out.dis, '+/-', out.dise

            out.disep = out.dise
            out.disem = out.dise

            out.mabs = Mabs

    return out
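# Illustrative sketch (not part of the original code): a minimal call into the
# parallax branch of stparas above. The attribute names mirror those accessed
# in the function; the numerical values are made up, the parallax is in
# arcsec (as implied by the pc-based distance prior), and bcgrid.h5 plus the
# Green et al. (2015) dust map are assumed to be available locally.
def _example_stparas():
    from argparse import Namespace
    star = Namespace(plx=0.02, plxe=0.0005,   # ~50 pc, hypothetical values
                     ra=290.0, dec=44.5,      # decimal degrees
                     teff=5800., teffe=80.,
                     logg=4.4, logge=0.1,
                     feh=0.0, fehe=0.1,
                     kmag=8.0, kmage=0.02,
                     dnu=-99.)                # sentinel: skip seismic branch
    return stparas(star, plot=0)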
def plot_effsel_location(location, plotname):
    # Setup selection function
    selectFile = '../savs/selfunc-nospdata.sav'
    if os.path.exists(selectFile):
        with open(selectFile, 'rb') as savefile:
            apo = pickle.load(savefile)
    else:
        # Setup selection function
        apo = apogee.select.apogeeSelect()
        # Delete these because they're big and we don't need them
        del apo._specdata
        del apo._photdata
        save_pickles(selectFile, apo)
    effselFile = '../savs/effselfunc-%i.sav' % location
    if not os.path.exists(effselFile):
        # Distances at which to calculate the effective selection function
        distmods = numpy.linspace(7., 15.5, 301)
        ds = 10.**(distmods / 5. - 2.)
        # Setup default effective selection function
        do_samples = True
        gd = mwdust.Green15(filter='2MASS H', load_samples=do_samples)
        apof = apogee.select.apogeeEffectiveSelect(apo, dmap3d=gd)
        sf_default = apof(location, ds)
        # Also calculate for a sample of MH
        data = get_rcsample()
        MH = data['H0'] - data['RC_DM']
        MH = numpy.random.permutation(MH)[:1000]
        sf_jkz = apof(location, ds, MH=MH)
        # Go through the samples
        sf_samples = numpy.zeros((20, len(ds)))
        if do_samples:
            for ii in range(20):
                # Swap in a sample for the best fit in the Green et al. (2015)
                # dust map
                gd.substitute_sample(ii)
                apof = apogee.select.apogeeEffectiveSelect(apo, dmap3d=gd)
                sf_samples[ii] = apof(location, ds)
        zerodust = mwdust.Zero(filter='2MASS H')
        apof = apogee.select.apogeeEffectiveSelect(apo, dmap3d=zerodust)
        sf_zero = apof(location, ds)
        drimmel = mwdust.Drimmel03(filter='2MASS H')
        apof = apogee.select.apogeeEffectiveSelect(apo, dmap3d=drimmel)
        sf_drimmel = apof(location, ds)
        marshall = mwdust.Marshall06(filter='2MASS H')
        apof = apogee.select.apogeeEffectiveSelect(apo, dmap3d=marshall)
        try:
            sf_marshall = apof(location, ds)
        except IndexError:
            sf_marshall = -numpy.ones_like(ds)
        sale = mwdust.Sale14(filter='2MASS H')
        apof = apogee.select.apogeeEffectiveSelect(apo, dmap3d=sale)
        try:
            sf_sale = apof(location, ds)
        except (TypeError, ValueError):
            sf_sale = -numpy.ones_like(ds)
        save_pickles(effselFile, distmods, sf_default, sf_jkz, sf_samples,
                     sf_zero, sf_drimmel, sf_marshall, sf_sale)
    else:
        with open(effselFile, 'rb') as savefile:
            distmods = pickle.load(savefile)
            sf_default = pickle.load(savefile)
            sf_jkz = pickle.load(savefile)
            sf_samples = pickle.load(savefile)
            sf_zero = pickle.load(savefile)
            sf_drimmel = pickle.load(savefile)
            sf_marshall = pickle.load(savefile)
            sf_sale = pickle.load(savefile)
    # Now plot
    bovy_plot.bovy_print(fig_height=3.)
    rc('text.latex',
       preamble=r'\usepackage{amsmath}' + '\n'
       + r'\usepackage{amssymb}' + '\n'
       + r'\usepackage{yfonts}')
    if _PLOTDIST:
        distmods = 10.**(distmods / 5. - 2.)
        xrange = [0., 12.]
        xlabel = r'$D\,(\mathrm{kpc})$'
        ylabel = r'$\textswab{S}(\mathrm{location},D)$'
    else:
        xrange = [7., 15.8]
        xlabel = r'$\mathrm{distance\ modulus}\ \mu$'
        ylabel = r'$\textswab{S}(\mathrm{location},\mu)$'
    line_default = bovy_plot.bovy_plot(distmods, sf_default, 'b-',
                                       lw=_LW, zorder=12,
                                       xrange=xrange, xlabel=xlabel,
                                       yrange=[0., 1.2 * numpy.amax(sf_zero)],
                                       ylabel=ylabel)
    pyplot.fill_between(distmods,
                        sf_default - _EXAGGERATE_ERRORS
                        * (sf_default - numpy.amin(sf_samples, axis=0)),
                        sf_default + _EXAGGERATE_ERRORS
                        * (numpy.amax(sf_samples, axis=0) - sf_default),
                        color='0.65', zorder=0)
    line_jkz = bovy_plot.bovy_plot(distmods, sf_jkz, 'g-.', lw=2. * _LW,
                                   overplot=True, zorder=13)
    line_zero = bovy_plot.bovy_plot(distmods, sf_zero, 'k--', lw=_LW,
                                    overplot=True, zorder=7)
    line_drimmel = bovy_plot.bovy_plot(distmods, sf_drimmel, '-',
                                       color='gold', lw=_LW,
                                       overplot=True, zorder=8)
    line_marshall = bovy_plot.bovy_plot(distmods, sf_marshall, 'r-', lw=_LW,
                                        overplot=True, zorder=9)
    line_sale = bovy_plot.bovy_plot(distmods, sf_sale, 'c-', lw=_LW,
                                    overplot=True, zorder=10)
    if location == 4378:
        pyplot.legend((line_default[0], line_jkz[0], line_zero[0]),
                      (r'$\mathrm{Green\ et\ al.\ (2015)}$',
                       r'$\mathrm{Green\ et\ al.} + p(M_H)$',
                       r'$\mathrm{zero\ extinction}$'),
                      loc='lower right',
                      #bbox_to_anchor=(.91,.375),
                      numpoints=8,
                      prop={'size': 14},
                      frameon=False)
    elif location == 4312:
        pyplot.legend((line_sale[0], line_marshall[0], line_drimmel[0]),
                      (r'$\mathrm{Sale\ et\ al.\ (2014)}$',
                       r'$\mathrm{Marshall\ et\ al.\ (2006)}$',
                       r'$\mathrm{Drimmel\ et\ al.\ (2003)}$'),
                      loc='lower right',
                      #bbox_to_anchor=(.91,.375),
                      numpoints=8,
                      prop={'size': 14},
                      frameon=False)
    # Label
    lcen, bcen = apo.glonGlat(location)
    if numpy.fabs(bcen) < 0.1:
        bcen = 0.
    bovy_plot.bovy_text(r'$(l,b) = (%.1f,%.1f)$' % (lcen, bcen),
                        top_right=True, size=16.)
    bovy_plot.bovy_end_print(plotname)
    return None
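# Hypothetical usage sketch: regenerate the effective-selection-function
# figure for the two fields singled out in the legends above (the output
# filenames are illustrative).
def _example_plot_effsel():
    for loc in [4378, 4312]:
        plot_effsel_location(loc, 'effsel_%i.png' % loc)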
def plot_dustnearplane(plotname, green=False):
    if green:
        savefile = _SAVEFILE_GREEN
    else:
        savefile = _SAVEFILE_MARSHALL
    # Grid
    ls = numpy.linspace(15., 70., _NL)
    bs = numpy.linspace(-2., 2., _NB)
    if not os.path.exists(savefile):
        # Setup dust map
        if green:
            dmap = mwdust.Green15(filter='2MASS H')
        else:
            dmap = mwdust.Marshall06(filter='2MASS H')
        plotthis = numpy.empty((_NL, _NB))
        rad = 0.5  # deg
        for jj in range(_NB):
            print jj
            for ii in range(_NL):
                pa, ah = dmap.dust_vals_disk(ls[ii], bs[jj], 7., rad)
                plotthis[ii, jj] = numpy.sum(pa * ah) / numpy.sum(pa)
        save_pickles(savefile, plotthis)
    else:
        with open(savefile, 'rb') as f:
            plotthis = pickle.load(f)
    # Now plot
    bovy_plot.bovy_print(fig_width=8.4, fig_height=4.)
    bovy_plot.bovy_dens2d(plotthis[::-1].T,
                          origin='lower',
                          cmap=cm.coolwarm,
                          #interpolation='nearest',
                          colorbar=True,
                          shrink=0.45,
                          vmin=0.,
                          vmax=2. - 0.75 * green,
                          aspect=3.,
                          xrange=[ls[-1] + (ls[1] - ls[0]) / 2.,
                                  ls[0] - (ls[1] - ls[0]) / 2.],
                          yrange=[bs[0] - (bs[1] - bs[0]) / 2.,
                                  bs[-1] + (bs[1] - bs[0]) / 2.],
                          xlabel=r'$l\,\mathrm{(deg)}$',
                          ylabel=r'$b\,\mathrm{(deg)}$',
                          zlabel=r'$A_H\,(\mathrm{mag})$',
                          zorder=0)
    bovy_plot.bovy_text(r'$D = 7\,\mathrm{kpc}$', top_left=True,
                        color='w', size=16.)
    # Overplot fields
    glons = [34., 64., 27.]
    glats = [0., 0., 0.]
    colors = ['w', 'w', 'y']
    xs = numpy.linspace(-1.5, 1.5, 201)
    ys = numpy.sqrt(1.5**2. - xs**2.)
    for glon, glat, c in zip(glons, glats, colors):
        bovy_plot.bovy_plot(xs + glon, ys + glat, '-', overplot=True,
                            zorder=1, lw=2., color=c)
        bovy_plot.bovy_plot(xs + glon, -ys + glat, '-', overplot=True,
                            zorder=1, lw=2., color=c)
    bovy_plot.bovy_end_print(plotname)
    return None
def plot_ah_location(location, plotname):
    # Setup selection function
    selectFile = '../savs/selfunc-nospdata.sav'
    if os.path.exists(selectFile):
        with open(selectFile, 'rb') as savefile:
            apo = pickle.load(savefile)
    else:
        # Setup selection function
        apo = apogee.select.apogeeSelect()
        # Delete these because they're big and we don't need them
        del apo._specdata
        del apo._photdata
        save_pickles(selectFile, apo)
    glon, glat = apo.glonGlat(location)
    glon = glon[0]
    glat = glat[0]
    ahFile = '../savs/ah-%i.sav' % location
    if not os.path.exists(ahFile):
        # Distances at which to calculate the extinction
        distmods = numpy.linspace(7., 15.5, 301)
        ds = 10.**(distmods / 5. - 2.)
        # Setup Green et al. (2015) dust map
        gd = mwdust.Green15(filter='2MASS H')
        pa, ah = gd.dust_vals_disk(glon, glat, ds, apo.radius(location))
        meanah_default = numpy.sum(numpy.tile(pa, (len(ds), 1)).T * ah,
                                   axis=0) / numpy.sum(pa)
        stdah_default = numpy.sqrt(numpy.sum(numpy.tile(pa, (len(ds), 1)).T
                                             * ah**2., axis=0)
                                   / numpy.sum(pa) - meanah_default**2.)
        # Marshall et al. (2006)
        marshall = mwdust.Marshall06(filter='2MASS H')
        try:
            pa, ah = marshall.dust_vals_disk(glon, glat, ds,
                                             apo.radius(location))
        except IndexError:
            meanah_marshall = -numpy.ones_like(ds)
            stdah_marshall = -numpy.ones_like(ds)
        else:
            meanah_marshall = numpy.sum(numpy.tile(pa, (len(ds), 1)).T * ah,
                                        axis=0) / numpy.sum(pa)
            stdah_marshall = numpy.sqrt(numpy.sum(numpy.tile(pa,
                                                             (len(ds), 1)).T
                                                  * ah**2., axis=0)
                                        / numpy.sum(pa)
                                        - meanah_marshall**2.)
        if True:
            # Drimmel et al. (2003)
            drimmel = mwdust.Drimmel03(filter='2MASS H')
            pa, ah = drimmel.dust_vals_disk(glon, glat, ds,
                                            apo.radius(location))
            meanah_drimmel = numpy.sum(numpy.tile(pa, (len(ds), 1)).T * ah,
                                       axis=0) / numpy.sum(pa)
            stdah_drimmel = numpy.sqrt(numpy.sum(numpy.tile(pa,
                                                            (len(ds), 1)).T
                                                 * ah**2., axis=0)
                                       / numpy.sum(pa) - meanah_drimmel**2.)
        else:
            meanah_drimmel = -numpy.ones_like(ds)
            stdah_drimmel = -numpy.ones_like(ds)
        if True:
            # Sale et al. (2014)
            sale = mwdust.Sale14(filter='2MASS H')
            try:
                pa, ah = sale.dust_vals_disk(glon, glat, ds,
                                             apo.radius(location))
                meanah_sale = numpy.sum(numpy.tile(pa, (len(ds), 1)).T * ah,
                                        axis=0) / numpy.sum(pa)
            except (TypeError, ValueError):
                meanah_sale = -numpy.ones_like(ds)
                stdah_sale = -numpy.ones_like(ds)
            else:
                stdah_sale = numpy.sqrt(numpy.sum(numpy.tile(pa,
                                                             (len(ds), 1)).T
                                                  * ah**2., axis=0)
                                        / numpy.sum(pa) - meanah_sale**2.)
        else:
            meanah_sale = -numpy.ones_like(ds)
            stdah_sale = -numpy.ones_like(ds)
        save_pickles(ahFile, distmods, meanah_default, stdah_default,
                     meanah_marshall, stdah_marshall,
                     meanah_drimmel, stdah_drimmel,
                     meanah_sale, stdah_sale)
    else:
        with open(ahFile, 'rb') as savefile:
            distmods = pickle.load(savefile)
            meanah_default = pickle.load(savefile)
            stdah_default = pickle.load(savefile)
            meanah_marshall = pickle.load(savefile)
            stdah_marshall = pickle.load(savefile)
            meanah_drimmel = pickle.load(savefile)
            stdah_drimmel = pickle.load(savefile)
            meanah_sale = pickle.load(savefile)
            stdah_sale = pickle.load(savefile)
    # Now plot
    bovy_plot.bovy_print(fig_height=3.)
    if _PLOTDIST:
        distmods = 10.**(distmods / 5. - 2.)
        xrange = [0., 12.]
        xlabel = r'$D\,(\mathrm{kpc})$'
    else:
        xrange = [7., 15.8]
        xlabel = r'$\mathrm{distance\ modulus}\ \mu$'
    ylabel = r'$A_H$'
    yrange = [0., 1.2 * numpy.amax(numpy.vstack(
        (meanah_default + stdah_default,
         meanah_marshall + stdah_marshall,
         meanah_drimmel + stdah_drimmel,
         meanah_sale + stdah_sale)))]
    line_default = bovy_plot.bovy_plot(distmods, meanah_default, 'b-',
                                       lw=_LW, zorder=12,
                                       xrange=xrange, xlabel=xlabel,
                                       yrange=yrange, ylabel=ylabel)
    pyplot.fill_between(distmods,
                        meanah_default - stdah_default,
                        meanah_default + stdah_default,
                        hatch='/', facecolor=(0, 0, 0, 0),
                        color='b', lw=0.25, zorder=4)
    line_marshall = bovy_plot.bovy_plot(distmods, meanah_marshall, 'r-',
                                        lw=_LW, overplot=True, zorder=8)
    pyplot.fill_between(distmods,
                        meanah_marshall - stdah_marshall,
                        meanah_marshall + stdah_marshall,
                        hatch='\\', facecolor=(0, 0, 0, 0),
                        color='r', lw=0.25, zorder=2)
    line_drimmel = bovy_plot.bovy_plot(distmods, meanah_drimmel, '-',
                                       lw=_LW, color='gold',
                                       overplot=True, zorder=7)
    pyplot.fill_between(distmods,
                        meanah_drimmel - stdah_drimmel,
                        meanah_drimmel + stdah_drimmel,
                        hatch='///', facecolor=(0, 0, 0, 0),
                        color='gold', lw=0.25, zorder=1)
    line_sale = bovy_plot.bovy_plot(distmods, meanah_sale, '-',
                                    lw=_LW, color='c',
                                    overplot=True, zorder=9)
    pyplot.fill_between(distmods,
                        meanah_sale - stdah_sale,
                        meanah_sale + stdah_sale,
                        hatch='//', facecolor=(0, 0, 0, 0),
                        color='c', lw=0.25, zorder=3)
    if True:
        data = get_rcsample()
        data = data[data['LOCATION_ID'] == location]
        bovy_plot.bovy_plot(data['RC_DIST'], data['AK_TARG'] * 1.55,
                            'ko', zorder=20, overplot=True, ms=2.)
    if location == 4318:
        pyplot.legend((line_default[0], line_sale[0]),
                      (r'$\mathrm{Green\ et\ al.\ (2015)}$',
                       r'$\mathrm{Sale\ et\ al.\ (2014)}$'),
                      loc='lower right',
                      #bbox_to_anchor=(.91,.375),
                      numpoints=8,
                      prop={'size': 14},
                      frameon=False)
    elif location == 4242:
        pyplot.legend((line_marshall[0], line_drimmel[0]),
                      (r'$\mathrm{Marshall\ et\ al.\ (2006)}$',
                       r'$\mathrm{Drimmel\ et\ al.\ (2003)}$'),
                      loc='lower right',
                      #bbox_to_anchor=(.91,.375),
                      numpoints=8,
                      prop={'size': 14},
                      frameon=False)
    # Label
    lcen, bcen = apo.glonGlat(location)
    if numpy.fabs(bcen) < 0.1:
        bcen = 0.
    bovy_plot.bovy_text(r'$(l,b) = (%.1f,%.1f)$' % (lcen, bcen),
                        top_right=True, size=16.)
    bovy_plot.bovy_end_print(plotname, dpi=300,
                             bbox_extra_artists=pyplot.gca().get_children(),
                             bbox_inches='tight')
    return None
def generate(locations,
             type='exp',
             sample='lowlow',
             extmap='green15',
             nls=101,
             nmock=1000,
             H0=-1.49,
             _dmapg15=None,
             ncpu=1):
    """
    NAME:
       generate
    PURPOSE:
       generate mock data following a given density
    INPUT:
       locations - locations to be included in the sample
       type= ('exp') type of density profile to sample from
       sample= ('lowlow') sample for selecting the mock parameters
       extmap= ('green15') extinction map to use ('marshall06' and others
               use Green15 to fill in unobserved regions)
       nls= (101) number of longitude bins to use for each field
       nmock= (1000) number of mock data points to generate
       H0= (-1.49) absolute magnitude (can be array w/ sampling spread)
       ncpu= (1) number of cpus to use to compute the probability
    OUTPUT:
       mockdata recarray with tags 'RC_GALR_H', 'RC_GALPHI_H', 'RC_GALZ_H'
    HISTORY:
       2015-04-03 - Written - Bovy (IAS)
    """
    if isinstance(H0, float):
        H0 = [H0]
    # Setup the density function and its initial parameters
    rdensfunc = fitDens._setup_densfunc(type)
    mockparams = _setup_mockparams_densfunc(type, sample)
    densfunc = lambda x, y, z: rdensfunc(x, y, z, params=mockparams)
    # Setup the extinction map
    global dmap
    global dmapg15
    if _dmapg15 is None:
        dmapg15 = mwdust.Green15(filter='2MASS H')
    else:
        dmapg15 = _dmapg15
    if isinstance(extmap, mwdust.DustMap3D.DustMap3D):
        dmap = extmap
    elif extmap.lower() == 'green15':
        dmap = dmapg15
    elif extmap.lower() == 'marshall06':
        dmap = mwdust.Marshall06(filter='2MASS H')
    elif extmap.lower() == 'sale14':
        dmap = mwdust.Sale14(filter='2MASS H')
    elif extmap.lower() == 'drimmel03':
        dmap = mwdust.Drimmel03(filter='2MASS H')
    # Use brute-force rejection sampling to make no approximations
    # First need to estimate the max probability to use in rejection;
    # loop through all locations and compute the sampling probability on a
    # grid in (l,b,D)
    # First restore the APOGEE selection function (assumed pre-computed)
    global apo
    selectFile = '../savs/selfunc-nospdata.sav'
    if os.path.exists(selectFile):
        with open(selectFile, 'rb') as savefile:
            apo = pickle.load(savefile)
    # Now compute the necessary coordinate transformations and evaluate the
    # maximum probability
    distmods = numpy.linspace(7., 15.5, 301)
    ds = 10.**(distmods / 5. - 2.)
    nbs = nls
    lnprobs = numpy.empty((len(locations), len(distmods), nbs, nls))
    radii = []
    lcens, bcens = [], []
    lnprobs = multi.parallel_map(
        lambda x: _calc_lnprob(locations[x], nls, nbs, ds, distmods,
                               H0, densfunc),
        range(len(locations)),
        numcores=numpy.amin([len(locations),
                             multiprocessing.cpu_count(),
                             ncpu]))
    lnprobs = numpy.array(lnprobs)
    for ll, loc in enumerate(locations):
        lcen, bcen = apo.glonGlat(loc)
        rad = apo.radius(loc)
        radii.append(rad)  # save for later
        lcens.append(lcen[0])
        bcens.append(bcen[0])
    maxp = (numpy.exp(numpy.nanmax(lnprobs)) - 10.**-8.) * 1.1  # Just to be sure
    # Now generate mock data using rejection sampling
    nout = 0
    arlocations = numpy.array(locations)
    arradii = numpy.array(radii)
    arlcens = numpy.array(lcens)
    arbcens = numpy.array(bcens)
    out = numpy.recarray((nmock,),
                         dtype=[('RC_DIST_H', 'f8'),
                                ('RC_DM_H', 'f8'),
                                ('RC_GALR_H', 'f8'),
                                ('RC_GALPHI_H', 'f8'),
                                ('RC_GALZ_H', 'f8')])
    while nout < nmock:
        nnew = 2 * (nmock - nout)
        # nnew new locations
        locIndx = numpy.floor(numpy.random.uniform(size=nnew)
                              * len(locations)).astype('int')
        newlocations = arlocations[locIndx]
        # Point within these locations
        newds_coord = numpy.random.uniform(size=nnew)
        newds = 10.**((newds_coord * (numpy.amax(distmods)
                                      - numpy.amin(distmods))
                       + numpy.amin(distmods)) / 5. - 2.)
        newdls_coord = numpy.random.uniform(size=nnew)
        newdls = newdls_coord * 2. * arradii[locIndx] - arradii[locIndx]
        newdbs_coord = numpy.random.uniform(size=nnew)
        newdbs = newdbs_coord * 2. * arradii[locIndx] - arradii[locIndx]
        newr2s = newdls**2. + newdbs**2.
        keepIndx = newr2s < arradii[locIndx]**2.
        newlocations = newlocations[keepIndx]
        newds_coord = newds_coord[keepIndx]
        newdls_coord = newdls_coord[keepIndx]
        newdbs_coord = newdbs_coord[keepIndx]
        newds = newds[keepIndx]
        newdls = newdls[keepIndx]
        newdbs = newdbs[keepIndx]
        newls = newdls + arlcens[locIndx][keepIndx]
        newbs = newdbs + arbcens[locIndx][keepIndx]
        # Reject?
        tps = numpy.zeros_like(newds)
        for nloc in list(set(newlocations)):
            lindx = newlocations == nloc
            pindx = arlocations == nloc
            coord = numpy.array([newds_coord[lindx] * (len(distmods) - 1.),
                                 newdbs_coord[lindx] * (nbs - 1.),
                                 newdls_coord[lindx] * (nls - 1.)])
            tps[lindx] = \
                numpy.exp(ndimage.interpolation.map_coordinates(
                    lnprobs[pindx][0], coord,
                    cval=-10., order=1)) - 10.**-8.
        XYZ = bovy_coords.lbd_to_XYZ(newls, newbs, newds, degree=True)
        Rphiz = bovy_coords.XYZ_to_galcencyl(XYZ[:, 0], XYZ[:, 1], XYZ[:, 2],
                                             Xsun=define_rcsample._R0,
                                             Ysun=0.,
                                             Zsun=define_rcsample._Z0)
        testp = numpy.random.uniform(size=len(newds)) * maxp
        keepIndx = tps > testp
        if numpy.sum(keepIndx) > nmock - nout:
            rangeIndx = numpy.zeros(len(keepIndx), dtype='int')
            rangeIndx[keepIndx] = numpy.arange(numpy.sum(keepIndx))
            keepIndx *= (rangeIndx < nmock - nout)
        out['RC_DIST_H'][nout:nout + numpy.sum(keepIndx)] = newds[keepIndx]
        out['RC_DM_H'][nout:nout + numpy.sum(keepIndx)] = \
            newds_coord[keepIndx] * (numpy.amax(distmods)
                                     - numpy.amin(distmods)) \
            + numpy.amin(distmods)
        out['RC_GALR_H'][nout:nout + numpy.sum(keepIndx)] = \
            Rphiz[0][keepIndx]
        out['RC_GALPHI_H'][nout:nout + numpy.sum(keepIndx)] = \
            Rphiz[1][keepIndx]
        out['RC_GALZ_H'][nout:nout + numpy.sum(keepIndx)] = \
            Rphiz[2][keepIndx]
        nout = nout + numpy.sum(keepIndx)
    return (out, lnprobs)
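# Hypothetical usage sketch: draw 1,000 mock red-clump stars in a few of the
# APOGEE fields referenced elsewhere in this code, using the Green et al.
# (2015) extinction map; the field IDs here are illustrative and the
# pre-computed selection function in ../savs/ is assumed to exist.
def _example_generate():
    mock, lnprobs = generate([4242, 4318, 4378],
                             type='exp', sample='lowlow',
                             extmap='green15', nmock=1000, ncpu=4)
    return mock['RC_GALR_H'], mock['RC_GALZ_H']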
import numpy as np
import matplotlib.pyplot as plt
import mwdust
#import healpy as hp
#import time
from pylab import *
from matplotlib.colors import LogNorm
from mpl_toolkits.axes_grid1.inset_locator import inset_axes

#mpl.rcParams['pdf.fonttype'] = 42
#mpl.rcParams['ps.fonttype'] = 42
#mpl.rcParams['text.usetex'] = True

#marshall = mwdust.Marshall06(sf10=True)
drimmel = mwdust.Drimmel03(sf10=True)
green = mwdust.Green15(sf10=True)
sale = mwdust.Sale14(sf10=True)
zero = mwdust.Zero(sf10=True)
#sfd = mwdust.SFD(sf10=True)
#combined = mwdust.Combined15(sf10=True)

#D = np.array([0.25,0.5,1.,2.,3.,4.,5.,6.])
Ndist = 1000
D = np.linspace(0.05, 10., Ndist)  # distances in kpc
L = 205.09  # 54.7
B = -0.93   # 0.08

# Evaluate the Green et al. (2015) extinction along the (L,B) sight line at
# each distance and write it to file
f = open("Green15.dat", "w")
for i in range(Ndist):
    f.write("%.15E %.15E\n" % (D[i], green(L, B, D)[i]))
f.close()
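# Possible extension (sketch, not in the original script): write the same
# sight line for each of the maps loaded above, evaluating each map once per
# file rather than once per distance; this assumes every mwdust map is
# callable as dmap(l, b, D) in the same way Green15 is used above.
for name, dmap in [("Drimmel03", drimmel), ("Green15", green),
                   ("Sale14", sale), ("Zero", zero)]:
    vals = dmap(L, B, D)
    with open("%s.dat" % name, "w") as f:
        for i in range(Ndist):
            f.write("%.15E %.15E\n" % (D[i], vals[i]))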