def myloglike(cube, ndim, nparams):
    tmp_profs = Profiles(gp.pops, gp.nipol)
    off = 0
    rho_param = np.array(cube[off:off+gp.nepol])
    if gprio.check_nr(rho_param[2:-1]):
        print('dn/dr too big!')
        return gh.err(0.7)
    tmp_rho = phys.rho(gp.xepol, rho_param)
    if gprio.check_rho(tmp_rho):
        print('rho slope error')
        return gh.err(1.)
    tmp_profs.set_rho(tmp_rho[:gp.nipol])
    tmp_profs.set_M(rho_SUM_Mr(gp.xepol, tmp_rho)[:gp.nipol])   # [munit, 3D]
    # TODO: mass is set at binmax, not rbin!
    #       implement an integration routine working with the density function
    #       (based on the density parametrization) to give the mass below rbin
    # TODO: implement the above as gl_project.rho_INT_Mr()
    off += gp.nepol

    nuparstore = []
    for pop in np.arange(gp.pops) + 1:
        nu_param = cube[off:off+gp.nepol]
        nuparstore.append(nu_param)
        tmp_nu = phys.rho(gp.xepol, nu_param)   # [1], [pc]
        # if gprio.check_nu(tmp_nu):
        #     print('nu error')
        #     return err/2.
        if gp.bprior and gprio.check_bprior(tmp_rho, tmp_nu):
            print('bprior error')
            return gh.err(1.5)
        tmp_profs.set_nu(pop, tmp_nu[:gp.nipol])   # [munit/pc^3]
        off += gp.nepol

        beta_param = np.array(cube[off:off+gp.nbeta])
        tmp_beta = phys.beta(gp.xipol, beta_param)
        if gprio.check_beta(tmp_beta):
            print('beta error')
            return gh.err(2.)
        tmp_profs.set_beta(pop, tmp_beta)
        off += gp.nbeta

        try:
            # beta_param = np.array([0., 0.])
            sig, kap = phys.sig_kap_los(gp.xepol, pop, rho_param, nu_param, beta_param)
            # sig and kap are already given on the data radii only,
            # so no extension by 3 bins is needed here
        except Exception as detail:
            return gh.err(3.)
        tmp_profs.set_sig_kap(pop, sig, kap)

    # determine the log likelihood (*not* the reduced chi2)
    chi2 = gc.calc_chi2(tmp_profs, nuparstore)
    # print('found log likelihood = ', -chi2/2.)
    return -chi2/2.   # from likelihood L = exp(-\chi^2/2), want log of that
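# The TODO above asks for a mass routine that works directly with the density
# parametrization, so that M(<r) is evaluated at the bin radius rather than at binmax.
# A minimal sketch of such a gl_project.rho_INT_Mr(), assuming phys.rho accepts an array
# of radii as used above and that scipy is available; the quadrature scheme is an
# assumption, not the code's own method.
import numpy as np
from scipy.integrate import quad

def rho_INT_Mr(rbin, rho_param):
    # enclosed 3D mass M(<r) = 4 pi \int_0^r rho(s) s^2 ds, evaluated at each bin radius
    integrand = lambda s: 4.*np.pi * s**2 * phys.rho(np.array([s]), rho_param)[0]
    return np.array([quad(integrand, 0., R, limit=100)[0] for R in rbin])   # [munit]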
def rho_param_INT_Rho(r0, rhoparam):
    # use splines on the variable-transformed integral
    # \Sigma(R) = \int_{r=R}^{r=\infty} \rho(r)\, d\sqrt{r^2-R^2}
    # gh.checknan(rhoparam, 'rho_param_INT_Rho')
    xmin = r0[0]/1e4
    r0left = np.array([xmin, r0[0]*0.25, r0[0]*0.50, r0[0]*0.75])
    r0nu = np.hstack([r0left, r0])
    rhonu = phys.rho(r0nu, rhoparam)
    Rho = np.zeros(len(r0nu)-gp.nexp)
    for i in range(len(r0nu)-gp.nexp):
        xnew = np.sqrt(r0nu[i:]**2-r0nu[i]**2)   # [lunit]
        ynew = 2.*rhonu[i:]
        # power-law extension to infinity. TODO: include in Rho[i] below
        C = gh.quadinflog(xnew[-gp.nexp:], ynew[-gp.nexp:], xnew[-1], np.inf)
        # tcknu = splrep(xnew, ynew, k=3)   # interpolation in real space, not log space
        # problem:    splint below could give negative values
        # reason:     for high radii (high i), the spline can go negative
        # workaround: multiply by/add a constant to keep the spline positive at all times,
        #             or work in log space (but then the integral is not straightforward)
        # Rho[i] = splint(0., xnew[-1], tcknu) + C
        Rho[i] = gh.quadinfloglog(xnew[1:], ynew[1:], xmin, xnew[-1]) + C
    gh.checkpositive(Rho, 'Rho in rho_param_INT_Rho')
    return Rho[4:]   # @r0 (r0nu without r0left)
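# Brute-force cross-check of the projection above. With the same substitution
# u = sqrt(r^2 - R^2) the surface density becomes
#   \Sigma(R) = \int_0^\infty 2 \rho(\sqrt{u^2 + R^2}) du,
# free of the endpoint singularity at r = R. Hypothetical test helper, assuming scipy's
# quad and the phys.rho signature used above; not part of the original module.
import numpy as np
from scipy.integrate import quad

def Rho_quad_check(R, rhoparam):
    integrand = lambda u: 2.*phys.rho(np.array([np.sqrt(u**2 + R**2)]), rhoparam)[0]
    return quad(integrand, 0., np.inf, limit=200)[0]
    # e.g. compare against rho_param_INT_Rho(r0, rhoparam)[i] at R = r0[i]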
def physical(r0, prof, pop, tmp_rho, tmp_nu, tmp_beta):
    if prof == 'rho':
        tmp_prof = phys.rho(r0, tmp_rho)
    elif prof == 'nr':
        tmp_prof = tmp_rho[1:]
    elif prof == 'nu':
        tmp_prof = rho_INT_Rho(r0, phys.rho(r0, tmp_nu))
    elif prof == 'betastar':
        tmp_prof = phys.mapping_beta_star_poly(r0, tmp_beta)
    elif prof == 'beta':
        tmp_prof = phys.beta(r0, tmp_beta)
    elif prof == 'sig':
        tmp_sig, tmp_kap = phys.sig_kap_los(r0, pop, tmp_rho, tmp_nu, tmp_beta)
        tmp_prof = tmp_sig
    elif prof == 'kap':
        tmp_sig, tmp_kap = phys.sig_kap_los(r0, pop, tmp_rho, tmp_nu, tmp_beta)
        tmp_prof = tmp_kap
    return tmp_prof
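# Hypothetical usage of the dispatcher: evaluate every output profile on the radial grid
# for one accepted model (e.g. for plotting). Only the profile keys and the call signature
# come from the function above; the loop itself and the choice of pop = 1 are assumptions.
model_profiles = {}
for key in ('rho', 'nr', 'nu', 'betastar', 'beta', 'sig', 'kap'):
    model_profiles[key] = physical(gp.xipol, key, 1, rho_param, nu_param, beta_param)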
def ant_sigkaplos2surf(r0, beta_param, rho_param, nu_param):
    # TODO: check that all values in ()^2 and ()^4 are >= 0
    minval = 1.e-30
    r0nu   = introduce_points_in_between(r0)
    rhonu  = phys.rho(r0nu, rho_param)
    nunu   = phys.rho(r0nu, nu_param)
    betanu = phys.beta(r0nu, beta_param)

    # calculate intbeta from the beta approximation directly
    idnu = ant_intbeta(r0nu, beta_param)

    # integrate the enclosed 3D mass from the 3D density
    r0tmp = np.hstack([0., r0nu])
    rhoint = 4.*np.pi*r0nu**2*rhonu
    # add a point at r = 0 to avoid 0.0 in Mrnu(r0nu[0])
    rhotmp = np.hstack([0., rhoint])
    tck1 = splrep(r0tmp, rhotmp, k=3, s=0.)   # not necessarily monotonic
    Mrnu = np.zeros(len(r0nu))                # work on the refined grid
    for i in range(len(r0nu)):                # get Mrnu
        Mrnu[i] = splint(0., r0nu[i], tck1)
    gh.checkpositive(Mrnu, 'Mrnu')

    # (sigr2, 3D) * nu/exp(-idnu)
    xint = r0nu                               # [pc]
    yint = gp.G1 * Mrnu / r0nu**2             # [1/pc (km/s)^2]
    yint *= nunu                              # [munit/pc^4 (km/s)^2]
    yint *= np.exp(idnu)                      # [munit/pc^4 (km/s)^2]
    gh.checkpositive(yint, 'yint sigr2')

    # use quadinflog or quadinfloglog here
    sigr2nu = np.zeros(len(r0nu))
    for i in range(len(r0nu)):
        sigr2nu[i] = np.exp(-idnu[i])/nunu[i] * \
                     gh.quadinflog(xint, yint, r0nu[i], np.inf)

    # project back to LOS values
    # sigl2sold = np.zeros(len(r0nu)-gp.nexp)
    sigl2s = np.zeros(len(r0nu)-gp.nexp)
    dropoffintold = 1.e30
    for i in range(len(r0nu)-gp.nexp):        # get sig_los^2
        xnew = np.sqrt(r0nu[i:]**2-r0nu[i]**2)   # [pc]
        ynew = 2.*(1-betanu[i]*(r0nu[i]**2)/(r0nu[i:]**2))
        ynew *= nunu[i:] * sigr2nu[i:]
        gh.checkpositive(ynew, 'ynew in sigl2s')   # is hit several times...
        # yscale = 10.**(1.-min(np.log10(ynew[1:])))
        # ynew *= yscale
        # gh.checkpositive(ynew, 'ynew sigl2s')
        tcknu = splrep(xnew, ynew, k=1)   # interpolation in real space for the integral
        # power-law approximation from the last three bins to infinity
        # tckex = splrep(xnew[-3:], np.log(ynew[-3:]), k=1, s=1.0)   # fine
        # invexp = lambda x: np.exp(splev(x, tckex, der=0))
        # C = quad(invexp, xnew[-1], np.inf)[0]
        # C = max(0., gh.quadinflog(xnew[-2:], ynew[-2:], xnew[-1], np.inf))
        # sigl2sold[i] = splint(xnew[0], xnew[-1], tcknu) + C
        sigl2s[i] = gh.quadinflog(xnew[1:], ynew[1:], xnew[0], np.inf)
        # sigl2s[i] /= yscale
    # TODO: for the last 3 bins, up to a factor 2 off
    # if min(sigl2s) < 0.:
    #     pdb.set_trace()
    gh.checkpositive(sigl2s, 'sigl2s')

    # de-refine onto the radii of the input vector
    tck = splrep(r0nu[:-gp.nexp], np.log(sigl2s), k=3, s=0.)
    sigl2s_out = np.exp(splev(r0, tck))
    gh.checkpositive(sigl2s_out, 'sigl2s_out')
    if not gp.usekappa:
        # print('not using kappa')
        return sigl2s_out, np.ones(len(sigl2s_out))

    # for the following: the calculation of kappa is enabled
    # TODO: include another set of anisotropy parameters beta_'
    # kappa_r^4
    kapr4nu = np.ones(len(r0nu)-gp.nexp)
    xint = r0nu                               # [pc]
    yint = gp.G1 * Mrnu/r0nu**2               # [1/pc (km/s)^2]
    yint *= nunu                              # [munit/pc^4 (km/s)^2]
    yint *= sigr2nu                           # [munit/pc^4 (km/s)^4]
    yint *= np.exp(idnu)                      # [munit/pc^4 (km/s)^4]
    gh.checkpositive(yint, 'yint in kappa_r^4')
    yscale = 10.**(1.-min(np.log10(yint[1:])))
    yint *= yscale
    # power-law extrapolation to infinity
    C = max(0., gh.quadinflog(xint[-3:], yint[-3:], r0nu[-1], np.inf))
    # tckexp = splrep(xint[-3:], np.log(yint[-3:]), k=1, s=0.)
    # fine, exact interpolation
    # invexp = lambda x: np.exp(splev(x, tckexp, der=0))
    # C = quad(invexp, r0nu[-1], np.inf)[0]
    tcknu = splrep(xint, yint, k=3)   # interpolation in real space
    # TODO:
    for i in range(len(r0nu)-gp.nexp):
        # integrate from the minimal radius to infinity
        kapr4nu[i] = 3.*(np.exp(-idnu[i])/nunu[i]) * \
                     (splint(r0nu[i], r0nu[-1], tcknu) + C)   # [(km/s)^4]
    kapr4nu /= yscale
    gh.checkpositive(kapr4nu, 'kapr4nu in kappa_r^4')

    # extend kappa_r^4 beyond the sampled radii
    # (r0ext: extension radii beyond r0nu; not defined in this excerpt)
    tcke = splrep(r0nu[:-gp.nexp], np.log(kapr4nu), k=3)
    kapr4ext = np.exp(splev(r0ext, tcke))
    kapr4nu = np.hstack([kapr4nu, kapr4ext])
    gh.checkpositive(kapr4nu, 'kapr4nu in extended kappa_r^4')

    tckbet = splrep(r0nu, betanu)
    dbetanudr = splev(r0nu, tckbet, der=1)
    gh.checknan(dbetanudr, 'dbetanudr in kappa_r^4')

    # kappa^4_los * surface density
    kapl4s = np.zeros(len(r0nu)-gp.nexp)
    # gpl.start(); gpl.yscale('linear')
    for i in range(len(r0nu)-gp.nexp):
        xnew = np.sqrt(r0nu[i:]**2-r0nu[i]**2)                   # [pc]
        ynew = g(r0nu[i:], r0nu[i], betanu[i:], dbetanudr[i:])   # [1]
        ynew *= nunu[i:] * kapr4nu[i:]   # [TODO]
        # TODO: ynew could go negative here... fine?
        # gpl.plot(xnew, ynew)
        # gh.checkpositive(ynew, 'ynew in kapl4s')
        # yscale = 10.**(1.-min(np.log10(ynew[1:])))
        # ynew *= yscale
        # gpl.plot(xnew, ynew)
        C = max(0., gh.quadinflog(xnew[-3:], ynew[-3:], xnew[-1], np.inf))
        tcknu = splrep(xnew, ynew)   # not s=0.1: that sometimes gives negative entries after integration
        kapl4s[i] = 2. * (splint(0., xnew[-1], tcknu) + C)
        # kapl4s[i] /= yscale
        # print('ynew = ', ynew, ', kapl4s =', kapl4s[i])
    # TODO: sometimes the last value of kapl4s is nan: why?
    gh.checkpositive(kapl4s, 'kapl4s in kappa_r^4')

    # project kappa4_los as well
    # only use the middle values for the approximation, avoiding errors at the center and far out
    tck = splrep(r0nu[4:-gp.nexp], kapl4s[4:], k=3)   # s=0.
    kapl4s_out = np.exp(splev(r0, tck))
    gh.checkpositive(kapl4s_out, 'kapl4s_out in kappa_r^4')
    return sigl2s_out, kapl4s_out
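# Sanity check for the radial dispersion computed above: in the isotropic case
# (beta = 0, hence idnu = 0) the Jeans solution reduces to
#   nu(r) sigma_r^2(r) = \int_r^\infty G M(s) nu(s) / s^2 ds.
# Hypothetical test helper; Mr_func and nu_func stand in for (spline) evaluations of
# Mrnu and nunu, and the use of scipy's quad instead of gh.quadinflog is an assumption.
import numpy as np
from scipy.integrate import quad

def sigr2_isotropic(r, Mr_func, nu_func, G1):
    integrand = lambda s: G1 * Mr_func(s) * nu_func(s) / s**2
    return quad(integrand, r, np.inf, limit=200)[0] / nu_func(r)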
    def get_nu(self, pop):
        off = nu_offset(pop)
        return phys.rho(gp.xipol, self.cube[off:off+gp.nipol])
def geom_loglike(cube, ndim, nparams, gp):
    tmp_profs = Profiles(gp.ntracer_pops, gp.nbins)   # , gp.nrhonu, gp.nbaryon_pops, gp.nbaryon_params

    # normalisation constant C for the sigz calculation
    off = 0
    offstep = 1
    norm = cube[off]
    off += offstep

    # dark matter rho parameters (rho_C, kz_C, kz_vector, kz_LS)
    offstep = gp.nrhonu + 1
    rho_DM_params = np.array(cube[off:off+offstep])
    rho_DM_C = rho_DM_params[0]          # rho_C
    kz_rho_DM_allz = rho_DM_params[1:]   # kz for rho across all z points [0, bin_centres, LS]
    tmp_rho_DM_allz = phys.rho(gp.z_all_pts, kz_rho_DM_allz, rho_DM_C)   # rho across all points

    tmp_profs.kz_rho_DM_C = kz_rho_DM_allz[0]
    tmp_profs.set_prof('kz_rho_DM_vec', kz_rho_DM_allz[1:-1], 0, gp)
    tmp_profs.kz_rho_DM_LS = kz_rho_DM_allz[-1]

    tmp_profs.rho_DM_C = tmp_rho_DM_allz[0]
    tmp_profs.set_prof('rho_DM_vec', tmp_rho_DM_allz[1:-1], 0, gp)
    tmp_profs.rho_DM_LS = tmp_rho_DM_allz[-1]
    off += offstep

    # baryons
    for bary_pop in range(0, gp.nbaryon_pops):
        offstep = gp.nbaryon_params
        bary_params = np.array(cube[off:off+offstep])
        off += offstep

    # tracer params: nu_C, kz_nu_C, kz_nu_vector, kz_nu_LS
    for tracer_pop in range(0, gp.ntracer_pops):
        offstep = gp.nrhonu + 1
        tracer_params = np.array(cube[off:off+offstep])
        nu_C = tracer_params[0]
        kz_nu_allz = tracer_params[1:]   # kz for nu across all z points [0, bin_centres, LS]
        tmp_nu_allz = phys.rho(gp.z_all_pts, kz_nu_allz, nu_C)   # nu across all z points

        tmp_profs.kz_nu_C = kz_nu_allz[0]
        tmp_profs.set_prof('kz_nu_vec', kz_nu_allz[1:-1], 0, gp)
        tmp_profs.kz_nu_LS = kz_nu_allz[-1]

        tmp_profs.nu_C = tmp_nu_allz[0]
        tmp_profs.set_prof('nu_vec', tmp_nu_allz[1:-1], tracer_pop, gp)
        tmp_profs.nu_LS = tmp_nu_allz[-1]
        off += offstep

    if off != gp.ndim:
        gh.LOG(1, 'wrong subscripts in gl_class_cube')
        raise Exception('wrong subscripts in gl_class_cube')

    # calculate Sigma (surface density)
    Sig_DM_allz = phys.Sig(gp.z_all_pts, tmp_rho_DM_allz)
    tmp_profs.Sig_DM_C = Sig_DM_allz[0]
    tmp_profs.set_prof('Sig_DM_vec', Sig_DM_allz[1:-1], 0, gp)
    tmp_profs.Sig_DM_LS = Sig_DM_allz[-1]

    # calculate sigma_z (velocity dispersion)
    sigz_vecLS = phys.sigz(gp.z_all_pts, Sig_DM_allz, tmp_nu_allz, norm)
    tmp_profs.set_prof('sig_vec', sigz_vecLS[0:-1], 0, gp)
    tmp_profs.sig_LS = sigz_vecLS[-1]

    # determine the log likelihood
    chi2 = calc_chi2(tmp_profs, gp)   # HS: currently rewriting calc_chi2
    gh.LOG(1, ' log L = ', -chi2/2.)
    tmp_profs.chi2 = chi2
    return tmp_profs   # from likelihood L = exp(-\chi^2/2), want log of that
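# Minimal sketch of the plane-parallel Jeans relation that phys.sigz presumably solves:
#   nu(z) sigma_z^2(z) = C + \int_z^{z_max} 2 pi G Sigma(z') nu(z') dz',
# with C = norm absorbing the unconstrained contribution from tracers above z_max.
# The trapezoidal quadrature, the value of G, and the unit handling below are assumptions,
# not the code's own scheme.
import numpy as np

def sigz_sketch(z, Sig, nu, norm, G=4.30e-6):   # G in kpc (km/s)^2 / Msun (assumed units)
    integrand = 2.*np.pi*G * Sig * nu
    total = np.trapz(integrand, z)                          # integral from z[0] to z_max
    below = np.array([np.trapz(integrand[:i+1], z[:i+1])    # integral from z[0] to z[i]
                      for i in range(len(z))])
    return np.sqrt((norm + total - below) / nu)             # sigma_z(z) [km/s]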