def calc_M_nu_sig_disc():
    '''Fill the global model state gp with disc-geometry profiles derived
    from the trial parameters gp.parst: tracer density nu, tilt delta,
    Kz force dens, surface density M, and vertical dispersion sigma_z.

    Mutates gp in place and returns None. All profile shapes follow
    gp.xipol (the radial/vertical sampling grid -- TODO confirm).'''
    import physics_disc as phys
    # population 1: tracer density and tilt from the trial parameters
    gp.nu1_x = phys.nu(gp.parst.nu1)
    # gp.nu1_x = phys.nu_decrease(gp.xipol,gp.xipol,gp.parst.nu1)
    gp.d1_x = phys.delta(gp.parst.delta1)
    gp.dens_x = phys.dens(gp.xipol, gp.parst.dens) # means Kz, *not* surface density
    # gp.dens_x = gp.parst.dens+gp.blow # Kzpars with baryonic lower limit
    gp.M_x = phys.Sigmaz(gp.dens_x)
    # gp.M_x = phys.kz(gp.xipol, gp.xipol, gp.dens_x, gp.blow) # from kz, added baryons min
    # TODO: marginalise over lower bound, see gl_priors, kappa_DM < 0
    # attention: gives sometimes unphysical decreasing surface density
    gp.blow = gp.dat.Mdat # npr.normal(gp.dat.Mdat,gp.dat.Merr/4.,gp.nipol) # TODO: naming
    # solar position and disc scale lengths; presumably [pc] here -- TODO confirm
    Rsun = 8000.; hr = 3.0; hsig = 3.0
    gp.sig1_x = phys.sigmaz(gp.xipol, gp.dens_x, gp.nu1_x, gp.parst.norm1,\
                            gp.parst.delta1, [Rsun,hr,hsig])
    gp.kap1_x = gp.xipol*0. # set to 0 consistently
    if gp.pops == 2:
        # second tracer population: same derivation with its own parameters
        gp.nu2_x = phys.nu(gp.parst.nu2)
        # gp.nu2_x = phys.nu_decrease(gp.xipol,gp.xipol,gp.parst.nu2)
        gp.d2_x = phys.delta(gp.parst.delta2)
        gp.sig2_x = phys.sigmaz(gp.xipol, gp.dens_x, gp.nu2_x, gp.parst.norm2,\
                                gp.parst.delta2, [Rsun,hr,hsig])
        gp.kap2_x = gp.xipol*0.
    if gp.checkint:
        # checkint mode: override population 1 with the interpolated data
        # profiles so the integration routines can be validated directly
        gp.nu1_x = gp.ipol.nudat1
        Rsun = 8.; hr = 3.0; hsig = 3.0
        # ^--- irrelevant, as long as gp.deltaprior = True
        gp.parst.norm1 = 17.**2
        # NOTE(review): this call passes gp.blow as an extra positional
        # argument compared with the calls above -- confirm phys.sigmaz
        # actually supports both signatures
        gp.sig1_x = phys.sigmaz(gp.xipol, gp.ipol.densdat, gp.blow,\
                                gp.ipol.nudat1,gp.parst.norm1,gp.parst.delta1,\
                                [Rsun,hr,hsig])
        gp.kap1_x = gp.xipol*0.
    return
def calc_likelihood():
    '''Compute the likelihood of the trial model against the disc data and
    store the likelihood ratio new/old in gp.fnewoverf.

    Reads module-level data arrays (z_dat, z_dat_err, vz_dat, vz_dat_err,
    vR_dat, nu_z, sig_z, sig_Rz, vz_mean, zp_kz, nupars, tparswt, prob)
    that are assumed to be set up elsewhere in this module -- TODO confirm.

    Returns gp.fnewoverf.'''
    from scipy.special import kn  # integer-order modified Bessel K_n
    if gp.uselike:
        # Errors:
        if gp.adderrors:
            # Solve error convolution: convolve the tracer density with each
            # star's Gaussian position error (can be jumped).
            prob_z = np.zeros(len(z_dat))
            for jj in range(len(z_dat)):
                # integrate over +/- 3 sigma around each measured z
                zintmin = -abs(z_dat_err[jj])*3.0
                zintmax = abs(z_dat_err[jj])*3.0
                zintpnts = 25
                zint = np.arange(zintpnts) * (zintmax-zintmin)/(zintpnts-1.)\
                       + zintmin + z_dat[jj]
                perr_z = errorz(z_dat[jj] - zint, z_dat_err[jj])
                nu_int = nu(abs(zint), zp_kz, nupars)
                p_z = nu_int
                prob_z[jj] = simps(perr_z * p_z, zint)
                if prob_z[jj] < 0:
                    print('Negative probability found!')
                    pdb.set_trace()
            # NOTE(review): the convolution computed above is overwritten
            # here -- this debug shortcut was marked "FOR TESTING!" and is
            # kept to preserve current behavior; remove to re-enable the
            # error convolution.
            nu_pz = phys.nu(abs(z_dat), zp_kz, nupars)
            prob_z = nu_pz # FOR TESTING!
            # test = gp.nu1_x
            # gpl.plot(z_dat,test,psym=3)
            # gpl.plot(z_dat,prob_z,color=2,psym=3)
            # gpl.show(); exit(1)
            # Gaussian in v_z with the measurement error folded into sigma
            sig_sum = np.sqrt(sig_z**2. + vz_dat_err**2.)
            # fixed: 'alog' is the IDL natural-log intrinsic, a port
            # leftover that raises NameError in Python
            aprob_sigz = np.log(1.0/(np.sqrt(2.0*np.pi) * sig_sum)) -\
                         (vz_dat-vz_mean)**2./2./sig_sum**2.
            # *** WARNING *** tilt with errors not yet supported ... !
            prob_tilt = 1.0
        else:
            # Calculate likelihood [N.B. Log[Li] can be +ve!]:
            prob_z = nu_z
            aprob_sigz = np.log(1.0/(np.sqrt(2.0*np.pi) * sig_z))\
                         - (vz_dat-vz_mean)**2./2./sig_z**2.
            if not gp.deltaprior:
                # tilt term: double-exponential in vz*vR around sig_Rz;
                # fixed: IDL's BESELK(x, 0) is K_0(x) -> scipy.special.kn(0, x)
                wid_Rz = sigma_rz(abs(z_dat), zp_kz, tparswt)
                prob_tilt = 1.0/np.pi/wid_Rz *\
                            kn(0, abs(vz_dat*vR_dat-sig_Rz)/wid_Rz)
            else:
                prob_tilt = 1.0
        prob_t = np.sum(np.log(prob_z) + aprob_sigz + np.log(prob_tilt))
        if math.isnan(prob_t):
            print('Ooops prob_t is NaN ... ')
            pdb.set_trace()
        # Calculate the likelihood ratio.
        # fixed: the ratio was assigned to a discarded local 'fnewoverf'
        # while this function (and accept_reject) read gp.fnewoverf.
        gp.fnewoverf = np.exp(prob_t - prob)
    # when gp.uselike is False, gp.fnewoverf is returned unchanged --
    # presumably initialized elsewhere (TODO confirm)
    return gp.fnewoverf
def calc_M_nu_sig_kap_sphere():
    '''Fill the global model state gp with spherical-geometry profiles:
    tracer density nu, total density dens, enclosed mass M, anisotropy
    delta, and line-of-sight sigma/kappa for one or two populations.

    Mutates gp in place and returns None. In checkint mode the model
    profiles are replaced by data or analytic values so the integration
    routines for sigma and kappa can be validated.'''
    import physics_sphere as phys
    if not gp.checkint:
        '''normal case'''
        # derive all profiles from the trial parameter set gp.parst
        gp.nu1_x = phys.nu(gp.parst.nu1) # [munit/pc^3]
        gp.dens_x = phys.dens(gp.xipol, gp.parst.dens) # [munit/pc^3]
        gp.M_x = rho_SUM_Mr(gp.xipol, gp.dens_x) # [munit,3D]
        gp.d1_x = phys.delta(gp.parst.delta1)
        # pdb.set_trace()
        # TODO: introduce error in dens, nu, d1 here, for weights in splrep
        gp.sig1_x, gp.kap1_x = phys.sig_kap_los(gp.dens_x, gp.nu1_x, gp.d1_x)
        if gp.pops == 2:
            # second tracer population, same total density gp.dens_x
            gp.nu2_x = phys.nu(gp.parst.nu2)
            gp.d2_x = phys.delta(gp.parst.delta2)
            gp.sig2_x, gp.kap2_x = phys.sig_kap_los(gp.dens_x, gp.nu2_x, gp.d2_x)
        # to debug almost fitting curves:
        # if gp.chi2 < 60:
        #     pdb.set_trace()
    else:
        'checkint: check integration routines for sigma, kappa'
        if gp.investigate == 'hernquist':
            ### set nu to data, or analytic value
            gp.nu1_x = gp.ipol.nudat1 # [Msun/pc^3]
            if gp.analytic:
                # override everything with the analytic Hernquist profiles
                gp.nu1_x = rho_anf(gp.xipol) # [Msun/pc^3]
                # set dens
                gp.dens_x = rho_anf(gp.xipol) # [Msun/pc^3]
                # attention: same as gp.nu1_x in this case!
                # fine if potential comes from 'tracers' only
                gp.M_x = M_anf(gp.xipol) # [Msun]
                gp.d1_x = np.zeros(gp.nipol) # [1]
                print('set nu, dens, M, delta to analytic')
            # NOTE(review): nesting reconstructed -- if gp.analytic is False,
            # gp.dens_x/M_x/d1_x must be set elsewhere; TODO confirm
            gp.sig1_x, gp.kap1_x = phys.sig_kap_los(gp.dens_x, gp.nu1_x, gp.d1_x)
        # still checkint, but for walker now
        elif gp.investigate == 'walker':
            # take the known Walker mock-data components as the model
            rhodm, rhostar1, rhostar2 = rhowalker_3D(gp.xipol)
            gp.nu1_x = rhostar1
            if gp.pops == 2:
                gp.nu2_x = rhostar2 # gp.ipol.nudat2 # data
            gp.dens_x = rhowalkertot_3D(gp.xipol) # [msun/pc^3]
            gp.M_x = Mwalkertot(gp.xipol) # should really be this thing here!
            # gp.M_x = rho_SUM_Mr(gp.ipol.Mx, gp.dens_x) # [msun]
            # gp.M_x = rho_SUM_Mr(gp.ipol.binmax, gp.dens_x) # [msun]
            gp.d1_x = walker_delta(1) # [1], actual delta
            if gp.pops == 2:
                gp.d2_x = walker_delta(2) # [1]
            # set sigma_LOS, by calculating expected values
            # for both (spherical) cases simultaneously
            gp.sig1_x,gp.kap1_x = phys.sig_kap_los(gp.dens_x, gp.nu1_x, \
                                                   gp.d1_x) # [km/s], [1]
            # takes [munit, 3D], [munit/pc^3], [1]
            # normalization of nu does not matter, is divided out
            if gp.pops == 2:
                gp.sig2_x, gp.kap2_x = phys.sig_kap_los(gp.dens_x,gp.nu2_x,\
                                                        gp.d2_x) # [km/s], [1]
    return
def accept_reject(n):
    '''Metropolis accept/reject step for MCMC iteration n.

    Accepts the trial parameter set gp.parst with probability
    gp.fnewoverf (the likelihood ratio new/old): on acceptance the trial
    becomes the current state, wild-parameter flags are reset, the state
    is persisted via gfile, progress is logged/printed, and the step
    size is adapted. On rejection, falls back to the last stored good
    point if chi2 drifted too far. Mutates gp in place; returns None.'''
    if npr.rand() < gp.fnewoverf:
        # --- accept: promote trial parameters to current state
        gp.accrate.update(True)
        gp.pars.assign(gp.parst)
        gp.chi2 = gp.chi2t
        gp.lasterr = 'None'
        # reset the "parameter went wild" flags (semantics of the 1000
        # sentinel for nu2wild: presumably "not wild" -- TODO confirm)
        gp.d1wild = False; gp.d2wild = False; gp.dens2wild = False
        gp.b2wild = False; gp.sig2wild = False; gp.nu2wild = 1000
        gfile.store_working_pars(n, gp.pars, gp.chi2, gp.parstep)
        # fplot: refresh the live plot only occasionally -- more often when
        # chi2 is still far from gp.chi2tol, or during init-phase adaptation
        if npr.rand() < max(0.01, (1.*gp.chi2-gp.chi2tol)/gp.chi2tol/100.)\
           or (gp.initphase and gp.adaptstepwait == 1):
            gpl.update_plot()
        np.set_printoptions(precision=3)
        if gp.pops == 1:
            # log chi2, acceptance rate, and the median relative step size
            # per parameter group (in percent)
            gp.LOG.warning('n: %d, chi2:'+gh.pretty(gp.chi2,1)+\
                           ' rate:'+gh.pretty(100*gp.accrate.rate(),2)+\
                           ' nu1:'+gh.pretty(100*\
                           abs(np.median((phys.nu(gp.pars.nu1+gp.parstep.nu1)\
                                          -phys.nu(gp.pars.nu1))/\
                                         phys.nu(gp.pars.nu1))),3)+\
                           ' d1:'+gh.pretty(100*\
                           abs(np.median(gp.parstep.delta1/gp.pars.delta1)),3)+\
                           ' dens:'+gh.pretty(100*\
                           abs(np.median((phys.densdefault(gp.parstep.dens+\
                                                           gp.pars.dens)\
                                          -phys.densdefault(gp.pars.dens))/\
                                         phys.densdefault(gp.pars.dens))),3)+\
                           ' norm1:'+gh.pretty(100*\
                           abs(np.median(gp.parstep.norm1/gp.pars.norm1)),3), n)
        else:
            # two-population case: same diagnostics, printed instead of logged
            print('n:',n, ' chi2:',gh.pretty(gp.chi2,1),\
                  ' rate:',gh.pretty(100*gp.accrate.rate(),2),\
                  ' nu1:',gh.pretty(100*\
                  abs(np.median((phys.nu(gp.pars.nu1+gp.parstep.nu1)\
                                 -phys.nu(gp.pars.nu1))/\
                                phys.nu(gp.pars.nu1))),3),\
                  ' nu2:',gh.pretty(100*\
                  abs(np.median((phys.nu(gp.pars.nu2+gp.parstep.nu2)\
                                 -phys.nu(gp.pars.nu2))/\
                                phys.nu(gp.pars.nu2))),3),\
                  ' d1:',gh.pretty(100*\
                  abs(np.median(gp.parstep.delta1/gp.pars.delta1)),3),\
                  ' d2:',gh.pretty(100*\
                  abs(np.median(gp.parstep.delta2/gp.pars.delta2)),3),\
                  ' dens:',gh.pretty(100*\
                  abs(np.median((phys.densdefault(gp.parstep.dens+\
                                                  gp.pars.dens)\
                                 -phys.densdefault(gp.pars.dens))/\
                                phys.densdefault(gp.pars.dens))),3))
        adapt_stepsize()
        end_initphase()
    else:
        # --- reject: keep the current state
        gp.accrate.update(False)
        # jump back to last known good point if the trial chi2 is far worse
        # than the current one (threshold depends on the MCMC phase)
        faraway = gp.farinit if gp.initphase else gp.farover
        if gp.chi2t > gp.chi2 * faraway:
            gp.LOG.warning(' too far off, setting back to last known good point')
            gfile.get_working_pars(scale=False)
            # TODO: check that scale=gp.iniphase is really not the right thing
    return