def com_shrinkcircle_v_2D(x, y, vlos, pm):
    #eps = 1e-6
    com_x = 1.*np.sum(x*pm)/np.sum(pm)
    com_y = 1.*np.sum(y*pm)/np.sum(pm)
    com_vlos = 1.*np.sum(vlos*pm)/np.sum(pm)
    bucom_x = com_x
    bucom_y = com_y
    bucom_vlos = com_vlos
    x -= com_x
    y -= com_y
    vlos -= com_vlos
    dr = np.sqrt(com_x**2 + com_y**2)
    r0 = np.sqrt(x**2 + y**2)
    nit = 0
    minlen = len(x)/2.
    while nit < 200 and len(x) > minlen:
        nit += 1
        print('it ', nit, ' with ', len(x), ' part, COM=',
              gh.pretty(bucom_x), gh.pretty(bucom_y),
              ' offset ', gh.pretty(dr))
        # shrink circle:
        # 1) calc radius
        r0 = np.sqrt(x**2 + y**2)
        # 2) sort remaining particles
        order = np.argsort(r0)
        r0 = np.array(r0)[order]
        x = np.array(x)[order]
        y = np.array(y)[order]
        pm = np.array(pm)[order]
        vlos = np.array(vlos)[order]
        # 3) cut x, y, vlos, pm: drop the outermost 5%
        end = int(len(r0)*0.95)   # slice index must be an integer
        r0 = r0[:end]
        x = x[:end]
        y = y[:end]
        vlos = vlos[:end]
        pm = pm[:end]
        # calculate new COM
        com_x = 1.*np.sum(x*pm)/np.sum(pm)
        com_y = 1.*np.sum(y*pm)/np.sum(pm)
        com_vlos = 1.*np.sum(vlos*pm)/np.sum(pm)
        dr = np.sqrt(com_x**2 + com_y**2)
        # add to bucom
        bucom_x += com_x
        bucom_y += com_y
        bucom_vlos += com_vlos
        # recenter particles
        x -= com_x
        y -= com_y
        vlos -= com_vlos
    return bucom_x, bucom_y, bucom_vlos
def com_shrinkcircle(x, y, z, pm):
    eps = 1e-6
    com_x = 1.*np.sum(x*pm)/np.sum(pm)
    com_y = 1.*np.sum(y*pm)/np.sum(pm)
    com_z = 1.*np.sum(z*pm)/np.sum(pm)
    bucom_x = 0. + com_x
    bucom_y = 0. + com_y
    bucom_z = 0. + com_z
    x -= com_x
    y -= com_y
    z -= com_z
    dr = np.sqrt(com_x**2 + com_y**2 + com_z**2)
    r0 = np.sqrt(x**2 + y**2 + z**2)
    nit = 0
    minlen = len(x)*0.666666666
    while nit < 200 and len(x) > minlen:
        nit += 1
        print('it ', nit, ' with ', len(x), ' part',
              ' COM= ', gh.pretty(bucom_x), gh.pretty(bucom_y), gh.pretty(bucom_z),
              ' offset ', gh.pretty(dr))
        # shrink sphere:
        # 1) calc radius
        r0 = np.sqrt(x**2 + y**2 + z**2)
        # 2) sort remaining particles
        order = np.argsort(r0)
        r0 = np.array(r0)[order]
        x = np.array(x)[order]
        y = np.array(y)[order]
        z = np.array(z)[order]
        pm = np.array(pm)[order]
        # 3) cut x, y, z, pm: drop the outermost 5%
        end = int(len(r0)*0.95)   # slice index must be an integer
        r0 = r0[:end]
        x = x[:end]
        y = y[:end]
        z = z[:end]
        pm = pm[:end]
        # calculate new COM
        com_x = 1.*np.sum(x*pm)/np.sum(pm)
        com_y = 1.*np.sum(y*pm)/np.sum(pm)
        com_z = 1.*np.sum(z*pm)/np.sum(pm)
        dr = np.sqrt(com_x**2 + com_y**2 + com_z**2)
        # add to bucom
        bucom_x += com_x
        bucom_y += com_y
        bucom_z += com_z
        # recenter particles
        x -= com_x
        y -= com_y
        z -= com_z
    return bucom_x, bucom_y, bucom_z
def com_shrinkcircle_v(x, y, z, vz, pm):
    eps = 1e-6
    com_x = 1.*np.sum(x*pm)/np.sum(pm)
    com_y = 1.*np.sum(y*pm)/np.sum(pm)
    com_z = 1.*np.sum(z*pm)/np.sum(pm)
    com_vz = 1.*np.sum(vz*pm)/np.sum(pm)
    bucom_x = 0. + com_x
    bucom_y = 0. + com_y
    bucom_z = 0. + com_z
    bucom_vz = 0. + com_vz
    x -= com_x
    y -= com_y
    z -= com_z
    vz -= com_vz
    dr = np.sqrt(com_x**2 + com_y**2 + com_z**2)
    r0 = np.sqrt(x**2 + y**2 + z**2)
    nit = 0
    minlen = len(x)*0.666666666
    while nit < 200 and len(x) > minlen:
        nit += 1
        print('it ', nit, ' with ', len(x), ' part',
              ' COM= ', gh.pretty(bucom_x), gh.pretty(bucom_y), gh.pretty(bucom_z),
              ' vel=', gh.pretty(bucom_vz),
              ' offset ', gh.pretty(dr))
        # shrink sphere:
        # 1) calc radius
        r0 = np.sqrt(x**2 + y**2 + z**2)
        # 2) sort remaining particles
        order = np.argsort(r0)
        r0 = np.array(r0)[order]
        x = np.array(x)[order]
        y = np.array(y)[order]
        z = np.array(z)[order]
        vz = np.array(vz)[order]
        pm = np.array(pm)[order]
        # 3) cut x, y, z, vz, pm: drop the outermost 5%
        end = int(len(r0)*0.95)   # slice index must be an integer
        r0 = r0[:end]
        x = x[:end]
        y = y[:end]
        z = z[:end]
        vz = vz[:end]
        pm = pm[:end]
        # calculate new COM
        pmsum = np.sum(pm)
        com_x = 1.*np.sum(x*pm)/pmsum
        com_y = 1.*np.sum(y*pm)/pmsum
        com_z = 1.*np.sum(z*pm)/pmsum
        com_vz = 1.*np.sum(vz*pm)/pmsum
        dr = np.sqrt(com_x**2 + com_y**2 + com_z**2)
        # add to bucom
        bucom_x += com_x
        bucom_y += com_y
        bucom_z += com_z
        bucom_vz += com_vz
        # recenter particles
        x -= com_x
        y -= com_y
        z -= com_z
        vz -= com_vz
    # return the accumulated velocity centre bucom_vz (not only the last
    # increment com_vz), consistent with com_shrinkcircle_v_2D above
    return bucom_x, bucom_y, bucom_z, bucom_vz
def com_shrinkcircle_2D(x, y):
    com_x = np.mean(x)
    com_y = np.mean(y)
    bucom_x = 0. + com_x
    bucom_y = 0. + com_y
    x -= com_x
    y -= com_y
    dr = np.sqrt(com_x**2 + com_y**2)
    R0 = np.sqrt(x**2 + y**2)
    nit = 0
    minlen = len(x)*0.666666666
    while nit < 200 and len(x) > minlen:
        nit += 1
        print('it ', nit, ' with ', len(x), ' part',
              ' COM= ', gh.pretty(bucom_x), gh.pretty(bucom_y),
              ' offset ', gh.pretty(dr))
        # shrink circle:
        # 1) calc radius
        R0 = np.sqrt(x**2 + y**2)
        # 2) sort remaining particles
        order = np.argsort(R0)
        R0 = np.array(R0)[order]
        x = np.array(x)[order]
        y = np.array(y)[order]
        # 3) cut x, y: drop the outermost 5%
        end = int(len(R0)*0.95)   # slice index must be an integer
        R0 = R0[:end]
        x = x[:end]
        y = y[:end]
        # calculate new COM
        com_x = np.mean(x)
        com_y = np.mean(y)
        dr = np.sqrt(com_x**2 + com_y**2)
        # add to bucom
        bucom_x += com_x
        bucom_y += com_y
        # recenter particles
        x -= com_x
        y -= com_y
    return bucom_x, bucom_y
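# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original pipeline): generate a
# synthetic 2D tracer sample with a known centre and systemic velocity and
# let the shrinking-circle routine recover them. It relies on the same
# module-level imports the functions above assume, numpy as np and the
# helper module gh (gi_helper) for the progress printout; all names below
# (x_demo, y_demo, ...) are illustrative only.
if __name__ == '__main__':
    np.random.seed(0)
    N = 2000
    x_demo = np.random.normal(15.0, 40.0, N)   # [pc], true centre at x = 15
    y_demo = np.random.normal(-8.0, 40.0, N)   # [pc], true centre at y = -8
    v_demo = np.random.normal(55.0, 9.0, N)    # [km/s], systemic velocity 55
    w_demo = np.ones(N)                        # unit membership weights
    cx, cy, cv = com_shrinkcircle_v_2D(x_demo, y_demo, v_demo, w_demo)
    print('recovered centre:', cx, cy, ' systemic velocity:', cv)
# ---------------------------------------------------------------------------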
def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    gpr.fil = gpr.dir + "/data/tracers.dat"
    A = np.loadtxt(gpr.fil, skiprows=25)
    RAh, RAm, RAs, DEd, DEm, DEs, Vlos, e_Vlos, Teff, e_Teff, \
        logg, e_logg, Fe, e_Fe, Nobs = A.T
    # only use stars with at least one observation (Nobs > 0)
    pm = (Nobs > 0)  # (PM>=0.95)*
    print("f_members = ", gh.pretty(1.*sum(pm)/len(pm)))
    RAh = RAh[pm]
    RAm = RAm[pm]
    RAs = RAs[pm]
    DEd = DEd[pm]
    DEm = DEm[pm]
    DEs = DEs[pm]
    Vlos = Vlos[pm]
    e_Vlos = e_Vlos[pm]
    Teff = Teff[pm]
    e_Teff = e_Teff[pm]
    logg = logg[pm]
    e_logg = e_logg[pm]
    Fe = Fe[pm]
    e_Fe = e_Fe[pm]
    Nobs = Nobs[pm]
    sig = abs(RAh[0])/RAh[0]
    #print('RAh: signum = ', gh.pretty(sig))
    RAh = RAh/sig
    xs = 15*(RAh*3600 + RAm*60 + RAs)*sig    # [arcsec/15]
    sig = abs(DEd[0])/DEd[0]
    #print('DEd: signum = ', gh.pretty(sig))
    DEd = DEd/sig
    ys = (DEd*3600 + DEm*60 + DEs)*sig       # [arcsec]
    arcsec = 2.*np.pi/(360.*60.*60)          # [rad/arcsec]
    kpc = 1000                               # [pc]
    DL = {1: lambda x: x * (138), #+/- 8 for Fornax
          2: lambda x: x * (101), #+/- 5 for Carina
          3: lambda x: x * (79),  #+/- 4 for Sculptor
          4: lambda x: x * (86),  #+/- 4 for Sextans
          5: lambda x: x * (80)   #+/- 10 for Draco
          }[gp.case](kpc)
    xs *= (arcsec*DL)   # [pc]
    ys *= (arcsec*DL)   # [pc]
    x0 = np.copy(xs)
    y0 = np.copy(ys)     # [pc]
    vz0 = np.copy(Vlos)  # [km/s]
    Fe0 = np.copy(Fe)
    # only use stars which are members of the dwarf: exclude pop3 by construction
    #pm = (PM0 >= gpr.pmsplit) # exclude foreground contamination, outliers
    #x0, y0, vz0, Mg0, PM0 = select_pm(x0, y0, vz0, Mg0, PM0, pm)
    # assign population
    if gp.pops == 2:
        # drawing of populations based on metallicity;
        # get parameters from function in pymcmetal.py
        #[p, mu1, sig1, mu2, sig2] = np.loadtxt(gp.files.dir+'metalsplit.dat')
        #[pm1, pm2] = np.loadtxt(gp.files.dir+'metalsplit_assignment.dat')
        popass = np.loadtxt(gp.files.dir+'popass')
        pm1 = (popass == 1)
        pm2 = (popass == 2)
    elif gp.pops == 1:
        pm1 = (Teff >= 0)
        pm2 = (Teff < 0)  # assign none, but of same length as xs
    x1, y1, vz1, Fe1, PM1 = select_pm(x0, y0, vz0, Fe, pm, pm1)
    x2, y2, vz2, Fe2, PM2 = select_pm(x0, y0, vz0, Fe, pm, pm2)
    # cutting pm_i to a maximum of ntracers_i particles each:
    ind1 = np.arange(len(x1))
    np.random.shuffle(ind1)     # random.shuffle already changes ind
    ind1 = ind1[:gp.ntracer[1-1]]
    ind2 = np.arange(len(x2))
    np.random.shuffle(ind2)     # random.shuffle already changes ind
    ind2 = ind2[:gp.ntracer[2-1]]
    x1, y1, vz1, Fe1, PMS1 = select_pm(x1, y1, vz1, Fe1, PM1, ind1)
    x2, y2, vz2, Fe2, PMS2 = select_pm(x2, y2, vz2, Fe2, PM2, ind2)
    x0, y0, vz0, pm1, pm2, pm = concat_pops(x1, x2, y1, y2, vz1, vz2, gp)
    # optimum: get 3D center of mass with means
    # com_x, com_y, com_z = com_mean(x0, y0, z0, PM0) # 3*[pc], z component included if available
    com_x, com_y, com_vz = com_shrinkcircle_v_2D(x0, y0, vz0, pm)  # [pc], [km/s]
    # from now on, work with 2D data only; z0 was only used to get center in (x,y) better
    # x0 -= com_x; y0 -= com_y # [pc]
    # vz0 -= com_vz # [km/s]
    R0 = np.sqrt(x0**2 + y0**2)  # [pc]
    Rhalf = np.median(R0)        # [pc]
    Rscale = Rhalf               # [pc] overall
    pop = -1
    for pmn in [pm, pm1, pm2]:
        pop = pop + 1
        pmr = (R0 < (gp.maxR*Rscale))  # read max extension for data (rprior*Rscale) from gi_params
        pmn = pmn*pmr                  # [1]
        print("fraction of members = ", 1.0*sum(pmn)/len(pmn))
        x, y, vz, Fe, PMN = select_pm(x0, y0, vz0, Fe0, pm, pmn)
        R = np.sqrt(x*x + y*y)         # [pc]
        Rscalei = np.median(R)         # [pc]
        gf.write_Xscale(gp.files.get_scale_file(pop), Rscalei)  # [pc]
        gf.write_data_output(gp.files.get_com_file(pop),
                             x/Rscalei, y/Rscalei, vz, Rscalei)  # [pc]
        if gpr.showplots:
            gpr.show_part_pos(x, y, pmn, Rscale)
def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    gpr.fil = gpr.dir + "/data/tracers.dat"
    delim = [0, 22, 3, 3, 6, 4, 3, 5, 6, 6, 7, 5, 6, 5, 6, 5, 6]
    # genfromtxt uses skip_header (the loadtxt-style skiprows keyword is not accepted)
    ID = np.genfromtxt(gpr.fil, skip_header=29, unpack=True,
                       usecols=(0, 1), delimiter=delim)
    RAh, RAm, RAs, DEd, DEm, DEs, Vmag, VI, VHel, e_VHel, SigFe, e_SigFe, \
        SigMg, e_SigMg, PM = np.genfromtxt(gpr.fil, skip_header=29, unpack=True,
                                           usecols=tuple(range(2, 17)),
                                           delimiter=delim, filling_values=-1)
    # only use stars which have Mg measurements and high membership probability
    pm = (SigMg > -1)*(PM >= 0.95)
    print("f_members = ", gh.pretty(1.*sum(pm)/len(pm)))
    ID = ID[1][pm]
    RAh = RAh[pm]
    RAm = RAm[pm]
    RAs = RAs[pm]
    DEd = DEd[pm]
    DEm = DEm[pm]
    DEs = DEs[pm]
    Vmag = Vmag[pm]
    VI = VI[pm]
    VHel = VHel[pm]
    e_VHel = e_VHel[pm]
    SigFe = SigFe[pm]
    e_SigFe = e_SigFe[pm]
    SigMg = SigMg[pm]
    e_SigMg = e_SigMg[pm]
    PM = PM[pm]
    Mg0 = SigMg
    sig = abs(RAh[0])/RAh[0]
    RAh = RAh/sig
    xs = 15*(RAh*3600 + RAm*60 + RAs)*sig    # [arcsec/15]
    sig = abs(DEd[0])/DEd[0]
    DEd = DEd/sig
    ys = (DEd*3600 + DEm*60 + DEs)*sig       # [arcsec]
    arcsec = 2.*np.pi/(360.*60.*60)          # [rad/arcsec]
    kpc = 1000                               # [pc]
    DL = {1: lambda x: x * (138), #+/- 8 for Fornax
          2: lambda x: x * (101), #+/- 5 for Carina
          3: lambda x: x * (79),  #+/- 4 for Sculptor
          4: lambda x: x * (86),  #+/- 4 for Sextans
          5: lambda x: x * (80)   #+/- 10 for Draco
          }[gp.case](kpc)
    xs *= (arcsec*DL)   # [pc]
    ys *= (arcsec*DL)   # [pc]
    PM0 = np.copy(PM)
    x0 = np.copy(xs)
    y0 = np.copy(ys)     # [pc]
    vz0 = np.copy(VHel)  # [km/s]
    # only use stars which are members of the dwarf: exclude pop3 by construction
    #pm = (PM0 >= gpr.pmsplit) # exclude foreground contamination, outliers
    #x0, y0, vz0, Mg0, PM0 = select_pm(x0, y0, vz0, Mg0, PM0, pm)
    # assign population
    if gp.pops == 2:
        # drawing of populations based on metallicity;
        # get parameters from function in pymcmetal.py
        #[p, mu1, sig1, mu2, sig2] = np.loadtxt(gp.files.dir+'metalsplit.dat')
        #[pm1, pm2] = np.loadtxt(gp.files.dir+'metalsplit_assignment.dat')
        popass = np.loadtxt(gp.files.dir+'popass')
        pm1 = (popass == 1)
        pm2 = (popass == 2)
    elif gp.pops == 1:
        pm1 = (PM >= 0)
        pm2 = (PM < 0)  # assign none, but of same length as xs
    x1, y1, vz1, Mg1, PM1 = select_pm(x0, y0, vz0, Mg0, PM0, pm1)
    x2, y2, vz2, Mg2, PM2 = select_pm(x0, y0, vz0, Mg0, PM0, pm2)
    # cutting pm_i to a maximum of ntracers_i particles each:
    ind1 = np.arange(len(x1))
    np.random.shuffle(ind1)     # random.shuffle already changes ind
    ind1 = ind1[:gp.ntracer[1-1]]
    ind2 = np.arange(len(x2))
    np.random.shuffle(ind2)     # random.shuffle already changes ind
    ind2 = ind2[:gp.ntracer[2-1]]
    x1, y1, vz1, Mg1, PMS1 = select_pm(x1, y1, vz1, Mg1, PM1, ind1)
    x2, y2, vz2, Mg2, PMS2 = select_pm(x2, y2, vz2, Mg2, PM2, ind2)
    x0, y0, vz0, pm1, pm2, pm = concat_pops(x1, x2, y1, y2, vz1, vz2, gp)
    # optimum: get 3D center of mass with means
    # com_x, com_y, com_z = com_mean(x0, y0, z0, PM0) # 3*[pc], z component included if available
    com_x, com_y, com_vz = com_shrinkcircle_v_2D(x0, y0, vz0, pm)  # [pc], [km/s]
    # from now on, work with 2D data only; z0 was only used to get center in (x,y) better
    # x0 -= com_x; y0 -= com_y # [pc]
    # vz0 -= com_vz # [km/s]
    R0 = np.sqrt(x0**2 + y0**2)  # [pc]
    Rhalf = np.median(R0)        # [pc]
    Rscale = Rhalf               # [pc] overall
    pop = -1
    for pmn in [pm, pm1, pm2]:
        pop = pop + 1
        pmr = (R0 < (gp.maxR*Rscale))  # read max extension for data (rprior*Rscale) from gi_params
        pmn = pmn*pmr                  # [1]
        print("fraction of members = ", 1.0*sum(pmn)/len(pmn))
        x, y, vz, Mg, PMN = select_pm(x0, y0, vz0, Mg0, PM0, pmn)
        R = np.sqrt(x*x + y*y)         # [pc]
        Rscalei = np.median(R)         # [pc]
        gf.write_Xscale(gp.files.get_scale_file(pop), Rscalei)  # [pc]
        gf.write_data_output(gp.files.get_com_file(pop),
                             x/Rscalei, y/Rscalei, vz, Rscalei)  # [pc]
        if gpr.showplots:
            gpr.show_part_pos(x, y, pmn, Rscale)
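# ---------------------------------------------------------------------------
# Sketch of the coordinate conversion both run() variants above perform
# (illustrative only; the function name, scalar inputs, and the example
# distance are assumptions, not part of the original code). RA in h/m/s is
# converted to arcseconds (1 h = 15 deg, hence the factor 15), Dec in d/m/s
# likewise, with the sign of the leading field carried over per star, a
# slight simplification of the array-level sig trick used above. The angle
# is then projected to physical pc via the small-angle relation
# x [pc] = theta [rad] * D [pc].
import numpy as np

def radec_to_projected_pc(RAh, RAm, RAs, DEd, DEm, DEs, distance_pc):
    arcsec = 2.*np.pi/(360.*60.*60)                        # [rad/arcsec]
    sign_ra = -1.0 if RAh < 0 else 1.0
    sign_de = -1.0 if DEd < 0 else 1.0
    ra_arcsec = 15*(abs(RAh)*3600 + RAm*60 + RAs)*sign_ra  # [arcsec]
    de_arcsec = (abs(DEd)*3600 + DEm*60 + DEs)*sign_de     # [arcsec]
    # small-angle projection onto the plane of the sky
    return ra_arcsec*arcsec*distance_pc, de_arcsec*arcsec*distance_pc  # [pc], [pc]

# e.g. a star at RA = 01h 00m 00s, Dec = -33d 42m 00s, using the 79 kpc
# Sculptor distance from the DL lookup above:
# x_pc, y_pc = radec_to_projected_pc(1, 0, 0, -33, 42, 0, 79*1000)
# ---------------------------------------------------------------------------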
def geom_loglike(cube, ndim, nparams, gp):
    tmp_profs = Profiles(gp.pops, gp.nepol)
    off = 0
    offstep = gp.nrho
    if gp.chi2_Sig_converged <= 0:
        rhodmpar = np.array(cube[off:off+offstep])
        tmp_rho0 = phys.rho(gp.xepol, rhodmpar, 0, gp)
        # for J factor calculation (has been deferred to output routine)
        #tmp_rhofine = phys.rho(gp.xfine, rhodmpar, 0, gp)
        #tmp_Jfine = gip.Jpar(gp.xfine, tmp_rhofine, gp) #tmp_rhofine
        #tck = splrep(gp.xfine[:-3], tmp_Jfine)
        #tmp_J = splev(gp.xepol, tck)
        # rhodmpar holds [rho(rhalf), nr to be used for integration
        # from halflight radius, defined on gp.xepol]
        # (only calculate) M, check
        tmp_M0 = gip.rho_SUM_Mr(gp.xepol, tmp_rho0)
        # store profiles
        tmp_profs.set_prof('nr', 1.*rhodmpar[1+1:-1], 0, gp)
        tmp_profs.set_prof('rho', tmp_rho0, 0, gp)
        #tmp_profs.set_prof('J', tmp_J, 0, gp)
        tmp_profs.set_prof('M', tmp_M0, 0, gp)
    off += offstep  # anyhow, even if Sig not yet converged

    # get profile for rho*
    if gp.investigate == 'obs':
        offstep = gp.nrho
        lbaryonpar = np.array(cube[off:off+offstep])
        rhostar = phys.rho(gp.xepol, lbaryonpar, 0, gp)
        off += offstep
        Signu = gip.rho_param_INT_Sig(gp.xepol, lbaryonpar, 0, gp)  # [Munit/pc^2]
        MtoL = cube[off]
        off += 1
        # store these profiles every time
        tmp_profs.set_prof('nu', rhostar, 0, gp)
        tmp_profs.set_prof('Sig', Signu, 0, gp)
        tmp_profs.set_MtoL(MtoL)
    else:
        lbaryonpar = np.zeros(gp.nrho)
        MtoL = 0.

    for pop in np.arange(1, gp.pops+1):  # [1, 2, ..., gp.pops]
        offstep = gp.nrho
        nupar = np.array(cube[off:off+offstep])
        tmp_nrnu = 1.*nupar[1+1:-1]
        tmp_nu = phys.rho(gp.xepol, nupar, pop, gp)
        tmp_Signu = gip.rho_param_INT_Sig(gp.xepol, nupar, pop, gp)
        #tmp_nu = pool.apply_async(phys.rho, [gp.xepol, nupar, pop, gp])
        #tmp_Signu = pool.apply_async(gip.rho_param_INT_Sig, [gp.xepol, nupar, pop, gp])
        off += offstep

        offstep = 1
        tmp_hyperSig = cube[off:off+offstep]
        off += offstep

        offstep = 1
        tmp_hypersig = cube[off:off+offstep]
        off += offstep

        offstep = gp.nbeta
        if gp.chi2_Sig_converged <= 0:
            betapar = np.array(cube[off:off+offstep])
            tmp_beta, tmp_betastar = phys.beta(gp.xepol, betapar, gp)
            if check_beta(tmp_beta, gp):
                gh.LOG(2, 'beta error')
                tmp_profs.chi2 = gh.err(1., gp)
                return tmp_profs
            try:
            #if True:
                if gp.checksig and gp.investigate == 'hern':
                    import gi_analytic as ga
                    anrho = ga.rho(gp.xepol, gp)[0]
                    rhodmpar_half = np.exp(splev(gp.dat.rhalf[0],
                                                 splrep(gp.xepol, np.log(anrho))))
                    nr = -gh.derivipol(np.log(anrho), np.log(gp.xepol))
                    dlr = np.hstack([nr[0], nr, nr[-1]])
                    if gp.investigate == 'gaia':
                        dlr[-1] = 4
                    rhodmpar = np.hstack([rhodmpar_half, dlr])
                    lbaryonpar = 0.0*rhodmpar
                    MtoL = 0.0
                    betapar = np.array([0, 0, 2, max(gp.xipol)/2])  # for hern
                    annu = ga.rho(gp.xepol, gp)[1]
                    nupar_half = np.exp(splev(gp.dat.rhalf[1],
                                              splrep(gp.xepol, np.log(annu))))
                    nrnu = -gh.derivipol(np.log(annu), np.log(gp.xepol))
                    dlrnu = np.hstack([nrnu[0], nrnu, nrnu[-1]])
                    if gp.investigate == 'gaia':
                        dlrnu[-1] = 6
                    nupar = np.hstack([nupar_half, dlrnu])
                elif gp.checkbeta and gp.investigate == 'gaia':
                    #rhodmpar = np.array([0.41586608, 0.38655515, 0.60898657, 0.50936769, 0.52601378,
                    #                     0.54526758, 0.5755599, 0.57900806, 0.60252357, 0.60668445,
                    #                     0.62252721, 0.63173754, 0.64555439, 0.65777175, 0.67083556,
                    #                     0.68506606, 0.69139872, 0.66304763, 0.61462276, 0.70916575,
                    #                     0.53287872])
                    rhodmpar = np.array([0.18235821, 0.4719348, 0., 0., 0.10029569,
                                         0.11309553, 0.25637863, 0.31815175, 0.40621336, 0.46247927,
                                         0.53545415, 0.60874961, 0.68978141, 0.79781574, 0.91218048,
                                         1.08482356, 1.36074895, 1.88041885, 2.31792908, 2.62089078,
                                         3.001])
                    betapar = np.array([1.23555034e-03, 9.89999994e-01,
                                        2.03722518e+00, 5.85640906e+00])
                    nupar = np.array([0.15649498, 6.65618254, 0.10293663, 0.1087109, 0.13849277,
                                      0.24371261, 0.62633345, 1.05913181, 1.43774113, 1.82346043,
                                      2.20091446, 2.60007997, 2.98745825, 3.423104, 3.80766658,
                                      4.2089698, 4.62950843, 4.91166037, 4.97380638, 4.99718073,
                                      5.2277589])
                    # the four per-population entries are identical, so define the
                    # fixed vectors once and copy them into the lists
                    nrnu_fix = np.array([0.15476906, 0.85086798, 0.9342867, 0.88161169, 0.83254241,
                                         0.85086798, 0.99930431, 1.22211638, 1.47184763, 1.78910057,
                                         2.1987677, 2.51961046, 2.80345393, 3.10336133, 3.88504346,
                                         4.52442727, 4.88817769, 5.07880404, 4.83455511, 6.32165657,
                                         4.88817769])
                    nrnuerr_fix = np.array([0.05158969, 12.22044422, 2.44408884, 2.44408884, 2.44408884,
                                            2.44408884, 0.48881777, 0.48881777, 0.48881777, 0.48881777,
                                            0.48881777, 0.48881777, 0.48881777, 0.48881777, 0.48881777,
                                            0.48881777, 0.48881777, 2.44408884, 2.44408884, 2.44408884,
                                            2.44408884])
                    gp.dat.nrnu = [np.copy(nrnu_fix) for _ in range(4)]
                    gp.dat.nrnuerr = [np.copy(nrnuerr_fix) for _ in range(4)]
                    lbaryonpar = 0.0*rhodmpar
                    MtoL = 0.0
                sig, kap, zetaa, zetab = phys.sig_kap_zet(gp.xepol, rhodmpar, lbaryonpar,
                                                          MtoL, nupar, betapar, pop, gp)
                #fill_between(gp.xipol, gp.dat.sig[1]-gp.dat.sigerr[1], gp.dat.sig[1]+gp.dat.sigerr[1])
                #plot(gp.xepol, sig, 'r')
                #xscale('log')
                #ylim([0, 30])
                #xlabel('$r$ [pc]')
                #ylabel('$\sigma_{LOS}$ [km/s]')
                #savefig('siglos_gaia_2.pdf')
                #pdb.set_trace()
            except Exception:
                gh.LOG(1, 'sigma error')
                tmp_profs.chi2 = gh.err(2., gp)
                return tmp_profs
            # now store the profiles
            gh.sanitize_vector(tmp_beta, len(tmp_profs.x0), -200, 1, gp.debug)
            tmp_profs.set_prof('beta', tmp_beta, pop, gp)
            gh.sanitize_vector(tmp_betastar, len(tmp_profs.x0), -1, 1, gp.debug)
            tmp_profs.set_prof('betastar', tmp_betastar, pop, gp)
            tmp_profs.set_prof('sig', sig, pop, gp)
            tmp_profs.hypersig = tmp_hypersig
            tmp_profs.set_prof('kap', kap, pop, gp)
            tmp_profs.set_zeta(zetaa, zetab, pop)
        tmp_profs.set_prof('nrnu', tmp_nrnu, pop, gp)
        tmp_profs.set_prof('nu', tmp_nu, pop, gp)  # pool: tmp_nu.get()
        # following profile needs to be stored at all times, to calculate chi2
        tmp_profs.set_prof('Sig', tmp_Signu, pop, gp)
        tmp_profs.hyperSig = tmp_hyperSig
        off += offstep  # still do this even if gp.chi2_Sig_converged is False

    if off != gp.ndim:
        gh.LOG(1, 'wrong subscripts in gi_loglike')
        pdb.set_trace()

    # determine log likelihood
    chi2 = calc_chi2(tmp_profs, gp)
    gh.LOG(-1, gp.investigate + '/' + str(gp.case) + '/' + gp.files.timestamp +
           ': ln L = ', gh.pretty(-chi2/2.))
    # x = gp.dat.rbin
    # linedat, = ax.loglog(x, gp.dat.Sig[1], 'b')
    # line, = ax.loglog(x, tmp_profs.get_prof("Sig", 1), 'r', alpha=0.1)
    # plt.draw()
    # plt.show()
    tmp_profs.chi2 = chi2
    # after some predefined wallclock time and Sig convergence, plot all profiles
    #if time.time() - gp.last_plot >= gp.plot_after and gp.chi2_Sig_converged <= 0:
    #    gp.last_plot = time.time()
    #    try:
    #        import plotting.plot_profiles
    #        plotting.plot_profiles.run(gp.files.timestamp, gp.files.outdir, gp)
    #    except:
    #        print('plotting error in gi_loglike!')
    # close pool automatically after with clause
    return tmp_profs
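# ---------------------------------------------------------------------------
# Sketch of the cube bookkeeping geom_loglike performs (illustrative only;
# the helper name and the toy dimensions below are assumptions). The flat
# parameter vector handed over by the sampler is consumed block by block
# with an (off, offstep) cursor: nrho DM density parameters, optionally
# nrho baryon parameters plus one M/L value, then per population nrho
# tracer-density parameters, two hyperparameters, and nbeta anisotropy
# parameters. The final offset must match the total number of dimensions,
# mirroring the off != gp.ndim check above.
def split_cube_sketch(cube, nrho, nbeta, npops, has_baryons):
    blocks = {}
    off = 0
    blocks['rho_dm'] = cube[off:off+nrho]; off += nrho
    if has_baryons:                           # corresponds to gp.investigate == 'obs'
        blocks['rho_baryon'] = cube[off:off+nrho]; off += nrho
        blocks['MtoL'] = cube[off]; off += 1
    for pop in range(1, npops+1):
        blocks['nu_%d' % pop] = cube[off:off+nrho]; off += nrho
        blocks['hyperSig_%d' % pop] = cube[off]; off += 1
        blocks['hypersig_%d' % pop] = cube[off]; off += 1
        blocks['beta_%d' % pop] = cube[off:off+nbeta]; off += nbeta
    return blocks, off                        # caller checks off == ndim

# e.g. with toy sizes nrho=5, nbeta=4, one population, no baryon block:
# blocks, ndim_used = split_cube_sketch(list(range(16)), 5, 4, 1, False)
# assert ndim_used == 5 + 5 + 1 + 1 + 4
# ---------------------------------------------------------------------------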