def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    print('input: ', gpr.fil)
    x0, y0, vlos = np.genfromtxt(gpr.fil, skiprows=0, unpack=True,
                                 usecols=(0, 1, 5))
    # use only 3000 random particles:
    ind = np.arange(len(x0))
    np.random.shuffle(ind)
    ind = ind[:3000]
    x0 = x0[ind]
    y0 = y0[ind]
    vlos = vlos[ind]
    x0 *= 1000.  # [pc]
    y0 *= 1000.  # [pc]
    # shrinking sphere method
    pm = np.ones(len(x0))
    com_x, com_y, com_vz = gc.com_shrinkcircle_v_2D(x0, y0, vlos, pm)
    x0 -= com_x    # [pc]
    y0 -= com_y    # [pc]
    vlos -= com_vz # [km/s]
    import gi_file as gf
    for pop in range(2):
        Rc = np.sqrt(x0**2 + y0**2)  # [pc]
        Rhalf = np.median(Rc)        # [pc]
        Rscale = Rhalf               # or gpr.r_DM  # [pc]
        gp.Xscale.append(Rscale)     # [pc]
        print('Rscale = ', Rscale, ' pc')
        print('max(R) = ', max(Rc), ' pc')
        print('total number of stars: ', len(Rc))
        R0 = np.sqrt(x0**2 + y0**2)/Rscale
        sel = (R0 < gp.maxR)
        x = x0[sel]/Rscale
        y = y0[sel]/Rscale  # [Rscale]
        vz = vlos[sel]      # [km/s]
        m = np.ones(len(x))
        R = np.sqrt(x*x + y*y)*Rscale  # [pc]
        gf.write_Xscale(gp.files.get_scale_file(pop), np.median(R))
        c = open(gp.files.get_com_file(pop), 'w')
        print('# x [Xscale],', 'y [Xscale],', 'vLOS [km/s],', 'Xscale = ',
              Rscale, ' pc', file=c)
        for k in range(len(x)):
            print(x[k], y[k], vz[k], file=c)  # [rscale], [rscale], [km/s]
        c.close()
        if gpr.showplots:
            gpr.show_part_pos(x, y, np.ones(len(x)), Rscale)
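# `gc.com_shrinkcircle_v_2D` comes from the centering module and is not shown
# in this section. Below is a minimal sketch of a 2D shrinking-circle center
# finder, assuming the usual algorithm (recompute the weighted center from the
# stars inside a circle whose radius shrinks each iteration); the name, shrink
# factor, and minpart cutoff are illustrative, not the project's own values.
import numpy as np

def shrinkcircle_v_2D(x, y, vlos, pm, shrink=0.95, minpart=100):
    com_x = np.dot(x, pm)/np.sum(pm)   # weighted center of all stars
    com_y = np.dot(y, pm)/np.sum(pm)
    r = np.max(np.hypot(x - com_x, y - com_y))
    sel = np.ones(len(x), dtype=bool)
    while True:
        newsel = np.hypot(x - com_x, y - com_y) < r
        if np.sum(newsel) < minpart:   # stop once too few stars remain
            break
        sel = newsel
        com_x = np.dot(x[sel], pm[sel])/np.sum(pm[sel])
        com_y = np.dot(y[sel], pm[sel])/np.sum(pm[sel])
        r *= shrink                    # shrink the enclosing circle, iterate
    com_vz = np.dot(vlos[sel], pm[sel])/np.sum(pm[sel])  # [km/s]
    return com_x, com_y, com_vz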
def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    gu.G1__pcMsun_1km2s_2 = 1.  # as per definition
    gp.anM = 1.
    gp.ana = 1.
    print('grh_com: input: ', gpr.simpos)
    xall, yall, zall = np.loadtxt(gpr.simpos, skiprows=1, unpack=True)     # 3*[gp.ana]
    vxall, vyall, vzall = np.loadtxt(gpr.simvel, skiprows=1, unpack=True)  # 3*[gp.ana]
    nall = len(xall)  # [1]
    # shuffle and restrict to ntracer random points
    ndm = int(min(gp.ntracer[0], nall-1))
    trace = random.sample(range(nall), nall)
    if gp.pops > 1:
        gh.LOG(1, 'implement more than 2 pops for hern')
        pdb.set_trace()
    PM = [1. for i in trace]  # [1]=const, no prob. of membership info in dataset
    x = [xall[i] for i in trace]    # [gp.ana]
    y = [yall[i] for i in trace]    # [gp.ana]
    z = [zall[i] for i in trace]    # [gp.ana]
    vz = [vzall[i] for i in trace]  # [km/s]
    PM = np.array(PM)
    x = np.array(x); y = np.array(y); z = np.array(z); vz = np.array(vz)
    com_x, com_y, com_z, com_vz = com_shrinkcircle_v(x, y, z, vz, PM)
    # 3*[gp.ana], [velocity]
    print('COM [gp.ana]: ', com_x, com_y, com_z, com_vz)
    xnew = (x - com_x)    # *gp.ana  # [pc]
    ynew = (y - com_y)    # *gp.ana  # [pc]
    # znew = (z - com_z)  # *gp.ana  # [pc]
    vznew = (vz - com_vz) # *1e3*np.sqrt(gu.G1__pcMsun_1km2s_2*gp.anM/gp.ana)
                          # [km/s], from conversion from system with L=G=M=1
    R0 = np.sqrt(xnew**2 + ynew**2)  # [pc]
    Rhalf = np.median(R0)            # [pc]
    Rscale = Rhalf                   # or gpr.r_DM  # [pc]
    print('Rscale/pc = ', Rscale)
    # only for 0 (all) and 1 (first and only population)
    for pop in range(gp.pops+1):
        crscale = open(gp.files.get_scale_file(pop), 'w')
        print('# Rscale in [pc],', ' surfdens_central (=dens0) in [Munit/rscale**2],',
              ' and totmass_tracers [Munit],',
              ' and max(sigma_LOS) in [km/s]', file=crscale)
        print(Rscale, file=crscale)
        crscale.close()
        gh.LOG(2, 'grh_com: output: ', gp.files.get_com_file(pop))
        filepos = open(gp.files.get_com_file(pop), 'w')
        print('# x [Rscale]', 'y [Rscale]', 'vLOS [km/s]', file=filepos)
        for k in range(ndm):
            print(xnew[k]/Rscale, ynew[k]/Rscale, vznew[k], file=filepos)
        filepos.close()
    gh.LOG(2, '')
def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    print('input: ', gpr.fil)
    M0, x0, y0, z0, vx0, vy0, vz0, comp0 = read_data(gpr.fil)
    # [Msun], 3*[pc], 3*[km/s], [1]
    # assign population
    if gp.pops == 2:
        pm1 = (comp0 == 1)  # will be overwritten below if gp.metalpop
        pm2 = (comp0 == 2)  # same same
    elif gp.pops == 1:
        pm1 = (comp0 < 3)
        pm2 = (comp0 == -1)  # assign none, but of same length as comp0
    # split into population subsets before drawing random subsamples
    M1, x1, y1, z1, vx1, vy1, vz1, comp1 = select_pm(M0, x0, y0, z0,
                                                     vx0, vy0, vz0, comp0, pm1)
    M2, x2, y2, z2, vx2, vy2, vz2, comp2 = select_pm(M0, x0, y0, z0,
                                                     vx0, vy0, vz0, comp0, pm2)
    # cut to subsets
    ind1 = gh.draw_random_subset(x1, gp.ntracer[1-1])
    M1, x1, y1, z1, vx1, vy1, vz1, comp1 = select_pm(M1, x1, y1, z1,
                                                     vx1, vy1, vz1, comp1, ind1)
    ind2 = gh.draw_random_subset(x2, gp.ntracer[2-1])
    M2, x2, y2, z2, vx2, vy2, vz2, comp2 = select_pm(M2, x2, y2, z2,
                                                     vx2, vy2, vz2, comp2, ind2)
    # use vz for no contamination, or vb for with contamination
    M0, x0, y0, z0, vx0, vy0, vz0 = concat_pops(M1, M2, x1, x2, y1, y2, z1, z2,
                                                vx1, vx2, vy1, vy2, vz1, vz2, gp)
    # rebuild the membership flags for the concatenated arrays
    pm1 = np.hstack([np.ones(len(x1), dtype=bool), np.zeros(len(x2), dtype=bool)])
    pm2 = np.logical_not(pm1)
    pm = np.logical_or(pm1, pm2)
    pm0 = np.ones(len(x0))  # full probability of membership for all stars
    com_x, com_y, com_z, com_vz = com_shrinkcircle_v(x0, y0, z0, vz0, pm0)  # [pc]
    print('COM [pc]: ', com_x, com_y, com_z)  # [pc]
    print('VOM [km/s]', com_vz)  # [km/s]
    # from now on, work with 2D data only; z0 was only used to get
    # center in (x,y) better
    x0 -= com_x    # [pc]
    y0 -= com_y    # [pc]
    vz0 -= com_vz  # [km/s]
    R0 = np.sqrt(x0**2 + y0**2)  # [pc]
    Rhalf = np.median(R0)        # [pc]
    Rscale = Rhalf               # [pc] from all tracer points
    pop = -1
    for pmn in [pm, pm1, pm2]:
        pop = pop + 1  # population number
        pmr = (R0 < (gp.maxR*Rscale))  # read max extension for data
                                       # (rprior*Rscale) from gi_params
        pmn = pmn*pmr  # [1]
        print("fraction of members = ", 1.0*sum(pmn)/len(pmn))
        x = x0[pmn]; y = y0[pmn]; z = z0[pmn]; vz = vz0[pmn]  # [pc], [km/s]
        R = np.sqrt(x*x + y*y)  # [pc]
        Rscalei = np.median(R)
        gf.write_Xscale(gp.files.get_scale_file(pop), Rscalei)
        gf.write_data_output(gp.files.get_com_file(pop),
                             x/Rscalei, y/Rscalei, vz, Rscalei)
        if gpr.showplots:
            gpr.show_part_pos(x, y, pmn, Rscale)
def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    gpr.fil = gpr.dir + "/deBoer/table1.dat"
    ALL = np.loadtxt(gpr.fil)
    RAh = ALL[:, 0]
    RAm = ALL[:, 1]
    RAs = ALL[:, 2]
    DEd = ALL[:, 3]
    DEm = ALL[:, 4]
    DEs = ALL[:, 5]
    # that is all we read in for now. Crude assumptions: each star belongs to
    # Fornax and has mass 1 Msun; only use stars which are members of the dwarf
    sig = abs(RAh[0])/RAh[0]
    RAh = RAh/sig
    xs = 15*(RAh*3600 + RAm*60 + RAs)*sig  # [arcsec/15]
    sig = abs(DEd[0])/DEd[0]
    DEd = DEd/sig
    ys = (DEd*3600 + DEm*60 + DEs)*sig  # [arcsec]
    arcsec = 2.*np.pi/(360.*60.*60)     # [rad/arcsec]
    kpc = 1000  # [pc]
    DL = {1: lambda x: x * (138),  # +/- 8 for Fornax
          2: lambda x: x * (101),  # +/- 5 for Carina
          3: lambda x: x * (79),   # +/- 4 for Sculptor
          4: lambda x: x * (86),   # +/- 4 for Sextans
          5: lambda x: x * (80)    # +/- 10 for Draco
          }[gp.case](kpc)          # [pc]
    xs *= (arcsec*DL)  # [pc]
    ys *= (arcsec*DL)  # [pc]
    x0 = np.copy(xs)
    y0 = np.copy(ys)   # [pc]
    com_x, com_y = com_shrinkcircle_2D(x0, y0)  # [pc]
    # from now on, work with 2D data only;
    # z0 was only used to get center in (x,y) better
    # x0 -= com_x; y0 -= com_y  # [pc]
    # vz0 -= com_vz             # [km/s]
    R0 = np.sqrt(x0**2 + y0**2)  # [pc]
    Rhalf = np.median(R0)        # [pc]
    Rscale = Rhalf               # [pc] overall
    pop = 0
    pmr = (R0 < (gp.maxR*Rscale))  # read max extension for data
                                   # (rprior*Rscale) from gi_params
    x = 1.*x0[pmr]
    y = 1.*y0[pmr]
    R = np.sqrt(x*x + y*y)  # [pc]
    Rscalei = np.median(R)  # [pc]
    gf.write_Xscale(gp.files.get_scale_file(pop), Rscalei)  # [pc]
    gf.write_data_output(gp.files.get_com_file(pop),
                         x/Rscalei, y/Rscalei, np.zeros(len(x)), Rscalei)  # [pc]
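# The sexagesimal-to-parsec conversion above uses the small-angle relation
# x[pc] = theta[rad]*D[pc]. A minimal numerical check, with the Fornax
# distance of 138 kpc from the lookup above (helper name is illustrative):
import numpy as np

def arcsec_to_pc(theta_arcsec, dist_pc):
    # small-angle approximation: physical size = angle [rad] * distance [pc]
    return theta_arcsec*(2.*np.pi/(360.*60.*60))*dist_pc

print(arcsec_to_pc(1.0, 138*1000))  # 1 arcsec at Fornax subtends ~0.67 pc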
def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    print('input:', gpr.fil)
    x0, y0, z0, vx, vy, vz = np.transpose(np.loadtxt(gpr.fil))
    # for purely tangential beta=-0.5 models, have units of kpc instead of pc
    if gp.case == 9 or gp.case == 10:
        x0 *= 1000.  # [pc]
        y0 *= 1000.  # [pc]
        z0 *= 1000.  # [pc]
    # cutting pm_i to a maximum of ntracers particles:
    import gi_helper as gh
    ind1 = gh.draw_random_subset(x0, gp.ntracer[1-1])
    x0, y0, z0, vz = select_pm(x0, y0, z0, vz, ind1)
    PM = np.ones(len(x0))  # assign all particles the full probability of membership
    import gi_centering as glc
    com_x, com_y, com_z, com_vz = glc.com_shrinkcircle_v(x0, y0, z0, vz, PM)
    # from now on, work with 2D data only;
    # z0 was only used to get center in (x,y) better
    x0 -= com_x   # [pc]
    y0 -= com_y   # [pc]
    vz -= com_vz  # [km/s]
    R0 = np.sqrt(x0*x0 + y0*y0)  # [pc]
    Rscale = np.median(R0)       # [pc]
    import gi_file as gf
    for pop in range(gp.pops+1):  # gp.pops+1 for all components together
        pmr = (R0 < (gp.maxR*Rscale))
        # m = np.ones(len(R0))
        x = x0[pmr]  # [pc]
        y = y0[pmr]  # [pc]
        R = np.sqrt(x*x + y*y)  # [pc]
        Rscalei = np.median(R)
        # print("x y z" on first line, to interpret data later on
        gf.write_Xscale(gp.files.get_scale_file(pop), Rscalei)
        gf.write_data_output(gp.files.get_com_file(pop),
                             x/Rscalei, y/Rscalei, vz, Rscalei)
def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    for pop in range(2):
        # get radius, used for all binning
        print('input: ', gp.files.get_com_file(pop))
        if gf.bufcount(gp.files.get_com_file(pop)) < 2:
            return
        x, y, vlos = np.loadtxt(gp.files.get_com_file(pop),
                                skiprows=1, unpack=True)  # 2*[rscale], [km/s]
        # totmass_tracers = 1.*len(x)
        # [Munit], where each star is weighted with the same mass
        r = np.sqrt(x*x + y*y)  # [rscale]
        # set binning
        # gp.nipol = (max-min)*N^(1/3)/(2*(Q3-Q1))  (method of Wand)
        rmin = 0.  # [rscale]
        rmax = max(r) if gp.maxR < 0 else 1.0*gp.maxR  # [rscale]
        binmin, binmax, rbin = gh.determine_radius(r, rmin, rmax, gp)  # [rscale0]
        # offset from the start!
        rs = gpr.Rerr*np.random.randn(len(r)) + r           # [rscale]
        vlos = gpr.vrerr*np.random.randn(len(vlos)) + vlos  # [km/s]
        vfil = open(gp.files.sigfiles[pop], 'w')
        print('r', 'sigma_r(r)', 'error', file=vfil)
        # 30 iterations for drawing a given radius in bin
        dispvelocity = np.zeros((gp.nipol, gpr.n))
        a = np.zeros((gp.nipol, gpr.n))
        p_dvlos = np.zeros(gp.nipol)
        p_edvlos = np.zeros(gp.nipol)
        for k in range(gpr.n):
            rsi = gpr.Rerr*np.random.randn(len(rs)) + rs          # [rscale]
            vlosi = gpr.vrerr*np.random.randn(len(vlos)) + vlos   # [km/s]
            for i in range(gp.nipol):
                ind1 = np.argwhere(np.logical_and(rsi > binmin[i],
                                                  rsi < binmax[i])).flatten()
                a[i][k] = len(ind1)  # [1]
                vlos1 = vlosi[ind1]  # [km/s]
                if len(ind1) <= 1:
                    dispvelocity[i][k] = dispvelocity[i-1][k]
                    # attention! should be 0, uses last value
                else:
                    dispvelocity[i][k] = meanbiweight(vlos1, ci_perc=68.4,
                                                      ci_mean=True, ci_std=True)[1]
                    # [km/s], see BiWeight.py
        for i in range(gp.nipol):
            dispvel = np.sum(dispvelocity[i])/gpr.n  # [km/s]
            ab = np.sum(a[i])/(1.*gpr.n)             # [1]
            if ab == 0:
                dispvelerr = p_edvlos[i-1]  # [km/s]  attention! uses last error
            else:
                dispvelerr = dispvel/np.sqrt(ab)  # [km/s]
            p_dvlos[i] = dispvel       # [km/s]
            p_edvlos[i] = dispvelerr   # [km/s]
        maxsiglos = max(p_dvlos)  # [km/s]
        print('maxsiglos = ', maxsiglos, '[km/s]')
        fpars = open(gp.files.get_scale_file(pop), 'a')
        print(maxsiglos, file=fpars)  # [km/s]
        fpars.close()
        # import shutil
        # shutil.copy2(gp.files.get_scale_file(0), gp.files.get_scale_file(1))
        for i in range(gp.nipol):
            # [rscale], [maxsiglos], [maxsiglos]
            print(rbin[i], binmin[i], binmax[i],
                  np.abs(p_dvlos[i]/maxsiglos), np.abs(p_edvlos[i]/maxsiglos),
                  file=vfil)  # /np.sqrt(n))
        vfil.close()
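# `meanbiweight` (BiWeight.py) returns an outlier-robust velocity dispersion
# per bin. For orientation, a minimal sketch of the standard biweight scale
# estimator (Beers, Flynn & Gebhardt 1990) that such routines follow; the
# function name and tuning constant c are illustrative:
import numpy as np

def biweight_scale(v, c=9.0):
    M = np.median(v)
    MAD = np.median(np.abs(v - M))     # median absolute deviation
    u = (v - M)/(c*MAD)                # scaled distances from the median
    w = np.abs(u) < 1                  # only points with |u| < 1 contribute
    num = np.sum((v[w] - M)**2*(1 - u[w]**2)**4)
    den = np.abs(np.sum((1 - u[w]**2)*(1 - 5*u[w]**2)))
    return np.sqrt(len(v)*num)/den     # [km/s] if v is in [km/s]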
def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    xall, yall, zall = np.loadtxt(gp.files.get_com_file(0), skiprows=1,
                                  usecols=(0, 1, 2), unpack=True)  # 3*[rscale0]
    rscale0 = gf.read_Xscale(gp.files.get_scale_file(0)+'_3D')
    xall *= rscale0
    yall *= rscale0
    zall *= rscale0
    # calculate 3D radius
    r = np.sqrt(xall**2 + yall**2 + zall**2)  # [pc]
    # set number and size of (linearly spaced) bins
    rmin = 0.  # [pc]
    rmax = max(r) if gp.maxR < 0 else 1.0*gp.maxR  # [pc]
    print('rmax [rscale] = ', rmax)
    r = r[(r < rmax)]  # [pc]
    binmin, binmax, rbin = gh.determine_radius(r, rmin, rmax, gp)  # [pc]
    vol = volume_spherical_shell(binmin, binmax, gp)  # [pc^3]
    for pop in range(gp.pops+1):
        print('####### working on component ', pop)
        print('input: ', gp.files.get_com_file(pop)+'_3D')
        # start from data centered on COM already:
        if gf.bufcount(gp.files.get_com_file(pop)+'_3D') < 2:
            continue
        x, y, z, v = np.loadtxt(gp.files.get_com_file(pop)+'_3D', skiprows=1,
                                usecols=(0, 1, 2, 3), unpack=True)
        # 3*[rscale], [km/s]
        rscalei = gf.read_Xscale(gp.files.get_scale_file(pop))  # [pc]
        x *= rscalei
        y *= rscalei
        z *= rscalei
        # calculate 3D radius
        r = np.sqrt(x**2 + y**2 + z**2)  # [pc]
        # set maximum radius (if gp.maxR is set)
        rmax = max(r) if gp.maxR < 0 else 1.0*gp.maxR  # [pc]
        print('rmax [pc] = ', rmax)
        sel = (r <= rmax)
        x = x[sel]; y = y[sel]; z = z[sel]; v = v[sel]; r = r[sel]  # [rscale]
        totmass_tracers = 1.*len(x)  # [Munit], Munit = 1/star
        rs = r    # + possible starting offset, [rscale]
        vlos = v  # + possible starting offset, [km/s]
        gf.write_tracer_file(gp.files.get_ntracer_file(pop)+'_3D', totmass_tracers)
        de, em = gf.write_headers_3D(gp, pop)
        # gpr.n=30 iterations for getting random picked radius values
        density = np.zeros((gp.nipol, gpr.n))
        a = np.zeros((gp.nipol, gpr.n))  # shared by density, siglos, kappa calcs
        for k in range(gpr.n):
            rsi = gpr.Rerr*np.random.randn(len(rs)) + rs          # [pc]
            vlosi = gpr.vrerr*np.random.randn(len(vlos)) + vlos   # [km/s]
            for i in range(gp.nipol):
                ind1 = np.argwhere(np.logical_and(rsi >= binmin[i],
                                                  rsi < binmax[i])).flatten()  # [1]
                density[i][k] = (1.*len(ind1))/vol[i]*totmass_tracers  # [Munit/rscale^3]
                vlos1 = vlosi[ind1]     # [km/s]
                a[i][k] = 1.*len(ind1)  # [1]
        dens0 = np.sum(density[0])/(1.*gpr.n)  # [Munit/rscale^3]
        print('dens0 = ', dens0, ' [Munit/rscale^3]')
        dens0pc = dens0/rscale0**3
        gf.write_Sig_scale(gp.files.get_scale_file(pop)+'_3D', dens0pc,
                           totmass_tracers)
        tpb0 = np.sum(a[0])/float(gpr.n)  # [1] tracers per bin
        denserr0 = dens0/np.sqrt(tpb0)    # [Munit/rscale^3]
        p_dens = np.zeros(gp.nipol)
        p_edens = np.zeros(gp.nipol)
        for b in range(gp.nipol):
            dens = np.sum(density[b])/float(gpr.n)  # [Munit/rscale^3]
            tpb = np.sum(a[b])/float(gpr.n)         # [1]
            denserr = dens/np.sqrt(tpb)             # [Munit/rscale^3]
            if np.isnan(denserr):
                p_dens[b] = p_dens[b-1]    # [1]
                p_edens[b] = p_edens[b-1]  # [1]
            else:
                p_dens[b] = dens/dens0      # [1]
                p_edens[b] = denserr/dens0  # [1]  100/rbin would be artificial guess
            print(rbin[b], binmin[b], binmax[b], p_dens[b], p_edens[b], file=de)
            # [rscale], 2*[dens0]
            indr = (r < binmax[b])
            menclosed = float(np.sum(indr))/totmass_tracers
            # for normalization to 1  [totmass_tracers]
            merr = menclosed/np.sqrt(tpb)  # artificial menclosed/10  [totmass_tracers]
            print(rbin[b], binmin[b], binmax[b], menclosed, merr, file=em)
            # [rscale], 2*[totmass_tracers]
        de.close()
        em.close()
        if gpr.showplots:
            print('plotting for pop ', pop)
            # show_plots_dens(rbin, p_dens, p_edens, gp)
            mf1 = 0.02  # 1/totmass_tracers
            mf2 = 0.02
            rho_dm, rho_star1, rho_star2 = ga.rho_walk(rbin*rscale0, gp, mf1, mf2)
            if pop == 0:
                loglog(rbin*rscale0, rho_star1 + rho_star2, 'k.-', lw=0.5)
            elif pop == 1:
                loglog(rbin*rscale0, rho_star1, 'b.-', lw=0.5)
            elif pop == 2:
                loglog(rbin*rscale0, rho_star2, 'g.-', lw=0.5)
            loglog(rbin*rscale0, dens0pc*p_dens, 'r.-')
            pdb.set_trace()
            clf()
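# `volume_spherical_shell` is not shown in this section; given the [pc^3]
# unit comment it presumably returns the shell volume of each radial bin.
# A minimal sketch under that assumption:
import numpy as np

def volume_spherical_shell(binmin, binmax, gp):
    # V_i = (4/3)*pi*(r_max,i^3 - r_min,i^3) per bin
    return 4.*np.pi/3.*(binmax**3 - binmin**3)  # [pc^3] for bins in [pc]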
def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    print('input: ', gpr.fil)
    x0, y0, z0, vb0, vz0, Mg0, PM0, comp0 = read_data(gpr.fil)
    # [pc], [km/s], [1]
    # only use stars which are members of the dwarf: exclude pop3 by construction
    pm = (PM0 >= gpr.pmsplit)  # exclude foreground contamination, outliers
    x0, y0, z0, comp0, vb0, vz0, Mg0, PM0 = select_pm(x0, y0, z0, comp0,
                                                      vb0, vz0, Mg0, PM0, pm)
    # assign population
    if gp.pops == 2:
        pm1 = (comp0 == 1)  # will be overwritten below if gp.metalpop
        pm2 = (comp0 == 2)  # same same
    elif gp.pops == 1:
        pm1 = (comp0 < 3)
        pm2 = (comp0 == -1)  # assign none, but of same length as comp0
    if gp.metalpop:
        # drawing of populations based on metallicity;
        # get parameters from function in pymcmetal.py
        import pickle
        fi = open('metalsplit.dat', 'rb')
        DATA = pickle.load(fi)
        fi.close()
        p, mu1, sig1, mu2, sig2, M, pm1, pm2 = DATA
    x1, y1, z1, comp1, vb1, vz1, Mg1, PM1 = select_pm(x0, y0, z0, comp0,
                                                      vb0, vz0, Mg0, PM0, pm1)
    x2, y2, z2, comp2, vb2, vz2, Mg2, PM2 = select_pm(x0, y0, z0, comp0,
                                                      vb0, vz0, Mg0, PM0, pm2)
    # cut to subsets
    ind1 = gh.draw_random_subset(x1, gp.ntracer[1-1])
    x1, y1, z1, comp1, vb1, vz1, Mg1, PM1 = select_pm(x1, y1, z1, comp1,
                                                      vb1, vz1, Mg1, PM1, ind1)
    ind2 = gh.draw_random_subset(x2, gp.ntracer[2-1])
    x2, y2, z2, comp2, vb2, vz2, Mg2, PM2 = select_pm(x2, y2, z2, comp2,
                                                      vb2, vz2, Mg2, PM2, ind2)
    # use vz for no contamination, or vb for with contamination
    x0, y0, z0, vz0, pm1, pm2, pm = concat_pops(x1, x2, y1, y2, z1, z2,
                                                vz1, vz2, gp)
    com_x, com_y, com_z, com_vz = com_shrinkcircle_v(x0, y0, z0, vz0, pm)  # [pc]
    print('COM [pc]: ', com_x, com_y, com_z)  # [pc]
    print('VOM [km/s]', com_vz)  # [km/s]
    # from now on, work with 2D data only; z0 was only used to get
    # center in (x,y) better
    x0 -= com_x    # [pc]
    y0 -= com_y    # [pc]
    vz0 -= com_vz  # [km/s]
    R0 = np.sqrt(x0**2 + y0**2)  # [pc]
    Rhalf = np.median(R0)        # [pc]
    Rscale = Rhalf               # [pc] from all tracer points
    pop = -1
    for pmn in [pm, pm1, pm2]:
        pop = pop + 1  # population number
        pmr = (R0 < (gp.maxR*Rscale))  # read max extension for data
                                       # (rprior*Rscale) from gi_params
        pmn = pmn*pmr  # [1]
        print("fraction of members = ", 1.0*sum(pmn)/len(pmn))
        x, y, z, comp, vz, vb, Mg, PMN = select_pm(x0, y0, z0, comp0, vz0,
                                                   vb0, Mg0, PM0, pmn)
        R = np.sqrt(x*x + y*y)  # [pc]
        Rscalei = np.median(R)
        gf.write_Xscale(gp.files.get_scale_file(pop), Rscalei)
        gf.write_data_output(gp.files.get_com_file(pop),
                             x/Rscalei, y/Rscalei, vz, Rscalei)
        if gpr.showplots:
            gpr.show_part_pos(x, y, pmn, Rscale)
def run(gp):
    pop = 0
    import gr_params
    gpr = gr_params.grParams(gp)
    xall, yall = np.loadtxt(gp.files.get_com_file(0), skiprows=1,
                            usecols=(0, 1), unpack=True)  # 2*[Rscale0]
    R = np.sqrt(xall**2 + yall**2)  # [Rscale0]
    # set number and size of (linearly spaced) bins
    Rmin = 0.  # [Rscale0]
    Rmax = max(R) if gp.maxR < 0 else 1.0*gp.maxR  # [Rscale0]
    R = R[(R < Rmax)]  # [Rscale0]
    Binmin, Binmax, Rbin = gh.determine_radius(R, Rmin, Rmax, gp)  # [Rscale0]
    gp.xipol = Rbin
    minr = min(Rbin)  # [pc]
    maxr = max(Rbin)  # [pc]
    gp.xepol = np.hstack([minr/8., minr/4., minr/2., Rbin,
                          2*maxr, 4*maxr, 8*maxr])  # [pc]
    Vol = gh.volume_circular_ring(Binmin, Binmax, gp)  # [Rscale0^2]
    Rscale0 = float(gf.read_Xscale(gp.files.get_scale_file(0)))  # [pc]
    print('####### working on component ', pop)
    print('input: ', gp.files.get_com_file(pop))
    # start from data centered on COM already:
    if gf.bufcount(gp.files.get_com_file(pop)) < 2:
        return
    # only read in data if needed: pops = 1: reuse data from pop=0 part
    x, y = np.loadtxt(gp.files.get_com_file(pop), skiprows=1,
                      usecols=(0, 1), unpack=True)  # [Rscalei], [Rscalei]
    # calculate 2D radius on the skyplane
    R = np.sqrt(x**2 + y**2)  # [Rscalei]
    Rscalei = gf.read_Xscale(gp.files.get_scale_file(pop))  # [pc]
    # set maximum radius (if gp.maxR is set)
    Rmax = max(R) if gp.maxR < 0 else 1.0*gp.maxR  # [Rscale0]
    print('Rmax [Rscale0] = ', Rmax)
    sel = (R*Rscalei <= Rmax*Rscale0)
    x = x[sel]  # [Rscalei]
    y = y[sel]  # [Rscalei]
    R = R[sel]  # [Rscalei]
    totmass_tracers = float(len(x))  # [Munit], Munit = 1/star
    Rs = R  # + possible starting offset, [Rscalei]
    tr = open(gp.files.get_ntracer_file(pop), 'w')
    print(totmass_tracers, file=tr)
    tr.close()
    f_Sig, f_nu, f_mass, f_sig, f_kap, f_zeta = gf.write_headers_2D(gp, pop)
    Sig_phot = np.zeros((gp.nipol, gpr.n))
    # particle selections, shared by density, siglos, kappa and zeta calculations
    tpb = np.zeros((gp.nipol, gpr.n))
    for k in range(gpr.n):
        Rsi = gh.add_errors(Rs, gpr.Rerr)  # [Rscalei]
        for i in range(gp.nipol):
            ind1 = np.argwhere(np.logical_and(Rsi*Rscalei >= Binmin[i]*Rscale0,
                                              Rsi*Rscalei < Binmax[i]*Rscale0)).flatten()  # [1]
            tpb[i][k] = float(len(ind1))  # [1]
            Sig_phot[i][k] = float(len(ind1))*totmass_tracers/Vol[i]  # [Munit/Rscale^2]
    # do the following for all populations
    Sig0 = np.sum(Sig_phot[0])/float(gpr.n)  # [Munit/Rscale^2]
    Sig0pc = Sig0/Rscale0**2  # [Munit/pc^2]
    gf.write_Sig_scale(gp.files.get_scale_file(pop), Sig0pc, totmass_tracers)
    # calculate density and mass profile, store it
    # ----------------------------------------------------------------------
    P_dens = np.zeros(gp.nipol)
    P_edens = np.zeros(gp.nipol)
    for b in range(gp.nipol):
        Sig = np.sum(Sig_phot[b])/(1.*gpr.n)  # [Munit/Rscale^2]
        tpbb = np.sum(tpb[b])/float(gpr.n)    # [1], mean number of tracers in bin
        Sigerr = Sig/np.sqrt(tpbb)            # [Munit/Rscale^2], Poissonian error
        # compare data and analytic profile <=> get stellar
        # density or mass ratio from Matt Walker
        if np.isnan(Sigerr):
            P_dens[b] = P_dens[b-1]    # [1]
            P_edens[b] = P_edens[b-1]  # [1]
        else:
            P_dens[b] = Sig/Sig0       # [1]
            P_edens[b] = Sigerr/Sig0   # [1]
        print(Rbin[b], Binmin[b], Binmax[b], P_dens[b], P_edens[b], file=f_Sig)
        # 3*[Rscale], 2*[dens0]
        indr = (R < Binmax[b])
        Menclosed = float(np.sum(indr))/totmass_tracers
        # for normalization to 1  [totmass_tracers]
        Merr = Menclosed/np.sqrt(tpbb)  # or artificial Menclosed/10  [totmass_tracers]
        print(Rbin[b], Binmin[b], Binmax[b], Menclosed, Merr, file=f_mass)
        # [Rscale0], 2*[totmass_tracers]
    f_Sig.close()
    f_mass.close()
    # deproject Sig to get nu
    numedi = gip.Sig_INT_rho(Rbin*Rscalei, Sig0pc*P_dens, gp)
    # numin = gip.Sig_INT_rho(Rbin*Rscalei, Sig0pc*(P_dens-P_edens), gp)
    numax = gip.Sig_INT_rho(Rbin*Rscalei, Sig0pc*(P_dens+P_edens), gp)
    nu0pc = numedi[0]
    gf.write_nu_scale(gp.files.get_scale_file(pop), nu0pc)
    nuerr = numax - numedi
    for b in range(gp.nipol):
        print(Rbin[b], Binmin[b], Binmax[b],
              numedi[b]/nu0pc, nuerr[b]/nu0pc, file=f_nu)
    f_nu.close()
    # write dummy sig scale, not to be used later on
    maxsiglos = -1.  # [km/s]
    fpars = open(gp.files.get_scale_file(pop), 'a')
    print(maxsiglos, file=fpars)  # [km/s]
    fpars.close()
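# `gh.volume_circular_ring` is not defined in this section; from the
# [Rscale0^2] unit comment it presumably returns the area of each projected
# annulus. A minimal sketch under that assumption:
import numpy as np

def volume_circular_ring(binmin, binmax, gp):
    # A_i = pi*(R_max,i^2 - R_min,i^2) per bin
    return np.pi*(binmax**2 - binmin**2)  # [Rscale0^2] for bins in [Rscale0]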
def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    gpr.fil = gpr.dir + "/data/tracers.dat"
    A = np.loadtxt(gpr.fil, skiprows=25)
    RAh, RAm, RAs, DEd, DEm, DEs, Vlos, e_Vlos, Teff, e_Teff, \
        logg, e_logg, Fe, e_Fe, Nobs = A.T
    # only use stars which have Mg measurements
    pm = (Nobs > 0)  # (PM>=0.95)*
    print("f_members = ", gh.pretty(1.*sum(pm)/len(pm)))
    RAh = RAh[pm]
    RAm = RAm[pm]
    RAs = RAs[pm]
    DEd = DEd[pm]
    DEm = DEm[pm]
    DEs = DEs[pm]
    Vlos = Vlos[pm]
    e_Vlos = e_Vlos[pm]
    Teff = Teff[pm]
    e_Teff = e_Teff[pm]
    logg = logg[pm]
    e_logg = e_logg[pm]
    Fe = Fe[pm]
    e_Fe = e_Fe[pm]
    Nobs = Nobs[pm]
    sig = abs(RAh[0])/RAh[0]
    # print('RAh: signum = ', gh.pretty(sig))
    RAh = RAh/sig
    xs = 15*(RAh*3600 + RAm*60 + RAs)*sig  # [arcsec/15]
    sig = abs(DEd[0])/DEd[0]
    # print('DEd: signum = ', gh.pretty(sig))
    DEd = DEd/sig
    ys = (DEd*3600 + DEm*60 + DEs)*sig  # [arcsec]
    arcsec = 2.*np.pi/(360.*60.*60)     # [rad/arcsec]
    kpc = 1000  # [pc]
    DL = {1: lambda x: x * (138),  # +/- 8 for Fornax
          2: lambda x: x * (101),  # +/- 5 for Carina
          3: lambda x: x * (79),   # +/- 4 for Sculptor
          4: lambda x: x * (86),   # +/- 4 for Sextans
          5: lambda x: x * (80)    # +/- 10 for Draco
          }[gp.case](kpc)          # [pc]
    xs *= (arcsec*DL)  # [pc]
    ys *= (arcsec*DL)  # [pc]
    x0 = np.copy(xs)
    y0 = np.copy(ys)     # [pc]
    vz0 = np.copy(Vlos)  # [km/s]
    Fe0 = np.copy(Fe)
    # only use stars which are members of the dwarf: exclude pop3 by construction
    # pm = (PM0 >= gpr.pmsplit)  # exclude foreground contamination, outliers
    # x0, y0, vz0, Mg0, PM0 = select_pm(x0, y0, vz0, Mg0, PM0, pm)
    # assign population
    if gp.pops == 2:
        # drawing of populations based on metallicity;
        # get parameters from function in pymcmetal.py
        # [p, mu1, sig1, mu2, sig2] = np.loadtxt(gp.files.dir+'metalsplit.dat')
        # [pm1, pm2] = np.loadtxt(gp.files.dir+'metalsplit_assignment.dat')
        popass = np.loadtxt(gp.files.dir+'popass')
        pm1 = (popass == 1)
        pm2 = (popass == 2)
    elif gp.pops == 1:
        pm1 = (Teff >= 0)
        pm2 = (Teff < 0)  # assign none, but of same length as xs
    x1, y1, vz1, Fe1, PM1 = select_pm(x0, y0, vz0, Fe, pm, pm1)
    x2, y2, vz2, Fe2, PM2 = select_pm(x0, y0, vz0, Fe, pm, pm2)
    # cutting pm_i to a maximum of ntracers_i particles each:
    ind1 = np.arange(len(x1))
    np.random.shuffle(ind1)  # random.shuffle already changes ind
    ind1 = ind1[:gp.ntracer[1-1]]
    ind2 = np.arange(len(x2))
    np.random.shuffle(ind2)  # random.shuffle already changes ind
    ind2 = ind2[:gp.ntracer[2-1]]
    x1, y1, vz1, Fe1, PMS1 = select_pm(x1, y1, vz1, Fe1, PM1, ind1)
    x2, y2, vz2, Fe2, PMS2 = select_pm(x2, y2, vz2, Fe2, PM2, ind2)
    x0, y0, vz0, pm1, pm2, pm = concat_pops(x1, x2, y1, y2, vz1, vz2, gp)
    # optimum: get 3D center of mass with means
    # com_x, com_y, com_z = com_mean(x0, y0, z0, PM0)
    # 3*[pc], z component included if available
    com_x, com_y, com_vz = com_shrinkcircle_v_2D(x0, y0, vz0, pm)  # [pc], [km/s]
    # from now on, work with 2D data only;
    # x0 -= com_x; y0 -= com_y  # [pc]
    # vz0 -= com_vz             # [km/s]
    R0 = np.sqrt(x0**2 + y0**2)  # [pc]
    Rhalf = np.median(R0)        # [pc]
    Rscale = Rhalf               # [pc] overall
    pop = -1
    for pmn in [pm, pm1, pm2]:
        pop = pop + 1
        pmr = (R0 < (gp.maxR*Rscale))  # read max extension for data
                                       # (rprior*Rscale) from gi_params
        pmn = pmn*pmr  # [1]
        print("fraction of members = ", 1.0*sum(pmn)/len(pmn))
        x, y, vz, Fe, PMN = select_pm(x0, y0, vz0, Fe0, pm, pmn)
        R = np.sqrt(x*x + y*y)  # [pc]
        Rscalei = np.median(R)  # [pc]
        gf.write_Xscale(gp.files.get_scale_file(pop), Rscalei)  # [pc]
        gf.write_data_output(gp.files.get_com_file(pop),
                             x/Rscalei, y/Rscalei, vz, Rscalei)  # [pc]
        if gpr.showplots:
            gpr.show_part_pos(x, y, pmn, Rscale)
def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    global Nsample, split, e_split, PM, split_min, split_max
    gpr.fil = gpr.dir + "data/tracers.dat"
    # number of measured tracer stars
    Nsample = bufcount(gpr.fil)
    delim = [0, 22, 3, 3, 6, 4, 3, 5, 6, 6, 7, 5, 6, 5, 6, 5, 6]
    # ID = np.genfromtxt(gpr.fil, skiprows=29, unpack=True, usecols=(0,1),
    #                    delimiter=delim)
    if gp.case == 5:
        RAh, RAm, RAs, DEd, DEm, DEs, VHel, e_VHel, Teff, e_Teff, \
            logg, e_logg, Fe, e_Fe, N = np.loadtxt(gpr.fil, skiprows=25,
                                                   unpack=True)
        PM = np.ones(len(RAh))
        split = logg
        e_split = e_logg
    else:
        RAh, RAm, RAs, DEd, DEm, DEs, Vmag, VI, VHel, e_VHel, SigFe, e_SigFe, \
            Mg, Mg_err, PM = np.genfromtxt(gpr.fil, skiprows=29, unpack=True,
                                           usecols=tuple(range(2, 17)),
                                           delimiter=delim, filling_values=-1)
        split = Mg
        e_split = Mg_err
    if gp.case == 5:
        sel = (N > 0)
    else:
        sel = (Mg > -1)  # exclude missing data on Mg
    RAh = RAh[sel]
    RAm = RAm[sel]
    RAs = RAs[sel]
    DEd = DEd[sel]
    DEm = DEm[sel]
    DEs = DEs[sel]
    # Vmag = Vmag[sel]
    # VI = VI[sel]
    VHel = VHel[sel]
    e_VHel = e_VHel[sel]
    if gp.case < 5:
        Mg = Mg[sel]
        Mg_err = Mg_err[sel]
    elif gp.case == 5:
        Teff = Teff[sel]
        e_Teff = e_Teff[sel]
        logg = logg[sel]
        e_logg = e_logg[sel]
        Fe = Fe[sel]
        e_Fe = e_Fe[sel]
        N = N[sel]
    split = split[sel]
    e_split = e_split[sel]
    PM = PM[sel]
    split_min = min(split)  # -3, 3 if according to WalkerPenarrubia2011
    split_max = max(split)
    # easiest way for visualization: use histogram to show data
    # hist(split, np.sqrt(len(split))/2, normed=True)
    # but: it's not as easy as that; we have datapoints with errors and a
    # probability of membership weighting. Thus, we need to smear the values
    # out using a Gaussian of width = split_err and add them up afterwards
    # after scaling with probability PM
    x = np.array(np.linspace(split_min, split_max, 100))
    splitdf = np.zeros(100)
    for i in range(len(split)):
        splitdf += PM[i]*gh.gauss(x, split[i], e_split[i])
    splitdf /= sum(PM)
    # plot(x, Mgdf, 'g', lw=2)
    # only then we want to compare to Gaussians
    n_dims = 1 + gp.pops*2
    # Nsample = 10*n_dims
    pymultinest.run(myloglike,
                    myprior,
                    n_dims,           # nest_ndims
                    n_dims + 1,       # nest_totPar
                    n_dims,           # separate modes on nest_nCdims: the rho
                                      # parameters only (gp.nrho in this case)
                    [gp.pops, gp.nipol, gp.nrho],
                    True,             # nest_IS = INS enabled
                    True,             # nest_mmodal = separate modes
                    True,             # nest_ceff = use const sampling efficiency
                    Nsample,          # nest_nlive
                    0.0,              # nest_tol = 0 to keep working infinitely
                    0.8,              # nest_ef
                    10000,            # nest_updInt = output after this many iterations
                    1.,               # null_log_evidence: separate modes if
                                      # logevidence > this param
                    Nsample,          # maxClst
                    -1.e30,           # nest_Ztol = mode tolerance in the case where
                                      # no special value exists: highly negative
                    gp.files.outdir,  # outputfiles_basename
                    -1,               # seed
                    True,             # nest_fb
                    False,            # nest_resume
                    0,                # context
                    True,             # nest_outfile
                    -999999,          # nest_logZero = points with log L < log_zero
                                      # will be ignored
                    1000,             # nest_maxIter
                    False,            # initMPI = use MPI
                    None)             # dump_callback
    import os
    os.system('cd ' + gp.files.outdir +
              '; grep -n6 Maximum stats.dat|tail -5|cut -d " " -f8 > metalmaxL.dat;')
    os.system("cd " + gp.files.outdir +
              "; sed -i 's/\\([0-9]\\)-\\([0-9]\\)/\\1E-\\2/g' metalmaxL.dat")
    os.system("cd " + gp.files.outdir +
              "; sed -i 's/\\([0-9]\\)+\\([0-9]\\)/\\1E+\\2/g' metalmaxL.dat")
    cubeML = np.loadtxt(gp.files.outdir + 'metalmaxL.dat')
    cubeMLphys = cubeML
    # myprior(cubeML, 1+gp.pops*2, 1+gp.pops*2)
    # myloglike(cubeMLphys, 1+gp.pops*2, 1+gp.pops*2)
    pML, mu1ML, sig1ML, mu2ML, sig2ML = cubeMLphys
    # g1 = pML*gh.gauss(x, mu1ML, sig1ML)
    # g2 = (1-pML)*gh.gauss(x, mu2ML, sig2ML)
    # gtot = g1+g2
    # plot(x, pML*g1, 'white')
    # plot(x, (1-pML)*g2, 'white')
    # plot(x, gtot, 'r')
    # xlabel('Mg'); ylabel('pdf')
    # pdb.set_trace()
    sig = abs(RAh[0])/RAh[0]
    RAh = RAh/sig
    xs = 15*(RAh*3600 + RAm*60 + RAs)*sig  # [arcsec/15]
    sig = abs(DEd[0])/DEd[0]
    DEd = DEd/sig
    ys = (DEd*3600 + DEm*60 + DEs)*sig  # [arcsec]
    arcsec = 2.*np.pi/(360.*60.*60)     # [rad/arcsec]
    kpc = 1000  # [pc]
    DL = {1: lambda x: x * (138),  # +/- 8 for Fornax
          2: lambda x: x * (101),  # +/- 5 for Carina
          3: lambda x: x * (79),   # +/- 4 for Sculptor
          4: lambda x: x * (86),   # +/- 4 for Sextans
          5: lambda x: x * (80)    # +/- 10 for Draco
          }[gp.case](kpc)          # [pc]
    xs *= (arcsec*DL)  # [pc]
    ys *= (arcsec*DL)  # [pc]
    # alternative: get center of photometric measurements by deBoer
    # for Fornax, we have
    if gp.case == 1:
        com_x = 96203.736358393697
        com_y = -83114.080684733024
        xs = xs - com_x
        ys = ys - com_y
    else:
        # determine com_x, com_y from shrinking sphere
        import gi_centering as grc
        com_x, com_y = grc.com_shrinkcircle_2D(xs, ys)
    # instantiate different samplings, store half-light radii (2D)
    coll_R1half = []
    coll_R2half = []
    coll_popass = []
    print('drawing 1000 assignments of stars to best fitting Gaussians')
    import numpy.random as npr
    # import gi_project as gip
    for kl in range(1000):
        # get a sample assignment:
        popass = []
        for i in range(sum(sel)):
            # random assignment would be wrong:
            # if npr.rand() <= 0.5:
            #     popass.append(1)
            # else:
            #     popass.append(2)
            spl = split[i]
            ppop1 = pML*gh.gauss(spl, mu1ML, sig1ML)
            ppop2 = (1-pML)*gh.gauss(spl, mu2ML, sig2ML)
            if npr.rand() <= ppop1/(ppop1+ppop2):
                popass.append(1)
            else:
                popass.append(2)
        popass = np.array(popass)
        coll_popass.append(popass)
        sel1 = (popass == 1)
        sel2 = (popass == 2)
        # radii of all stellar tracers from pop 1 and 2
        R1 = np.sqrt((xs[sel1])**2 + (ys[sel1])**2)
        R2 = np.sqrt((xs[sel2])**2 + (ys[sel2])**2)
        R1.sort()
        R2.sort()
        for pop in np.arange(2)+1:
            if pop == 1:
                R0 = R1  # [pc]
                Rhalf = R1[len(R1)//2]  # integer index (Python 3)
                coll_R1half.append(Rhalf)
                co = 'blue'
            else:
                R0 = R2  # [pc]
                Rhalf = R2[len(R2)//2]
                coll_R2half.append(Rhalf)
                co = 'red'
    coll_R1half = np.array(coll_R1half)
    coll_R2half = np.array(coll_R2half)
    coll_Rdiffhalf = np.abs(coll_R1half - coll_R2half)
    # select 3 assignments: one for median, one for median-1sigma,
    # one for median+1sigma
    med_Rdiff = np.median(coll_Rdiffhalf)
    stdif = np.std(coll_Rdiffhalf)
    min1s_Rdiff = med_Rdiff - stdif
    max1s_Rdiff = med_Rdiff + stdif
    # clf()
    # hist(coll_Rdiffhalf, np.sqrt(len(coll_Rdiffhalf))/2)
    # xlabel(r'$\Delta R/pc$')
    # ylabel('count')
    # axvline(med_Rdiff, color='r')
    # axvline(min1s_Rdiff, color='g')
    # axvline(max1s_Rdiff, color='g')
    kmed = np.argmin(abs(coll_Rdiffhalf - med_Rdiff))
    kmin1s = np.argmin(abs(coll_Rdiffhalf - min1s_Rdiff))
    kmax1s = np.argmin(abs(coll_Rdiffhalf - max1s_Rdiff))
    print('saving median, lower 68%, upper 68% stellar assignments')
    np.savetxt(gpr.dir + 'data/popass_median', coll_popass[kmed])
    np.savetxt(gpr.dir + 'data/popass_min1s', coll_popass[kmin1s])
    np.savetxt(gpr.dir + 'data/popass_max1s', coll_popass[kmax1s])
    print('finished')
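# The assignment probability above weights each star by the two best-fit
# Gaussian components. `gh.gauss` is not shown in this section; it is
# presumably a normalized Gaussian pdf, as in this minimal sketch:
import numpy as np

def gauss(x, mu, sig):
    # normalized Gaussian pdf; works on scalars and arrays alike
    return np.exp(-(x - mu)**2/(2.*sig**2))/(np.sqrt(2.*np.pi)*sig)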
    ax.set_ylim(-1, 1.5)
    show()
## \fn show_metallicity(Fe, Fe_err, Mg, Mg_err)
# show ellipses with error bars for each star's Fe and Mg
# @param Fe iron abundance
# @param Fe_err error on it
# @param Mg magnesium abundance
# @param Mg_err error on it

import gi_params
gp = gi_params.Params()
import gr_params
gpr = gr_params.grParams(gp)
DL = {1: lambda x: x * (138),  # +/- 8 for Fornax
      2: lambda x: x * (101),  # +/- 5 for Carina
      3: lambda x: x * (79),   # +/- 4 for Sculptor
      4: lambda x: x * (86),   # +/- 4 for Sextans
      5: lambda x: x * (80)    # +/- 10 for Draco
      }[gp.case](gu.kpc__pc)
k2 = {1: lambda x: x * (339),  # +/- 36 for Fornax
      2: lambda x: x * (137),  # +/- 22 for Carina
      3: lambda x: x * (94),   # +/- 26 for Sculptor
      4: lambda x: x * (294),  # +/- 38 for Sextans
      5: lambda x: x * (-1)    # TODO: look up for Draco
      }[gp.case](1)
def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    xall, yall = np.loadtxt(gp.files.get_com_file(0), skiprows=1,
                            usecols=(0, 1), unpack=True)  # 2*[Rscale0]
    R = np.sqrt(xall**2 + yall**2)  # [Rscale0]
    # set number and size of (linearly spaced) bins
    Rmin = 0.  # [Rscale0]
    Rmax = max(R) if gp.maxR < 0 else 1.0*gp.maxR  # [Rscale0]
    R = R[(R < Rmax)]  # [Rscale0]
    Binmin, Binmax, Rbin = gh.determine_radius(R, Rmin, Rmax, gp)  # [Rscale0]
    gp.xipol = Rbin
    minr = min(Rbin)  # [pc]
    maxr = max(Rbin)  # [pc]
    gp.xepol = np.hstack([minr/8., minr/4., minr/2., Rbin,
                          2*maxr, 4*maxr, 8*maxr])  # [pc]
    Vol = gh.volume_circular_ring(Binmin, Binmax, gp)  # [Rscale0^2]
    Rscale0 = gf.read_Xscale(gp.files.get_scale_file(0))  # [pc]
    for pop in range(gp.pops+1):
        print('####### working on component ', pop)
        print('input: ', gp.files.get_com_file(pop))
        # exclude second condition if self-consistent approach wished
        if gp.investigate == "obs" and gp.case == 1 and pop == 0:
            # for Fornax, overwrite first Sigma with deBoer data
            import gr_MCMCbin_for
            gr_MCMCbin_for.run(gp)
            continue
        # start from data centered on COM already:
        if gf.bufcount(gp.files.get_com_file(pop)) < 2:
            continue
        # only read in data if needed: pops = 1: reuse data from pop=0 part
        if (gp.pops == 1 and pop < 1 or gp.pops == 2) or gp.investigate == 'obs':
            x, y, v = np.loadtxt(gp.files.get_com_file(pop), skiprows=1,
                                 usecols=(0, 1, 2), unpack=True)
            # [Rscalei], [Rscalei], [km/s]
            # calculate 2D radius on the skyplane
            R = np.sqrt(x**2 + y**2)  # [Rscalei]
            Rscalei = gf.read_Xscale(gp.files.get_scale_file(pop))  # [pc]
            # set maximum radius (if gp.maxR is set)
            Rmax = max(R) if gp.maxR < 0 else 1.0*gp.maxR  # [Rscale0]
            print('Rmax [Rscale0] = ', Rmax)
            # pdb.set_trace()
            # from pylab import clf, hist, axvline, xlim
            # clf()
            # hist(np.log10(R*Rscalei), 40)
            # for i in range(len(Rbin)):
            #     axvline(np.log10(Rbin[i]*Rscale0))
            # xlim([np.log10(min(gp.xepol*Rscale0)), np.log10(max(gp.xepol*Rscale0))])
            sel = (R*Rscalei <= Rmax*Rscale0)
            x = x[sel]
            y = y[sel]
            v = v[sel]
            R = R[sel]  # [Rscalei]
            totmass_tracers = float(len(x))  # [Munit], Munit = 1/star
            Rs = R    # + possible starting offset, [Rscalei]
            vlos = v  # + possible starting offset, [km/s]
            tr = open(gp.files.get_ntracer_file(pop), 'w')
            print(totmass_tracers, file=tr)
            tr.close()
            f_Sig, f_nu, f_mass, f_sig, f_kap, f_zeta = gf.write_headers_2D(gp, pop)
        if (gp.pops == 1 and pop < 1) or gp.pops == 2 or gp.investigate == 'obs':
            Sig_kin = np.zeros((gp.nipol, gpr.n))
            siglos = np.zeros((gp.nipol, gpr.n))
            if gp.usekappa:
                kappa = np.zeros((gp.nipol, gpr.n))
            if gp.usezeta:
                v2 = np.zeros((gp.nipol, gpr.n))
                v4 = np.zeros((gp.nipol, gpr.n))
                Ntot = np.zeros(gpr.n)
                zetaa = np.zeros(gpr.n)
                zetab = np.zeros(gpr.n)
            # particle selections, shared by density, siglos,
            # kappa and zeta calculations
            tpb = np.zeros((gp.nipol, gpr.n))
            for k in range(gpr.n):
                Rsi = gh.add_errors(Rs, gpr.Rerr)       # [Rscalei]
                vlosi = gh.add_errors(vlos, gpr.vrerr)  # [km/s]
                for i in range(gp.nipol):
                    ind1 = np.argwhere(np.logical_and(Rsi*Rscalei >= Binmin[i]*Rscale0,
                                                      Rsi*Rscalei < Binmax[i]*Rscale0)).flatten()  # [1]
                    tpb[i][k] = float(len(ind1))  # [1]
                    Sig_kin[i][k] = float(len(ind1))*totmass_tracers/Vol[i]
                    # [Munit/Rscale**2]
                    if len(ind1) <= 1:
                        siglos[i][k] = siglos[i-1][k]
                        print('### using last value, missing data')
                        if gp.usekappa:
                            kappa[i][k] = kappa[i-1][k]
                            # attention! should be 0, uses last value
                        if gp.usezeta:
                            v2[i][k] = v2[i-1][k]
                            v4[i][k] = v4[i-1][k]
                    else:
                        siglos[i][k] = meanbiweight(vlosi[ind1], ci_perc=68.4,
                                                    ci_mean=True, ci_std=True)[1]
                        # [km/s], see BiWeight.py
                        if gp.usekappa:
                            kappa[i][k] = kurtosis(vlosi[ind1], axis=0,
                                                   fisher=False, bias=False)  # [1]
                        if gp.usezeta:
                            ave, adev, sdev, var, skew, curt = gh.moments(vlosi[ind1])
                            v2[i][k] = var
                            v4[i][k] = (curt+3)*var**2
                Sigma = Sig_kin[:, k]
                if gp.usezeta:
                    pdb.set_trace()
                    Ntot[k] = gh.Ntot(Rbin, Sigma, gp)
                    zetaa[k] = gh.starred(Rbin, v4[:, k], Sigma, Ntot[k], gp)
                    v2denom = (gh.starred(Rbin, v2[:, k], Sigma, Ntot[k], gp))**2
                    zetaa[k] /= v2denom
                    zetab[k] = gh.starred(Rbin, v4[:, k]*Rbin**2, Sigma, Ntot[k], gp)
                    zetab[k] /= v2denom
                    zetab[k] /= (gh.starred(Rbin, Rbin, Sigma, Ntot[k], gp))**2
            if gp.investigate == 'obs' and gp.case < 5:
                Sig_phot = obs_Sig_phot(Binmin, Binmax, Rscale0, Sig_kin, gp, gpr)
            else:
                Sig_phot = Sig_kin
        # do the following for all populations
        Sig0 = np.sum(Sig_phot[0])/float(gpr.n)  # [Munit/Rscale^2]
        Sig0pc = Sig0/Rscale0**2  # [Munit/pc^2]
        gf.write_Sig_scale(gp.files.get_scale_file(pop), Sig0pc, totmass_tracers)
        # calculate density and mass profile, store it
        # ----------------------------------------------------------------------
        # tpb0 = np.sum(tpb[0])/float(gpr.n)  # [1]
        # Sigerr0 = Sig0/np.sqrt(tpb0)        # [Munit/Rscale^2]
        P_dens = np.zeros(gp.nipol)
        P_edens = np.zeros(gp.nipol)
        for b in range(gp.nipol):
            Sig = np.sum(Sig_kin[b])/(1.*gpr.n)  # [Munit/Rscale^2]
            tpbb = np.sum(tpb[b])/float(gpr.n)   # [1], mean number of tracers in bin
            Sigerr = Sig/np.sqrt(tpbb)           # [Munit/Rscale^2], Poissonian error
            # compare data and analytic profile <=> get stellar
            # density or mass ratio from Matt Walker
            if np.isnan(Sigerr):
                P_dens[b] = P_dens[b-1]    # [1]
                P_edens[b] = P_edens[b-1]  # [1]
            else:
                P_dens[b] = Sig/Sig0       # [1]
                P_edens[b] = Sigerr/Sig0   # [1]
            print(Rbin[b], Binmin[b], Binmax[b], P_dens[b], P_edens[b], file=f_Sig)
            # 3*[Rscale], 2*[dens0]
            indr = (R < Binmax[b])
            Menclosed = float(np.sum(indr))/totmass_tracers
            # for normalization to 1  [totmass_tracers]
            Merr = Menclosed/np.sqrt(tpbb)  # or artificial Menclosed/10
                                            # [totmass_tracers]
            print(Rbin[b], Binmin[b], Binmax[b], Menclosed, Merr, file=f_mass)
            # [Rscale0], 2*[totmass_tracers]
        f_Sig.close()
        f_mass.close()
        # deproject Sig to get nu
        numedi = gip.Sig_INT_rho(Rbin*Rscalei, Sig0pc*P_dens, gp)
        # numin = gip.Sig_INT_rho(Rbin*Rscalei, Sig0pc*(P_dens-P_edens), gp)
        numax = gip.Sig_INT_rho(Rbin*Rscalei, Sig0pc*(P_dens+P_edens), gp)
        nu0pc = numedi[0]
        gf.write_nu_scale(gp.files.get_scale_file(pop), nu0pc)
        nuerr = numax - numedi
        for b in range(gp.nipol):
            print(Rbin[b], Binmin[b], Binmax[b],
                  numedi[b]/nu0pc, nuerr[b]/nu0pc, file=f_nu)
        f_nu.close()
        # calculate and output siglos
        # --------------------------------------------
        p_dvlos = np.zeros(gp.nipol)
        p_edvlos = np.zeros(gp.nipol)
        for b in range(gp.nipol):
            sig = np.sum(siglos[b])/gpr.n       # [km/s]
            tpbb = np.sum(tpb[b])/float(gpr.n)  # [1]
            if tpbb == 0:
                sigerr = p_edvlos[b-1]  # [km/s]  attention! uses last error
            else:
                # Poisson error with measurement errors
                # sigerr = sig/np.sqrt(tpbb)
                # sigerr = np.sqrt(sigerr**2 + 2**2)  # 2 km/s
                # standard deviation
                # sigerr = stddevbiweight(siglos[b])
                # Poisson error, first guess
                sigerr = sig/np.sqrt(tpbb)  # [km/s]
            p_dvlos[b] = sig      # [km/s]
            p_edvlos[b] = sigerr  # [km/s]
        maxsiglos = max(p_dvlos)  # [km/s]
        print('maxsiglos = ', maxsiglos, '[km/s]')
        fpars = open(gp.files.get_scale_file(pop), 'a')
        print(maxsiglos, file=fpars)  # [km/s]
        fpars.close()
        for b in range(gp.nipol):
            print(Rbin[b], Binmin[b], Binmax[b],
                  np.abs(p_dvlos[b]/maxsiglos), np.abs(p_edvlos[b]/maxsiglos),
                  file=f_sig)  # 3*[rscale], 2*[maxsiglos]
        f_sig.close()
        # calculate and output kurtosis kappa
        # --------------------------------------------
        if gp.usekappa:
            p_kappa = np.zeros(gp.nipol)  # needed for plotting later
            p_ekappa = np.zeros(gp.nipol)
            for b in range(gp.nipol):
                kappavel = np.sum(kappa[b])/gpr.n   # [1]
                tpbb = np.sum(tpb[b])/float(gpr.n)  # [1]
                if tpbb == 0:
                    kappavelerr = p_edvlos[b-1]  # [1]  attention! uses last error
                else:
                    kappavelerr = np.abs(kappavel/np.sqrt(tpbb))  # [1]
                p_kappa[b] = kappavel
                p_ekappa[b] = kappavelerr
                print(Rbin[b], Binmin[b], Binmax[b],
                      kappavel, kappavelerr, file=f_kap)  # [rscale], 2*[1]
            f_kap.close()
        # output zetas
        # -------------------------------------------------------------
        if gp.usezeta:
            print(np.median(zetaa), np.median(zetab), file=f_zeta)
            f_zeta.close()
        if gpr.showplots:
            gpr.show_plots_dens_2D(Rbin*Rscalei, P_dens, P_edens, Sig0pc)
            gpr.show_plots_sigma(Rbin*Rscalei, p_dvlos, p_edvlos)
            if gp.usekappa:
                gpr.show_plots_kappa(Rbin*Rscalei, p_kappa, p_ekappa)
        # overwrite Sig profile if photometric data is used
        if gp.investigate == 'obs' and gp.case == 1 and pop == 1 \
           and not gp.selfconsistentnu:
            import os
            os.system('cp ' + gp.files.get_scale_file(0) + ' '
                      + gp.files.get_scale_file(1))
            # replace last line with actual maxsiglos from tracer particles
            os.system("sed -i '$s/^.*/" + str(maxsiglos) + "/' "
                      + gp.files.get_scale_file(1))
            os.system('cp ' + gp.files.Sigfiles[0] + ' ' + gp.files.Sigfiles[1])
            continue
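# `gip.Sig_INT_rho` deprojects the binned surface density Sig(R) into a 3D
# density nu(r); its implementation is not part of this section. A minimal
# numerical sketch of the underlying Abel inversion for a spherical system,
#     nu(r) = -(1/pi) * int_r^inf (dSig/dR) dR / sqrt(R^2 - r^2),
# with an illustrative name and a crude trapezoidal quadrature (the outermost
# bin is left at zero for lack of data beyond max(R)):
import numpy as np

def Sig_to_rho(R, Sig):
    dSdR = np.gradient(Sig, R)  # finite-difference derivative of Sig(R)
    rho = np.zeros(len(R))
    for i in range(len(R) - 1):
        Rout = R[i+1:]          # strictly outside r = R[i], no singularity
        rho[i] = -np.trapz(dSdR[i+1:]/np.sqrt(Rout**2 - R[i]**2), Rout)/np.pi
    return rho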
def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    ## read input measurements
    print('input: ', gpr.fil)
    x0, y0, z0, vb0, vz0, Mg0, PM0, comp0 = \
        np.genfromtxt(gpr.fil, skiprows=0, unpack=True,
                      usecols=(0, 1, 2, 5, 11, 13, 19, 20),
                      dtype="d17",
                      converters={0: expDtofloat,   # x0 in pc
                                  1: expDtofloat,   # y0 in pc
                                  2: expDtofloat,   # z0 in pc
                                  5: expDtofloat,   # vz0 in km/s
                                  12: expDtofloat,  # vb0 (LOS due binary), km/s
                                  13: expDtofloat,  # Mg0 in Angstrom
                                  19: expDtofloat,  # PM0 [1]
                                  20: expDtofloat}) # comp0 1,2,3(background)
    # use component 12-1 instead of 6-1 for z velocity,
    # to exclude observational errors
    # only use stars which are members of the dwarf:
    # exclude pop3 by construction
    pm = (PM0 >= gpr.pmsplit)  # exclude foreground contamination, outliers
    PM0 = PM0[pm]
    comp0 = comp0[pm]
    x0 = x0[pm]
    y0 = y0[pm]
    z0 = z0[pm]
    vz0 = vz0[pm]; vb0 = vb0[pm]; Mg0 = Mg0[pm]
    pm1 = (comp0 == 1)  # will be overwritten below if gp.metalpop
    pm2 = (comp0 == 2)  # same same
    pm3 = (comp0 == 3)
    if gp.metalpop:
        # drawing of populations based on metallicity;
        # get parameters from function in pymcmetal.py
        import pickle
        fi = open('metalsplit.dat', 'rb')
        DATA = pickle.load(fi)
        fi.close()
        p, mu1, sig1, mu2, sig2, M, pm1, pm2 = DATA
    # cutting pm_i to a maximum of ntracers particles:
    ind = np.arange(len(x0))
    np.random.shuffle(ind)
    ind = ind[:np.sum(gp.ntracer)]
    x0 = x0[ind]; y0 = y0[ind]; z0 = z0[ind]; comp0 = comp0[ind]
    vz0 = vz0[ind]; vb0 = vb0[ind]; Mg0 = Mg0[ind]
    PM0 = PM0[ind]; pm1 = pm1[ind]; pm2 = pm2[ind]; pm3 = pm3[ind]
    pm = pm1 + pm2 + pm3
    # get COM with shrinking sphere method
    com_x, com_y, com_z = com_shrinkcircle(x0, y0, z0, PM0)
    print('COM [pc]: ', com_x, com_y, com_z)
    com_vz = np.sum(vz0*PM0)/np.sum(PM0)  # [km/s]
    print('VOM [km/s]', com_vz)
    # from now on, continue to work with 3D data; store to different files
    x0 -= com_x; y0 -= com_y; z0 -= com_z  # [pc]
    vz0 -= com_vz  # [km/s]
    # but still get the same radii as from the 2D method,
    # to get the comparison of integration routines right
    r0 = np.sqrt(x0*x0 + y0*y0 + z0*z0)  # [pc]
    rhalf = np.median(r0)  # [pc]
    rscale = rhalf  # or gpr.r_DM  # [pc]
    print('rscale = ', rscale, ' pc')
    print('max(r) = ', max(r0), ' pc')
    print('last element of r: ', r0[-1], ' pc')
    print('total number of stars: ', len(r0))
    pop = -1
    for pmn in [pm, pm1, pm2]:
        pmr = (r0 < (gp.maxR*rscale))  # [1] based on [pc]
        pmn = pmn*pmr  # [1]
        print("fraction of members = ", 1.0*sum(pmn)/len(pmn))
        pop = pop + 1
        x = x0[pmn]; y = y0[pmn]; z = z0[pmn]
        vz = vz0[pmn]; vb = vb0[pmn]           # [pc], [km/s]
        Mg = Mg0[pmn]; comp = comp0[pmn]; PMN = PM0[pmn]  # [Ang], [1], [1]
        m = np.ones(len(pmn))
        rscalei = np.median(np.sqrt(x*x + y*y + z*z))
        # print("x y z" on first line, to interpret data later on
        crscale = open(gp.files.get_scale_file(pop)+'_3D', 'w')
        print('# rscale in [pc], surfdens_central (=dens0) in [Munit/rscale0^2], and in [Munit/pc^2], and totmass_tracers [Munit], and max(sigma_LOS) in [km/s]',
              file=crscale)
        print(rscalei, file=crscale)  # use 3 different half-light radii
        crscale.close()
        # store recentered positions and velocity
        print('output: ', gp.files.get_com_file(pop)+'_3D')
        c = open(gp.files.get_com_file(pop)+'_3D', 'w')
        print('# x [rscale],', 'y [rscale],', 'z [rscale]', 'vLOS [km/s],',
              'rscale = ', rscalei, ' pc', file=c)
        for k in range(len(x)):
            print(x[k]/rscalei, y[k]/rscalei, z[k]/rscalei, vz[k], file=c)
            # 3*[pc], [km/s]
        c.close()
        if gpr.showplots and False:
            from mpl_toolkits.mplot3d import Axes3D
            import matplotlib.pyplot as plt
            fig = plt.figure()
            ax = fig.add_subplot(111, projection='3d')
            # res = (abs(x) < 3*rscalei)*(abs(y) < 3*rscalei)
            # x = x[res]; y = y[res]; z = z[res]
            en = len(x)
            ax.scatter3D(x[:en], y[:en], z[:en], c=pmn[:en], s=35,
                         vmin=0.95, vmax=1.0, lw=0.0, alpha=0.2)
            # circ_HL = Circle((0,0), radius=rscalei, fc='None', ec='b', lw=1)
            # gca().add_patch(circ_HL)
            # circ_DM = Circle((0,0), radius=gpr.r_DM, fc='None', ec='r', lw=1)
            # gca().add_patch(circ_DM)
            pdb.set_trace()
            gpr.show_part_pos(x, y, pmn, rscalei)
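# Above, the position center comes from the shrinking sphere, while the
# velocity offset is the PM-weighted mean. The `com_mean` referenced in
# commented-out lines elsewhere in this section presumably applies the same
# weighted mean to the coordinates; a minimal sketch under that assumption:
import numpy as np

def com_mean(x, y, z, PM):
    # probability-of-membership weighted mean of each coordinate, [pc]
    return (np.dot(x, PM)/np.sum(PM),
            np.dot(y, PM)/np.sum(PM),
            np.dot(z, PM)/np.sum(PM))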
def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    gpr.fil = gpr.dir + "/data/tracers.dat"
    delim = [0, 22, 3, 3, 6, 4, 3, 5, 6, 6, 7, 5, 6, 5, 6, 5, 6]
    ID = np.genfromtxt(gpr.fil, skiprows=29, unpack=True,
                       usecols=(0, 1), delimiter=delim)
    RAh, RAm, RAs, DEd, DEm, DEs, Vmag, VI, VHel, e_VHel, SigFe, e_SigFe, \
        SigMg, e_SigMg, PM = np.genfromtxt(gpr.fil, skiprows=29, unpack=True,
                                           usecols=tuple(range(2, 17)),
                                           delimiter=delim, filling_values=-1)
    # only use stars which have Mg measurements and high membership probability
    pm = (SigMg > -1) * (PM >= 0.95)
    print("f_members = ", gh.pretty(1. * sum(pm) / len(pm)))
    ID = ID[1][pm]
    RAh = RAh[pm]; RAm = RAm[pm]; RAs = RAs[pm]
    DEd = DEd[pm]; DEm = DEm[pm]; DEs = DEs[pm]
    Vmag = Vmag[pm]; VI = VI[pm]
    VHel = VHel[pm]; e_VHel = e_VHel[pm]
    SigFe = SigFe[pm]; e_SigFe = e_SigFe[pm]
    SigMg = SigMg[pm]; e_SigMg = e_SigMg[pm]
    PM = PM[pm]
    Mg0 = SigMg
    # convert sexagesimal RA/DEC into continuous angles, keeping the sign
    sig = abs(RAh[0]) / RAh[0]
    RAh = RAh / sig
    xs = 15 * (RAh * 3600 + RAm * 60 + RAs) * sig  # [arcsec]
    sig = abs(DEd[0]) / DEd[0]
    DEd = DEd / sig
    ys = (DEd * 3600 + DEm * 60 + DEs) * sig  # [arcsec]
    arcsec = 2. * np.pi / (360. * 60. * 60)  # [rad/arcsec]
    kpc = 1000  # [pc]
    DL = {1: lambda x: x * (138),  # +/- 8 for Fornax
          2: lambda x: x * (101),  # +/- 5 for Carina
          3: lambda x: x * (79),   # +/- 4 for Sculptor
          4: lambda x: x * (86),   # +/- 4 for Sextans
          5: lambda x: x * (80)    # +/- 10 for Draco
          }[gp.case](kpc)          # [pc], distance to the dwarf
    xs *= (arcsec * DL)  # [pc]
    ys *= (arcsec * DL)  # [pc]
    PM0 = np.copy(PM)
    x0 = np.copy(xs)
    y0 = np.copy(ys)  # [pc]
    vz0 = np.copy(VHel)  # [km/s]
    # only use stars which are members of the dwarf: exclude pop3 by construction
    #pm = (PM0 >= gpr.pmsplit)  # exclude foreground contamination, outliers
    #x0, y0, vz0, Mg0, PM0 = select_pm(x0, y0, vz0, Mg0, PM0, pm)
    # assign population
    if gp.pops == 2:
        # draw population assignment based on metallicity;
        # get parameters from function in pymcmetal.py
        #[p, mu1, sig1, mu2, sig2] = np.loadtxt(gp.files.dir+'metalsplit.dat')
        #[pm1, pm2] = np.loadtxt(gp.files.dir+'metalsplit_assignment.dat')
        popass = np.loadtxt(gp.files.dir + 'popass')
        pm1 = (popass == 1)
        pm2 = (popass == 2)
    elif gp.pops == 1:
        pm1 = (PM >= 0)
        pm2 = (PM < 0)  # assign none, but of same length as xs
    x1, y1, vz1, Mg1, PM1 = select_pm(x0, y0, vz0, Mg0, PM0, pm1)
    x2, y2, vz2, Mg2, PM2 = select_pm(x0, y0, vz0, Mg0, PM0, pm2)
    # cutting pm_i to a maximum of ntracers_i particles each:
    ind1 = np.arange(len(x1))
    np.random.shuffle(ind1)  # np.random.shuffle shuffles in place
    ind1 = ind1[:gp.ntracer[1 - 1]]
    ind2 = np.arange(len(x2))
    np.random.shuffle(ind2)
    ind2 = ind2[:gp.ntracer[2 - 1]]
    x1, y1, vz1, Mg1, PMS1 = select_pm(x1, y1, vz1, Mg1, PM1, ind1)
    x2, y2, vz2, Mg2, PMS2 = select_pm(x2, y2, vz2, Mg2, PM2, ind2)
    x0, y0, vz0, pm1, pm2, pm = concat_pops(x1, x2, y1, y2, vz1, vz2, gp)
    # note: Mg0, PM0 are assumed to stay aligned with the concatenated arrays
    # optimum: get 3D center of mass with means,
    # com_x, com_y, com_z = com_mean(x0, y0, z0, PM0), if a z component is available
    com_x, com_y, com_vz = com_shrinkcircle_v_2D(x0, y0, vz0, pm)  # [pc], [km/s]
    # from now on, work with data centered on the COM
    x0 -= com_x  # [pc]
    y0 -= com_y  # [pc]
    vz0 -= com_vz  # [km/s]
    R0 = np.sqrt(x0**2 + y0**2)  # [pc]
    Rhalf = np.median(R0)  # [pc]
    Rscale = Rhalf  # [pc], from all tracer points
    pop = -1
    for pmn in [pm, pm1, pm2]:
        pop = pop + 1  # population number: 0 = all, then 1, 2
        pmr = (R0 < (gp.maxR * Rscale))  # max extension of data,
                                         # (rprior*Rscale) from gi_params
        pmn = pmn * pmr  # [1]
        print("fraction of members = ", 1.0 * sum(pmn) / len(pmn))
        x, y, vz, Mg, PMN = select_pm(x0, y0, vz0, Mg0, PM0, pmn)
        R = np.sqrt(x * x + y * y)  # [pc]
        Rscalei = np.median(R)  # [pc]
        gf.write_Xscale(gp.files.get_scale_file(pop), Rscalei)  # [pc]
        gf.write_data_output(gp.files.get_com_file(pop),
                             x / Rscalei, y / Rscalei, vz, Rscalei)  # [pc]
        if gpr.showplots:
            gpr.show_part_pos(x, y, pmn, Rscale)
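# The block above converts sexagesimal RA/DEC into projected distances on the
# sky plane at the distance of the dwarf. A minimal, self-contained sketch of
# that conversion; radec_to_pc and the example star are hypothetical,
# for illustration only:
import numpy as np

def radec_to_pc(RAh, RAm, RAs, DEd, DEm, DEs, distance_pc):
    # one hour of right ascension spans 15 degrees, hence the factor 15
    ra_arcsec = 15.0 * (RAh * 3600.0 + RAm * 60.0 + RAs)
    # carry the sign of the declination degrees through minutes and seconds
    de_arcsec = np.sign(DEd) * (abs(DEd) * 3600.0 + DEm * 60.0 + DEs)
    rad_per_arcsec = 2.0 * np.pi / (360.0 * 60.0 * 60.0)
    # small-angle approximation: transverse offset = angle [rad] * distance
    return (ra_arcsec * rad_per_arcsec * distance_pc,
            de_arcsec * rad_per_arcsec * distance_pc)

# e.g. a star at RA 02h 39m 52s, DEC -34d 26m 57s seen at 138 kpc (Fornax-like):
# x_pc, y_pc = radec_to_pc(2, 39, 52, -34, 26, 57, 138e3)
# the absolute numbers are large; the COM subtraction above recenters them.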
def run(gp, pop):
    import gr_params
    gpr = gr_params.grParams(gp)
    xall, yall = np.loadtxt(gp.files.get_com_file(0), skiprows=1,
                            usecols=(0, 1), unpack=True)  # 2*[Rscale0]
    R = np.sqrt(xall**2 + yall**2)  # [Rscale0]
    # set number and size of (linearly spaced) bins
    Rmin = 0.  # [Rscale0]
    Rmax = max(R) if gp.maxR < 0 else 1.0 * gp.maxR  # [Rscale0]
    R = R[(R < Rmax)]  # [Rscale0]
    Binmin, Binmax, Rbin = gh.determine_radius(R, Rmin, Rmax, gp)  # [Rscale0]
    gp.xipol = Rbin
    minr = min(Rbin)  # [Rscale0]
    maxr = max(Rbin)  # [Rscale0]
    gp.xepol = np.hstack([minr / 8., minr / 4., minr / 2., Rbin,
                          2 * maxr, 4 * maxr, 8 * maxr])  # [Rscale0]
    Vol = gh.volume_circular_ring(Binmin, Binmax, gp)  # [Rscale0^2]
    Rscale0 = gf.read_Xscale(gp.files.get_scale_file(0))  # [pc]
    print('####### working on component ', pop)
    print('input: ', gp.files.get_com_file(pop))
    # start from data centered on COM already:
    if gf.bufcount(gp.files.get_com_file(pop)) < 2:
        return
    # only read in data if needed: for pops = 1, reuse the data from the pop = 0 part
    x, y = np.loadtxt(gp.files.get_com_file(pop), skiprows=1,
                      usecols=(0, 1), unpack=True)  # [Rscalei], [Rscalei]
    # calculate 2D radius on the sky plane
    R = np.sqrt(x**2 + y**2)  # [Rscalei]
    Rscalei = gf.read_Xscale(gp.files.get_scale_file(pop))  # [pc]
    # set maximum radius (if gp.maxR is set)
    Rmax = max(R) if gp.maxR < 0 else 1.0 * gp.maxR  # [Rscale0]
    print('Rmax [Rscale0] = ', Rmax)
    sel = (R * Rscalei <= Rmax * Rscale0)
    x = x[sel]  # [Rscalei]
    y = y[sel]  # [Rscalei]
    R = R[sel]  # [Rscalei]
    totmass_tracers = float(len(x))  # [Munit], Munit = 1/star
    Rs = R  # + possible starting offset, [Rscalei]
    tr = open(gp.files.get_ntracer_file(pop), 'w')
    print(totmass_tracers, file=tr)
    tr.close()
    f_Sig, f_nu, f_mass, f_sig, f_kap, f_zeta = gf.write_headers_2D(gp, pop)
    # particle selections, shared by density, siglos, kappa and zeta calculations
    Sig_phot = np.zeros((gp.nipol, gpr.n))
    tpb = np.zeros((gp.nipol, gpr.n))
    for k in range(gpr.n):
        Rsi = gh.add_errors(Rs, gpr.Rerr)  # [Rscalei]
        for i in range(gp.nipol):
            ind1 = np.argwhere(np.logical_and(Rsi * Rscalei >= Binmin[i] * Rscale0,
                                              Rsi * Rscalei < Binmax[i] * Rscale0)).flatten()  # [1]
            tpb[i][k] = float(len(ind1))  # [1]
            Sig_phot[i][k] = float(len(ind1)) * totmass_tracers / Vol[i]  # [Munit/Rscale0^2]
    # do the following for all populations
    Sig0 = np.sum(Sig_phot[0]) / float(gpr.n)  # [Munit/Rscale0^2]
    Sig0pc = Sig0 / Rscale0**2  # [Munit/pc^2]
    gf.write_Sig_scale(gp.files.get_scale_file(pop), Sig0pc, totmass_tracers)
    # calculate density and mass profile, store them
    # ----------------------------------------------------------------------
    P_dens = np.zeros(gp.nipol)
    P_edens = np.zeros(gp.nipol)
    for b in range(gp.nipol):
        Sig = np.sum(Sig_phot[b]) / (1. * gpr.n)  # [Munit/Rscale0^2]
        tpbb = np.sum(tpb[b]) / float(gpr.n)  # [1], mean number of tracers in bin
        Sigerr = Sig / np.sqrt(tpbb)  # [Munit/Rscale0^2], Poisson error
        # compare data and analytic profile <=> get stellar
        # density or mass ratio from Matt Walker
        if np.isnan(Sigerr):
            P_dens[b] = P_dens[b - 1]  # [1]
            P_edens[b] = P_edens[b - 1]  # [1]
        else:
            P_dens[b] = Sig / Sig0  # [1]
            P_edens[b] = Sigerr / Sig0  # [1]
        print(Rbin[b], Binmin[b], Binmax[b], P_dens[b], P_edens[b], file=f_Sig)
        # 3*[Rscale0], [Sig0], [Sig0]
        indr = (R < Binmax[b])
        Menclosed = float(np.sum(indr)) / totmass_tracers
        # normalized to 1 at the last bin, [totmass_tracers]
        Merr = Menclosed / np.sqrt(tpbb)  # or artificial Menclosed/10, [totmass_tracers]
        print(Rbin[b], Binmin[b], Binmax[b], Menclosed, Merr, file=f_mass)
        # [Rscale0], 2*[totmass_tracers]
    f_Sig.close()
    f_mass.close()
    # deproject Sig to get nu
    numedi = gip.Sig_INT_rho(Rbin * Rscalei, Sig0pc * P_dens, gp)
    #numin = gip.Sig_INT_rho(Rbin*Rscalei, Sig0pc*(P_dens-P_edens), gp)
    numax = gip.Sig_INT_rho(Rbin * Rscalei, Sig0pc * (P_dens + P_edens), gp)
    nu0pc = numedi[0]
    gf.write_nu_scale(gp.files.get_scale_file(pop), nu0pc)
    nuerr = numax - numedi
    for b in range(gp.nipol):
        print(Rbin[b], Binmin[b], Binmax[b],
              numedi[b] / nu0pc, nuerr[b] / nu0pc, file=f_nu)
    f_nu.close()
    # write dummy sig scale, not to be used later on
    maxsiglos = -1.  # [km/s]
    fpars = open(gp.files.get_scale_file(pop), 'a')
    print(maxsiglos, file=fpars)  # [km/s]
    fpars.close()
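# gip.Sig_INT_rho is defined elsewhere in the repository. Under spherical
# symmetry, recovering a 3D density nu(r) from a projected surface density
# Sig(R) is the standard Abel inversion,
#     nu(r) = -(1/pi) * int_r^inf (dSig/dR) / sqrt(R^2 - r^2) dR.
# A rough numerical sketch under that assumption; abel_deproject is a
# hypothetical name, not the repository routine, and it is coarse at the
# outermost bins:
import numpy as np

def abel_deproject(R, Sig):
    dSig_dR = np.gradient(Sig, R)          # derivative on the given grid
    nu = np.zeros(len(R))
    for i, r in enumerate(R[:-1]):
        RR = R[R > r]                      # integrate outward of r
        integrand = dSig_dR[R > r] / np.sqrt(RR**2 - r**2)
        nu[i] = -np.trapz(integrand, RR) / np.pi
    nu[-1] = nu[-2]                        # no leverage beyond the last bin
    return nu

# sanity check: a Plummer profile with Sig(R) ~ (1 + R^2)^(-2) should come out
# close to nu(r) ~ (1 + r^2)^(-5/2), up to a constant factor.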
def read(Rdiff, gp):
    if Rdiff != 'median' and Rdiff != 'min1s' and Rdiff != 'max1s':
        print('run grd_metalsplit.py to get the split by metallicity done before reading it in for GravImage')
        exit(1)
    import gr_params
    gpr = gr_params.grParams(gp)
    global Nsample, split, e_split, PM, split_min, split_max
    gpr.fil = gpr.dir + "data/tracers.dat"
    # number of measured tracer stars
    Nsample = bufcount(gpr.fil)
    delim = [0, 22, 3, 3, 6, 4, 3, 5, 6, 6, 7, 5, 6, 5, 6, 5, 6]
    #ID = np.genfromtxt(gpr.fil, skiprows=29, unpack=True, usecols=(0,1), delimiter=delim)
    if gp.case == 5:
        RAh, RAm, RAs, DEd, DEm, DEs, VHel, e_VHel, Teff, e_Teff, \
            logg, e_logg, Fe, e_Fe, N = np.loadtxt(gpr.fil, skiprows=25, unpack=True)
        PM = np.ones(len(RAh))
        split = logg
        e_split = e_logg
        sel = (N > 0)
    else:
        RAh, RAm, RAs, DEd, DEm, DEs, Vmag, VI, VHel, e_VHel, SigFe, e_SigFe, \
            Mg, Mg_err, PM = np.genfromtxt(gpr.fil, skiprows=29, unpack=True,
                                           usecols=tuple(range(2, 17)),
                                           delimiter=delim, filling_values=-1)
        split = Mg
        e_split = Mg_err
        sel = (Mg > -1)  # exclude missing data on Mg
    RAh = RAh[sel]; RAm = RAm[sel]; RAs = RAs[sel]
    DEd = DEd[sel]; DEm = DEm[sel]; DEs = DEs[sel]
    #Vmag = Vmag[sel]
    #VI = VI[sel]
    VHel = VHel[sel]
    e_VHel = e_VHel[sel]
    if gp.case < 5:
        Mg = Mg[sel]
        Mg_err = Mg_err[sel]
    elif gp.case == 5:
        Teff = Teff[sel]; e_Teff = e_Teff[sel]
        logg = logg[sel]; e_logg = e_logg[sel]
        Fe = Fe[sel]; e_Fe = e_Fe[sel]
        N = N[sel]
    split = split[sel]
    e_split = e_split[sel]
    PM = PM[sel]
    split_min = min(split)  # -3, 3 if according to WalkerPenarrubia2011
    split_max = max(split)
    # it is not as simple as a histogram: we have datapoints with errors and a
    # probability of membership weighting. thus, smear each value out with a
    # Gaussian of width e_split and add the Gaussians up after scaling with
    # the membership probability PM
    x = np.array(np.linspace(split_min, split_max, 100))
    splitdf = np.zeros(100)
    for i in range(len(split)):
        splitdf += PM[i] * gh.gauss(x, split[i], e_split[i])
    splitdf /= sum(PM)
    sig = abs(RAh[0]) / RAh[0]
    RAh = RAh / sig
    xs = 15 * (RAh * 3600 + RAm * 60 + RAs) * sig  # [arcsec]
    sig = abs(DEd[0]) / DEd[0]
    DEd = DEd / sig
    ys = (DEd * 3600 + DEm * 60 + DEs) * sig  # [arcsec]
    arcsec = 2. * np.pi / (360. * 60. * 60)  # [rad/arcsec]
    kpc = 1000  # [pc]
    DL = {1: lambda x: x * (138),  # +/- 8 for Fornax
          2: lambda x: x * (101),  # +/- 5 for Carina
          3: lambda x: x * (79),   # +/- 4 for Sculptor
          4: lambda x: x * (86),   # +/- 4 for Sextans
          5: lambda x: x * (80)    # +/- 10 for Draco
          }[gp.case](kpc)
    xs *= (arcsec * DL)  # [pc]
    ys *= (arcsec * DL)  # [pc]
    # alternative: get center of photometric measurements by deBoer;
    # for Fornax, we have
    if gp.case == 1:
        com_x = 96203.736358393697
        com_y = -83114.080684733024
        xs = xs - com_x
        ys = ys - com_y
    else:
        # determine com_x, com_y from shrinking sphere
        import gi_centering as grc
        com_x, com_y = grc.com_shrinkcircle_2D(xs, ys)
    popass = np.loadtxt(gpr.dir + 'data/popass_' + Rdiff)
    sel1 = (popass == 1)
    sel2 = (popass == 2)
    # radii of all stellar tracers from pop 1 and 2
    R1 = np.sqrt((xs[sel1])**2 + (ys[sel1])**2)
    R2 = np.sqrt((xs[sel2])**2 + (ys[sel2])**2)
    R1.sort()
    R2.sort()
    R0 = np.hstack([R1, R2])
    R0.sort()
    for pop in np.arange(2) + 1:
        if pop == 1:
            Rhalf = R1[len(R1) // 2]  # integer index needed in Python 3
            co = 'blue'
        else:
            Rhalf = R2[len(R2) // 2]
            co = 'red'
        Rmin = min(R0)  # [pc]
        Rmax = max(R0)  # [pc]
        Binmin, Binmax, Rbin = gh.determine_radius(R0, Rmin, Rmax, gp)  # [pc]
        gp.xipol = Rbin  # [pc]
        minr = min(Rbin)  # [pc]
        maxr = max(Rbin)  # [pc]
        Vol = gh.volume_circular_ring(Binmin, Binmax, gp)  # [pc^2]
        totmass_tracers = float(len(R0))  # number of stars; len(x) would be the metallicity grid
        Rsi = gh.add_errors(R0, gpr.Rerr)  # [pc]
        tpb = np.zeros(gp.nipol)
        Sig_phot = np.zeros(gp.nipol)
        for i in range(gp.nipol):
            ind1 = np.argwhere(np.logical_and(Rsi >= Binmin[i], Rsi < Binmax[i])).flatten()  # [1]
            tpb[i] = float(len(ind1))  # [1]
            Sig_phot[i] = float(len(ind1)) * totmass_tracers / Vol[i]  # [Munit/pc^2]
        #loglog(gp.xipol, Sig_phot, co)
        #axvline(Rhalf, color=co)
        #xlim([min(gp.xipol), max(gp.xipol)])
        #xlabel(r'$R$')
        #ylabel(r'$\Sigma(R)$')
        # deproject to get 3D nu profiles
        gp.xipol = Rbin
        minr = min(Rbin)  # [pc]
        maxr = max(Rbin)  # [pc]
        gp.xepol = np.hstack([minr / 8., minr / 4., minr / 2., Rbin,
                              2 * maxr, 4 * maxr, 8 * maxr])  # [pc]
        gp.xfine = introduce_points_in_between(gp.xepol, gp)
        #Sigdatnu, Sigerrnu = gh.complete_nu(Rbin, Sig_phot, Sig_phot/10., gp.xfine)
        #dummyx, nudatnu, nuerrnu, Mrnu = gip.Sig_NORM_rho(gp.xfine, Sigdatnu, Sigerrnu, gp)
        #nudat = gh.linipollog(gp.xfine, nudatnu, gp.xipol)
        #nuerr = gh.linipollog(gp.xfine, nuerrnu, gp.xipol)
        #loglog(gp.xipol, nudat, co)
        #axvline(Rhalf, color=co)
        #xlim([min(gp.xipol), max(gp.xipol)])
        #xlabel(r'$R$')
        #ylabel(r'$\nu(R)$')
        #plum = 100*gh.plummer(gp.xipol, Rhalf, len(R0))
        #loglog(gp.xipol, plum, color=co, linestyle='--')
        #ylim([min(plum), max(plum)])
    return
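# The smearing loop above is a membership-weighted kernel density estimate:
# one Gaussian per star, centered on its measured value, with the measurement
# error as width, scaled by the probability of membership PM. A standalone
# sketch; weighted_kde and the toy numbers are illustrative, not repository
# code (gh.gauss is assumed to be a normalized Gaussian pdf):
import numpy as np

def weighted_kde(grid, values, errors, weights):
    pdf = np.zeros(len(grid))
    for v, e, w in zip(values, errors, weights):
        # normalized Gaussian of width e centered on v, scaled by weight w
        pdf += w * np.exp(-0.5 * ((grid - v) / e)**2) / (np.sqrt(2 * np.pi) * e)
    return pdf / np.sum(weights)

# toy example: two stars with Mg indices 0.3 +/- 0.05 and 0.6 +/- 0.1,
# membership probabilities 0.95 and 0.80:
# grid = np.linspace(0., 1., 100)
# pdf = weighted_kde(grid, [0.3, 0.6], [0.05, 0.1], [0.95, 0.80])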
def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    print("scalefile: ", gp.files.get_scale_file(0))
    Rscale0 = gf.read_Xscale(gp.files.get_scale_file(0))  # [pc]
    print("input: ", gp.files.get_com_file(0))
    # start from data centered on COM already:
    x, y, v = np.loadtxt(gp.files.get_com_file(0), skiprows=1,
                         usecols=(0, 1, 2), unpack=True)  # [Rscalei], [Rscalei], [km/s]
    for pop in range(2):
        # calculate 2D radius on the sky plane
        R = np.sqrt(x**2 + y**2)  # [Rscalei]
        Rscalei = gf.read_Xscale(gp.files.get_scale_file(pop))  # [pc]
        # set number and size of bins
        Rmin = 0.0  # [Rscale0]
        Rmax = max(R) if gp.maxR < 0 else float(gp.maxR)  # [Rscale0]
        sel = R * Rscalei < Rmax * Rscale0
        x = x[sel]  # [Rscalei]
        y = y[sel]  # [Rscalei]
        v = v[sel]  # [km/s]
        totmass_tracers = 1.0 * len(x)  # [Munit], Munit = 1/star
        Binmin, Binmax, Rbin = gh.determine_radius(R, Rmin, Rmax, gp)  # [Rscale0]
        gp.xipol = Rbin
        minr = min(Rbin)  # [Rscale0]
        maxr = max(Rbin)  # [Rscale0]
        gp.xepol = np.hstack([minr / 8.0, minr / 4.0, minr / 2.0, Rbin,
                              2 * maxr, 4 * maxr, 8 * maxr])  # [Rscale0]
        Vol = gh.volume_circular_ring(Binmin, Binmax, gp)  # [Rscale0^2]
        # Rs = gpr.Rerr*np.random.randn(len(R)) + R
        Rs = R  # [Rscale0]; if no initial offset is wished
        tr = open(gp.files.get_ntracer_file(pop), "w")
        print(totmass_tracers, file=tr)
        tr.close()
        f_Sig, f_nu, f_mass, f_sig, f_kap, f_zeta = gf.write_headers_2D(gp, pop)
        # gpr.n iterations for getting randomly picked radius values
        Density = np.zeros((gp.nipol, gpr.n))
        tpb = np.zeros((gp.nipol, gpr.n))
        for k in range(gpr.n):
            Rsi = gh.add_errors(Rs, gpr.Rerr)  # [Rscalei]
            for j in range(gp.nipol):
                ind1 = np.argwhere(np.logical_and(Rsi * Rscalei >= Binmin[j] * Rscale0,
                                                  Rsi * Rscalei < Binmax[j] * Rscale0)).flatten()  # [1]
                Density[j][k] = float(len(ind1)) / Vol[j] * totmass_tracers  # [Munit/Rscale0^2]
                tpb[j][k] = float(len(ind1))  # [1]
        Dens0 = np.sum(Density[0]) / float(gpr.n)  # [Munit/Rscale0^2]
        Dens0pc = Dens0 / Rscale0**2  # [Munit/pc^2]
        gf.write_Sig_scale(gp.files.get_scale_file(pop), Dens0pc, totmass_tracers)
        tpbb0 = np.sum(tpb[0]) / float(gpr.n)  # [1]
        Denserr0 = Dens0 / np.sqrt(tpbb0)  # [Munit/Rscale0^2]
        p_dens = np.zeros(gp.nipol)
        p_edens = np.zeros(gp.nipol)
        for b in range(gp.nipol):
            Dens = np.sum(Density[b]) / float(gpr.n)  # [Munit/Rscale0^2]
            tpbb = np.sum(tpb[b]) / float(gpr.n)  # [1]
            Denserr = Dens / np.sqrt(tpbb)  # [Munit/Rscale0^2]
            if np.isnan(Denserr):
                p_dens[b] = p_dens[b - 1]  # [1]
                p_edens[b] = p_edens[b - 1]  # [1]
            else:
                p_dens[b] = Dens / Dens0  # [1]
                p_edens[b] = Denserr / Dens0  # [1]; 100/rbin would be an artificial guess
        for b in range(gp.nipol):
            print(Rbin[b], Binmin[b], Binmax[b], p_dens[b], p_edens[b], file=f_Sig)
            # [Rscale0], [Dens0], [Dens0]
            indr = R < Binmax[b]
            menclosed = float(np.sum(indr)) / totmass_tracers
            # normalized to 1 at the last bin, [totmass_tracers]
            merr = menclosed / np.sqrt(tpbb)  # artificial menclosed/10 gives a good approximation, [totmass_tracers]
            print(Rbin[b], Binmin[b], Binmax[b], menclosed, merr, file=f_mass)
            # [Rscale0], [totmass_tracers], [totmass_tracers]
        f_Sig.close()
        f_mass.close()
        # deproject Sig to get nu
        numedi = gip.Sig_INT_rho(Rbin * Rscalei, Dens0pc * p_dens, gp)
        numin = gip.Sig_INT_rho(Rbin * Rscalei, Dens0pc * (p_dens - p_edens), gp)
        numax = gip.Sig_INT_rho(Rbin * Rscalei, Dens0pc * (p_dens + p_edens), gp)
        nu0pc = numedi[0]
        gf.write_nu_scale(gp.files.get_scale_file(pop), nu0pc)
        nuerr = numax - numedi
        for b in range(gp.nipol):
            print(Rbin[b], Binmin[b], Binmax[b],
                  numedi[b] / nu0pc, nuerr[b] / nu0pc, file=f_nu)
        f_nu.close()
        if gpr.showplots:
            gpr.show_plots_dens_2D(Rbin * Rscalei, p_dens, p_edens, Dens0pc)
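# The double loop above propagates radius errors by Monte Carlo: gpr.n
# realizations of perturbed radii are binned into rings, and the mean count
# per bin sets both the density and its Poisson error. A condensed sketch of
# that resampling pattern; mc_bin_density is a hypothetical helper:
import numpy as np

def mc_bin_density(R, Rerr, bins, n_real=30):
    counts = np.zeros((n_real, len(bins) - 1))
    for k in range(n_real):
        Rk = R + Rerr * np.random.randn(len(R))    # one error realization
        counts[k], _ = np.histogram(Rk, bins=bins)
    area = np.pi * (bins[1:]**2 - bins[:-1]**2)    # ring areas
    mean_counts = counts.mean(axis=0)
    dens = mean_counts / area                      # mean surface density
    denserr = dens / np.sqrt(mean_counts)          # Poisson error on the mean
    # note: empty bins give inf here; the pipeline above copies the previous
    # bin's values instead
    return dens, denserr

# usage: dens, denserr = mc_bin_density(R, 0.01 * R, np.linspace(0., R.max(), 13))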
def run(gp):
    global K, C, D, F, zth, zp_kz, zmin, zmax, z0, z02
    # Set up simple population here using analytic formulae:
    zmin = 100.0  # [pc], first bin center
    zmax = 1300.0  # [pc], last bin center
    # get supporting points for theoretical profiles
    # (not yet stars, finer spacing in real space)
    nth = gp.nipol  # [1] number of bins
    zth = 1.0 * np.arange(nth) * (zmax - zmin) / (nth - 1.0) + zmin  # [pc] bin centers
    z0 = 240.0  # [pc], scale height of first population
    z02 = 200.0  # [pc], scale height of second population
    D = 250.0  # [pc], scale height of all stellar tracers
    K = 1.65
    F = 1.65e-4
    C = 17.0**2.0  # [km^2/s^2], integration constant in sig
    # Draw mock data from exponential disk:
    nu_zth = np.exp(-zth / z0)  # [nu0] = [Msun/A/pc], 3D tracer density
    Kz_zth = -(K * zth / np.sqrt(zth**2.0 + D**2.0) + 2.0 * F * zth)
    if gp.adddarkdisc:
        DD = 600  # [pc], scale height of dark disc
        KD = 0.15 * 1.650
        Kz_zth = Kz_zth - KD * zth / np.sqrt(zth**2.0 + DD**2.0)
    # calculate sig_z^2
    inti = np.zeros(nth)
    for i in range(1, nth):
        inti[i] = simps(Kz_zth[:i] * nu_zth[:i], zth[:i])
    sigzth = np.sqrt((inti + C) / nu_zth)
    # project back to positions of stars
    ran = npr.uniform(size=int(gp.ntracer[1 - 1]))  # [1]
    zstar = -z0 * np.log(1.0 - ran)  # [pc] stellar positions, exponential falloff
    sigzstar = gh.ipol(zth, sigzth, zstar)
    # > 0 ((IDL, Justin)) stellar velocity dispersion
    # assign velocities: Gaussian draw scaled by the local dispersion
    ran2 = npr.normal(size=int(gp.ntracer[1 - 1]))  # [1]
    vzstar = ran2 * sigzstar  # [km/s]
    # Add second population [thick-disc like]:
    if gp.pops == 2:
        nu_zth2 = gp.ntracer[2 - 1] / gp.ntracer[1 - 1] * np.exp(-zth / z02)
        # [nu0,2] = [Msun/A/pc], 3D tracer density, exponentially falling;
        # no normalization to 1 done here
        inti = np.zeros(nth)
        for i in range(1, nth):
            inti[i] = simps(Kz_zth[:i] * nu_zth2[:i], zth[:i])
        sigzth2 = np.sqrt((inti + C) / nu_zth2)  # same integration constant
        ran = npr.uniform(size=int(gp.ntracer[2 - 1]))  # [1]; draw in [0,1) as for population 1, so 1-ran stays positive
        zstar2 = -z02 * np.log(1.0 - ran)  # [pc]
        # zstarobs = np.hstack([zstar, zstar2])  # concat pop1, pop2 for all stars
        sigzstar2 = gh.ipol(zth, sigzth2, zstar2)
        ran2 = npr.normal(size=int(gp.ntracer[2 - 1]))  # [1]; standard normal, as for population 1
        vzstar2 = ran2 * sigzstar2  # [km/s]
    # enforce observational cut on zmax:
    sel = zstar < zmax
    print("fraction of z<zmax selected elements: ", 1.0 * sum(sel) / (1.0 * len(sel)))
    z_dat1 = zstar[sel]
    vz_dat1 = vzstar[sel]
    # throw away velocities of value zero (unstable?):
    sel = abs(vz_dat1) > 0
    print("fraction of vz_dat>0 selected elements: ", 1.0 * sum(sel) / (1.0 * len(sel)))
    z_dat1 = z_dat1[sel]
    vz_dat1 = vz_dat1[sel]
    # Calculate binned data (for plots/binned analysis):
    # old way, linear spacings, no constant number of particles per bin
    binmin1, binmax1, z_dat_bin1, sig_dat_bin1, count_bin1 = gh.binsmooth(z_dat1, vz_dat1, zmin, zmax, gp.nipol, 0.0)
    sig_dat_err_bin1 = np.sqrt(sig_dat_bin1)  # Poisson errors
    nu_dat_bin1, nu_dat_err_bin1 = gh.bincount(z_dat1, binmax1)
    nu_dat_bin1 /= binmax1 - binmin1
    nu_dat_err_bin1 /= binmax1 - binmin1
    import gr_params
    gpr = gr_params.grParams(gp)
    if gpr.showplots:
        nuscaleb = nu_zth[np.argmin(np.abs(zth - z0))]
        plt.loglog(zth, nu_zth / nuscaleb, "b.-")
        nuscaler = nu_dat_bin1[np.argmin(np.abs(zth - z0))]
        plt.loglog(zth, nu_dat_bin1 / nuscaler, "r.-")
    Sig_dat_bin1 = np.cumsum(nu_dat_bin1)
    Sig_dat_err_bin1 = np.sqrt(Sig_dat_bin1)
    Mrdat1 = np.cumsum(Sig_dat_bin1)
    Mrerr1 = Mrdat1 * Sig_dat_err_bin1 / Sig_dat_bin1
    scales = [[], [], []]
    scales[1].append(z0)  # [pc]
    scales[1].append(Sig_dat_bin1[0])
    scales[1].append(Mrdat1[-1])
    scales[1].append(nu_dat_bin1[0])
    scales[1].append(max(sig_dat_bin1))
    # start analysis of "all stars" with only component 1,
    # append to it later if more populations are required
    z_dat0 = z_dat1  # [pc]
    vz_dat0 = vz_dat1  # [km/s]
    if gp.pops == 2:
        # enforce observational constraints on z < zmax
        sel = zstar2 < zmax
        z_dat2 = zstar2[sel]
        vz_dat2 = vzstar2[sel]
        # cut zero velocities:
        sel = abs(vz_dat2) > 0
        z_dat2 = z_dat2[sel]
        vz_dat2 = vz_dat2[sel]
        # calculate binned data (for plots/binned analysis):
        binmin2, binmax2, z_dat_bin2, sig_dat_bin2, count_bin2 = gh.binsmooth(z_dat2, vz_dat2, zmin, zmax, gp.nipol, 0.0)
        sig_dat_err_bin2 = np.sqrt(sig_dat_bin2)  # Poisson errors
        nu_dat_bin2, nu_dat_err_bin2 = gh.bincount(z_dat2, binmax2)
        nu_dat_bin2 /= binmax2 - binmin2
        nu_dat_err_bin2 /= binmax2 - binmin2
        Sig_dat_bin2 = np.cumsum(nu_dat_bin2)
        Sig_dat_err_bin2 = np.sqrt(Sig_dat_bin2)
        Mrdat2 = np.cumsum(nu_dat_bin2)
        Mrerr2 = np.sqrt(Mrdat2)
        scales[2].append(z02)  # [pc]
        scales[2].append(Sig_dat_bin2[0])
        scales[2].append(Mrdat2[-1])
        scales[2].append(nu_dat_bin2[0])  # normalize by the density of the first bin
        scales[2].append(max(sig_dat_bin2))
        # calculate properties of all populations together from stacked values
        z_dat0 = np.hstack([z_dat1, z_dat2])
        vz_dat0 = np.hstack([vz_dat1, vz_dat2])
    # Calculate binned data (for plots/binned analysis):
    # old way, linear spacings, no constant number of particles per bin
    binmin0, binmax0, z_dat_bin0, sig_dat_bin0, count_bin0 = gh.binsmooth(z_dat0, vz_dat0, zmin, zmax, gp.nipol, 0.0)
    sig_dat_err_bin0 = np.sqrt(sig_dat_bin0)
    # binmin, binmax, z_dat_bin = gh.bin_r_const_tracers(z_dat, gp.nipol)
    nu_dat_bin0, nu_dat_err_bin0 = gh.bincount(z_dat0, binmax0)
    nu_dat_bin0 /= binmax0 - binmin0
    nu_dat_err_bin0 /= binmax0 - binmin0
    Sig_dat_bin0 = np.cumsum(nu_dat_bin0)
    Sig_dat_err_bin0 = np.sqrt(Sig_dat_bin0)
    # renorm0 = max(nu_dat_bin0)
    xip = np.copy(z_dat_bin0)  # [pc]
    Mrdat0 = K * xip / np.sqrt(xip**2.0 + D**2.0) / (2.0 * np.pi * gu.G1__pcMsun_1km2s_2)
    Mrerr0 = Mrdat0 * nu_dat_err_bin0 / nu_dat_bin0
    scales[0].append(D)  # [pc]
    scales[0].append(Sig_dat_bin0[0])
    scales[0].append(Mrdat0[-1])
    scales[0].append(nu_dat_bin0[0])
    scales[0].append(max(sig_dat_bin0))
    rmin = binmin0 / scales[0][0]  # [1]
    rbin = xip / scales[0][0]  # [1]
    rmax = binmax0 / scales[0][0]  # [1]
    # store parameters for output, normalized by the scale values
    nudat = []
    nudat.append(nu_dat_bin0 / scales[0][3])  # [Msun/pc^3]
    nudat.append(nu_dat_bin1 / scales[1][3])
    if gp.pops == 2:
        nudat.append(nu_dat_bin2 / scales[2][3])
    nuerr = []
    nuerr.append(nu_dat_err_bin0 / scales[0][3])  # [Msun/pc^3]
    nuerr.append(nu_dat_err_bin1 / scales[1][3])
    if gp.pops == 2:
        nuerr.append(nu_dat_err_bin2 / scales[2][3])
    Mrdat = []
    Mrdat.append(Mrdat0 / scales[0][2])  # [Msun]
    Mrdat.append(Mrdat1 / scales[1][2])
    if gp.pops == 2:
        Mrdat.append(Mrdat2 / scales[2][2])
    Mrerr = []
    Mrerr.append(Mrerr0 / scales[0][2])  # [Msun]
    Mrerr.append(Mrerr1 / scales[1][2])
    if gp.pops == 2:
        Mrerr.append(Mrerr2 / scales[2][2])
    Sigdat = []
    Sigdat.append(Sig_dat_bin0 / scales[0][1])
    Sigdat.append(Sig_dat_bin1 / scales[1][1])
    if gp.pops == 2:
        Sigdat.append(Sig_dat_bin2 / scales[2][1])
    Sigerr = []
    Sigerr.append(Sig_dat_err_bin0 / scales[0][1])
    Sigerr.append(Sig_dat_err_bin1 / scales[1][1])
    if gp.pops == 2:
        Sigerr.append(Sig_dat_err_bin2 / scales[2][1])
    sigdat = []
    sigdat.append(sig_dat_bin0 / scales[0][4])  # [km/s]
    sigdat.append(sig_dat_bin1 / scales[1][4])
    if gp.pops == 2:
        sigdat.append(sig_dat_bin2 / scales[2][4])
    sigerr = []
    sigerr.append(sig_dat_err_bin0 / scales[0][4])  # [km/s]
    sigerr.append(sig_dat_err_bin1 / scales[1][4])
    if gp.pops == 2:
        sigerr.append(sig_dat_err_bin2 / scales[2][4])
    write_disc_output_files(rbin, rmin, rmax, nudat, nuerr,
                            Sigdat, Sigerr, Mrdat, Mrerr,
                            sigdat, sigerr, scales, gp)
    return gp.dat
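# zstar = -z0*log(1 - ran) above is inverse-transform sampling: an exponential
# density nu(z) ~ exp(-z/z0) has cumulative distribution F(z) = 1 - exp(-z/z0),
# so z = -z0*ln(1 - u) with u uniform in [0,1) is distributed like nu. A
# self-contained check of that identity (draw_exponential_heights is an
# illustrative name):
import numpy as np

def draw_exponential_heights(z0, n, seed=42):
    rng = np.random.default_rng(seed)
    u = rng.uniform(size=n)           # u ~ U[0,1), so 1-u stays positive
    return -z0 * np.log(1.0 - u)      # F^-1(u) for the exponential profile

# zs = draw_exponential_heights(240.0, 100000)
# np.mean(zs) then comes out close to z0 = 240 pc, and a histogram of zs
# falls off as exp(-z/z0), matching the nu_zth profile used above.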
def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    global Nsample, split, e_split, PM, split_min, split_max
    gpr.fil = gpr.dir + "data/tracers.dat"
    # number of measured tracer stars
    Nsample = bufcount(gpr.fil)
    delim = [0, 22, 3, 3, 6, 4, 3, 5, 6, 6, 7, 5, 6, 5, 6, 5, 6]
    #ID = np.genfromtxt(gpr.fil, skiprows=29, unpack=True, usecols=(0,1), delimiter=delim)
    if gp.case == 5:
        RAh, RAm, RAs, DEd, DEm, DEs, VHel, e_VHel, Teff, e_Teff, \
            logg, e_logg, Fe, e_Fe, N = np.loadtxt(gpr.fil, skiprows=25, unpack=True)
        PM = np.ones(len(RAh))
        split = logg
        e_split = e_logg
        sel = (N > 0)
    else:
        RAh, RAm, RAs, DEd, DEm, DEs, Vmag, VI, VHel, e_VHel, SigFe, e_SigFe, \
            Mg, Mg_err, PM = np.genfromtxt(gpr.fil, skiprows=29, unpack=True,
                                           usecols=tuple(range(2, 17)),
                                           delimiter=delim, filling_values=-1)
        split = Mg
        e_split = Mg_err
        sel = (Mg > -1)  # exclude missing data on Mg
    RAh = RAh[sel]; RAm = RAm[sel]; RAs = RAs[sel]
    DEd = DEd[sel]; DEm = DEm[sel]; DEs = DEs[sel]
    #Vmag = Vmag[sel]
    #VI = VI[sel]
    VHel = VHel[sel]
    e_VHel = e_VHel[sel]
    if gp.case < 5:
        Mg = Mg[sel]
        Mg_err = Mg_err[sel]
    elif gp.case == 5:
        Teff = Teff[sel]; e_Teff = e_Teff[sel]
        logg = logg[sel]; e_logg = e_logg[sel]
        Fe = Fe[sel]; e_Fe = e_Fe[sel]
        N = N[sel]
    split = split[sel]
    e_split = e_split[sel]
    PM = PM[sel]
    split_min = min(split)  # -3, 3 if according to WalkerPenarrubia2011
    split_max = max(split)
    # easiest way for visualization: use a histogram to show the data
    #hist(split, np.sqrt(len(split))/2, normed=True)
    # but it is not as simple as that: we have datapoints with errors and a
    # probability of membership weighting. thus, smear each value out with a
    # Gaussian of width e_split and add the Gaussians up after scaling with
    # the membership probability PM
    x = np.array(np.linspace(split_min, split_max, 100))
    splitdf = np.zeros(100)
    for i in range(len(split)):
        splitdf += PM[i] * gh.gauss(x, split[i], e_split[i])
    splitdf /= sum(PM)
    #plot(x, splitdf, 'g', lw=2)
    # only then do we compare to the sum of two Gaussians
    n_dims = 1 + gp.pops * 2
    #Nsample = 10*n_dims
    pymultinest.run(myloglike, myprior,
                    n_dims,             # nest_ndims
                    n_dims + 1,         # nest_totPar
                    n_dims,             # nest_nCdims: separate modes on the rho parameters only (gp.nrho in this case)
                    [gp.pops, gp.nipol, gp.nrho],  # wrapped_params
                    True,               # nest_IS = importance nested sampling enabled
                    True,               # nest_mmodal = separate modes
                    True,               # nest_ceff = use constant sampling efficiency
                    Nsample,            # nest_nlive
                    0.0,                # nest_tol = 0 to keep working infinitely
                    0.8,                # nest_ef
                    10000,              # nest_updInt = output after this many iterations
                    1.,                 # null_log_evidence: separate modes if log-evidence > this value
                    Nsample,            # maxClst
                    -1.e30,             # nest_Ztol = mode tolerance; highly negative in the case where no special value exists
                    gp.files.outdir,    # outputfiles_basename
                    -1,                 # seed
                    True,               # nest_fb
                    False,              # nest_resume
                    0,                  # context
                    True,               # nest_outfile
                    -999999,            # nest_logZero = points with log L < log_zero will be neglected
                    1000,               # nest_maxIter
                    False,              # initMPI = use MPI
                    None)               # dump_callback
    import os
    # pull the maximum-likelihood parameters out of stats.dat, and convert
    # Fortran-style exponents (1.23-45 -> 1.23E-45) so np.loadtxt can parse them
    os.system('cd ' + gp.files.outdir + '; grep -n6 Maximum stats.dat | tail -5 | cut -d " " -f8 > metalmaxL.dat;')
    os.system("cd " + gp.files.outdir + "; sed -i 's/\\([0-9]\\)-\\([0-9]\\)/\\1E-\\2/g' metalmaxL.dat")
    os.system("cd " + gp.files.outdir + "; sed -i 's/\\([0-9]\\)+\\([0-9]\\)/\\1E+\\2/g' metalmaxL.dat")
    cubeML = np.loadtxt(gp.files.outdir + 'metalmaxL.dat')
    cubeMLphys = cubeML
    #myprior(cubeML, 1+gp.pops*2, 1+gp.pops*2)
    #myloglike(cubeMLphys, 1+gp.pops*2, 1+gp.pops*2)
    pML, mu1ML, sig1ML, mu2ML, sig2ML = cubeMLphys
    #g1 = pML*gh.gauss(x, mu1ML, sig1ML)
    #g2 = (1-pML)*gh.gauss(x, mu2ML, sig2ML)
    #gtot = g1+g2
    #plot(x, g1, 'white')
    #plot(x, g2, 'white')
    #plot(x, gtot, 'r')
    #xlabel('Mg')
    #ylabel('pdf')
    sig = abs(RAh[0]) / RAh[0]
    RAh = RAh / sig
    xs = 15 * (RAh * 3600 + RAm * 60 + RAs) * sig  # [arcsec]
    sig = abs(DEd[0]) / DEd[0]
    DEd = DEd / sig
    ys = (DEd * 3600 + DEm * 60 + DEs) * sig  # [arcsec]
    arcsec = 2. * np.pi / (360. * 60. * 60)  # [rad/arcsec]
    kpc = 1000  # [pc]
    DL = {1: lambda x: x * (138),  # +/- 8 for Fornax
          2: lambda x: x * (101),  # +/- 5 for Carina
          3: lambda x: x * (79),   # +/- 4 for Sculptor
          4: lambda x: x * (86),   # +/- 4 for Sextans
          5: lambda x: x * (80)    # +/- 10 for Draco
          }[gp.case](kpc)
    xs *= (arcsec * DL)  # [pc]
    ys *= (arcsec * DL)  # [pc]
    # alternative: get center of photometric measurements by deBoer;
    # for Fornax, we have
    if gp.case == 1:
        com_x = 96203.736358393697
        com_y = -83114.080684733024
        xs = xs - com_x
        ys = ys - com_y
    else:
        # determine com_x, com_y from shrinking sphere
        import gi_centering as grc
        com_x, com_y = grc.com_shrinkcircle_2D(xs, ys)
    # instantiate different samplings, store half-light radii (2D)
    coll_R1half = []
    coll_R2half = []
    coll_popass = []
    print('drawing 1000 assignments of stars to best fitting Gaussians')
    import numpy.random as npr
    #import gi_project as gip
    for kl in range(1000):
        # get a sample assignment; a random assignment with p = 0.5 would be
        # wrong, so weight by the probability of each star under the two
        # best-fit Gaussians instead
        popass = []
        for i in range(sum(sel)):
            spl = split[i]
            ppop1 = pML * gh.gauss(spl, mu1ML, sig1ML)
            ppop2 = (1 - pML) * gh.gauss(spl, mu2ML, sig2ML)
            if npr.rand() <= ppop1 / (ppop1 + ppop2):
                popass.append(1)
            else:
                popass.append(2)
        popass = np.array(popass)
        coll_popass.append(popass)
        sel1 = (popass == 1)
        sel2 = (popass == 2)
        # radii of all stellar tracers from pop 1 and 2
        R1 = np.sqrt((xs[sel1])**2 + (ys[sel1])**2)
        R2 = np.sqrt((xs[sel2])**2 + (ys[sel2])**2)
        R1.sort()
        R2.sort()
        for pop in np.arange(2) + 1:
            if pop == 1:
                R0 = R1  # [pc]
                Rhalf = R1[len(R1) // 2]  # integer index needed in Python 3
                coll_R1half.append(Rhalf)
                co = 'blue'
            else:
                R0 = R2  # [pc]
                Rhalf = R2[len(R2) // 2]
                coll_R2half.append(Rhalf)
                co = 'red'
    coll_R1half = np.array(coll_R1half)
    coll_R2half = np.array(coll_R2half)
    coll_Rdiffhalf = np.abs(coll_R1half - coll_R2half)
    # select 3 assignments: one for the median,
    # one for median-1sigma, one for median+1sigma
    med_Rdiff = np.median(coll_Rdiffhalf)
    stdif = np.std(coll_Rdiffhalf)
    min1s_Rdiff = med_Rdiff - stdif
    max1s_Rdiff = med_Rdiff + stdif
    #clf()
    #hist(coll_Rdiffhalf, np.sqrt(len(coll_Rdiffhalf))/2)
    #xlabel(r'$\Delta R/pc$')
    #ylabel('count')
    #axvline(med_Rdiff, color='r')
    #axvline(min1s_Rdiff, color='g')
    #axvline(max1s_Rdiff, color='g')
    kmed = np.argmin(abs(coll_Rdiffhalf - med_Rdiff))
    kmin1s = np.argmin(abs(coll_Rdiffhalf - min1s_Rdiff))
    kmax1s = np.argmin(abs(coll_Rdiffhalf - max1s_Rdiff))
    print('saving median, lower 68%, upper 68% stellar assignments')
    np.savetxt(gpr.dir + 'data/popass_median', coll_popass[kmed])
    np.savetxt(gpr.dir + 'data/popass_min1s', coll_popass[kmin1s])
    np.savetxt(gpr.dir + 'data/popass_max1s', coll_popass[kmax1s])
    print('finished')
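# Each star above lands in population 1 with probability equal to its
# responsibility under the best-fit two-Gaussian mixture,
#     p1(x) = p N(x|mu1,sig1) / (p N(x|mu1,sig1) + (1-p) N(x|mu2,sig2)).
# A vectorized sketch of one such draw; assign_population is a hypothetical
# name and the example parameters are made up:
import numpy as np

def assign_population(split, p, mu1, sig1, mu2, sig2, seed=None):
    split = np.asarray(split, dtype=float)
    rng = np.random.default_rng(seed)
    def gauss(x, mu, sig):
        return np.exp(-0.5 * ((x - mu) / sig)**2) / (np.sqrt(2 * np.pi) * sig)
    w1 = p * gauss(split, mu1, sig1)
    w2 = (1 - p) * gauss(split, mu2, sig2)
    resp1 = w1 / (w1 + w2)                 # responsibility of population 1
    return np.where(rng.uniform(size=len(split)) <= resp1, 1, 2)

# e.g. popass = assign_population(split, 0.6, 0.4, 0.1, 0.7, 0.15)
# repeating the draw (as in the 1000-realization loop above) propagates the
# assignment uncertainty into the half-light radii and their difference.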