Example #1
def PotentialEnergy(xc, mc, vc, hc, uc, tree=None, particles_not_in_tree=None, x=None, m=None, h=None):
#    if len(xc) > 1e5: return 0 # this effectively sets a cutoff in particle number so we don't waste time on things that are clearly too big to be a GMC
    if len(xc)==1: return -2.8*mc/hc**2 / 2  # single-particle case
    if tree:
        phic = pykdgrav.Potential(xc, mc, hc, tree=tree, G=4.301e4)
        if particles_not_in_tree: # have to add the potential from the stuff that's not in the tree
            phic += BruteForcePotential2(xc, x[particles_not_in_tree], m[particles_not_in_tree], h=h[particles_not_in_tree], G=4.301e4)
    else:
        phic = BruteForcePotential(xc, mc, hc, G=4.301e4)
    return np.sum(mc*0.5*phic)
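
The helpers BruteForcePotential and BruteForcePotential2 are not shown in this example. For orientation, a minimal sketch of a direct-summation softened potential (one value per particle) is given below; the function name and the softening floor are assumptions made here for illustration, and the actual helpers in the source project may use a different kernel.

import numpy as np

def brute_force_potential_sketch(x, m, h, G=4.301e4):
    # Hypothetical O(N^2) direct-summation potential at each particle position.
    # x: (N,3) positions, m: (N,) masses, h: (N,) softening lengths.
    phi = np.zeros(len(x))
    for i in range(len(x)):
        dx = x - x[i]
        r = np.sqrt((dx * dx).sum(axis=1))
        r_soft = np.maximum(r, 0.5 * (h + h[i]))  # crude softening floor
        r_soft[i] = np.inf                        # drop the self-interaction term
        phi[i] = -G * np.sum(m / r_soft)
    return phi
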
Example #2
def PE(c, x, m, h, v, u):
#    print(x[c], m[c], h[c])
    phic = pykdgrav.Potential(x[c], m[c], h[c], G=4.301e4, theta=0.7)
#    print("Done!")
    return 0.5*(phic*m[c]).sum()
Example #3
def PE(c, x, m, h, v, u):
    phic = pykdgrav.Potential(x[c], m[c], h[c], G=4.301e4, theta=0.7)
    return 0.5 * (phic * m[c]).sum()
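
A usage sketch for PE, assuming pykdgrav is installed and exposes Potential(x, m, h, G=..., theta=...) exactly as called above; the particle data below are synthetic and all numbers are placeholders.

import numpy as np

rng = np.random.default_rng(0)
x = rng.random((1000, 3))      # positions
m = np.full(1000, 1e-3)        # equal masses
h = np.full(1000, 0.01)        # uniform softening lengths
v = np.zeros_like(x)           # unused by PE, but part of its signature
u = np.zeros(1000)             # unused by PE, but part of its signature
c = np.arange(1000)            # index set treating all particles as one cluster

print("Cluster potential energy: %g" % PE(c, x, m, h, v, u))
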
Example #4
def ComputeClouds(filename, options):
    n = filename.split("_")[1].split(".")[0]
    nmin = float(options["--nmin"])
    datafile_name = "bound_%s_n%g_alpha%g.dat" % (n, nmin, alpha_crit)
    #    if path.isfile(datafile_name): return
    print(filename)
    cluster_ngb = int(float(options["--cluster_ngb"]) + 0.5)
    G = float(options["--G"])
    boxsize = options["--boxsize"]
    ptype = "PartType" + options["--ptype"]

    #    recompute_potential = options["--recompute_potential"]
    softening = float(options["--softening"])
    if boxsize != "None":
        boxsize = float(boxsize)
    else:
        boxsize = None
    fuzz = float(options["--fuzz"])

    if path.isfile(filename):
        F = h5py.File(filename, 'r')
    else:
        print("Could not find " + filename)
        return
    if ptype not in F.keys():
        print("Particles of desired type not found!")
        return

    m = np.array(F[ptype]["Masses"])
    criteria = np.ones(len(m), dtype=bool)

    if len(m) < cluster_ngb:
        print("Not enough particles for meaningful cluster analysis!")
        return

    x = np.array(F[ptype]["Coordinates"])

    ids = np.array(F[ptype]["ParticleIDs"])
    u = (np.array(F[ptype]["InternalEnergy"])
         if ptype == "PartType0" else np.zeros_like(m))
    if "Density" in F[ptype].keys():
        rho = np.array(F[ptype]["Density"])
    else:
        rho = meshoid.meshoid(x, m, des_ngb=cluster_ngb).Density()
#    print(rho)
#ngbdist = meshoid.meshoid(x,m,des_ngb=cluster_ngb).ngbdist
    zz = np.array(F[ptype]["Metallicity"])
    #    rho = meshoid.meshoid(x,m).KernelAverage(rho)
    #    c = np.average(x,axis=0,weights=rho**2)
    #    x = x - c
    #    print(rho.max())

    criteria *= (rho * 145.7 > nmin)  # only look at dense gas (>nmin cm^-3)
    #    criteria *= np.max(np.abs(x),axis=1) < 50.
    print("%g particles denser than %g cm^-3" %
          (criteria.sum(), nmin))  #(np.sum(rho*147.7>nmin), nmin))
    if not criteria.sum(): return
    m = m[criteria]
    x = x[criteria]
    u = u[criteria]
    v = np.array(F[ptype]["Velocities"])[criteria]
    rho = rho[criteria]
    ids = ids[criteria]
    zz = zz[criteria]
    #    ngbdist, ngb = ngbdist[criteria]
    if fuzz: x += np.random.normal(size=x.shape) * x.std() * fuzz

    if "AGS-Softening" in F[ptype].keys():
        h_ags = np.array(F[ptype]["AGS-Softening"])[criteria]
    elif "SmoothingLength" in F[ptype].keys():
        h_ags = np.array(F[ptype]["SmoothingLength"])[criteria]
    else:
        # h_ags = meshoid.meshoid(x,m,des_ngb=cluster_ngb).SmoothingLength() #np.ones_like(m)*softening #(m/rho * cluster_ngb)**(1./3) #
        h_ags = np.ones_like(m) * softening
    if "Potential" in F[ptype].keys():  # and not recompute_potential:
        phi = np.array(F[ptype]["Potential"])[criteria]
    else:
        phi = pykdgrav.Potential(x, m, h_ags)

#    phi = np.ones_like(rho)

    # promote everything to float64 before handing off to the group finder
    x, m, rho, phi, h_ags, u, v, zz = map(np.float64,
                                          (x, m, rho, phi, h_ags, u, v, zz))

    t = time()
    groups, bound_groups, assigned_group = ParticleGroups(
        x, m, rho, phi, h_ags, u, v, zz, ids, cluster_ngb=cluster_ngb)
    t = time() - t
    print("Time: %g" % t)
    print("Done grouping. Computing group properties...")
    groupmass = np.array(
        [m[c].sum() for c in bound_groups.values() if len(c) > 10])
    groupid = np.array(
        [c for c in bound_groups.keys() if len(bound_groups[c]) > 10])
    groupid = groupid[groupmass.argsort()[::-1]]
    bound_groups = OrderedDict(zip(groupid,
                                   [bound_groups[i] for i in groupid]))
    #    exit()

    # Now we analyze the clouds and dump their properties

    bound_data = OrderedDict()
    bound_data["Mass"] = []
    bound_data["Center"] = []
    bound_data["PrincipalAxes"] = []
    bound_data["Reff"] = []
    bound_data["HalfMassRadius"] = []
    bound_data["NumParticles"] = []
    #    bound_data["SigmaEff"] = []

    Fout = h5py.File("Clouds_%s_%g.hdf5" % (n, nmin), 'w')

    i = 0
    for k, c in bound_groups.items():
        bound_data["Mass"].append(m[c].sum())
        bound_data["NumParticles"].append(len(c))
        bound_data["Center"].append(np.average(x[c], weights=m[c], axis=0))
        dx = x[c] - bound_data["Center"][-1]
        eig = np.linalg.eig(np.cov(dx.T))[0]
        bound_data["PrincipalAxes"].append(np.sqrt(eig))
        bound_data["Reff"].append(np.prod(np.sqrt(eig))**(1. / 3))
        r = np.sum(dx**2, axis=1)**0.5
        bound_data["HalfMassRadius"].append(np.median(r))
        #        sigma_eff = meshoid.meshoid(x[c],m[c],h_ags[c]).SurfaceDensity(size=4*bound_data["HalfMassRadius"][-1],center=bound_data["Center"][-1], res=400)

        #        bound_data["SigmaEff"].append(np.average(sigma_eff,weights=sigma_eff)*1e4)

        cluster_id = "Cloud" + ("%d" % i).zfill(
            int(np.log10(len(bound_groups)) + 1))

        N = len(c)

        Fout.create_group(cluster_id)
        fids = np.array(F[ptype]["ParticleIDs"])
        idx = np.in1d(fids, ids[c])
        for field in F[ptype].keys():
            Fout[cluster_id].create_dataset(ptype + "/" + field,
                                            data=np.array(F[ptype][field])[idx])
#            Fout[cluster_id].create_dataset("Coordinates", data=x[c])
# Fout[cluster_id].create_dataset("Velocities", data=v[c])
# Fout[cluster_id].create_dataset("Metallicity", data=zz[c])
# Fout[cluster_id].create_dataset("Masses", data=m[c])
# Fout[cluster_id].create_dataset("InternalEnergy", data=u[c])
# Fout[cluster_id].create_dataset("Density", data=rho[c])
# Fout[cluster_id].create_dataset("SmoothingLength", data=h_ags[c])
# Fout[cluster_id].create_dataset("ParticleIDs", data=ids[c])
        i += 1

    print("Done grouping bound clusters!")

    #                print "Reduced chi^2: %g Relative mass error: %g"%(EFF_rChiSqr,EFF_dm/mc[bound].sum())
    #    cluster_masses = np.array(bound_data["Mass"])
    #    bound_clusters = [b for b in bound_groups.values()]
    #np.array(bound_clusters)[np.array(bound_data["Mass"]).argsort()[::-1]]

    # write to Clusters_xxx.hdf5
    #    for i, c in enumerate(bound_clusters):

    Fout.close()
    F.close()

    #now save the ascii data files
    SaveArrayDict(datafile_name, bound_data)
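
ComputeClouds expects an options mapping with the docopt-style keys read above, plus a snapshot filename of the form <prefix>_<number>.hdf5; it also relies on module-level names (alpha_crit, ParticleGroups, SaveArrayDict, meshoid, pykdgrav) defined elsewhere in the project. A hypothetical invocation, with placeholder values, might look like:

options = {
    "--nmin": "10",          # minimum density threshold in cm^-3
    "--cluster_ngb": "32",   # neighbour number used for grouping
    "--G": "4.301e4",        # gravitational constant in code units
    "--boxsize": "None",     # "None" disables periodic wrapping
    "--ptype": "0",          # gas particles (PartType0)
    "--softening": "0.1",    # fallback softening if none is stored in the snapshot
    "--fuzz": "0",           # no positional jitter
}
ComputeClouds("snapshot_100.hdf5", options)
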
Example #5
def ComputeClusters(filename, options):
    brute_force_N = int(float(options["--brute_force_N"]) + 0.5)
    cluster_ngb = int(float(options["--cluster_ngb"]) + 0.5)
    min_cluster_size = int(float(options["--cluster_ngb"]) + 0.5)
    softening = float(options["--softening"])
    G = float(options["--G"])
    boxsize = options["--boxsize"]
    ptype = "PartType"+ options["--ptype"]
    recompute_potential = options["--recompute_potential"]
    if boxsize != "None":
        boxsize = float(boxsize)
    else:
        boxsize = None
    fuzz = float(options["--fuzz"])
    fits = int(float(options["--fits"])+0.5)

    if path.isfile(filename):
        F = h5py.File(filename, 'r')
    else:
        print("Could not find "+filename)
        return
    if ptype not in F.keys():
        print("Particles of desired type not found!")
        return

    m = np.array(F[ptype]["Masses"])
    criteria = np.ones(len(m), dtype=bool)
    m = m[criteria]

    if len(m) < 32:
        print("Not enough particles for meaningful cluster analysis!")
        return
    
    x = np.array(F[ptype]["Coordinates"])[criteria]
    if ptype=="PartType0":
        u = np.array(F[ptype]["InternalEnergy"])
        rho = np.array(F[ptype]["Density"])
        criteria *= (u < 10.)*(rho*404 > 10.) # only look at cold dense gas (<100K, >10cm^-3)
        print(criteria.sum())
        m = m[criteria]
        x = x[criteria]
        recompute_potential = True


    if fuzz: x += np.random.normal(size=x.shape)*x.std()*fuzz
    if "Potential" in F[ptype].keys() and not recompute_potential:
        phi = np.array(F[ptype]["Potential"])[criteria]
    else:
        phi = pykdgrav.Potential(x,m,G=G,theta=1.)
    if "AGS-Softening" in F[ptype].keys():
        h_ags = np.array(F[ptype]["AGS-Softening"])[criteria]
    elif "SmoothingLength" in F[ptype].keys():
        h_ags = np.array(F[ptype]["SmoothingLength"])[criteria]
    else:
        h_ags = softening*np.ones_like(m)
    hmin = h_ags.min()
        
    v = np.array(F[ptype]["Velocities"])[criteria]
    
    print("Finding neighbors...")
    mm = meshoid.meshoid(x, m, des_ngb=cluster_ngb, boxsize=boxsize)
    h = mm.h

    ngbdist, ngb = mm.ngbdist, mm.ngb #tree.query(x, cluster_ngb)
    print("Done!")

    owners = -np.ones(len(phi), dtype=np.int32)
    owners = FindOwners(ngb, phi,ngbdist)
    
    clusters = OrderedDict()
    for i, o in enumerate(owners):
        if not o in clusters.keys():
            clusters[o] = []
        clusters[o].append(i)

    # have to merge any spurious double-clusters
    clusters_merged = {}
    for c in clusters.keys():
#        dx = np.sum((x[clusters[c]] - x[c])**2, axis=1)**0.5
        r1s = 4*hmin
        dxc = np.sum((x[list(clusters.keys())] - x[c])**2, axis=1)**0.5
        # if there are no other clusters within r1s, simply copy the original cluster; otherwise merge the clusters in proximity
        if not np.any(np.sort(dxc)[1:] < r1s):
            clusters_merged[c] = clusters[c]
        else:
            # figure out which cluster has the lowest potential among those within r1s, and merge all the others into it
            within_r = np.array(list(clusters.keys()))[dxc < r1s]
            parent = within_r[phi[within_r].argmin()]
            cluster = []
            for i in within_r:
                cluster += clusters[i]
            clusters_merged[parent] = cluster  # sum([clusters[i] for i in within_r])

    clusters = clusters_merged

    # This ends the assignment of clusters to potential wells; the dictionary clusters maps each well to the indices of its particles

    # Now we determine the bound subsets of the clusters

#    csize = [len(c) for c in clusters.values()]
    rejects = []

    bound_data = OrderedDict()
    bound_data["Mass"] = []
    bound_data["Center"] = []
    bound_data["HalfMassRadius"] = []
    bound_data["NumParticles"] = []
    if fits:
        bound_data["EFF_gamma"] = []
        bound_data["EFF_gamma_error"] = []
        bound_data["EFF_a"] = []
        bound_data["EFF_a_error"] = []
        bound_data["EFF_DeltaM"] = []
        bound_data["EFF_rChiSqr"] = []
    unbound_data = OrderedDict()
    unbound_data["Mass"] = []
    unbound_data["Center"] = []
    unbound_data["HalfMassRadius"] = []
    unbound_data["NumParticles"] = []
    unbound_data["BoundFraction"] = []

    n = filename.split("snapshot_")[1].split(".")[0]

    Fout = h5py.File(filename.split("snapshot")[0] + "Clusters_%s.hdf5"%n, 'w')

    print("Selecting bound subsets...")

    rejects = []
    bound_clusters = []    
    for k,c in clusters.items():
        if len(c) < min_cluster_size:
            rejects.append(c)
            continue
        c = np.array(c)
        xc = x[c]
        phic = phi[c]
        r = np.sum((xc - xc[phic.argmin()])**2,axis=1)**0.5
        rorder = r.argsort()
        c = c[rorder]
        xc = xc[rorder]
        phic = phic[rorder]
        vc = v[c]
        hc = h[c]
        mc = m[c]
        r = r[rorder]

        unbound_data["Mass"].append(mc.sum())
        unbound_data["NumParticles"].append(len(mc))
        unbound_data["Center"].append(xc[phic.argmin()])
        unbound_data["HalfMassRadius"].append(np.median(r))

        Mr = mc.cumsum()
        if len(c) < brute_force_N:
            phi2 = ComputePotential(xc, mc, h_ags[c]/2.8, G) # direct summation
        else:
            phi2 = pykdgrav.Potential(np.float64(xc), np.float64(mc), G=G)
            #phi2 = G*integrate.cumtrapz(Mr[::-1]/(r[::-1]**2 + (h_ags[c]/2.8)**2), x=r[::-1], initial=0.0)[::-1] - G*mc.sum()/r[-1] # spherical symmetry approximation


        rho = mc/(4*np.pi*hc**3/3)
        v_cluster = np.average(vc,axis=0,weights=mc*np.abs(phi2))#rho**2)
#        x_cluster = np.average(xc,axis=0,weights=mc*rho**2)
        vSqr = np.sum((vc - v_cluster)**2,axis=1)

        bound = 0.5*vSqr + phi2 < 0


        unbound_data["BoundFraction"].append(float(bound.sum())/len(bound))

        rejects.append(c[np.invert(bound)])

        if bound.sum() > min_cluster_size:
            bound_clusters.append(c[bound])
            c = c[bound]
            bound_data["Mass"].append(mc[bound].sum())
            bound_data["NumParticles"].append(len(mc[bound]))
            bound_data["Center"].append(np.average(xc[bound],weights=phi2[bound]**2, axis=0))
            bound_data["HalfMassRadius"].append(np.median(r[bound]))
            if fits:
                #print EFF_fit(mc[bound], xc[bound], phic[bound], hc[bound], dim=fits)
                EFF_params, EFF_errors, EFF_dm, EFF_rChiSqr = EFF_fit(mc[bound], xc[bound], phi2[bound], hc[bound], dim=fits)#, path=filename.split("snapshot")[0])
                bound_data["EFF_gamma"].append(EFF_params[2])
                bound_data["EFF_gamma_error"].append(EFF_errors[2])
                bound_data["EFF_a"].append(EFF_params[1])
                bound_data["EFF_a_error"].append(EFF_errors[1])
                bound_data["EFF_DeltaM"].append(EFF_dm)
                bound_data["EFF_rChiSqr"].append(EFF_rChiSqr)

    print("Done grouping bound clusters!")
#                print "Reduced chi^2: %g Relative mass error: %g"%(EFF_rChiSqr,EFF_dm/mc[bound].sum())
#    cluster_masses = np.array(bound_data["Mass"])
    bound_clusters = np.array(bound_clusters, dtype=object)[np.array(bound_data["Mass"]).argsort()[::-1]]
    
    # write to Clusters_xxx.hdf5
    for i, c in enumerate(bound_clusters):
        cluster_id = "Cluster"+ ("%d"%i).zfill(int(np.log10(len(bound_clusters))+1))
        N = len(c)
        Fout.create_group(cluster_id)
        for k in F[ptype].keys():
            Fout[cluster_id].create_dataset(k, data=np.array(F[ptype][k])[criteria][c])
        
    Fout.close()
    F.close()
    
    #now save the ascii data files
    SaveArrayDict(filename.split("snapshot")[0] + "bound_%s.dat"%n, bound_data)
    SaveArrayDict(filename.split("snapshot")[0] + "unbound_%s.dat"%n, unbound_data)
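
As with the previous example, ComputeClusters takes its parameters from an options mapping; the snapshot path must contain "snapshot_" so that the snapshot number and the output directory can be parsed from it. A hypothetical call covering every key the function reads (all values are placeholders):

options = {
    "--brute_force_N": "1000",       # direct summation below this particle count
    "--cluster_ngb": "32",           # neighbour number, also the minimum cluster size
    "--softening": "0.1",            # fallback gravitational softening
    "--G": "4.301e4",                # gravitational constant in code units
    "--boxsize": "None",             # "None" disables periodic wrapping
    "--ptype": "0",                  # gas particles (PartType0)
    "--recompute_potential": False,  # reuse a stored Potential field if present
    "--fuzz": "0",                   # no positional jitter
    "--fits": "0",                   # skip the EFF profile fits
}
ComputeClusters("output/snapshot_100.hdf5", options)
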