Example #1
def run(file_name):
    print "Reading in the data from file"
    points = read_data(file_name)
    mean_density = calc_mean_density(len(points))
    print mean_density

    print "Initializing cKD Tree"
    kd = PeriodicCKDTree(bounds, points)
    test(points, kd, mean_density)

    print "calculating local densities for each particle"
    t = time.time()

    local_density = calc_local_densities(points, kd)
    local_density = sorted(local_density, key=lambda tup: tup[1], reverse=True)

    in_halo = set()
    # array of tuples (center, radius, number)
    halos = []

    for tup in local_density:
        if tup[0] not in in_halo:
            #print i
            halo = get_radius_neighbors(points[tup[0]], kd, mean_density)
            for index in halo[1]:
                in_halo.add(index)
            halos.append((points[tup[0]], halo[0], len(halo[1])))
            #print "Found halo of size %g" %(len(halo[1]))

    create_mass_plot(halos, "halos_" + file_name)
    print "Total time to run: %g" % (time.time() - t)
Example #2
def get_tcc(configuration, tccrawfile, box, rcut=1., criterium="not marked"):
    "Get the connected cluster formed by  marked or not marked particles"
    xyz = pl.loadtxt(configuration, skiprows=2, usecols=[1, 2, 3])
    cl = pl.loadtxt(tccrawfile, skiprows=3, dtype="S1")

    if criterium == 'not marked':
        select = xyz[(cl == 'A') + (cl == 'B')]
    if criterium == "marked":
        select = xyz[(cl == 'C') + (cl == 'D')]

    T = PeriodicCKDTree(box, select)
    # Find neighbors within a fixed distance of a point
    balls = T.query_ball_point(select, r=rcut)

    visited = pl.zeros(select.shape[0])
    added = pl.zeros(select.shape[0])
    clusters = []

    def addballs(p, cluster):
        if visited[p] == 0:
            visited[p] = 1
            cluster.append(p)
            for e in balls[p]:
                addballs(e, cluster)

    for i in xrange(select.shape[0]):
        cluster = []
        addballs(i, cluster)
        if len(cluster) > 0:
            clusters.append(cluster)
    return clusters
Example #3
def get_marked(xyz, labels, box, marker=True, rcut=1.4, periodic=False):
    select = xyz[labels == marker]
    # print select
    if periodic:
        T = PeriodicCKDTree(box, select)
    else:
        T = cKDTree(select)
    # Find neighbors within a fixed distance of a point
    balls = T.query_ball_point(select, r=rcut)

    visited = pl.zeros(select.shape[0])
    added = pl.zeros(select.shape[0])
    clusters = []

    def addballs(p, cluster):
        if visited[p] == 0:
            visited[p] = 1
            cluster.append(p)
            for e in balls[p]:
                addballs(e, cluster)

    for i in xrange(select.shape[0]):
        cluster = []
        addballs(i, cluster)
        if len(cluster) > 0:
            clusters.append(cluster)
    return clusters
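The recursive addballs flood fill in the last two examples can hit Python's default recursion limit (about 1000 frames) on large connected clusters. A sketch of an equivalent iterative version, under the same assumptions (balls from query_ball_point, visited as a zeroed array):

def addballs_iterative(p, cluster, balls, visited):
    # same flood fill as addballs, but with an explicit stack instead of recursion
    stack = [p]
    while stack:
        q = stack.pop()
        if visited[q] == 0:
            visited[q] = 1
            cluster.append(q)
            stack.extend(balls[q])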
Example #4
 def setUp(self):
     n = 100
     m = 4
     self.data = np.random.randn(n, m)
     self.bounds = np.ones(m)
     self.T = PeriodicCKDTree(self.bounds, self.data, leafsize=2)
     self.x = np.random.randn(m)
     self.p = 2.
     self.eps = 0
     self.d = 0.2
Example #5
def test_random_ball_vectorized_compiled():

    n = 20
    m = 5
    bounds = np.ones(m)
    T = PeriodicCKDTree(bounds, np.random.randn(n, m))

    r = T.query_ball_point(np.random.randn(2, 3, m), 1)
    assert_equal(r.shape, (2, 3))
    assert_(isinstance(r[0, 0], list))
Example #6
 def kdtree(self, bounds=[1., 1., 1.]):
     '''
     Return a KDTree with all halos, accounting for periodic boundaries
     '''
     if not hasattr(self, '_ctree'):
         from periodic_kdtree import PeriodicCKDTree
         points = np.array([
             halo['pos'].in_units("Mpc") / self.boxsize.in_units("Mpc")
             for halo in self
         ])
         T = PeriodicCKDTree(bounds, points)
         self._ctree = T
     return self._ctree
Example #7
    def velocity_profile(self):
        radius_array = np.linspace(0, 200, self.N + 1)
        velocity_profile = np.zeros(self.N + 1)
        N_in_velocity = np.zeros(self.N + 1)

        bounds = np.array([self.box_size, self.box_size, self.box_size])
        tree = PeriodicCKDTree(bounds, self.galaxy_cat)
        print "Calculating velocity profile"
        for i in range(len(self.void_cat[:, 0])):
            #print i
            current_number_of_galaxies = 0
            current_velocity = 0
            for j in range(1, self.N + 1):
                neighbor_inds = tree.query_ball_point(self.void_cat[i, :],
                                                      r=radius_array[j])
                r_void = self.void_cat[i]
                galaxies_near_point = self.galaxy_cat[neighbor_inds]
                v_galaxy = self.velocity_cat[neighbor_inds]
                r_vec = r_void - galaxies_near_point
                galaxies_near_point = len(galaxies_near_point[:, 0])
                galaxies_in_shell = galaxies_near_point - current_number_of_galaxies

                radial_velocity = (v_galaxy * r_vec).sum(
                    axis=1) / np.linalg.norm(r_vec, axis=1)
                radial_velocity = np.sum(radial_velocity) - current_velocity

                velocity_profile[j] += radial_velocity / np.maximum(
                    1.0, galaxies_in_shell)
                N_in_velocity[j] += galaxies_in_shell

                current_velocity += radial_velocity
                current_number_of_galaxies += galaxies_in_shell
            #print velocity_profile / np.maximum(np.ones(self.N+1), N_in_velocity)
        v_final = (velocity_profile / len(self.void_cat[:, 0])
                   )  #/ np.maximum(np.ones(self.N+1), N_in_velocity))
        fig, ax = plt.subplots()
        ax.plot(radius_array, v_final)
        ax.set_xlabel("radius [Mpc/h]")
        ax.set_xlabel(r"$v_r(r)$ km/s")
        np.save("datafiles/velocity_profiles/velocity_profile" + self.handle,
                v_final)
        fig.savefig("figures/velocity_profiles/velocity_profile" +
                    self.handle + ".pdf")
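The loop above queries growing balls and converts cumulative counts and sums to per-shell values by subtracting the running totals. A tiny sketch of that cumulative-to-shell trick in isolation (illustrative numbers only):

import numpy as np

counts_in_ball = np.array([3, 7, 12, 20])   # cumulative counts for r_1 < r_2 < r_3 < r_4
counts_in_shell = np.diff(np.concatenate(([0], counts_in_ball)))
print(counts_in_shell)   # [3 4 5 8] galaxies per shell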
Example #8
def bonded(snap, i, j, box):
    type_list = snap.particles.types
    index_B = type_list.index('B')

    positions = snap.particles.position
    bodies = snap.particles.body
    type_id = snap.particles.typeid
    pos_i = positions[i, :]
    pos_j = positions[j, :]
    loc_B_i = np.where((bodies == i) & (type_id == index_B))[0]
    pos_B_i = positions[loc_B_i, :]
    loc_B_j = np.where((bodies == j) & (type_id == index_B))[0]
    pos_B_j = positions[loc_B_j, :]

    bounds = np.array([box.Lx, box.Ly, box.Lz])
    T = PeriodicCKDTree(bounds, pos_B_j)

    for m in range(len(pos_B_i)):
        cur_pos = pos_B_i[m, :]
        nn_dist, idx = T.query(cur_pos, k=1)
        if (nn_dist < 2.5):
            return True
    return False
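Since PeriodicCKDTree.query accepts an array of points, the per-site loop in bonded() can be collapsed into a single vectorized call. A sketch (bonded_vectorized is a hypothetical name; T and the 2.5 cutoff are as above):

def bonded_vectorized(pos_B_i, T, cutoff=2.5):
    # query all B-sites of body i against body j's tree at once
    nn_dist, idx = T.query(pos_B_i, k=1)
    return bool((nn_dist < cutoff).any())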
Example #9
 def setUp(self):
     test_random_far.setUp(self)
     self.kdtree = PeriodicCKDTree(self.bounds, self.data)
Example #10
def boundary_stk(xvol, yvol, zvol, x_same_zone_bn, y_same_zone_bn, z_same_zone_bn, vol_same_zone_bn, zn, rad_val):
	# This function takes in the location of the boundary points (likely volume averaged) and
	# bins them to find the profile

	# Create tree of volume weighted boundary points
	boundary_pts = zip(xvol, yvol, zvol) 
	periodic_tree_boundary = PeriodicCKDTree(bounds, boundary_pts)

	x_part = []
	y_part = []
	z_part = []

	# Find particles within rad_val of zone radius that are not in zone
	idx = periodic_tree.query_ball_point([x_vol[zn],y_vol[zn],z_vol[zn]],rad_val)

	new_idx = [] # index of particles not in zone, but within 2*R_eff of zone
	for i in idx:
		if zone[i] != zn:
			new_idx.append(i)
			x_part.append(x[i])
			y_part.append(y[i])
			z_part.append(z[i])

	cls_dist = []
	cls_idx = []
	cls_dist_non_zn = []
	cls_idx_non_zn = []

	# Find closest distance for each particle in a zone to the boundary particle
	for i in range(0,len(x_same_zone_bn)):
		dist, idx = periodic_tree_boundary.query([x_same_zone_bn[i],y_same_zone_bn[i],z_same_zone_bn[i]])
		cls_dist.append(dist)
		cls_idx.append(idx)

	# Find closest distance for each particle not in a zone to the boundary particle
	for i in range(0,len(x_part)):
		dist, idx = periodic_tree_boundary.query([x_part[i],y_part[i],z_part[i]])
		cls_dist_non_zn.append(dist)
		cls_idx_non_zn.append(idx)

	# Calculate density for each cell in the zone
	# den_same_zone_bn = [(1./volume) for volume in vol_same_zone_bn[0]]
	vol_same_zone_bn = [(volume) for volume in vol_same_zone_bn[0]]

	# Calculate density for each particle 
	vol_non_zn_part = []
	for i in new_idx:
		# den_non_zn_part.append(1./vol[i])
		vol_non_zn_part.append(vol[i])
	
	# Bin the density, distance, and number counts from 0 to 2.5.  This binning is normalized to effective radius of each zone 
	den_bins = [] 
	dist_bins = []
	ncnt_bins = []

	den_bins_non_zn = []
	dist_bins_non_zn = []
	ncnt_bins_non_zn = []

	# Find den, dist, cnt for particles in zone
	for i in range(0,len(bins)-1):
		# Density, distance, and num counts for each bin.  Bins are normalized to the effective radius of each zone
		den_temp, dist_temp, ncnt_temp = boundary_bin(vol_same_zone_bn, cls_dist, bins[i], bins[i+1])

		# Make arrays of den, dist, num counts for the bins
		# ("x != np.nan" is always True, so empty bins must be caught with np.isnan)
		if not np.isnan(den_temp):
			den_bins.append(den_temp)
		else:
			den_bins.append(0)

		if not np.isnan(dist_temp):
			dist_bins.append(dist_temp)
		else:
			dist_bins.append(0)
		if not np.isnan(ncnt_temp):
			ncnt_bins.append(ncnt_temp)
		else:
			ncnt_bins.append(0)

	# Find den, dist, cnt for particle not in zone
	for i in range(0,len(bins)-1):
		# Density, distance, and num counts for each bin.  Bins are normalized to the effective radius of each zone
		den_temp2, dist_temp2, ncnt_temp2 = boundary_bin(vol_non_zn_part, cls_dist_non_zn, bins[i], bins[i+1])

		# Make arrays of den, dist, num counts for the bins
		if not np.isnan(den_temp2):
			den_bins_non_zn.append(den_temp2)
		else:
			den_bins_non_zn.append(0)

		if not np.isnan(dist_temp2):
			dist_bins_non_zn.append(dist_temp2)
		else:
			dist_bins_non_zn.append(0)
		if not np.isnan(ncnt_temp2):
			ncnt_bins_non_zn.append(ncnt_temp2)
		else:
			ncnt_bins_non_zn.append(0)

	return den_bins, dist_bins, ncnt_bins, den_bins_non_zn, dist_bins_non_zn, ncnt_bins_non_zn
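boundary_stk tests its binned values with np.isnan() rather than comparing against np.nan, because NaN compares unequal to everything, including itself:

import numpy as np

x = float('nan')
print(x != np.nan)   # True -- and so is 3.0 != np.nan, for any value
print(np.isnan(x))   # True -- the correct test for an empty bin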
Example #11
		y_slice_zone.append(y_same_zone[i])

# Effective radii of each cell in slice that's within a zone
for i in slice_zone_idx:
	r_eff_zone_slice.append(r_eff_same_zone[i])

denminrad = np.int(find_nearest(x_slice_zone, x_denmin[np.int(zone[arb_ind])]))

#################################################################################

### CREATE TREE FOR X,Y,Z COORDINATES FOR ALL HALOS #########################

# Create tree with periodic boundary conditions
halos = zip(x.ravel(), y.ravel(), z.ravel()) #makes x,y,z single arrays
bounds = np.array([Lbox,Lbox,Lbox])
periodic_tree = PeriodicCKDTree(bounds, halos)

#############################################################################


### CREATE SPHERICAL DENSITY PROFILE FOR A SINGLE ZONE ###############################

# This will take any void and build shells around it up to 2*R_v
# and find the number density per shell using the volume of the shell.

R_shell = np.linspace(0.001, 2*zone_rad[np.int(zone[arb_ind])], 20) #shells from ~0 to 2*R_v in units of Mpc/h
V_shell = ((4.*pi)/3.)*R_shell**3. #volume of the sphere of radius R_shell, Mpc**3 (shell volumes follow from successive differences)
tot_numden = numpart/(Lbox**3.)


count = []
Example #12
def overdensity_cylinder(gals,
                         coods,
                         R,
                         dc,
                         L,
                         pc_stats=False,
                         cluster_mass_lim=1e4,
                         n=100,
                         verbose=False):
    """
    Find overdensity statistics over the whole simulation box for cylindrical apertures.

    Args:
        gals - dataframe of galaxy properties
        coods - coordinates to calculate statistics at. Typically defined as galaxy or random coordinates.
        R - aperture radius, cMpc
        dc - half aperture depth, cMpc
        L - box length, cMpc
        pc_stats - bool, calculate completeness and purity of each region
        cluster_mass_lim - limiting descendant mass above which to classify clusters, z0_central_mcrit200
        n - chunk length
        
    Returns:
        out_stats - output statistics, numpy array of shape [len(coods), 4]
                    0 - overdensity
                    1 - completeness
                    2 - purity
                    3 - descendant mass
    """

    dimensions = np.array([L, L, L])

    if verbose: print "Building KDtree..."
    T = PeriodicCKDTree(dimensions, gals[['zn_x', 'zn_y', 'zn_z']])

    avg = float(gals.shape[0]) / L**3  # average galaxy number density, cMpc^-3

    out_stats = np.zeros((len(coods), 4))

    vol_avg = np.pi * R**2 * (2 *
                              dc) * avg  # expected galaxy count in the cylinder volume

    for j, c in coods.groupby(
            np.arange(len(coods)) //
            n):  # can't calculate distances all in one go, so need to chunk

        if verbose:  # print progress
            if j % 100 == 0:
                print round(
                    float(c.shape[0] * (j + 1)) / coods.shape[0] * 100, 2), '%'
                sys.stdout.flush()

        # find all galaxies within a sphere of radius the max extent of the cylinder
        gal_index = T.query_ball_point(c, r=(R**2 + dc**2)**0.5)

        # filter by cylinder using norm_coods()
        gal_index = [
            np.array(gal_index[k])[norm_coods(
                gals.iloc[gal_index[k]][['zn_x', 'zn_y', 'zn_z']].values,
                c.ix[k + j * n].values,
                R=R,
                half_deltac=dc,
                L=L)] for k in range(len(c))
        ]

        start_index = (j * n)  # save start index

        # calculate dgal
        out_stats[start_index:(start_index + len(c)),
                  0] = (np.array([len(x)
                                  for x in gal_index]) - vol_avg) / vol_avg

        if pc_stats:  # calculate completeness and purity statistics

            for i in range(len(gal_index)):

                cluster_ids = gals.iloc[gal_index[i]]
                cluster_ids = Counter(
                    cluster_ids[cluster_ids['z0_central_mcrit200'] >
                                cluster_mass_lim]['z0_centralId'])

                if len(cluster_ids) > 0:

                    cstats = np.zeros((len(cluster_ids), 2))

                    for k, (q, no) in enumerate(cluster_ids.items()):
                        cluster_gals = gals.ix[gals['z0_centralId'] == q]
                        cstats[k, 0] = float(no) / len(
                            cluster_gals)  # completeness
                        cstats[k, 1] = float(no) / len(gal_index[i])  # purity

                    # find id of max completeness and purity in cstats array
                    max_completeness = np.where(
                        cstats[:, 0] == cstats[:, 0].max())[0]
                    max_purity = np.where(cstats[:, 1] == cstats[:,
                                                                 1].max())[0]

                    # sometimes multiple clusters can have same completeness or purity in a single candidate
                    # - use the cluster with the highest complementary completeness/purity
                    if len(max_completeness) > 1:

                        # get matches between completeness and purity
                        matches = [x in max_purity for x in max_completeness]

                        if np.sum(matches) > 0:
                            # just use the first one
                            max_completeness = [np.where(matches)[0][0]]
                            max_purity = [np.where(matches)[0][0]]
                        else:
                            max_completeness = [
                                max_completeness[np.argmax(
                                    cstats[max_completeness, 1])]
                            ]

                    if len(max_purity) > 1:

                        matches = [x in max_completeness for x in max_purity]

                        if np.sum(matches) > 0:
                            max_completeness = [np.where(matches)[0][0]]
                            max_purity = [np.where(matches)[0][0]]

                        else:
                            max_purity = [
                                max_purity[np.argmax(cstats[max_completeness,
                                                            0])]
                            ]

                    # sometimes the cluster with the highest completeness does not have the highest purity, or vice versa
                    # - use the cluster with the highest combined purity/completeness added in quadrature
                    if max_completeness[0] != max_purity[0]:
                        max_completeness = [
                            np.argmax([pow(np.sum(x**2), 0.5) for x in cstats])
                        ]
                        max_purity = max_completeness

                    # save completeness and purity values
                    out_stats[start_index + i, 1] = cstats[max_completeness[0],
                                                           0]  # completeness
                    out_stats[start_index + i, 2] = cstats[max_purity[0],
                                                           1]  # purity

                    # save descendant mass
                    # filter by cluster id, save z0 halo mass
                    # can use either max_completeness or max_purity, both equal by this point

                    out_stats[start_index + i,
                              3] = gals.loc[gals['z0_centralId'] == cluster_ids
                                            .keys()[max_completeness[0]],
                                            'z0_central_mcrit200'].iloc[0]

                else:  # if no galaxies in aperture
                    out_stats[start_index + i, 1] = 0.
                    out_stats[start_index + i, 2] = 0.
                    out_stats[start_index + i, 3] = np.nan

    return out_stats
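The ball query at r=(R**2 + dc**2)**0.5 works because that sphere circumscribes a cylinder of radius R and half-depth dc, so it returns a superset which norm_coods() then trims. norm_coods itself is not shown; a hypothetical stand-in for the membership test it presumably applies (assuming the cylinder axis lies along z and ignoring periodic wrapping for brevity):

import numpy as np

def in_cylinder(coods, centre, R, half_deltac):
    d = coods - centre
    transverse = np.linalg.norm(d[:, :2], axis=1)   # distance from the cylinder axis
    depth = np.abs(d[:, 2])                         # offset along the axis
    return (transverse <= R) & (depth <= half_deltac)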
Example #13
def NN_finder_all(initial_config_data, cut_off_distance, box_dim, path_to_test_dir, atom_list = None, save_results = False, re_calc = False):
	"""
	A very general nearest neighbor finder function that calculates multiple atoms' nearest neighbors all at once using
	the efficient cKDTree algorithm, for the atoms whose item numbers 
	are listed inside the atom_list input argument;
	the default is to calculate all atoms inside the initial_config_data file.
	Users can customize which atoms to calculate by specifying them in atom_list
	Input arguments:
	initial_config_data: instance of pandas.Dataframe
		configuration data
	
	cut_off_distance: dict
		dictionary containing the pair cut-off distances
		currently uses tuples as keys for immutability; frozenset may be another way,
		but it collapses duplicate entries such as (1,1)
		in order to access the tuple key without order preference, both (i,j) and (j,i) are checked below
		
		https://stackoverflow.com/questions/36755714/how-to-ignore-the-order-of-elements-in-a-tuple
		https://www.quora.com/What-advantages-do-tuples-have-over-lists
		For example,
		{(1,1):3.7,(1,2):2.7,(2,2):3.0} means that atom_type 1 and 1 cut-off
		is 3.7, etc
	
	box_dim: list
		a list containing the spatial dimension of simulation box size in x, y, z
	
	path_to_test_dir: str
		str of current test result dir, under it, it save data into nn_results.pkl
		
	atom_list: list
		the list containing the item number of interested atoms whose nearest neighbors
		are being found
	
	save_results: boolean, default False
		specify whether to save the results dictionary into a nn_results_dict.pkl file
	
	Note:
	this cKDTree algorithm is efficient when you have many points whose 
	neighbors you want to find; you may save 
	substantial amounts of time by putting them in a cKDTree and using query_ball_tree
	
	for molecular simulation: 
	https://github.com/patvarilly/periodic_kdtree
	
	returns:
		nn: dict()
			key is the item id of the atom of interest
			
			value is the pandas.DataFrame of nearest neighbors for the atom
			of interest
	"""
	# set up path_to_file and check results out of this function before calling it
	# if check_results is True: 
	# if path_to_file is None or os.path.exists(path_to_file):
	# raise Exception("NN results file not found, please specify the correct path to the file")
		
	path_to_nn_results = path_to_test_dir + "/nn_results_dict.pkl"
	
	if re_calc is False:
		if os.path.exists(path_to_nn_results):
			print "nn results dictionary already calculated and saved in pkl file, skip calculation"
			return pickle.load(open(path_to_nn_results,'r'))
	nn = dict()
		
	# if there is no atom_list specified, use all atoms in initial_config_data
	if atom_list is None:
		atom_list = (initial_config_data["item"]).tolist()
	
	_data = initial_config_data
	
	groups = Atom.classify_df(_data)
	
	#_atom_data = initial_config_data[['x','y','z']]
	
	_interested_data = _data.loc[_data['item'].isin(atom_list)]
	
	interested_groups = Atom.classify_df(_interested_data)
	
	#_interested_atom = _interested_data[['x','y','z']]
	
	
	# build the efficient nearest neighbor KDTree algorithm
	# default distance metric Euclidian norm p = 2
	# create tree object using the larger points array
	for (i, int_group) in interested_groups.items():
		for (j, atom_group) in groups.items():
			# comparing atom_type_i and atom_type_j; check both key orders
			curr_cut_off = None
			for pair in [(i,j),(j,i)]:
				if pair in cut_off_distance:
					curr_cut_off = cut_off_distance[pair]
			if curr_cut_off is None:
				raise KeyError("no cut-off distance specified for pair (%s, %s)" % (i, j))
			
			# iterate over each row seems inefficient for (index, curr_atom) in int_group.iterrows()
			result_tree = PeriodicCKDTree(box_dim, atom_group[['x','y','z']].values)
			result_groups = result_tree.query_ball_point(int_group[['x','y','z']].values, curr_cut_off)
			#indices = np.unique(IT.chain.from_iterable(result_groups))
			
			#for (int_NN,(index,int_atom)) in (result_groups,int_group.iterrows()):
			k = 0
			for index,int_atom in int_group.iterrows():
				# int_NN is a list of index of NN, index is according to the order
				# in atom_group 
				# curr_NN is a dataframe storing NN found for current atom_group
				int_NN = result_groups[k]
				curr_NN = atom_group.iloc[int_NN]
				if int_atom["item"] not in nn:
					nn[int_atom["item"]] = curr_NN
				elif int_atom["item"] in nn:
					nn[int_atom["item"]] = nn[int_atom["item"]].append(curr_NN)				
				k = k + 1	
	# it is best practice to save this NN dictionary results into a pkl file 
	# to prevent rerun, if this file exists, let user know that
	# the file_of_nearest_neighbor exists before calling it
	if save_results is True:
		with open(path_to_nn_results, 'w') as f:
			pickle.dump(nn,f)
			f.close()
	return nn
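The [(i,j),(j,i)] probe above is one way to treat pair keys as unordered; another is to normalize the key order once with tuple(sorted(...)). A small sketch:

cut_off_distance = {(1, 1): 3.7, (1, 2): 2.7, (2, 2): 3.0}
normalized = {tuple(sorted(k)): v for k, v in cut_off_distance.items()}

def pair_cutoff(i, j):
    # look up an unordered pair key regardless of argument order
    return normalized[tuple(sorted((i, j)))]

print(pair_cutoff(2, 1))   # 2.7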
Example #14
def Run_Correlated_Sphere_Packing(input_parameters_filename='Parameters.in', seed_increment = 0):
    parameters = read_input_parameters(input_parameters_filename)
    reinit_flag=0

    '''increment in case of reinitialization'''
    parameters['seed']+=seed_increment

    '''use input parameters'''
    ndimensions,xmin,xmax,ymin,ymax,zmin,zmax,num_neighbors,set_leafsize_factor,periodic_geometry,kdt_eps,pnorm,filename,radii_dist,radius_mu,radius_sig2,nbins,nsamples,show_hist,target_porosity,percentilemin,percentilemax,find_all_neighbors,search_radius_factor_of_max_diameter,set_dt,blfac,damping,tstep,tprint,tstepmax,force_abstol,fmin,force_absmax_factor,force_reltol,max_overlap_factor,seed,Cmu,Csig,set_leafsize \
    =parameter_values(parameters, *parameters.values())
#     print(xmin,xmax)
    RandomState = np.random.RandomState(seed)

    '''form uniform hexahedral mesh'''
    ncells = nsamples
    xx=np.linspace(xmin,xmax,ncells+1)
    dcell = np.diff(xx)[0]
    domain_volume = (xmax-xmin)**3

    periodic_bounds = np.array([xmax,ymax,zmax])[:ndimensions]



    #PART I
    '''sample radii'''
    # #sample radii 
    # while target_porosity < v0:
    # v0 = (radii**3 * 4*np.pi/(3 * (xmax-xmin)**3))
    # v0 = v0/v0.sum()
    if(radii_dist=='lognormal'):
        mu = np.log(radius_mu)
        sig2 = radius_sig2
        # np.random.lognormal already returns exp(normal), so exponentiating
        # again would square the spread; draw the radii directly
        radii = RandomState.lognormal(mu,sig2,nsamples)

    if(show_hist==True):
        plt.hist(radii,nbins)
        plt.show()

    pmin = np.percentile(radii,percentilemin)  # no trailing comma (it would make pmin a tuple)
    pmax = np.percentile(radii,percentilemax)
    radii = radii[radii<pmax]
    radii = radii[radii>pmin]
    nsamplesclip = nsamples - radii.shape[0]
    nsamples = radii.shape[0]# - nsamplesclip
    radii = np.sort(radii)


    
    
    print('1,99 percentiles ', pmin,pmax)
    print('max, min, mean', radii.max(),radii.min(),radii.mean())



    '''rescale radii to obtain desired porosity'''
    # total sphere volume fills the solid fraction (1 - target_porosity) of the domain
    rscale = ( (  np.sum(4*np.pi*(1/3.) * radii**3) ) / ( domain_volume * (1 - target_porosity) ) )**(1/3)
    radii_scaled = radii/rscale
    radius_mu_scaled = radius_mu / rscale
    radius_sig2_scaled = radius_sig2 / rscale**2
    rmax = ( radii_scaled ).max()
    dmax = 2*rmax
    delta = dcell - 2*rmax
    search_radius = 2*dmax#+delta




    '''sample isotropic gaussian correlation'''
    Cnorm = RandomState.multivariate_normal(Cmu,Csig,nsamples)#,[nsamples,ndimensions])

    '''sample points uniformly in space '''
    pts = RandomState.uniform(xmin,xmax,[nsamples,ndimensions])

    '''sort radii and points '''
    radii_scaled = radii_scaled[::-1]
    pts = pts[::-1,:]
    
    '''get nearest neighbor distances'''
    t0 = time.time()

    if(periodic_geometry==True):
        kdt = PeriodicCKDTree(periodic_bounds, pts)
    else:
        kdt = scipy.spatial.KDTree(pts,leafsize=set_leafsize)
    if(find_all_neighbors==True):
        dist,neighbors = kdt.query(pts, k=num_neighbors, eps=kdt_eps, p = pnorm)

        print("NNE time " , time.time()-t0)

        '''sort by func(distances) eg sum'''
        distsum = (dist[:,1:].sum(axis=1))
        distmean = (dist[:,1:].mean(axis=1))
        distmedian = np.median(dist[:,1:],axis=1)
        distmin = (dist[:,1:].min(axis=1))
        distmax = (dist[:,1:].max(axis=1))

        isort_pts = np.argsort(distmedian)
        isort_radii = np.argsort(isort_pts)

        sorted_radii = radii[isort_radii].copy()
        edges = from_neighbors_to_edges(neighbors)[0]

    '''overlap'''
    max_overlap = radius_mu_scaled * max_overlap_factor
    #find neighbors I interesect (in 3/9/27 cells), use centers and radii to move away by dx if ||dx||<overlap_max


    '''BC, EQ separation, pore throat size, collision distances, PD'''
    boundary_layer = [blfac, xmax *(1- blfac)]
    iboundary = np.any(((pts<boundary_layer[0]).astype(int)+(pts>boundary_layer[1]).astype(int)) , axis=1)
    iinterior = np.all((pts>boundary_layer[0]).astype(int)*(pts<boundary_layer[1]).astype(int),axis=1)
    print(iboundary.shape,iinterior.shape)
    print(iinterior.sum(),iboundary.sum())

    #formerly, scaled_radii was defined another way
    scaled_radii = radii_scaled.copy()
    scaled_radius_mu = radius_mu_scaled.copy()

    num_inclusions_per_dim = int((nsamples)**(1/ndimensions))
    if(find_all_neighbors==True):
        eq_length_factor = (scaled_radii[neighbors[:,0:1]] + scaled_radii[neighbors[:,1:]])
    else:
        eq_length_factor = (4 * np.pi * (1/3) * (scaled_radii**3).mean())**(1/3) * np.array([[1]]) #???
    pore_space_per_particle = (xmax - eq_length_factor.mean()*num_inclusions_per_dim)/num_inclusions_per_dim
    medimean_eq_length = np.median( eq_length_factor.mean(axis=1))
    porespace_per_dim = num_inclusions_per_dim * medimean_eq_length
    porespace_per_particle  = (porespace_per_dim / (num_inclusions_per_dim - 1))/2
    scaled_radius_diam = scaled_radius_mu*2
    #set spacing
    collision_length_factor = eq_length_factor.copy()# - pore_space_per_particle /2
    eq_length_factor = eq_length_factor + pore_space_per_particle /2
    horizon_factor = eq_length_factor * 1
    # nneighbors = neighbors.shape[1]
    tsteps = np.arange(0,tstepmax)

    zmin = ymin = xmin
    zmax = ymax = xmax

    (pts.T[-1]-np.mod(pts.T[-1],xmax)).max()
    # pts.T[-1][ (pts.T[-1] - radii.T)>xmax]
    # (p - radii.T)>xmax
    (pts.T[-1]+radii_scaled > xmax).sum()
    ((pts**2).sum(axis=1)+radii_scaled**2 > xmax**2).sum()




    cond, conds = where_boundary_intersect(parameters,pts,radii_scaled)
    conds
    # (np.linalg.norm(pts,2,axis=1)+radii_scaled > xmax).sum()
    # pts.min()



    flag=0
    # while flag==0:
    pts, radii_scaled,flag = get_reflected_pts(pts,radii_scaled,xmin,xmax)
    print(radii_scaled.shape)
    nsamples = radii_scaled.shape[0]
    assert(radii_scaled.shape[0] == pts.shape[0])


    ''' Detect Collisions and Translate Spheres '''
    registered = []
    unregistered = [i for i in range(nsamples)]
    boundary = []
    t_list = []
    tlast = time.time()
    for i,(x,r) in enumerate(zip( pts , radii_scaled)):
        if(i%1000==0):
            print(i,x,r, time.time())
        if(i==0):
            registered.append(i)
            unregistered.remove(i)
            pts[i] = x
            radii_scaled[i] = r
        else:
            x,reinit_flag = overlap_correction(i, x, r, pts, radii_scaled, kdt, registered, unregistered,dmax, search_radius_factor_of_max_diameter, pnorm=2, eps=kdt_eps)
            if(reinit_flag==1):
                break;
            registered.append(i)
            unregistered.remove(i)
            pts[i] = x
            radii_scaled[i] = r

        t_list.append(time.time() - tlast)
        tlast = time.time()

    if(reinit_flag==1):
        print(" \n Reinitializing Simulation \n")
        return Run_Correlated_Sphere_Packing(input_parameters_filename, seed_increment+1)
    else:
        print("\n No collisions found, continuing.. \n")
    t_list = np.array(t_list)

    registered = np.array(registered)

    pvolumes = radii_scaled**3 * 4 * np.pi / 3 if ndimensions==3 else radii_scaled**2 * np.pi 

    return parameters, radii_scaled, registered, unregistered, pts, pvolumes
Example #15
 def setUp(self):
     test_small.setUp(self)
     self.kdtree = PeriodicCKDTree(self.bounds, self.data, leafsize=1)
Example #16
m = 3  # dimension (assumed; the original fragment does not show where m is set)
n = 10000
r = 1000

bounds = np.ones(m)
data = np.concatenate(
    (np.random.randn(n // 2, m), np.random.randn(n - n // 2, m) + np.ones(m)))
queries = np.concatenate(
    (np.random.randn(r // 2, m), np.random.randn(r - r // 2, m) + np.ones(m)))

print "dimension %d, %d points" % (m, n)

t = time.time()
T1 = PeriodicKDTree(bounds, data)
print "PeriodicKDTree constructed:\t%g" % (time.time() - t)
t = time.time()
T2 = PeriodicCKDTree(bounds, data)
print "PeriodicCKDTree constructed:\t%g" % (time.time() - t)

t = time.time()
w = T1.query(queries)
print "PeriodicKDTree %d lookups:\t%g" % (r, time.time() - t)
del w

t = time.time()
w = T2.query(queries)
print "PeriodicCKDTree %d lookups:\t%g" % (r, time.time() - t)
del w

T3 = PeriodicCKDTree(bounds, data, leafsize=n)
t = time.time()
w = T3.query(queries)
print "PeriodicCKDTree %d lookups (leafsize=n):\t%g" % (r, time.time() - t)  # (assumed ending; the fragment was truncated)
Example #17
    def delta_and_sigma_vz_galaxy(self, array_files=None, dictionary=False):
        """
        Calculates the density profile and velocity dispersion of voids in real space.
        Requires xi_vg_real_func() to be run first as this gives the upper and lower bounds
        for the radius array to avoid out of bounds for splines.
        """
        #radius_array = np.linspace(0, self.r_corr[-1], self.N + 1)
        radius_array = np.linspace(1, 200, self.N + 1)
        if array_files is None:
            bounds = np.array([self.box_size, self.box_size, self.box_size])
            tree = PeriodicCKDTree(bounds, self.galaxy_cat)

            delta = np.zeros(self.N + 1)
            v_z = np.zeros(self.N + 1)
            E_vz = np.zeros(self.N + 1)
            E_vz2 = np.zeros(self.N + 1)
            sigma_vz = np.zeros(self.N + 1)
            galaxies_in_shell_arr = np.zeros(self.N + 1)

            print "Starting density profile and velocity dispersion calculation"
            for i in range(len(self.void_cat[:, 0])):
                current_number_of_galaxies = 0
                current_E_vz = 0
                current_E_vz2 = 0
                E_vz_in_shell = 0
                E_vz2_in_shell = 0

                for j in range(1, self.N + 1):
                    # Find galaxy position and velocity in a given radius around the current void
                    neighbor_inds = tree.query_ball_point(self.void_cat[i, :],
                                                          r=radius_array[j])
                    shell_volume = 4.0 * np.pi * (radius_array[j]**3 -
                                                  radius_array[j - 1]**3) / 3.0
                    velocity_near_point = self.galaxy_vz[neighbor_inds]
                    galaxies_near_point = self.galaxy_cat[neighbor_inds]
                    galaxies_near_point = len(galaxies_near_point[:, 0])
                    galaxies_in_shell = galaxies_near_point - current_number_of_galaxies  # Subtracting previous sphere to get galaxies in current shell.

                    # calculating terms used in expectation values E[v_z**2] and E[v_z]**2
                    if galaxies_near_point > 0:
                        E_vz2_in_shell = (sum(velocity_near_point**2) -
                                          current_E_vz2)
                        E_vz_in_shell = (sum(velocity_near_point) -
                                         current_E_vz)

                    galaxies_in_shell_arr[j] += galaxies_in_shell

                    E_vz[j] += E_vz_in_shell
                    E_vz2[j] += E_vz2_in_shell
                    delta[j] += galaxies_in_shell / shell_volume

                    current_E_vz += E_vz_in_shell
                    current_E_vz2 += E_vz2_in_shell
                    current_number_of_galaxies += galaxies_in_shell

            delta /= (len(self.void_cat[:, 0]) * len(self.galaxy_cat[:, 0]) /
                      self.box_size**3)
            delta -= 1
            for j in range(self.N + 1):
                if galaxies_in_shell_arr[j] > 0:
                    E_vz[j] /= galaxies_in_shell_arr[j]
                    E_vz2[j] /= galaxies_in_shell_arr[j]
            # clamp at zero: sampling noise can make E[v_z**2] - E[v_z]**2 slightly negative
            sigma_vz = np.sqrt(np.maximum(E_vz2 - E_vz**2, 0.0))

            # Replacing zero values to avoid division by zero later
            sigma_vz[np.where(sigma_vz < 10.0)] = 100.0

            if dictionary:
                #Output for victor code
                r_dict = np.linspace(2.11, 118.0, 30)
                sigma_vz_spline = interpolate.interp1d(radius_array, sigma_vz)
                delta_spline = interpolate.interp1d(radius_array, delta)

                delta_new = delta_spline(r_dict)
                sigma_vz_new = sigma_vz_spline(r_dict)
                vr_dict = {}
                vr_dict["rvals"] = r_dict
                vr_dict["sigma_v_los"] = sigma_vz_new
                np.save(
                    "datafiles/velocity_profiles/sigma_vz_dict" + self.handle,
                    vr_dict)

                delta_dict = {}
                delta_dict["rvals"] = r_dict
                delta_dict["delta"] = delta_new
                np.save("datafiles/density_profiles/delta_dict" + self.handle,
                        delta_dict)

            fig, ax = plt.subplots()
            ax.plot(radius_array, delta)
            fig.savefig("delta_test.png")
            fig, ax = plt.subplots()
            ax.plot(radius_array, sigma_vz)
            fig.savefig("sigmavz_test.png")
            print len(delta)
            np.save("datafiles/density_profiles/delta" + self.handle, delta)
            np.save("datafiles/velocity_profiles/sigma_vz" + self.handle,
                    sigma_vz)
        else:

            delta = np.load(array_files[0])
            sigma_vz = np.load(array_files[1])
            fig, ax = plt.subplots()
            ax.plot(radius_array, delta)
            fig.savefig("delta_test.png")
            fig, ax = plt.subplots()
            ax.plot(radius_array, sigma_vz)
            fig.savefig("sigmavz_test.png")

        print "Splining density profile"
        print len(radius_array), len(delta)
        self.delta = interpolate.interp1d(radius_array, delta, kind="cubic")
        self.sigma_vz = interpolate.interp1d(radius_array,
                                             sigma_vz,
                                             kind="cubic")

        return self.delta, self.sigma_vz
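The dispersion above is accumulated with the one-pass identity Var[v] = E[v**2] - E[v]**2, which is what makes the shell-by-shell bookkeeping possible; sampling noise can push it slightly negative, which is why the square root above is clamped at zero. A check of the identity on plain numbers:

import numpy as np

v = np.array([1.0, 2.0, 4.0])
var_direct = ((v - v.mean())**2).mean()
var_identity = (v**2).mean() - v.mean()**2
print(var_direct, var_identity)   # both 1.5555..., sigma is the square root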
Example #18
delta = dcell - 2*rmax
search_radius = 2*dmax#+delta


    
'''sample isotropic gaussian correlation'''
Cnorm = np.random.multivariate_normal(Cmu,Csig,nsamples)#,[nsamples,ndimensions])

'''sample points'''
pts = np.random.uniform(xmin,xmax,[nsamples,ndimensions])

'''get nearest neighbor distances'''
t0 = time.time()

if(periodic_geometry==True):
    kdt = PeriodicCKDTree(periodic_bounds, pts)
else:
    kdt = scipy.spatial.KDTree(pts,leafsize=set_leafsize)
if(find_all_neighbors==True):
    dist,neighbors = kdt.query(pts, k=num_neighbors, eps=kdt_eps, p = pnorm)
    
    print("NNE time " , time.time()-t0)

    '''sort by func(distances) eg sum'''
    distsum = (dist[:,1:].sum(axis=1))
    distmean = (dist[:,1:].mean(axis=1))
    distmedian = np.median(dist[:,1:],axis=1)
    distmin = (dist[:,1:].min(axis=1))
    distmax = (dist[:,1:].max(axis=1))

    isort_pts = np.argsort(distmedian)
Example #19
xmin = np.min(x[:, 0])  # (assumed: this opening line was cut off in the fragment)
xmax = np.max(x[:, 0])
dx = xmax - xmin
ymin = np.min(x[:, 1])
ymax = np.max(x[:, 1])
dy = ymax - ymin
zmin = np.min(x[:, 2])
zmax = np.max(x[:, 2])
dz = zmax - zmin
s = (len(x), 10)
Nlist = np.zeros(s, dtype=np.int)
# Boundaries (0 or negative means open boundaries in that dimension)
#changing bounds manually
bounds = np.array([dx, dy, dz])  # all three dimensions periodic (positive bounds)

# Build kd-tree
T = PeriodicCKDTree(bounds, x)

# Find neighbors within a fixed distance of each point
print "Building Neighborlist..."

neighbors = []
for i in xrange(len(x)):
    localneigh = T.query_ball_point(
        x[i], r=2.1)  #r = cutoff (Angstrom) for making Nlist
    #localneigh.insert(0,i)
    localneigh.remove(i)
    localneigh.insert(0, i)  # put atom i first in its own neighbor list
    neighbors.append(localneigh)
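For reference, the "4 closest neighbors" query that the original comments alluded to (but the code no longer performs) would look like this, reusing T and bounds from the example above:

import numpy as np

query_point = np.random.uniform(0.0, 1.0, 3) * bounds
d, idx = T.query(query_point, k=4)   # d[j], idx[j] = distance and index of the jth closest point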
Example #20
 def setUp(self):
     self.data = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],
                           [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])
     self.bounds = 1.1 * np.ones(3)
     self.kdtree = PeriodicCKDTree(self.bounds, self.data)
Example #21
def Run_Correlated_Sphere_Packing(input_parameters_filename="Parameters.in",
                                  seed_increment=0,
                                  seed=None,
                                  periodic_geometry=None,
                                  nsamples=None,
                                  target_porosity=None):
    parameters = read_input_parameters(input_parameters_filename)
    reinit_flag = 0
    '''debugging, ignore this'''
    print(','.join(np.array([name for name in parameters.keys()])))

    try:
        if (seed is not None):
            parameters['seed'] = seed
    except Exception as e:
        print('no seed specified')

    try:
        if (periodic_geometry is not None):
            parameters['periodic_geometry'] = periodic_geometry
    except Exception as e:
        print('no periodic geometry specified')

    try:
        if (nsamples is not None):
            parameters['nsamples'] = nsamples
    except Exception as e:
        print('no nsamples specified')

    try:
        if (target_porosity is not None):
            parameters['target_porosity'] = target_porosity
    except Exception as e:
        print('no porosity specified')
    '''increment in case of reinitialization'''
    print(" seed_increment ", seed_increment, " type ", type(seed_increment))
    parameters['seed'] += seed_increment
    '''use input parameters'''
    periodic_geometry,ndimensions,xmin,xmax,ymin,ymax,zmin,zmax,seed,radius_mu,radius_sig2,Cmu,Csig,filename,radii_dist,nsamples,target_porosity,nbins,num_neighbors,set_leafsize_factor,kdt_eps,pnorm,search_radius_factor_of_max_diameter,find_all_neighbors,percentilemin,percentilemax,show_hist,set_dt,blfac,damping,tstep,tprint,tstepmax,force_abstol,fmin,force_absmax_factor,force_reltol,max_overlap_factor,set_leafsize_factor \
    =parameter_values(parameters, *parameters.values())
    #     print(xmin,xmax)
    RandomState = np.random.RandomState(seed)
    '''form uniform hexahedral mesh'''
    ncells = nsamples
    xx = np.linspace(xmin, xmax, ncells + 1)
    dcell = np.diff(xx)[0]
    domain_volume = (xmax - xmin) * (ymax - ymin) * (zmax - zmin)

    periodic_bounds = np.array([xmax, ymax, zmax])[:ndimensions]

    #PART I
    '''sample radii'''
    # #sample radii
    # while target_porosity < v0:
    # v0 = (radii**3 * 4*np.pi/(3 * (xmax-xmin)**3))
    # v0 = v0/v0.sum()
    if (radii_dist == 'lognormal'):
        # np.random.lognormal already returns exp(normal); do not exponentiate again
        radii = RandomState.lognormal(np.log(radius_mu), radius_sig2, nsamples)

    if (show_hist == True):
        plt.hist(radii, nbins)
        plt.savefig('hist.png')

    pmin = np.percentile(radii, percentilemin)  # no trailing comma (it would make pmin a tuple)
    pmax = np.percentile(radii, percentilemax)
    radii = radii[radii < pmax]
    radii = radii[radii > pmin]
    nsamplesclip = nsamples - radii.shape[0]
    nsamples = radii.shape[0]  # - nsamplesclip
    radii = np.sort(radii)

    print('1,99 percentiles ', pmin, pmax)
    print('max, min, mean', radii.max(), radii.min(), radii.mean())
    '''rescale radii to obtain desired porosity'''
    rscale = ((np.sum(4 * np.pi * (1 / 3.) * radii**3)) /
              (domain_volume * (1 - target_porosity)))**(1 / 3)
    radii_scaled = radii / rscale
    radius_mu_scaled = radius_mu / rscale
    radius_sig2_scaled = radius_sig2 / rscale**2
    rmax = (radii_scaled).max()
    dmax = 2 * rmax
    delta = dcell - 2 * rmax
    search_radius = 2 * dmax  #+delta

    #TBD
    '''sample isotropic gaussian correlation'''
    Cnorm = RandomState.multivariate_normal(
        Cmu, Csig, nsamples)  #,[nsamples,ndimensions])
    '''sample points uniformly in space '''
    pts = RandomState.uniform(0, 1, [nsamples, ndimensions])
    pts[:, 0] = (pts[:, 0]) * (xmax - xmin) + xmin
    pts[:, 1] = (pts[:, 1]) * (ymax - ymin) + ymin
    pts[:, 2] = (pts[:, 2]) * (zmax - zmin) + zmin
    '''sort radii and points '''
    radii_scaled = radii_scaled[::-1]
    pts = pts[::-1, :]
    '''get nearest neighbor distances'''
    t0 = time.time()

    if (periodic_geometry == True):
        kdt = PeriodicCKDTree(periodic_bounds, pts)
    else:
        kdt = scipy.spatial.KDTree(
            pts)  #,leafsize=set_leafsize_factor * num_neighbors )
    if (find_all_neighbors == True):
        dist, neighbors = kdt.query(pts, k=num_neighbors, eps=kdt_eps, p=pnorm)

        print("NNE time ", time.time() - t0)
        '''sort by func(distances) eg sum'''
        distsum = (dist[:, 1:].sum(axis=1))
        distmean = (dist[:, 1:].mean(axis=1))
        distmedian = np.median(dist[:, 1:], axis=1)
        distmin = (dist[:, 1:].min(axis=1))
        distmax = (dist[:, 1:].max(axis=1))

        isort_pts = np.argsort(distmedian)
        isort_radii = np.argsort(isort_pts)

        sorted_radii = radii[isort_radii].copy()
        edges = from_neighbors_to_edges(neighbors)[0]
    '''overlap'''
    max_overlap = radius_mu_scaled * max_overlap_factor
    #find neighbors I interesect (in 3/9/27 cells), use centers and radii to move away by dx if ||dx||<overlap_max
    '''BC, EQ separation, pore throat size, collision distances, PD'''
    boundary_layer = [blfac, xmax * (1 - blfac)]
    print("BOUINDARY LAYER", boundary_layer)
    iboundary = np.any(((pts < boundary_layer[0]).astype(int) +
                        (pts > boundary_layer[1]).astype(int)),
                       axis=1)
    iinterior = np.all((pts > boundary_layer[0]).astype(int) *
                       (pts < boundary_layer[1]).astype(int),
                       axis=1)
    print("IBOUNDARY ", iboundary)
    print("IINTERIOR ", iinterior)
    print('\n \n \n ', " NUMBER OF BOUNDARY SPHERES ", iboundary.sum(),
          " TOTAL CONSIDERED ",
          iboundary.shape, '\n \n \n ', "NUMBER OF INTERIOR SPHERES",
          iinterior.sum(), " TOTAL CONSIDERED ", iinterior.shape, '\n \n \n ')
    # ,iboundary.sum()
    ''' Detect Collisions and Translate Spheres '''
    registered = []
    unregistered = [i for i in range(nsamples)]
    boundary = []
    t_list = []
    tlast = time.time()
    for i, (x, r) in enumerate(zip(pts, radii_scaled)):
        if (i % 1000 == 0):
            print(i, x, r, time.time())
        if (i == 0):
            registered.append(i)
            unregistered.remove(i)
            pts[i] = x
            radii_scaled[i] = r
        else:
            x, reinit_flag = overlap_correction(
                i,
                x,
                r,
                pts,
                radii_scaled,
                kdt,
                registered,
                unregistered,
                dmax,
                search_radius_factor_of_max_diameter,
                pnorm=2,
                eps=kdt_eps)
            if (reinit_flag == 1):
                break
            registered.append(i)
            unregistered.remove(i)
            pts[i] = x
            radii_scaled[i] = r

        t_list.append(time.time() - tlast)
        tlast = time.time()

    if (reinit_flag == 1):
        print(" \n Reinitializing Simulation \n")
        return Run_Correlated_Sphere_Packing(input_parameters_filename,
                                             seed_increment + 1)
    else:
        print("\n No collisions found, continuing.. \n")
    t_list = np.array(t_list)

    registered = np.array(registered)

    print("STORE BOUNDARY SPHERE DATA")
    indices_boundary = np.where(iboundary)
    print("STORE INTERIOR SPHERE DATA")
    indices_interior = np.where(iinterior)

    interior_points = pts[indices_interior]
    boundary_points = pts[indices_boundary]

    print("SET POINT IDs (to keep track of boundary image spheres)")
    idx_points = np.arange(0, len(registered))

    boundary, boundary_indices, boundary_radii = [], [], [
    ]  #np.array([]), np.array([]), np.array([])
    if (periodic_geometry == 1):
        print(
            "COPY BOUNDARY POINTS TO IMAGE SPHERES ACROSS PERIODIC BOUNDARIES")
        print("NUM POINTS BEFORE BOUNDARY IMAGE COPY", pts.shape)
        flag = 0
        # while flag==0:
        # pts, radii_scaled,idx_points, flag = get_reflected_pts(pts,idx_points, radii_scaled,xmin,xmax)

        pts, radii_scaled, idx_points, boundary, boundary_indices, boundary_radii, flag = get_reflected_pts(
            pts, idx_points, boundary, boundary_indices, boundary_radii,
            radii_scaled, xmin, xmax)
        print(radii_scaled.shape)
        nsamples = radii_scaled.shape[0]
        assert (radii_scaled.shape[0] == pts.shape[0])
        print("NUM POINTS AFTER ", pts.shape)

    pvolumes = radii_scaled**3 * 4 * np.pi / 3 if ndimensions == 3 else radii_scaled**2 * np.pi
    porosity = (domain_volume - pvolumes.sum()) / domain_volume  # void fraction

    print("\n domain size ", " [xmin,xmax] ", xmin, xmax, " [ymin,ymax] ",
          ymin, ymax, " [zmin,zmax] ", zmin, zmax, " volume ", domain_volume)
    print('particle volumes: sum, mean, median, min, max', pvolumes.sum(),
          pvolumes.mean(), np.median(pvolumes), pvolumes.min(), pvolumes.max())
    print("\n \n \n porosity ", porosity)
    print("\n number of spheres ", registered.shape)
    print("\n number of registered spheres ", registered.shape)
    print("\n number of unregistered spheres ", registered.shape)
    print("\n sphere distribution parameters ", radius_mu, radius_sig2)
    print("\n mean coordination number ", )
    print("\n \n \n ")
    return parameters, radii_scaled, registered, unregistered, pts, pvolumes, idx_points, boundary, boundary_indices, boundary_radii
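The rescaling step in both versions of this function targets porosity by fixing the solid fraction, with porosity meaning the void fraction. A quick consistency check under those definitions (illustrative radii only):

import numpy as np

domain_volume = 1.0
radii_scaled = np.array([0.05, 0.04, 0.03])
pvolumes = 4.0 / 3.0 * np.pi * radii_scaled**3
porosity = 1.0 - pvolumes.sum() / domain_volume   # void fraction, matching what the function returns
print(porosity)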
Example #22
def event_local_atom_index(initial_config_data, triggered_atom_list, num_of_involved_atom, path_to_init_sad, box_dim, save_results = True, re_calc = False):
	"""
	this function gets the local atom atom_ids as a list of lists (that will be flattened into a single list)
	from the triggered atoms' item_ids (those in triggered_atom_list)
	and their num_of_involved_atom NN
	For example, each triggered atom has its k nearest neighbors. Then for x triggered atoms,
	it would be an x element list of lists (each list with k elements). 
	It will be flattened into a list with k*x elements, which will be returned 
	
	As in NN_finder_all, PeriodicCKDTree can be easily extended to
	find the kth NN of each of the triggered atoms.
	
	Currently, only one atom is triggered,
	In this case, triggered_atom_list is a single element list
	
	return: 
	triggered_atoms_NN_list: a list 
		flattened from a numpy array of shape (len(triggered_atom_list)*k, )
		containing all NN of all triggered atoms, preserving the order
		of appearance of the triggered atoms in triggered_atom_list,
		e.g. the 1st k elements in triggered_atoms_NN_list belong to the 1st
		element of triggered_atom_list, etc.
	"""
	
	path_to_local_nn_results = path_to_init_sad + "/local_nn_results_dict.pkl"
	print "the triggering atoms are:", triggered_atom_list
	if re_calc is False:
		if os.path.exists(path_to_local_nn_results):
			print "local nn results already calculated and saved in local_nn_results_dict.pkl file, skip calculation"
			local_nn = pickle.load(open(path_to_local_nn_results,'r'))
			return (np.array(local_nn.values()).flatten()).tolist()
	local_nn = dict()
	
	if triggered_atom_list is None:
		raise Exception("try to calculate the NN for triggered atoms, but no triggered atom has been specified")
	
	_data = initial_config_data
	
	_interested_data = _data.loc[_data['item'].isin(triggered_atom_list)]
	
	result_tree = PeriodicCKDTree(box_dim, _data[['x','y','z']].values)
	# the NN found by the query method include the triggered atom itself as the 1st NN, with distance 0
	distances,locations = result_tree.query(_interested_data[['x','y','z']].values, num_of_involved_atom)
	
	if len(triggered_atom_list) > 1 and num_of_involved_atom >1:
		# the 1st element in the returned numpy array with shape (k*len(triggered_atom_list),) 
		# is the triggered atom itself, since the distance to itself is 0, which is the minimum;
		# locations are ordered by increasing distance to the triggered atom
		k=0
		for index,row in _interested_data.iterrows():
			NN_array = np.array((_data.iloc[locations[k]])['item'])
			local_nn[row['item']]= (NN_array).tolist()
			k=k+1
		loc_index = locations.flatten()
		final_locations = np.array((_data.iloc[loc_index])['item']).tolist()
	elif len(triggered_atom_list) == 1:
		
		if type(locations) == int or type(locations) == float:
			loc_index = np.array([locations])
		else:
			loc_index = locations.flatten()
		locations = np.array((_data.iloc[loc_index])['item']).tolist()
		local_nn[triggered_atom_list[0]] = locations
		final_locations = locations
	else:
		# len(triggered_atom_list) >1 and num_of_involved_atom == 1:
		for x in triggered_atom_list:
			local_nn[x] = [x]
		final_locations = triggered_atom_list
	if save_results is True:
		with open(path_to_local_nn_results, 'w') as f:
			pickle.dump(local_nn,f)
			f.close()
	return final_locations
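The type check on locations above exists because, mirroring scipy's cKDTree, a query for a single point with k=1 returns scalars rather than arrays. A minimal sketch of the two return shapes (assumes periodic_kdtree is installed; exact scalar types may vary by version):

import numpy as np
from periodic_kdtree import PeriodicCKDTree

tree = PeriodicCKDTree(np.ones(3), np.random.rand(10, 3))

d1, i1 = tree.query(np.random.rand(3), k=1)      # single point, k=1: scalar distance and index
d2, i2 = tree.query(np.random.rand(5, 3), k=1)   # batch of points: arrays
print(np.ndim(i1), i2.shape)                     # 0 and (5,)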