Example #1
def get_rdf_A_B(types, atoms, listTypeName, listTypeNum, rMax, nBins = 200):
    #get index of element types of interest
    typeIndex=[]
    for iType in types:
        for j in range(len(listTypeName)):
            if iType == listTypeName[j]:
                typeIndex.append(j)
    typeAStart = 0
    typeAEnd = 0
    for i in range(len(listTypeNum)):
        if i < typeIndex[0]:
            typeAStart += listTypeNum[i]
    typeAEnd = typeAStart + listTypeNum[typeIndex[0]]
    #print(types[0],typeAStart,typeAEnd)
    typeBStart = 0
    typeBEnd = 0
    for i in range(len(listTypeNum)):
        if i < typeIndex[1]:
            typeBStart += listTypeNum[i]
    typeBEnd = typeBStart + listTypeNum[typeIndex[1]]
    #print(types[1],typeBStart,typeBEnd)

    atoms_A = atoms[typeAStart:typeAEnd]
    atoms_B = atoms[typeBStart:typeBEnd]
    atoms_new = atoms_A + atoms_B
    d = neighborlist.neighbor_list('d', atoms_new, rMax)
    dr = rMax/nBins
    edges = np.arange(0., rMax + 1.1 *dr, dr)
    h, binEdges = np.histogram(d, edges)
    rho = len(atoms_new) / atoms.get_volume() 
    factor = 4./3. * np.pi * rho * len(atoms_new)
    rdf = h / (factor * (binEdges[1:]**3 - binEdges[:-1]**3)) 

    plt.plot(binEdges[1:], rdf)
    plt.savefig('RDFADF/rdf_'+types[0]+'_'+types[1]+'.pdf')
    plt.close()

    peaks = (np.diff(np.sign(np.diff(rdf))) < 0).nonzero()[0] + 1 # local max
    firstPeakInd = np.argmax(rdf[peaks])
    firstPeak = binEdges[peaks[firstPeakInd]]
    #peaks_2 = np.delete(peaks, firstPeakInd)
    #secondPeakInd = np.argmax(rdf[peaks_2])
    #secondPeak = binEdges[peaks_2[secondPeakInd]]
    print("first peak of rdf: %12.8f" % firstPeak)
    #print("second peak of rdf: %12.8f" % secondPeak)
    #cutoff = (firstPeak + secondPeak)/2.0
    cutoff = firstPeak*1.2
    print("first NN cutoff set to: %12.8f" % cutoff)

    #calculate CN
    NBList = neighborlist.neighbor_list('ij', atoms_new, cutoff)
    nnn = np.bincount(NBList[0])  # number of nearest neighbors
    typeA_CN = np.mean(nnn[:len(atoms_A)])
    typeB_CN = np.mean(nnn[len(atoms_A):])

    print("CN of %s : %8.6f" % (types[0], typeA_CN))
    print("CN of %s : %8.6f" % (types[1], typeB_CN))
    return cutoff
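
A minimal usage sketch for get_rdf_A_B, assuming the structure is read with ase.io.read, that atoms of each element are stored contiguously in the order given by listTypeName/listTypeNum (the layout the slicing above relies on), and that an RDFADF/ output directory exists; the file name and element lists are placeholders:

from ase.io import read

atoms = read('POSCAR')            # placeholder structure file
listTypeName = ['Si', 'O']        # element order in the file
listTypeNum = [64, 128]           # number of atoms of each element
cutoff = get_rdf_A_B(['Si', 'O'], atoms, listTypeName, listTypeNum, rMax=6.0)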
Example #2
    def water_O_idx(self):
        # guess the O indices of the water oxygens
        i = neighbor_list('i', self.poses[0], {('O', 'H'): 1.3})
        j = neighbor_list('j', self.poses[0], {('O', 'H'): 1.3})
        cn = np.bincount(i)

        H2O_pair_list = []
        Ow_idx = np.where(cn == 2)[0]
        np.savetxt(os.path.join(os.path.dirname(self.xyz_file), "Ow_idx.dat"),
                   Ow_idx,
                   fmt='%d')
        return Ow_idx
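Example #3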
def test_single_site_crystal_large_cutoff(crystal, ase_env, torch_env):
    cutoff = 2.0
    ase_env.cutoff = cutoff
    torch_env.cutoff = cutoff

    idx_i, idx_j, idx_S, dist = neighbor_list("ijSd",
                                              crystal,
                                              ase_env.cutoff,
                                              self_interaction=False)

    nbh_ase, offsets_ase = ase_env.get_environment(crystal)
    nbh_torch, offsets_torch = torch_env.get_environment(crystal)

    # get number of neighbors from index vector
    n_nbh = (np.unique(
        np.hstack((idx_i, np.arange(crystal.get_global_number_of_atoms()))),
        return_counts=True,
    )[1] - 1)

    # get number of neighbors from nbh matrix
    n_nbh_ase_env = np.sum(nbh_ase >= 0, 1)
    n_nbh_torch_env = np.sum(nbh_torch >= 0, 1)

    # Compare the returned indices
    nbh_ref = idx_j.reshape(crystal.get_global_number_of_atoms(), -1)
    sorted_nbh_ref = np.sort(nbh_ref, axis=-1)
    sorted_nbh_ase = np.sort(nbh_ase, axis=-1)
    sorted_nbh_torch = np.sort(nbh_torch, axis=-1)

    assert n_nbh.shape == n_nbh_ase_env.shape == n_nbh_torch_env.shape
    assert np.allclose(n_nbh, n_nbh_ase_env)
    assert np.allclose(n_nbh, n_nbh_torch_env)
    assert np.allclose(sorted_nbh_ref, sorted_nbh_ase)
    assert np.allclose(sorted_nbh_ref, sorted_nbh_torch)
    assert np.allclose(sorted_nbh_ase, sorted_nbh_torch)
Example #4
def test_clist_nl():
    """Cell list neighbor test
    Compare with ASE implementation
    """
    from ase.build import bulk
    from ase.neighborlist import neighbor_list
    from pinn.layers import cell_list_nl

    to_test = [bulk('Cu'), bulk('Mg'), bulk('Fe')]
    ind, coord, cell = [], [], []
    for i, a in enumerate(to_test):
        ind.append([[i]] * len(a))
        coord.append(a.positions)
        cell.append(a.cell)

    with tf.Graph().as_default():
        tensors = {
            'ind_1': tf.constant(np.concatenate(ind, axis=0), tf.int32),
            'coord': tf.constant(np.concatenate(coord, axis=0), tf.float32),
            'cell': tf.constant(np.stack(cell, axis=0), tf.float32)
        }
        nl = cell_list_nl(tensors, rc=10)
        with tf.Session() as sess:
            dist_pinn = sess.run(nl['dist'])

    dist_ase = []
    for a in to_test:
        dist_ase.append(neighbor_list('d', a, 10))
    dist_ase = np.concatenate(dist_ase, 0)
    assert np.all(np.abs(np.sort(dist_ase) - np.sort(dist_pinn)) < 1e-4)
Example #5
    def get_environment(self, atoms, grid=None):
        if grid is not None:
            raise NotImplementedError

        n_atoms = atoms.get_global_number_of_atoms()
        idx_i, idx_j, idx_S = neighbor_list(
            "ijS", atoms, self.cutoff, self_interaction=False
        )
        if idx_i.shape[0] > 0:
            uidx, n_nbh = np.unique(idx_i, return_counts=True)
            n_max_nbh = np.max(n_nbh)

            n_nbh = np.tile(n_nbh[:, np.newaxis], (1, n_max_nbh))
            nbh_range = np.tile(
                np.arange(n_max_nbh, dtype=int)[np.newaxis], (n_nbh.shape[0], 1)
            )

            mask = np.zeros((n_atoms, np.max(n_max_nbh)), dtype=bool)
            mask[uidx, :] = nbh_range < n_nbh
            neighborhood_idx = -np.ones((n_atoms, np.max(n_max_nbh)), dtype=np.float32)
            neighborhood_idx[mask] = idx_j

            offset = np.zeros((n_atoms, np.max(n_max_nbh), 3), dtype=np.float32)
            offset[mask] = idx_S
        else:
            neighborhood_idx = -np.ones((n_atoms, 1), dtype=np.float32)
            offset = np.zeros((n_atoms, 1, 3), dtype=np.float32)

        return neighborhood_idx, offset
Example #6
 def _get_all_bonds(self):
     bonds_all = {}
     group1 = []
     group2 = []
     for potential in self.bond_potentials:
         for bond, params in self.potentials[potential].items():
             if bond in bonds_all:
                 raise Exception("Error: Bond names have to be unique!")
             group1 = [
                 a.index for a in self.atoms
                 if a.symbol in params["creation"]["group1"]
             ]
             group2 = [
                 a.index for a in self.atoms
                 if a.symbol in params["creation"]["group2"]
             ]
             neighbors = neighbor_list("ijd", self.atoms,
                                       params["creation"]["distance"][1])
             distances = params["creation"]["distance"]
             bonds = self._generate_bonds(group1, group2, neighbors,
                                          distances[0], distances[1])
             bonds_all[bond] = bonds
     print(list(bonds_all.keys()), len(list(bonds_all.values())[0]),
           len(list(bonds_all.values())[1]))
     return bonds_all
Example #7
def get_lists(atoms):

    # build neighbor list
    nl = []
    for i in range(len(atoms)):
        nl.append([])

    tokens_i, tokens_j = neighbor_list('ij', atoms, cutoff_table)
    for i in range(len(tokens_i)):
        nl[tokens_i[i]].append(tokens_j[i])

    o = open('bonds.dat', 'w')
    # build bond list
    bond_list = []
    for i in range(len(nl)):
        if len(nl[i]) > 0:
            ai = i
            for j in range(len(nl[i])):
                aj = nl[i][j]
                if ai < aj:
                    bond_length = atoms.get_distance(ai, aj, mic=True)
                    bond_length = bond_length / 10.0
                    bond_list.append([ai, aj, bond_length])
                    o.write('%d %d %.4f\n' % (ai, aj, bond_length))
    o.close()
    n_bond = len(bond_list)
    print(n_bond, "bond terms")

    o = open('angles.dat', 'w')
    angle_list = []
    # build angle list
    for i in range(len(nl)):
        if len(nl[i]) > 1:
            aj = i
            for j in range(len(nl[i])):
                ai = nl[i][j]
                for k in nl[i][j + 1:]:
                    ak = k
                    angle = atoms.get_angle(ai, aj, ak, mic=True)
                    angle_list.append([ai, aj, ak, angle])
                    o.write('%d %d %d %.4f\n' % (ai, aj, ak, angle))
    o.close()
    n_angle = len(angle_list)
    print(n_angle, "angle terms")

    dihedral_list = []
    # build dihedral list
    for i in bond_list:
        dj, dk = i[0], i[1]
        for j in nl[dj]:
            if j != dk:
                di = j
                for k in nl[dk]:
                    if k != dj:
                        dl = k
                        angle = atoms.get_dihedral(di, dj, dk, dl, mic=True)
                        dihedral_list.append([di, dj, dk, dl, angle])
    n_dihedral = len(dihedral_list)
    print(n_dihedral, "dihedral terms")
    return nl, bond_list, angle_list, dihedral_list
Example #8
    def update_nbr_list(self):
        """Update neighbor list and the periodic reindexing
            for the given Atoms object, using self.cutoff as the
            maximum distance at which atoms are considered interacting.

        Returns:
            nbr_list (torch.LongTensor)
            offsets (torch.Tensor)
            nxyz (torch.Tensor)
        """

        if self.nbr_torch:
            edge_from, edge_to, offsets = torch_nbr_list(self, self.cutoff, device=self.device, directed=self.directed)
            nbr_list = torch.LongTensor(np.stack([edge_from, edge_to], axis=1))
        else:
            self.wrap()
            edge_from, edge_to, offsets = neighbor_list('ijS', self, self.cutoff) 
            nbr_list = torch.LongTensor(np.stack([edge_from, edge_to], axis=1))
            offsets = torch.Tensor(offsets)[nbr_list[:, 1] > nbr_list[:, 0]].detach().cpu().numpy()
            nbr_list = nbr_list[nbr_list[:, 1] > nbr_list[:, 0]]

        # torch.sparse has no storage yet.
        #offsets = offsets.dot(self.get_cell())
        offsets = sparsify_array(offsets.dot(self.get_cell()))

        self.nbr_list = nbr_list
        self.offsets = offsets

        return nbr_list, offsets
Example #9
    def calculate(self, atoms=None, properties=['energy'],
                  system_changes=['positions', 'numbers', 'cell',
                                  'pbc', 'charges', 'magmoms']):
        Calculator.calculate(self, atoms, properties, system_changes)
        epsilon = self.parameters.epsilon
        rho0 = self.parameters.rho0
        r0 = self.parameters.r0
        rcut1 = self.parameters.rcut1 * r0
        rcut2 = self.parameters.rcut2 * r0

        forces = np.zeros((len(self.atoms), 3))
        preF = - 2 * epsilon * rho0 / r0

        i, j, d, D = neighbor_list('ijdD', atoms, rcut2)
        dhat = (D / d[:, None]).T

        expf = np.exp(rho0 * (1.0 - d / r0))
        fc = fcut(d, rcut1, rcut2)

        E = epsilon * expf * (expf - 2)
        dE = preF * expf * (expf - 1) * dhat
        energy = 0.5 * (E * fc).sum()

        F = (dE * fc + E * fcut_d(d, rcut1, rcut2) * dhat).T
        for dim in range(3):
            forces[:, dim] = np.bincount(i, weights=F[:, dim],
                                         minlength=len(atoms))

        self.results['energy'] = energy
        self.results['forces'] = forces
Example #10
def cluster_size(fname):
    traj = TrajectoryReader(fname)
    sizes = []
    num_neigh = 6
    for atoms in traj:
        first, second = neighbor_list('ij', atoms, cutoff=3.0)
        cluster_size = 0
        n_count = [0 for _ in range(len(first))]
        for f, s in zip(first, second):
            if atoms[f].symbol == 'Mg' and atoms[s].symbol == 'Si':
                n_count[f] += 1
            elif atoms[f].symbol == 'Si' and atoms[s].symbol == 'Mg':
                n_count[f] += 1
        cluster_size = sum(1 for n in n_count if n >= num_neigh)
        sizes.append(cluster_size)

    fig = plt.figure(figsize=(4, 3))
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(sizes)
    ax.set_xlabel("Time")
    ax.set_ylabel("Fraction of solutes in cluster")
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    fig.tight_layout()
    fig.savefig("data/kmc_growth.png", dpi=200)
    plt.show()
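Example #11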
def mgsi(initial=None, comment=None):
    if initial is None:
        pureAl = bulk('Al', cubic=True) * (N, N, N)
        pureAl = wrap_and_sort_by_position(pureAl)
        tags, _ = get_layers(pureAl, (0, 0, 1))
        success = False
        while not success:
            print("Generating new initial precipitate")
            initial, success = create_precipitate(pureAl.copy(), tags,
                                                  normal_radius)

    db = dataset.connect(DB_NAME)
    ref_tbl = db[REF]
    vac_tbl = db[VAC]
    sol_tbl = db[SOL]
    comment_tbl = db[COMMENT]

    runID = hex(random.randint(0, 2**32 - 1))
    if comment is not None:
        comment_tbl.insert({'runID': runID, 'comment': comment})

    eci = {}
    with open("data/almgsix_normal_ce.json", 'r') as infile:
        data = json.load(infile)
        eci = data['eci']

    settings = settingsFromJSON("data/settings_almgsiX_voldev.json")
    settings.basis_func_type = "binary_linear"
    atoms = attach_calculator(settings, initial.copy(), eci)
    atoms.numbers = initial.numbers
    ref_energy = atoms.get_potential_energy()
    ref_tbl.insert({'runID': runID, 'energy': ref_energy})

    for atom in atoms:
        if atom.symbol in ['Mg', 'Si']:
            pos = atom.position
            sol_tbl.insert({
                'runID': runID,
                'symbol': atom.symbol,
                'X': pos[0],
                'Y': pos[1],
                'Z': pos[2]
            })

    ref, neighbors = neighbor_list('ij', atoms, 3.0)
    for ref, nb in zip(ref, neighbors):
        if atoms[ref].symbol == 'Al' and atoms[nb].symbol in ['Mg', 'Si']:
            atoms[ref].symbol = 'X'
            e = atoms.get_potential_energy()
            atoms[ref].symbol = 'Al'
            pos = atoms[ref].position
            vac_tbl.insert({
                'runID': runID,
                'X': pos[0],
                'Y': pos[1],
                'Z': pos[2],
                'energy': e
            })
    atoms = removeAl(atoms)
Example #12
def save_nb_list(file_in,cutoff):
    atoms = read(file_in)
    nblist=neighborlist.neighbor_list('ijD', atoms, cutoff)
    nnn = np.bincount(nblist[0])  # number of nearest neighbors
    import matplotlib.pyplot as plt
    plt.hist(nnn, bins='auto')
    plt.savefig('cn_dist.eps')
    return nnn, nblist[0], nblist[1], nblist[2]
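Example #13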
def compute_sorted_distances_with_pbc(feature_parameters, frames,
                                      center_atom_id_mask):
    print("Warning: Sorted distances only works for one species")
    cutoff = feature_parameters["interaction_cutoff"]

    max_neighbors = 0
    for frame_id in range(len(frames)):
        frame = frames[frame_id]
        atom_i = neighborlist.neighbor_list('i', frame, cutoff)
        for atom_id in center_atom_id_mask[frame_id]:
            max_neighbors = max(max_neighbors,
                                np.max(np.sum(atom_i == atom_id)))

    padding_type = feature_parameters["padding_type"]
    if padding_type == "max":
        max_distance = cutoff
        #for frame_id in range(len(frames)):
        #    frame = frames[frame_id]
        #    atom_i, distances = neighborlist.neighbor_list('id', frame, cutoff)
        #    for atom_id in center_atom_id_mask[frame_id]:
        #       max_distance = max(max_distance, np.max(distances[atom_i == atom_id]))
        padding = max_distance
    elif padding_type == "zero":
        padding = 0
    else:
        raise ValueError("padding_type " + padding_type + " is not available.")

    sorted_distances = np.ones(
        (sum([len(env_idx)
              for env_idx in center_atom_id_mask]), max_neighbors)) * padding

    k = 0
    for frame_id in range(len(frames)):
        frame = frames[frame_id]
        atom_i, distances = neighborlist.neighbor_list('id', frame, cutoff)
        for atom_id in center_atom_id_mask[frame_id]:
            # extracting the distances for this environment assumes atom_i is sorted
            sorted_distances_env = np.sort(distances[atom_i == atom_id])
            sorted_distances[
                k, :len(sorted_distances_env)] = sorted_distances_env
            k += 1
    return sorted_distances
Example #14
def compute_rdf_cross(atom, cutoff_=5.0, nbins_=501, sym1_ = "O", sym2_ = "Si"):
    """
    returns the auto- and cross-RDFs for the two species, plus bin_edges
    """
    syms = np.asarray(atom.get_chemical_symbols())
    idx_o = np.where(syms == sym1_)[0]
    idx_s = np.where(syms == sym2_)[0]

    bins = np.linspace(0.0, cutoff_ + 2, nbins_)

    i, j, d, D = neighbor_list('ijdD', atom, cutoff=cutoff_, self_interaction=False)

    rdf_list = []

    _auxset = set(idx_o)
    d_list = []
    for idx in _auxset:
        a = list(j[np.where(i == idx)[0]])
        b = list(d[np.where(i == idx)[0]])
        d_list = d_list + [b[a.index(x)] for x in a if x in _auxset]

    h, bin_edges = np.histogram(d_list, bins)

    rdf = h / len(idx_o)
    rdf_list.append(rdf)

    _auxset = set(idx_s)
    d_list = []
    for idx in _auxset:
        a = list(j[np.where(i == idx)[0]])
        b = list(d[np.where(i == idx)[0]])
        d_list = d_list + [b[a.index(x)] for x in a if x in _auxset]

    h, bin_edges = np.histogram(d_list, bins)

    rdf = h / len(idx_s)
    rdf_list.append(rdf)

    _auxset = set(idx_s)
    d_list = []
    for idx in idx_o:
        a = list(j[np.where(i == idx)[0]])
        b = list(d[np.where(i == idx)[0]])
        d_list = d_list + [b[a.index(x)] for x in a if x in _auxset]

    h, bin_edges = np.histogram(d_list, bins)

    rdf = h / len(idx_o)
    rdf_list.append(rdf)

    return rdf_list, bin_edges
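
The rdf_list returned above holds, in order, the sym1_-sym1_, sym2_-sym2_, and sym1_-sym2_ histograms (O-O, Si-Si, O-Si with the default arguments). A minimal plotting sketch, assuming matplotlib is available and the input structure actually contains both species:

import matplotlib.pyplot as plt

rdf_list, bin_edges = compute_rdf_cross(atom)      # 'atom' is an ase.Atoms structure
centers = 0.5 * (bin_edges[1:] + bin_edges[:-1])   # bin midpoints
for label, rdf in zip(['O-O', 'Si-Si', 'O-Si'], rdf_list):
    plt.plot(centers, rdf, label=label)
plt.xlabel('r (Å)')
plt.legend()
plt.show()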
Example #15
def get_lists(atoms):

    # build neighbor list
    nl = []
    for i in range(len(atoms)):
        nl.append([])

    tokens_i, tokens_j = neighbor_list('ij', atoms, cutoff_table)
    for i in range(len(tokens_i)):
        nl[tokens_i[i]].append(tokens_j[i])

    # build bond list
    bond_list = []
    for i in range(len(nl)):
        if len(nl[i]) > 0:
            ai = i
            for j in range(len(nl[i])):
                aj = nl[i][j]
                if ai < aj:
                    bond_list.append([ai, aj])
    n_bond = len(bond_list)
    print(n_bond, "bond terms")

    angle_list = []
    # build angle list
    for i in range(len(nl)):
        if len(nl[i]) > 1:
            aj = i
            for j in range(len(nl[i])):
                ai = nl[i][j]
                for k in nl[i][j + 1:]:
                    ak = k
                    angle_list.append([ai, aj, ak])
    n_angle = len(angle_list)
    print(n_angle, "angle terms")

    dihedral_list = []
    # build dihedral list
    for i in bond_list:
        dj, dk = i[0], i[1]
        for j in nl[dj]:
            if j != dk:
                di = j
                for k in nl[dk]:
                    if k != dj:
                        dl = k
                        dihedral_list.append([di, dj, dk, dl])
    n_dihedral = len(dihedral_list)
    print(n_dihedral, "dihedral terms")
    return nl, bond_list, angle_list, dihedral_list
Example #16
    def update_atoms_nbr_list(self, cutoff):

        Atoms_list = self.get_list_atoms()

        intra_nbr_list = []
        for i, atoms in enumerate(Atoms_list):
            edge_from, edge_to = neighbor_list('ij', atoms, cutoff)
            nbr_list = torch.LongTensor(np.stack([edge_from, edge_to], axis=1))
            nbr_list = nbr_list[nbr_list[:, 1] > nbr_list[:, 0]]
            intra_nbr_list.append(
                self.props['num_subgraphs'][: i].sum() + nbr_list)

        intra_nbr_list = torch.cat(intra_nbr_list)
        self.atoms_nbr_list = intra_nbr_list
Example #17
def covariance_matrix(A, B):
    neighbors_A = neighbor_list('ijDd', A, opt.rc)
    neighbors_B = neighbor_list('ijDd', B, opt.rc)

    i_list = set(neighbors_A[0])
    if opt.nocenter is not None:
        i_list = [i for i in i_list if A[i].number not in opt.nocenter]
    j_list = set(neighbors_B[0])
    if opt.nocenter is not None:
        j_list = [j for j in j_list if B[j].number not in opt.nocenter]
    size = (len(i_list), len(j_list))

    covariance_matrix = np.zeros(size)
    for ei, i in enumerate(i_list):
        environment_i = environment(neighbors_A, i)

        for ej, j in enumerate(j_list):
            environment_j = environment(neighbors_B, j)
            covariance_matrix[ei,
                              ej] = environment_kernel(environment_i,
                                                       environment_j)

    return covariance_matrix
Example #18
def compute_rdf(atom, cutoff =5.0, nbins =1001):
    """
    returns RDF and bin_edges
    """
    N = len(atom)

    bins = np.linspace(0.0, cutoff + 2, nbins)

    i, j, d, D = neighbor_list('ijdD', atom, cutoff=cutoff, self_interaction=False)

    h, bin_edges = np.histogram(d, bins)

    rdf = h / N

    return rdf, bin_edges
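
Note that the "rdf" returned above is a raw pair count per atom, not a normalized g(r). A minimal sketch of the usual normalization by number density and shell volume, assuming a periodic cell so that atom.get_volume() is defined (the same normalization used in Example #1):

import numpy as np

rdf_raw, bin_edges = compute_rdf(atom)  # 'atom' is a periodic ase.Atoms structure
rho = len(atom) / atom.get_volume()     # number density
shell_volumes = 4.0 / 3.0 * np.pi * (bin_edges[1:]**3 - bin_edges[:-1]**3)
g_r = rdf_raw / (rho * shell_volumes)   # normalized radial distribution function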
Example #19
def check_atoms_too_close(atoms):
    # (Empty atoms with neighbor_list is buggy in ASE-3.16.0)
    if not len(atoms):
        return

    # Skip test for numpy < 1.13.0 due to absence np.divmod:
    if not hasattr(np, 'divmod'):
        return

    from ase.neighborlist import neighbor_list
    from ase.data import covalent_radii
    radii = covalent_radii[atoms.numbers] * 0.01
    dists = neighbor_list('d', atoms, radii)
    if len(dists):
        raise AtomsTooClose('Atoms are too close, e.g. {} Å'.format(dists[0]))
Example #20
def test_single_site_crystal_large_cutoff(crystal, ase_env):
    ase_env.cutoff = 0.7
    idx_i, idx_j, idx_S, dist = neighbor_list('ijSd', crystal, ase_env.cutoff,
                                              self_interaction=False)

    nbh_ase, offsets_ase = ase_env.get_environment(0, crystal)

    # get number of neighbors from index vector
    n_nbh = np.unique(np.hstack((idx_i, np.arange(crystal.get_number_of_atoms()))), return_counts=True)[1]-1

    # get number of neighbors from nbh matrix
    n_nbh_env = np.sum(nbh_ase >= 0, 1)

    assert n_nbh.shape == n_nbh_env.shape
    assert np.allclose(n_nbh, n_nbh_env)
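Example #21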
def nl_cutoff_cov_vdw(sim_box, cut_off, vdw_cut_off=1.0, cov_cut_off=1.0):
    overlap_vdwr_sphere = np.array([vdw_radii[atomic_numbers[x]]
                                    for x in sim_box.get_chemical_symbols()])
    overlap_covr_sphere = np.array([covalent_radii[atomic_numbers[x]]
                                    for x in sim_box.get_chemical_symbols()])
    overlap_sphere = cut_off * \
        (vdw_cut_off * overlap_vdwr_sphere +
         cov_cut_off * overlap_covr_sphere)/2

    # for atoms without vdwr
    overlap_sphere[np.isnan(overlap_vdwr_sphere)
                   ] = cut_off * cov_cut_off * overlap_covr_sphere[np.isnan(overlap_vdwr_sphere)]

    i, j = neighbor_list('ij', sim_box, overlap_sphere, self_interaction=False)
    return i, j
Example #22
def get_nij_and_nijk(atoms, rc, angular=False):
    ilist, jlist = neighbor_list('ij', atoms, cutoff=rc)
    nij = len(ilist)
    if angular:
        nl = {}
        for i, atomi in enumerate(ilist):
            if atomi not in nl:
                nl[atomi] = []
            nl[atomi].append(jlist[i])
        nijk = 0
        for atomi, nlist in nl.items():
            n = len(nlist)
            nijk += n * (n - 1) // 2  # unordered pairs of neighbors of atom i
    else:
        nijk = 0
    return nij, nijk
Example #23
    def update_system_nbr_list(self, cutoff, exclude_atoms_nbr_list=True):
        """Update undirected neighbor list and the periodic reindexing
            for the given Atoms object.

        Args:
            cutoff (float): maximum cutoff for which atoms are
                considered interacting.

        Returns:
            nbr_list (torch.LongTensor)
            offsets (torch.Tensor)
            nxyz (torch.Tensor)
        """
        if self.nbr_torch:
            edge_from, edge_to, offsets = torch_nbr_list(self,
                                                         cutoff,
                                                         device=self.device)
            nbr_list = torch.LongTensor(np.stack([edge_from, edge_to], axis=1))
        else:
            edge_from, edge_to, offsets = neighbor_list('ijS', self, cutoff)
            nbr_list = torch.LongTensor(np.stack([edge_from, edge_to], axis=1))
            offsets = torch.Tensor(offsets)[nbr_list[:, 1] > nbr_list[:, 0]]
            nbr_list = nbr_list[nbr_list[:, 1] > nbr_list[:, 0]]

        if exclude_atoms_nbr_list:
            offsets_mat = torch.zeros(self.get_number_of_atoms(),
                                      self.get_number_of_atoms(), 3)
            nbr_list_mat = torch.zeros(self.get_number_of_atoms(),
                                       self.get_number_of_atoms()).to(
                                           torch.long)
            atom_nbr_list_mat = torch.zeros(self.get_number_of_atoms(),
                                            self.get_number_of_atoms()).to(
                                                torch.long)

            offsets_mat[nbr_list[:, 0], nbr_list[:, 1]] = offsets
            nbr_list_mat[nbr_list[:, 0], nbr_list[:, 1]] = 1
            atom_nbr_list_mat[self.atoms_nbr_list[:, 0],
                              self.atoms_nbr_list[:, 1]] = 1

            nbr_list_mat = nbr_list_mat - atom_nbr_list_mat
            nbr_list = nbr_list_mat.nonzero()
            offsets = offsets_mat[nbr_list[:, 0], nbr_list[:, 1], :]

        self.nbr_list = nbr_list

        self.offsets = sparsify_array(
            offsets.matmul(torch.Tensor(self.get_cell())))
Example #24
def compute_rdf_subset(atoms, indexes, cutoff=6.0, nbins=201):
    """
    returns the RDF computed over a subset of atoms, plus bin_edges
    """
    i, j, d, D = neighbor_list('ijdD', atoms, cutoff=cutoff, self_interaction=False)
    dlist = []
    for idx in indexes:
        dlist.append(d[np.where(i == idx)[0]])
    dlist = np.concatenate(dlist, axis=0)
    bins = np.linspace(0.0, cutoff+2, nbins)

    N = len(indexes)
    h, bin_edges = np.histogram(dlist, bins)

    rdf = h / N

    return rdf, bin_edges
Example #25
    def __init__(self, atoms, cutoff=4.0, elements=[]):
        from ase.neighborlist import neighbor_list
        self.atoms = atoms
        self.cutoff = cutoff
        self.elements = elements
        first_indx, second_indx, self.dist = neighbor_list(
            "ijd", atoms, cutoff)

        # neighbors
        self.neighbors = [[] for _ in range(len(self.atoms))]
        for i1, i2 in zip(first_indx, second_indx):
            self.neighbors[i1].append(i2)

        # Count how many pairs inside cutoff
        self.num_pairs = self.num_pairs_brute_force()
        self.avg_num_pairs = 0
        self.num_calls = 0
        self.symbols = [atom.symbol for atom in self.atoms]
Example #26
def fingerprint(images, cutoff, etas, Rc, elements, cutoff_function=False):
    for count, atoms in enumerate(images):
        species = sorted(list(set(atoms.get_chemical_symbols())))
        N_atoms = len(atoms)
        N_species = len(species)
        N_etas = len(etas)
        neighbors = neighbor_list('ijdD', atoms, cutoff / 2 * np.ones(N_atoms))

        for i, atom in enumerate(atoms):
            if atom.symbol in elements:
                fingerprint = np.zeros([3, N_species, N_etas])
                for j, element in enumerate(species):
                    for k, (eta, R) in enumerate(zip(etas, Rc)):
                        indices = np.argwhere(neighbors[0] == atom.index)
                        neighbor_indices = neighbors[1][
                            indices]  #get indices of neighbor atoms
                        for index, neighbor_index in zip(
                                indices, neighbor_indices):
                            if atoms[neighbor_index[0]].symbol == element:
                                Rij = neighbors[2][index[
                                    0]]  #distance from central atom to neighbor
                                rij = neighbors[3][index[
                                    0]]  #distances vector from central atom to neighbor

                                if cutoff_function:
                                    fingerprint[:, j, k] += rij / Rij * np.exp(
                                        -((Rij - R) / eta)**2) * 0.5 * (
                                            np.cos(np.pi * Rij / cutoff) + 1)
                                else:
                                    fingerprint[:, j,
                                                k] += rij / Rij * np.exp(-(
                                                    (Rij - R) / eta)**2)

                fingerprint = fingerprint.reshape(3, N_species * N_etas)

                try:
                    fingerprints = np.concatenate((fingerprints, fingerprint),
                                                  axis=0)
                except UnboundLocalError:  #fingerprints not defined in very first iteration
                    fingerprints = fingerprint
            else:
                continue

    return fingerprints
Example #27
def get_neighbours(atoms,
                   r_cut,
                   self_interaction=False,
                   neighbor_list=neighbor_list):
    """Return a list of pairs of atoms within a given distance of each other.

    Uses ase.neighborlist.neighbor_list to compute neighbors.

    Args:
        atoms: ase.atoms object to calculate neighbours for
        r_cut: cutoff radius (float). Pairs of atoms are considered neighbours
            if they are within a distance r_cut of each other (note that this
            is double the parameter used in the ASE's neighborlist module)
        neighbor_list: function (optional). Optionally replace the built-in
            ASE neighbour list with an alternative with the same call
            signature, e.g. `matscipy.neighbours.neighbour_list`.

    Returns: a tuple (i_list, j_list, d_list, fixed_atoms):
        i_list, j_list: i and j indices of each neighbour pair
        d_list: absolute distance between the corresponding pair
        fixed_atoms: indices of any fixed atoms
    """

    if isinstance(atoms, Filter):
        atoms = atoms.atoms

    i_list, j_list, d_list = neighbor_list('ijd', atoms, r_cut)

    # filter out self-interactions (across PBC)
    if not self_interaction:
        mask = i_list != j_list
        i_list = i_list[mask]
        j_list = j_list[mask]
        d_list = d_list[mask]

    # filter out bonds where 1st atom (i) in pair is fixed
    fixed_atoms = []
    for constraint in atoms.constraints:
        if isinstance(constraint, FixAtoms):
            fixed_atoms.extend(list(constraint.index))

    return i_list, j_list, d_list, fixed_atoms
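
A short usage sketch for get_neighbours, assuming an ordinary periodic ase.Atoms object; the cutoff value is only illustrative, and the filtering of pairs whose first atom is fixed (mentioned in the comment above but left to the caller) is shown explicitly here:

import numpy as np
from ase.build import bulk

atoms = bulk('Cu', cubic=True)
i_list, j_list, d_list, fixed = get_neighbours(atoms, r_cut=3.0)
keep = ~np.isin(i_list, fixed)          # drop pairs starting on a fixed atom
i_list, j_list, d_list = i_list[keep], j_list[keep], d_list[keep]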
Example #28
def get_g2_map(atoms: Atoms,
               rc: float,
               nij_max: int,
               interactions: list,
               vap: VirtualAtomMap,
               offsets: np.ndarray,
               for_prediction=False,
               print_time=False):
    if for_prediction:
        iaxis = 0
    else:
        iaxis = 1
    g2_map = np.zeros((nij_max, iaxis + 2), dtype=np.int32)
    tlist = np.zeros(nij_max, dtype=np.int32)
    symbols = atoms.get_chemical_symbols()
    tic = time.time()
    ilist, jlist, n1 = neighbor_list('ijS', atoms, rc)
    if print_time:
        print(f"* ASE neighbor time: {time.time() - tic}")
    nij = len(ilist)
    tlist.fill(0)
    for i in range(nij):
        symboli = symbols[ilist[i]]
        symbolj = symbols[jlist[i]]
        tlist[i] = interactions.index('{}{}'.format(symboli, symbolj))
    ilist = np.pad(ilist + 1, (0, nij_max - nij), 'constant')
    jlist = np.pad(jlist + 1, (0, nij_max - nij), 'constant')
    n1 = np.pad(n1, ((0, nij_max - nij), (0, 0)), 'constant')
    n1 = n1.astype(np.float32)
    for count in range(len(ilist)):
        if ilist[count] == 0:
            break
        ilist[count] = vap.index_map[ilist[count]]
        jlist[count] = vap.index_map[jlist[count]]
    g2_map[:, iaxis + 0] = ilist
    g2_map[:, iaxis + 1] = offsets[tlist]
    return {
        "g2.v2g_map": g2_map,
        "g2.ilist": ilist,
        "g2.jlist": jlist,
        "g2.shift": n1
    }
Example #29
    def compute_neighbors(self, cutoff):
        if self._last_cutoff == cutoff:
            return

        self._pairs = []

        nl_result = neighborlist.neighbor_list("ijdD", self._atoms, cutoff)
        for (i, j, d, D) in zip(*nl_result):
            if j < i:
                # we want a half neighbor list, so drop all duplicated
                # neighbors
                continue
            self._pairs.append((i, j, d, D))

        self._pairs_by_center = []
        for _ in range(self.size()):
            self._pairs_by_center.append([])

        for (i, j, d, D) in self._pairs:
            self._pairs_by_center[i].append((i, j, d, D))
            self._pairs_by_center[j].append((i, j, d, D))
Example #30
def get_BOList(listTypeName, listTypeNum, atoms, cutoff):
    NBList = neighborlist.neighbor_list('ijDd', atoms, cutoff)
    nnn = np.bincount(NBList[0])  # number of nearest neighbors
    MType = 'Si'
    typeIndex=[]
    for j in range(len(listTypeName)):
        if MType == listTypeName[j]:
            typeIndex.append(j)
    MTypeStart = 0
    MTypeEnd = 0
    for i in range(len(listTypeNum)):
        if i < typeIndex[0]:
            MTypeStart += listTypeNum[i]
    MTypeEnd = MTypeStart + listTypeNum[typeIndex[0]]
    OType = 'O'
    typeIndex=[]
    for j in range(len(listTypeName)):
        if 'O' == listTypeName[j]:
            typeIndex.append(j)
    OTypeStart = 0
    OTypeEnd = 0
    for i in range(len(listTypeNum)):
        if i < typeIndex[0]:
            OTypeStart += listTypeNum[i]
    OTypeEnd = OTypeStart + listTypeNum[typeIndex[0]]
    OList = np.arange(OTypeStart, OTypeEnd)
    OIndexList = ((NBList[1] >= OTypeStart) &\
                  (NBList[1] < OTypeEnd)).nonzero()[0]
    BOList = []
    for i in OList:
        if nnn[i] >=2:
            currentIndex = (NBList[0] == i).nonzero()[0]
            count = 0
            for j in (currentIndex):
                if (NBList[1][j] >= MTypeStart) and (NBList[1][j] < MTypeEnd):
                    count += 1
            if count == 2:
                BOList.append(i)
    NBOList = list(set(OList) - set(BOList))
    return BOList, NBOList