def adjust_positions(self, atoms, new):
    p1, p2 = atoms.positions[self.indices]
    d, p = find_mic(np.array([p2 - p1]), atoms._cell, atoms._pbc)
    q1, q2 = new[self.indices]
    d, q = find_mic(np.array([q2 - q1]), atoms._cell, atoms._pbc)
    d *= 0.5 * (p - q) / q
    new[self.indices] = (q1 - d[0], q2 + d[0])

def check_closest_to_parent(positions, centroid_positions, all_centroid_positions,
                            cell, pbc, eps):
    """
    Checks which centroid the image is closest to, then measures whether or not
    that closest centroid is sufficiently close to the image's parent centroid.

    Args:
        positions (numpy.ndarray): Atomic positions of this image.
        centroid_positions (numpy.ndarray): The positions of the image's centroid.
        all_centroid_positions (list/numpy.ndarray): A list of positions for all
            centroids in the string.
        cell (numpy.ndarray): The 3x3 cell vectors for pbcs.
        pbc (numpy.ndarray): Three booleans declaring which dimensions have
            periodic boundary conditions for finding the minimum distance
            convention.
        eps (float): The maximum distance between the closest centroid and the
            parent centroid to be considered a match (i.e. no recentering
            necessary).

    Returns:
        (bool): Whether the image is closest to its own parent centroid.
    """
    distances = [
        np.linalg.norm(find_mic(c_pos - positions, cell, pbc)[0])
        for c_pos in all_centroid_positions
    ]
    closest_centroid_positions = all_centroid_positions[np.argmin(distances)]
    match_distance = np.linalg.norm(
        find_mic(closest_centroid_positions - centroid_positions, cell, pbc)[0])
    return match_distance < eps

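# Hedged usage sketch (not part of the original source): a single-atom image
# in a 10 Angstrom cubic cell checked against two centroids; the 0.5 Angstrom
# eps is arbitrary.  Assumes np and ase.geometry.find_mic are imported as in
# the snippets above.
toy_cell = np.eye(3) * 10.0
toy_pbc = np.array([True, True, True])
image_positions = np.array([[1.0, 0.0, 0.0]])
parent_centroid = np.array([[0.9, 0.0, 0.0]])
all_centroids = [parent_centroid, np.array([[5.0, 0.0, 0.0]])]
print(check_closest_to_parent(image_positions, parent_centroid,
                              all_centroids, toy_cell, toy_pbc, eps=0.5))  # -> True
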
def fit_raw(energies, forces, positions, cell=None, pbc=None): """Calculates parameters for fitting images to a band, as for a NEB plot.""" E = energies F = forces R = positions E = np.array(E) - E[0] n = len(E) Efit = np.empty((n - 1) * 20 + 1) Sfit = np.empty((n - 1) * 20 + 1) s = [0] dR = np.zeros_like(R) for i in range(n): if i < n - 1: dR[i] = R[i + 1] - R[i] if cell is not None and pbc is not None: dR[i], _ = find_mic(dR[i], cell, pbc) s.append(s[i] + np.sqrt((dR[i]**2).sum())) else: dR[i] = R[i] - R[i - 1] if cell is not None and pbc is not None: dR[i], _ = find_mic(dR[i], cell, pbc) lines = [] dEds0 = None for i in range(n): d = dR[i] if i == 0: ds = 0.5 * s[1] elif i == n - 1: ds = 0.5 * (s[-1] - s[-2]) else: ds = 0.25 * (s[i + 1] - s[i - 1]) d = d / np.sqrt((d**2).sum()) dEds = -(F[i] * d).sum() x = np.linspace(s[i] - ds, s[i] + ds, 3) y = E[i] + dEds * (x - s[i]) lines.append((x, y)) if i > 0: s0 = s[i - 1] s1 = s[i] x = np.linspace(s0, s1, 20, endpoint=False) c = np.linalg.solve(np.array([(1, s0, s0**2, s0**3), (1, s1, s1**2, s1**3), (0, 1, 2 * s0, 3 * s0**2), (0, 1, 2 * s1, 3 * s1**2)]), np.array([E[i - 1], E[i], dEds0, dEds])) y = c[0] + x * (c[1] + x * (c[2] + x * c[3])) Sfit[(i - 1) * 20:i * 20] = x Efit[(i - 1) * 20:i * 20] = y dEds0 = dEds Sfit[-1] = s[-1] Efit[-1] = E[-1] return ForceFit(s, E, Sfit, Efit, lines)
def test_numpy_array():
    # Tests Issue #787
    atoms = FaceCenteredCubic(size=[1, 1, 1], symbol='Cu', latticeconstant=2,
                              pbc=True)
    find_mic(atoms.positions, np.array(atoms.cell), pbc=True)

def fit0(E, F, R, cell=None, pbc=None):
    """Constructs curve parameters from the NEB images."""
    E = np.array(E) - E[0]
    n = len(E)
    Efit = np.empty((n - 1) * 20 + 1)
    Sfit = np.empty((n - 1) * 20 + 1)

    s = [0]
    dR = np.zeros_like(R)
    for i in range(n):
        if i < n - 1:
            dR[i] = R[i + 1] - R[i]
            if cell is not None and pbc is not None:
                dR[i], _ = find_mic(dR[i], cell, pbc)
            s.append(s[i] + sqrt((dR[i]**2).sum()))
        else:
            dR[i] = R[i] - R[i - 1]
            if cell is not None and pbc is not None:
                dR[i], _ = find_mic(dR[i], cell, pbc)

    lines = []
    dEds0 = None
    for i in range(n):
        d = dR[i]
        if i == 0:
            ds = 0.5 * s[1]
        elif i == n - 1:
            ds = 0.5 * (s[-1] - s[-2])
        else:
            ds = 0.25 * (s[i + 1] - s[i - 1])

        d = d / sqrt((d**2).sum())
        dEds = -(F[i] * d).sum()
        x = np.linspace(s[i] - ds, s[i] + ds, 3)
        y = E[i] + dEds * (x - s[i])
        lines.append((x, y))

        if i > 0:
            s0 = s[i - 1]
            s1 = s[i]
            x = np.linspace(s0, s1, 20, endpoint=False)
            c = np.linalg.solve(np.array([(1, s0, s0**2, s0**3),
                                          (1, s1, s1**2, s1**3),
                                          (0, 1, 2 * s0, 3 * s0**2),
                                          (0, 1, 2 * s1, 3 * s1**2)]),
                                np.array([E[i - 1], E[i], dEds0, dEds]))
            y = c[0] + x * (c[1] + x * (c[2] + x * c[3]))
            Sfit[(i - 1) * 20:i * 20] = x
            Efit[(i - 1) * 20:i * 20] = y

        dEds0 = dEds

    Sfit[-1] = s[-1]
    Efit[-1] = E[-1]
    return s, E, Sfit, Efit, lines

def adjust_forces(self, atoms, forces):
    d = np.subtract.reduce(atoms.positions[self.indices])
    d, p = find_mic(np.array([d]), atoms._cell, atoms._pbc)
    d = d[0]
    d *= 0.5 * np.dot(np.subtract.reduce(forces[self.indices]), d) / p**2
    self.constraint_force = d
    forces[self.indices] += (-d, d)

def command(self, centroids_pos_list, cell, pbc):
    # How long is the piecewise parameterized path to begin with?
    lengths = self._find_lengths(centroids_pos_list, cell, pbc)
    length_tot = lengths[-1]
    length_per_frame = length_tot / (len(centroids_pos_list) - 1)

    # Find new positions for the re-parameterized jobs
    new_positions = [centroids_pos_list[0]]
    for n_left, cent in enumerate(centroids_pos_list[1:-1]):
        n = n_left + 1
        length_target = n * length_per_frame

        # Find the last index not in excess of the target length
        try:
            all_not_over = np.argwhere(lengths < length_target)
            highest_not_over = np.amax(all_not_over)
        except ValueError:  # If all_not_over is empty
            highest_not_over = 0

        # Interpolate from the last position not in excess
        start = centroids_pos_list[highest_not_over]
        end = centroids_pos_list[highest_not_over + 1]
        disp = find_mic(end - start, cell, pbc)[0]
        interp_dir = disp / np.linalg.norm(disp)
        interp_mag = length_target - lengths[highest_not_over]

        new_positions.append(start + interp_mag * interp_dir)
    new_positions.append(centroids_pos_list[-1])

    # Apply the new positions all at once
    centroids_pos_list = new_positions

    return {'centroids_pos_list': centroids_pos_list}

def get_closest_centroid_index(positions, all_centroid_positions, cell, pbc):
    distances = [
        np.linalg.norm(find_mic(c_pos - positions, cell, pbc)[0])
        for c_pos in all_centroid_positions
    ]
    return np.argmin(distances)

def set_positions(self, new, **kwargs):
    # First, adjust positions (due to fixed bond lengths):
    old = self.atoms.get_positions()
    oldcell = self.atoms.get_cell()
    masses = self.atoms.get_masses()
    for i in range(self.maxiter):
        converged = True
        for j, ab in enumerate(self.pairs):
            a = ab[0]
            b = ab[1]
            cd = self.bondlengths[j]
            r0 = old[a] - old[b]
            d0 = find_mic([r0], oldcell, self.atoms._pbc)[0][0]
            d1 = new[a] - new[b] - r0 + d0
            m = 1 / (1 / masses[a] + 1 / masses[b])
            x = 0.5 * (cd**2 - np.dot(d1, d1)) / np.dot(d0, d1)
            if abs(x) > self.tolerance or np.isnan(x) or np.isinf(x):
                new[a] += x * m / masses[a] * d0
                new[b] -= x * m / masses[b] * d0
                converged = False
        if converged:
            break
    else:
        raise RuntimeError('Did not converge')

    natoms = len(self.atoms)
    self.deform_grad = new[natoms:] / self.cell_factor
    current_cell = self.atoms.get_cell()
    new_cell = np.dot(self.orig_cell, self.deform_grad.T)
    scaled_pos = np.dot(new[:natoms], np.linalg.inv(current_cell))
    self.atom_positions[:] = new[:natoms]
    self.atoms.set_positions(np.dot(scaled_pos, new_cell), **kwargs)
    self.atoms.set_positions(self.atom_positions, **kwargs)
    self.atom_positions = self.atoms.get_positions()  # obsolete?
    self.atoms.set_cell(new_cell, scale_atoms=False)

def midpoint_points(x_1, y_1, z_1, x_2, y_2, z_2, meshobject):
    """
    Finds the midpoint between two points (defined in cartesian coordinates),
    using the minimum image convention.

    Args:
        x_1: float
            x coordinate of point 1.
        y_1: float
            y coordinate of point 1.
        z_1: float
            z coordinate of point 1.
        x_2: float
            x coordinate of point 2.
        y_2: float
            y coordinate of point 2.
        z_2: float
            z coordinate of point 2.
        meshobject: Mesh object
            Object storing meshgrid and PBC conditions

    Returns:
        midpoint: numpy array (,3)
            x, y and z coordinates of the midpoint between points 1 and 2.
    """
    import numpy as np
    from ase.geometry import find_mic

    vec1 = np.array([x_1, y_1, z_1])
    vec2 = np.array([x_2, y_2, z_2])
    # find_mic returns (minimum-image vector(s), their lengths); keep the vector
    mic_shift = find_mic(vec2 - vec1, cell=meshobject.Cell.array)[0]
    midpoint = vec1 + mic_shift / 2

    return midpoint

def get_descriptor(centres, xyz, species, nmax, lmax, rcut, gdens):
    coords = xyz.get_positions()
    ans = xyz.get_atomic_numbers()
    nspecies = len(species)
    common = [species.index(j) for j in sorted(list(set(ans)))]
    # materialise the (species index, atomic number) pairs so they can be
    # reused for every centre
    common = list(zip(common, sorted(list(set(ans)))))
    cell = xyz.get_cell()
    if cell.sum() == 0.0:
        pbc = False
    else:
        pbc = xyz.get_pbc()
    centind = [i for i, j in enumerate(ans) if j in centres]
    Nsoap = get_Nsoap(species, nmax, lmax)
    Ncenter = len(centind)
    desclist = np.ones((Ncenter, Nsoap))
    for l, centre in enumerate(centind):
        f = np.zeros((nspecies, nmax + 1, lmax + 1, lmax + 1), dtype=complex)
        dr = find_mic(coords - coords[centre], cell=cell, pbc=pbc)[0]
        # compute density expansion
        for i, spec in common:
            labels = np.where(ans == spec)[0]
            for j in labels:
                rij, cost, phi = rconvert(dr[j])
                if rij >= rcut:
                    continue
                f[i] += gdens(rij, cost, phi) * cutoff(rij, rcut)
        # compute SOAP descriptor
        desc = np.zeros((nspecies, nspecies, nmax + 1, nmax + 1, lmax + 1))
        counter = 0
        for i in range(nspecies):
            for j in range(i, nspecies):
                desc[i, j] = power_spectrum(nmax, lmax, f[i], f[j])
                desc[j, i] = desc[i, j].transpose(1, 0, 2)
        desclist[l] = desc.flatten()
    return desclist

def check_result(atoms, result):
    # check that permutation maps species onto like species
    assert (atoms.numbers == atoms.numbers[result.permutation]).all()

    # check rmsd
    delta = result.atoms.get_positions() - atoms.get_positions()
    _, x = find_mic(delta, cell=atoms.cell)
    assert_allclose(np.sqrt(np.mean(x**2)), result.rmsd, atol=TOL)

    # check inversion manually
    inverted = atoms[result.permutation]
    inverted.positions = -inverted.positions + 2 * result.axis
    inverted.wrap(eps=0)

    delta = result.atoms.get_positions() - inverted.get_positions()
    _, x = find_mic(delta, cell=atoms.cell)
    assert_allclose(np.sqrt(np.mean(x**2)), result.rmsd, atol=TOL)

def command(self, mixing_fraction, centroids_pos_list, running_average_list,
            cell, pbc):
    centroids_pos_list = np.array(centroids_pos_list)
    for i, cent in enumerate(centroids_pos_list):
        disp = find_mic(running_average_list[i] - cent, cell, pbc)[0]
        update = mixing_fraction * disp
        centroids_pos_list[i] += update
    return {'centroids_pos_list': centroids_pos_list}

def get_forces(self, apply_constraint=False):
    atoms_forces = self.atoms.get_forces()

    # Now, adjust forces:
    constraint_forces = -atoms_forces
    old = self.atoms.get_positions()
    oldcell = self.atoms.get_cell()
    masses = self.atoms.get_masses()
    for i in range(self.maxiter):
        converged = True
        for j, ab in enumerate(self.pairs):
            a = ab[0]
            b = ab[1]
            cd = self.bondlengths[j]
            d = old[a] - old[b]
            d = find_mic([d], oldcell, self.atoms._pbc)[0][0]
            dv = atoms_forces[a] / masses[a] - atoms_forces[b] / masses[b]
            m = 1 / (1 / masses[a] + 1 / masses[b])
            x = -np.dot(dv, d) / cd**2
            if abs(x) > self.tolerance or np.isnan(x) or np.isinf(x):
                atoms_forces[a] += x * m * d
                atoms_forces[b] -= x * m * d
                converged = False
        if converged:
            break
    else:
        raise RuntimeError('Did not converge')
    constraint_forces += atoms_forces

    stress = self.atoms.get_stress()
    volume = self.atoms.get_volume()
    virial = -volume * voigt_6_to_full_3x3_stress(stress)
    atoms_forces = np.dot(atoms_forces, self.deform_grad)
    dg_inv = np.linalg.inv(self.deform_grad)
    virial = np.dot(virial, dg_inv.T)

    if self.hydrostatic_strain:
        vtr = virial.trace()
        virial = np.diag([vtr / 3.0, vtr / 3.0, vtr / 3.0])

    # Zero out components corresponding to fixed lattice elements
    if (self.mask != 1.0).any():
        virial *= self.mask

    if self.constant_volume:
        vtr = virial.trace()
        np.fill_diagonal(virial, np.diag(virial) - vtr / 3.0)

    natoms = len(self.atoms)
    forces = np.zeros((natoms + 3, 3))
    forces[:natoms] = atoms_forces
    forces[natoms:] = virial / self.cell_factor

    self.stress = -full_3x3_to_voigt_6_stress(virial) / volume
    return forces

def _calculate_harm(self, atoms):
    cell = atoms.get_cell()
    pbc = atoms.get_pbc()
    vectors = atoms.get_positions() - self.positions
    e = 0.
    f = np.zeros((self.N, 3))
    for i, v in enumerate(vectors):
        v, d = find_mic([v], cell, pbc)
        e += 0.5 * self.k * (d**2)
        f[i] = -self.k * v
    return e, f

def interpolate(images, mic=False):
    """Given a list of images, linearly interpolate the positions of the
    interior images."""
    pos1 = images[0].get_positions()
    pos2 = images[-1].get_positions()
    d = pos2 - pos1
    if mic:
        d = find_mic(d, images[0].get_cell(), images[0].pbc)[0]
    d /= (len(images) - 1.0)
    for i in range(1, len(images) - 1):
        images[i].set_positions(pos1 + i * d)

def command(self, structure_initial, structure_final, n_images):
    pos_i = structure_initial.positions
    pos_f = structure_final.positions
    cell = structure_initial.cell
    pbc = structure_initial.pbc

    displacement = find_mic(pos_f - pos_i, cell, pbc)[0]

    interpolated_positions = []
    for n, mix in enumerate(np.linspace(0, 1, n_images)):
        interpolated_positions += [pos_i + (mix * displacement)]
    return {'interpolated_positions': interpolated_positions}

def fit_raw(energies, forces, positions, cell=None, pbc=None): """Calculates parameters for fitting images to a band, as for a NEB plot.""" energies = np.array(energies) - energies[0] n_images = len(energies) fit_energies = np.empty((n_images - 1) * 20 + 1) fit_path = np.empty((n_images - 1) * 20 + 1) path = [0] for i in range(n_images - 1): dR = positions[i + 1] - positions[i] if cell is not None and pbc is not None: dR, _ = find_mic(dR, cell, pbc) path.append(path[i] + np.sqrt((dR**2).sum())) lines = [] # tangent lines lastslope = None for i in range(n_images): if i == 0: direction = positions[i + 1] - positions[i] dpath = 0.5 * path[1] elif i == n_images - 1: direction = positions[-1] - positions[-2] dpath = 0.5 * (path[-1] - path[-2]) else: direction = positions[i + 1] - positions[i - 1] dpath = 0.25 * (path[i + 1] - path[i - 1]) direction /= np.linalg.norm(direction) slope = -(forces[i] * direction).sum() x = np.linspace(path[i] - dpath, path[i] + dpath, 3) y = energies[i] + slope * (x - path[i]) lines.append((x, y)) if i > 0: s0 = path[i - 1] s1 = path[i] x = np.linspace(s0, s1, 20, endpoint=False) c = np.linalg.solve( np.array([(1, s0, s0**2, s0**3), (1, s1, s1**2, s1**3), (0, 1, 2 * s0, 3 * s0**2), (0, 1, 2 * s1, 3 * s1**2)]), np.array([energies[i - 1], energies[i], lastslope, slope])) y = c[0] + x * (c[1] + x * (c[2] + x * c[3])) fit_path[(i - 1) * 20:i * 20] = x fit_energies[(i - 1) * 20:i * 20] = y lastslope = slope fit_path[-1] = path[-1] fit_energies[-1] = energies[-1] return ForceFit(path, energies, fit_path, fit_energies, lines)
def symmetrized_layout(rmsd, atoms, inverted):
    ps = atoms.get_positions()
    v, _ = find_mic(inverted.get_positions() - ps, atoms.cell)
    meanpos = ps + v / 2

    component_rmsd = np.sqrt(np.sum((ps - meanpos)**2) / len(atoms))
    assert abs(rmsd - component_rmsd) < 1E-12

    symmetrized = Atoms(positions=meanpos, numbers=atoms.numbers,
                        cell=atoms.cell, pbc=atoms.pbc)
    symmetrized.set_cell(symmetrized.cell, scale_atoms=False)
    symmetrized.wrap(eps=0)
    return symmetrized

def interpolate(images, mic=False):
    """Given a list of images, linearly interpolate the positions of the
    interior images."""
    pos1 = images[0].get_positions()
    pos2 = images[-1].get_positions()
    d = pos2 - pos1
    if mic:
        d = find_mic(d, images[0].get_cell(), images[0].pbc)[0]
    d /= (len(images) - 1.0)
    for i in range(1, len(images) - 1):
        images[i].set_positions(pos1 + i * d)
        # Parallel NEB with Jacapo needs this:
        try:
            images[i].get_calculator().set_atoms(images[i])
        except AttributeError:
            pass

def command(self, reference_positions, cutoff_distance, positions, velocities,
            previous_positions, previous_velocities, pbc, cell):
    distance = np.linalg.norm(
        find_mic(reference_positions - positions, cell=cell, pbc=pbc)[0],
        axis=-1)
    is_at_home = (distance < cutoff_distance)[:, np.newaxis]
    is_away = 1 - is_at_home
    return {
        'positions': is_at_home * positions + is_away * previous_positions,
        'velocities': is_at_home * velocities + is_away * -previous_velocities,
        'reflected': is_away.astype(bool).flatten()
    }

def interpolate(self, initial=0, final=-1, mic=False):
    """Interpolate linearly between initial and final images."""
    if final < 0:
        final = self.nimages + final
    n = final - initial
    pos1 = self.images[initial].get_positions()
    pos2 = self.images[final].get_positions()
    dist = (pos2 - pos1)
    if mic:
        cell = self.images[initial].get_cell()
        assert((cell == self.images[final].get_cell()).all())
        pbc = self.images[initial].get_pbc()
        assert((pbc == self.images[final].get_pbc()).all())
        dist, D_len = find_mic(dist, cell, pbc)
    dist /= n
    for i in range(1, n):
        self.images[initial + i].set_positions(pos1 + i * dist)

def command(self, positions_list, running_average_list, relax_endpoints, cell,
            pbc):
    # On the first step, divide by 2 to average two positions
    self._divisor += 1
    # How much of the current step to mix into the average
    weight = 1. / self._divisor
    # Don't modify this input in place
    running_average_list = np.array(running_average_list)

    for i, pos in enumerate(positions_list):
        if (i == 0 or i == len(positions_list) - 1) and not relax_endpoints:
            continue
        else:
            disp = find_mic(pos - running_average_list[i], cell, pbc)[0]
            running_average_list[i] += weight * disp
    return {'running_average_list': running_average_list}

def safe_insertion_test(test_atoms, new_species, position):
    if len(test_atoms) > 0:
        safe = True
        vec = test_atoms.get_positions() - position
        vec_min_image, vec_min_image_mag = find_mic(vec,
                                                    cell=test_atoms.get_cell())
        symbols = test_atoms.get_chemical_symbols()
        existing_atom_index = 0
        while existing_atom_index < len(test_atoms) and safe:
            distance_cut = (hard_radii[new_species] +
                            hard_radii[symbols[existing_atom_index]])
            if vec_min_image_mag[existing_atom_index] < distance_cut:
                safe = False
            existing_atom_index += 1
        return safe
    else:
        return True

def check_components(atoms, result, tol=TOL):
    for reduced in result:
        assert (atoms.pbc == reduced.atoms.pbc).all()
        assert (np.bincount(reduced.components) == reduced.factor).all()

        x = atoms.numbers[np.argsort(reduced.components)]
        x = x.reshape((len(atoms) // reduced.factor, reduced.factor))
        assert (x == x[:, 0][:, None]).all()
        assert (x[:, 0] == reduced.atoms.numbers).all()
        assert (atoms.numbers == reduced.atoms.numbers[reduced.components]).all()

        # check supercell is correct
        supercell = make_supercell(reduced.atoms, np.linalg.inv(reduced.map))
        assert (supercell.pbc == atoms.pbc).all()
        assert_allclose(supercell.cell, atoms.cell, atol=tol)

        # check rmsd is correct
        comparator = CrystalComparator(atoms)
        indices = np.argsort(supercell.numbers, kind='merge')
        supercell = supercell[indices]
        supercell.wrap(eps=0)
        rmsd, permutation = comparator.calculate_rmsd(supercell.get_positions())
        assert_allclose(rmsd, reduced.rmsd, atol=tol)

        # check components are correct
        indices = np.argsort(reduced.components)
        check = atoms[indices]
        components = reduced.components[indices]
        check.set_cell(reduced.atoms.cell, scale_atoms=False)
        check.wrap(eps=0)
        ps = check.get_positions()
        parents = components * reduced.factor
        vmin, _ = find_mic(ps - ps[parents], check.cell, pbc=check.pbc)
        positions = ps[parents] + vmin

        m = len(atoms) // reduced.factor
        meanpos = np.mean(positions.reshape((m, reduced.factor, 3)), axis=1)
        rmsd_check = np.sqrt(np.mean((positions - meanpos[components])**2))
        assert_allclose(reduced.rmsd, rmsd_check, atol=tol)

def _find_lengths(a_list, cell, pbc):
    """
    Finds the cumulative distance from job to job.

    Args:
        a_list (list/numpy.ndarray): List of positions whose lengths are to be
            calculated.
        cell (numpy.ndarray): The cell of the structure.
        pbc (numpy.ndarray): Periodic boundary conditions of the structure.

    Returns:
        lengths (list): Lengths of the positions in the list.
    """
    length_cumulative = 0
    # First length is zero; all other lengths are relative to the first
    # position in the list
    lengths = [length_cumulative]
    for n_left, term in enumerate(a_list[1:]):
        disp = find_mic(term - a_list[n_left], cell, pbc)[0]
        length_cumulative += np.linalg.norm(disp)
        lengths.append(length_cumulative)
    return lengths

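# Hedged usage sketch (not part of the original source): cumulative path
# length along three collinear points in a 10 Angstrom cubic cell.  Assumes
# np and ase.geometry.find_mic are imported as in the snippets above.
toy_cell = np.eye(3) * 10.0
toy_pbc = np.array([True, True, True])
toy_points = [np.array([[0.0, 0.0, 0.0]]),
              np.array([[1.0, 0.0, 0.0]]),
              np.array([[2.0, 0.0, 0.0]])]
print(_find_lengths(toy_points, toy_cell, toy_pbc))  # -> [0, 1.0, 2.0]
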
def find_max_movement(atoms_initial, atoms_final):
    '''
    Given ase.Atoms objects, find the furthest distance that any single atom
    in a set of atoms traveled (in Angstroms).

    Args:
        atoms_initial   `ase.Atoms` of the structure in its initial state
        atoms_final     `ase.Atoms` of the structure in its final state

    Returns:
        max_movement    A float indicating the furthest movement of any single
                        atom before and after relaxation (in Angstroms)
    '''
    # Calculate the distances for each atom
    distances = atoms_final.positions - atoms_initial.positions

    # Reduce the distances in case atoms wrapped around (the minimum image
    # convention)
    _, movements = find_mic(distances, atoms_final.cell, atoms_final.pbc)
    max_movement = max(movements)

    return max_movement

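# Hedged usage sketch (not part of the original source): a copper bulk cell
# in which one atom is displaced by 0.1 Angstrom, so the expected maximum
# movement is ~0.1.  Assumes ASE is installed and find_mic is imported.
from ase.build import bulk

atoms_before = bulk('Cu', 'fcc', a=3.6, cubic=True)
atoms_after = atoms_before.copy()
atoms_after.positions[0] += [0.1, 0.0, 0.0]
print(find_max_movement(atoms_before, atoms_after))  # -> ~0.1
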
def interpolate(images, mic=False, interpolate_cell=False,
                use_scaled_coord=False):
    """Given a list of images, linearly interpolate the positions of the
    interior images.

    mic: bool
        Map movement into the unit cell by using the minimum image convention.
    interpolate_cell: bool
        Interpolate the three cell vectors linearly just like the atomic
        positions. Not implemented for NEB calculations!
    use_scaled_coord: bool
        Use scaled/internal/fractional coordinates instead of real ones for
        the interpolation. Not implemented for NEB calculations!
    """
    if use_scaled_coord:
        pos1 = images[0].get_scaled_positions(wrap=mic)
        pos2 = images[-1].get_scaled_positions(wrap=mic)
    else:
        pos1 = images[0].get_positions()
        pos2 = images[-1].get_positions()
    d = pos2 - pos1
    if not use_scaled_coord and mic:
        d = find_mic(d, images[0].get_cell(), images[0].pbc)[0]
    d /= (len(images) - 1.0)
    if interpolate_cell:
        cell1 = images[0].get_cell()
        cell2 = images[-1].get_cell()
        cell_diff = cell2 - cell1
        cell_diff /= (len(images) - 1.0)
    for i in range(1, len(images) - 1):
        # first the new cell, otherwise scaled positions are wrong
        if interpolate_cell:
            images[i].set_cell(cell1 + i * cell_diff)
        new_pos = pos1 + i * d
        if use_scaled_coord:
            images[i].set_scaled_positions(new_pos)
        else:
            images[i].set_positions(new_pos)

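# Hedged usage sketch (not part of the original source): five images between
# two endpoints of a small Al(111) slab, interpolated with the minimum image
# convention.  Assumes ASE is installed and find_mic is imported as above.
from ase.build import fcc111

initial_image = fcc111('Al', size=(2, 2, 1), vacuum=5.0)
final_image = initial_image.copy()
final_image.positions[0] += [0.5, 0.0, 0.0]
band = [initial_image] + [initial_image.copy() for _ in range(3)] + [final_image]
interpolate(band, mic=True)
print(band[2].positions[0] - band[0].positions[0])  # -> roughly [0.25, 0., 0.]
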
def mic(dr, cell, pbc=True):
    """
    Apply minimum image convention to an array of distance vectors.

    Parameters:

    dr : array_like
        Array of distance vectors.
    cell : array_like
        Simulation cell.
    pbc : array_like, optional
        Periodic boundary conditions in x-, y- and z-direction. Default is to
        assume periodic boundaries in all directions.

    Returns:

    dr : array
        Array of distance vectors, wrapped according to the minimum image
        convention.
    """
    dr, _ = find_mic(dr, cell, pbc)
    return dr

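# Hedged usage sketch (not part of the original source): wrapping a raw
# separation vector back into the minimum image convention for a 10 Angstrom
# cubic cell.  Assumes np and ase.geometry.find_mic are imported as above.
toy_cell = np.eye(3) * 10.0
raw_dr = np.array([[9.0, 0.0, 0.0]])
print(mic(raw_dr, toy_cell))  # -> approximately [[-1.0, 0.0, 0.0]]
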
def calculate(self, atoms, properties, system_changes):
    Calculator.calculate(self, atoms, properties, system_changes)
    P = atoms.get_positions()
    d = []
    D = []
    for p in P:
        Di = P - p
        if self.mic:
            Di, di = find_mic(Di, atoms.get_cell(), atoms.get_pbc())
        else:
            di = np.sqrt((Di**2).sum(1))
        d.append(di)
        D.append(Di)
    d = np.array(d)
    D = np.array(D)

    dd = d - self.target
    d.ravel()[::len(d) + 1] = 1  # avoid dividing by zero
    d4 = d**4
    e = 0.5 * (dd**2 / d4).sum()
    f = -2 * ((dd * (1 - 2 * dd / d) / d**5)[..., np.newaxis] * D).sum(0)
    self.results = {'energy': e, 'forces': f}

def get_forces(self):
    """Evaluate and return the forces."""
    images = self.images
    forces = np.empty(((self.nimages - 2), self.natoms, 3))
    energies = np.empty(self.nimages)

    if self.remove_rotation_and_translation:
        # Remove translation and rotation between
        # images before computing forces:
        for i in range(1, self.nimages):
            minimize_rotation_and_translation(images[i - 1], images[i])

    if self.method != 'aseneb':
        energies[0] = images[0].get_potential_energy()
        energies[-1] = images[-1].get_potential_energy()

    if not self.parallel:
        # Do all images - one at a time:
        for i in range(1, self.nimages - 1):
            energies[i] = images[i].get_potential_energy()
            forces[i - 1] = images[i].get_forces()
    elif self.world.size == 1:
        def run(image, energies, forces):
            energies[:] = image.get_potential_energy()
            forces[:] = image.get_forces()

        threads = [threading.Thread(target=run,
                                    args=(images[i],
                                          energies[i:i + 1],
                                          forces[i - 1:i]))
                   for i in range(1, self.nimages - 1)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Parallelize over images:
        i = self.world.rank * (self.nimages - 2) // self.world.size + 1
        try:
            energies[i] = images[i].get_potential_energy()
            forces[i - 1] = images[i].get_forces()
        except:
            # Make sure other images also fail:
            error = self.world.sum(1.0)
            raise
        else:
            error = self.world.sum(0.0)
            if error:
                raise RuntimeError('Parallel NEB failed!')

        for i in range(1, self.nimages - 1):
            root = (i - 1) * self.world.size // (self.nimages - 2)
            self.world.broadcast(energies[i:i + 1], root)
            self.world.broadcast(forces[i - 1], root)

    imax = 1 + np.argsort(energies[1:-1])[-1]
    self.emax = energies[imax]

    t1 = find_mic(images[1].get_positions() -
                  images[0].get_positions(),
                  images[0].get_cell(), images[0].pbc)[0]

    if self.method == 'eb':
        beeline = (images[self.nimages - 1].get_positions() -
                   images[0].get_positions())
        beelinelength = np.linalg.norm(beeline)
        eqlength = beelinelength / (self.nimages - 1)

    nt1 = np.linalg.norm(t1)

    for i in range(1, self.nimages - 1):
        t2 = find_mic(images[i + 1].get_positions() -
                      images[i].get_positions(),
                      images[i].get_cell(), images[i].pbc)[0]
        nt2 = np.linalg.norm(t2)

        if self.method == 'eb':
            # Tangents are bisections of spring-directions
            # (formula C8 of paper III)
            tangent = t1 / nt1 + t2 / nt2
            # Normalize the tangent vector
            tangent /= np.linalg.norm(tangent)
        elif self.method == 'improvedtangent':
            # Tangents are improved according to formulas 8, 9, 10,
            # and 11 of paper I.
            if energies[i + 1] > energies[i] > energies[i - 1]:
                tangent = t2.copy()
            elif energies[i + 1] < energies[i] < energies[i - 1]:
                tangent = t1.copy()
            else:
                deltavmax = max(abs(energies[i + 1] - energies[i]),
                                abs(energies[i - 1] - energies[i]))
                deltavmin = min(abs(energies[i + 1] - energies[i]),
                                abs(energies[i - 1] - energies[i]))
                if energies[i + 1] > energies[i - 1]:
                    tangent = t2 * deltavmax + t1 * deltavmin
                else:
                    tangent = t2 * deltavmin + t1 * deltavmax
            # Normalize the tangent vector
            tangent /= np.linalg.norm(tangent)
        else:
            if i < imax:
                tangent = t2
            elif i > imax:
                tangent = t1
            else:
                tangent = t1 + t2
        tt = np.vdot(tangent, tangent)

        f = forces[i - 1]
        ft = np.vdot(f, tangent)

        if i == imax and self.climb:
            # imax not affected by the spring forces. The full force
            # with component along the elastic band converted
            # (formula 5 of Paper II)
            if self.method == 'aseneb':
                f -= 2 * ft / tt * tangent
            else:
                f -= 2 * ft * tangent
        elif self.method == 'eb':
            f -= ft * tangent
            # Spring forces
            # (formula C1, C5, C6 and C7 of Paper III)
            f1 = -(nt1 - eqlength) * t1 / nt1 * self.k[i - 1]
            f2 = (nt2 - eqlength) * t2 / nt2 * self.k[i]
            if self.climb and abs(i - imax) == 1:
                deltavmax = max(abs(energies[i + 1] - energies[i]),
                                abs(energies[i - 1] - energies[i]))
                deltavmin = min(abs(energies[i + 1] - energies[i]),
                                abs(energies[i - 1] - energies[i]))
                f += (f1 + f2) * deltavmin / deltavmax
            else:
                f += f1 + f2
        elif self.method == 'improvedtangent':
            f -= ft * tangent
            # Improved parallel spring force (formula 12 of paper I)
            f += (nt2 * self.k[i] - nt1 * self.k[i - 1]) * tangent
        else:
            f -= ft / tt * tangent
            f -= np.vdot(t1 * self.k[i - 1] -
                         t2 * self.k[i], tangent) / tt * tangent

        t1 = t2
        nt1 = nt2

    return forces.reshape((-1, 3))