# Assumes the quippy 1.x (Python 2) API, with Atoms, farray, fzeros and frange
# imported at module level.
def del_atoms(x=None):
    """Remove one atom from every pair closer than rcut and write the pruned structure."""
    rcut = 2.0
    #x = Atoms('crack.xyz')
    if x is None:
        x = Atoms('1109337334_frac.xyz')
    x.set_cutoff(3.0)
    x.calc_connect()
    x.calc_dists()
    rem = []
    r = farray(0.0)   # unused in this routine
    u = fzeros(3)
    print len(x)
    for i in frange(x.n):
        for n in frange(x.n_neighbours(i)):
            j = x.neighbour(i, n, distance=3.0, diff=u)
            if x.distance_min_image(i, j) < rcut and j != i:
                rem.append(sorted([j, i]))
        if i % 10000 == 0:
            print i
    rem = list(set([a[0] for a in rem]))
    if len(rem) > 0:
        print rem
        x.remove_atoms(rem)
    else:
        print 'No duplicate atoms in list.'
    x.write('crack_nodup.xyz')
    return x
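# A minimal usage sketch for del_atoms (hypothetical: 'crack.xyz' below is a placeholder
# file name and is not part of the source above):
def example_del_atoms():
    from quippy import Atoms
    frac = Atoms('crack.xyz')      # structure that may contain near-duplicate atoms
    cleaned = del_atoms(frac)      # prunes duplicates and writes 'crack_nodup.xyz'
    print len(cleaned)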
def test_complex_array(self):
    d = Dictionary()
    d['complex'] = fzeros(10, dtype='D')
    a = FortranArray(d.get_array('complex'))
    a[:] = 1 + 2j
    self.assertEqual(a.dtype.kind, 'c')
    self.assertArrayAlmostEqual(a, d['complex'])
def _get_environments(self, atoms, coff=3.0, cotw=0.5, nmax=12, lmax=6, gs=0.5, cw=1.0):
    """
    Compute SOAP environments for each atom.

    Returns a tuple consisting of
      a) a dictionary giving the number of atoms of each species, and
      b) an array of partial power spectra of dimension
         (natoms, nspecies, nspecies, nmax**2*(lmax+1)).
    """
    env_list = []
    at = atoms.copy()
    # Set cutoff radius for each atom and build the neighbour list,
    # which the descriptor needs before it can be evaluated
    at.set_cutoff(coff)
    at.calc_connect()
    # Create dictionary giving number of atoms of a given species
    species = dict([(i, len(list(j))) for (i, j) in itertools.groupby(sorted(at.z))])
    # Specify species in format suitable for the quippy descriptor,
    # e.g. if the molecule contains hydrogen and carbon lspecies is
    # "n_species=2 species_Z={1 6}"
    lspecies = ("n_species=%d " % len(species.keys())) + \
               "species_Z={" + " ".join(map(str, sorted(species.keys()))) + "}"
    for sp in sorted(species.keys()):
        # Create descriptor for current species
        quippy_str = "soap central_weight=" + str(cw) + " covariance_sigma0=0.0" + \
                     " atom_sigma=" + str(gs) + " cutoff=" + str(coff) + \
                     " cutoff_transition_width=" + str(cotw) + " n_max=" + str(nmax) + \
                     " l_max=" + str(lmax) + ' ' + lspecies + ' Z=' + str(sp)
        #quippy_str = quippy_str + ' xml_version=0'
        desc = quippy.descriptors.Descriptor(quippy_str)
        # Create output array
        psp = quippy.fzeros((desc.dimensions(), desc.descriptor_sizes(at)[0]))
        # Compute power spectrum
        desc.calc(at, descriptor_out=psp)
        # Transpose such that each row of psp is the power spectrum of the
        # environment as seen from one atom of species sp.
        # Each row is a flattened array with "original" dimensions
        # (s1, n1, s2, n2, l), where
        #   s1 is the index of species alpha [0:nspecies-1]
        #   n1 is the index of the radial basis [0:nmax-1]
        #   s2 is the index of species beta [0:s1]
        #   n2 is the index of the radial basis, [0:nmax-1] if s2 < s1, else [0:n1]
        #   l  is the index of the spherical harmonics basis [0:lmax]
        # When s1 == s2 the power spectrum is half as long.
        # The call to convert reshapes the quippy format to
        # (nspecies, nspecies, nmax**2*(lmax+1))
        psp = np.array(psp.T)
        for soap in psp:
            env_list.append(convert(nmax, lmax, len(species.keys()), soap))
    # Convert list of 3d arrays to a single 4d array
    outarray = np.array(env_list)
    assert outarray.shape[0] == len(at.z)
    assert outarray.shape[0] == sum(species.values())
    return species, outarray
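# Hypothetical usage sketch for _get_environments. The enclosing class is not part of
# this excerpt, so `analyzer` below stands in for whatever instance provides the method:
def example_get_environments(analyzer, atoms, nmax=12, lmax=6):
    species, psps = analyzer._get_environments(atoms, nmax=nmax, lmax=lmax)
    nspecies = len(species)
    natoms = sum(species.values())
    # one partial power spectrum per atom and per ordered species pair
    assert psps.shape == (natoms, nspecies, nspecies, nmax**2 * (lmax + 1))
    return psps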
def test_logical_array(self):
    d = Dictionary()
    d['logical'] = fzeros(5, dtype='bool')
    a = FortranArray(d.get_array('logical'))
    a[:] = [True, False, False, True, True]
    self.assertEqual(a.dtype, dtype('int32'))  # Fortran logical represented as int32 internally
    self.assertArrayAlmostEqual(a, d['logical'])
def delete_atoms(self, grain=None, rcut=2.0):
    """
    Delete atoms below a certain distance threshold.

    Args:
      grain(:class:`quippy.Atoms`): Atoms object of the grain.
      rcut(float): Atom deletion criterion.

    Returns:
      Number of deleted atoms when operating on the stored grain (``grain is None``),
      otherwise the pruned :class:`quippy.Atoms` object.
    """
    io = ImeallIO()
    if grain is None:
        x = Atoms('{0}.xyz'.format(os.path.join(self.grain_dir, self.gbid)))
    else:
        x = Atoms(grain)
    x.set_cutoff(2.4)
    x.calc_connect()
    x.calc_dists()
    rem = []
    u = fzeros(3)
    for i in frange(x.n):
        for n in frange(x.n_neighbours(i)):
            j = x.neighbour(i, n, distance=3.0, diff=u)
            if x.distance_min_image(i, j) < rcut and j != i:
                rem.append(sorted([j, i]))
    rem = list(set([a[0] for a in rem]))
    if len(rem) > 0:
        x.remove_atoms(rem)
    else:
        print 'No duplicate atoms in list.'
    if grain is None:
        self.name = '{0}_d{1}'.format(self.gbid, str(rcut))
        self.subgrain_dir = io.make_dir(self.calc_dir, self.name)
        self.struct_file = self.gbid + '_' + 'n' + str(len(rem)) + 'd' + str(rcut)
        x.write('{0}.xyz'.format(os.path.join(self.subgrain_dir, self.struct_file)))
        return len(rem)
    else:
        return x
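# Hypothetical usage sketch for delete_atoms. The grain-boundary class this method belongs
# to is not shown in this excerpt; `gb` stands in for such an instance:
#   n_removed = gb.delete_atoms(rcut=1.8)                 # prune the stored grain, write result
#   pruned = gb.delete_atoms(grain='gb.xyz', rcut=1.8)    # prune an explicit file, get Atoms back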
# Module-level names assumed by this excerpt: UnitCellFilter, FIRE, write (ase.io) and
# SymmetrizedCalculator are imported/defined elsewhere in the original file.
def relax_atoms_cell(atoms, tol=1e-3, stol=None, method='lbfgs_precon', max_steps=100,
                     mask=None, traj_file=None, hydrostatic_strain=False,
                     constant_volume=False, precon_apply_positions=True,
                     precon_apply_cell=True, symmetrize=False, **kwargs):
    """Relax both the atomic positions and the unit cell of `atoms`, returning the relaxed Atoms."""
    import model
    #print "relax_atoms_cell using method", method
    if symmetrize:
        atoms.set_calculator(SymmetrizedCalculator(model.calculator, atoms))
    else:
        atoms.set_calculator(model.calculator)
    ## print "relax_atoms_cell initial e ", atoms.get_potential_energy()
    ## print "relax_atoms_cell initial f ", atoms.get_forces()
    ## print "relax_atoms_cell initial s ", atoms.get_stress()
    if hasattr(model, 'Optimizer'):
        method = 'model_optimizer'
    if method != 'cg_n':
        atoms = UnitCellFilter(atoms, mask=mask,
                               hydrostatic_strain=hydrostatic_strain,
                               constant_volume=constant_volume)
    if method.startswith('lbfgs') or method == 'fire' or method == 'cg_n':
        if method == 'cg_n':
            from quippy import Minim, fzeros
            atoms.info['Minim_Hydrostatic_Strain'] = hydrostatic_strain
            atoms.info['Minim_Constant_Volume'] = constant_volume
            if mask is not None:
                # entries set to 1.0 mark lattice components to hold fixed (1-based indexing)
                atoms.info['Minim_Lattice_Fix'] = fzeros((3, 3))
                if not mask[0]:
                    atoms.info['Minim_Lattice_Fix'][1, 1] = 1.0
                if not mask[1]:
                    atoms.info['Minim_Lattice_Fix'][2, 2] = 1.0
                if not mask[2]:
                    atoms.info['Minim_Lattice_Fix'][3, 3] = 1.0
                if not mask[3]:
                    atoms.info['Minim_Lattice_Fix'][1, 2] = 1.0
                    atoms.info['Minim_Lattice_Fix'][2, 1] = 1.0
                if not mask[4]:
                    atoms.info['Minim_Lattice_Fix'][2, 3] = 1.0
                    atoms.info['Minim_Lattice_Fix'][3, 2] = 1.0
                if not mask[5]:
                    atoms.info['Minim_Lattice_Fix'][1, 3] = 1.0
                    atoms.info['Minim_Lattice_Fix'][3, 1] = 1.0
            opt = Minim(atoms, relax_positions=True, relax_cell=True, method='cg_n')
        else:
            from ase.optimize.precon.precon import Exp
            from ase.optimize.precon.lbfgs import PreconLBFGS
            precon = None
            if method.endswith('precon'):
                precon = Exp(3.0, apply_positions=precon_apply_positions,
                             apply_cell=precon_apply_cell, recalc_mu=True)
            if method.startswith('lbfgs'):
                opt = PreconLBFGS(atoms, precon=precon, **kwargs)
            else:
                opt = FIRE(atoms, **kwargs)
        if traj_file is not None:
            traj = open(traj_file, 'w')

            def write_trajectory():
                try:
                    write(traj, atoms.atoms, format='extxyz')
                except:
                    # atoms may not be wrapped in a UnitCellFilter
                    write(traj, atoms, format='extxyz')
            opt.attach(write_trajectory)
        if method != 'cg_n' and isinstance(opt, PreconLBFGS):
            opt.run(tol, max_steps, smax=stol)
        else:
            opt.run(tol, max_steps)
        if traj_file is not None:
            traj.close()
    elif method == 'model_optimizer':
        opt = model.Optimizer(atoms.atoms)
        opt.run()
    else:
        raise ValueError('unknown method %s!' % method)
    if isinstance(atoms, UnitCellFilter):
        return atoms.atoms
    else:
        return atoms
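# Example driver (a hypothetical sketch: it assumes, as the function itself does, a `model`
# module exposing `model.calculator`, plus ASE for building the test structure):
def example_relax_cell():
    from ase.build import bulk
    si = bulk('Si', 'diamond', a=5.44, cubic=True)
    relaxed = relax_atoms_cell(si, tol=1e-3, method='lbfgs_precon',
                               traj_file='relax_traj.xyz')
    return relaxed.get_volume()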
def parse(self, fat, coff=5.0, cotw=0.5, nmax=4, lmax=3, gs=0.5, cw=1.0,
          nocenter=[], noatom=[], kit=None):
    """
    Takes a frame in the QUIPPY format and computes a list of its environments.
    """
    # removes atoms that are to be ignored
    at = fat.copy()
    nol = []
    for s in range(1, at.z.size + 1):
        if at.z[s] in noatom:
            nol.append(s)
    if len(nol) > 0:
        at.remove_atoms(nol)

    self.nmax = nmax
    self.lmax = lmax
    self.species = {}
    for z in at.z:
        if z in self.species:
            self.species[z] += 1
        else:
            self.species[z] = 1

    self.zspecies = self.species.keys()
    self.zspecies.sort()
    lspecies = 'n_species=' + str(len(self.zspecies)) + ' species_Z={ '
    for z in self.zspecies:
        lspecies = lspecies + str(z) + ' '
    lspecies = lspecies + '}'

    at.set_cutoff(coff)
    at.calc_connect()

    self.nenv = 0
    for sp in self.species:
        if sp in nocenter:
            self.species[sp] = 0
            continue  # Option to skip some environments

        # first computes the descriptors of species that are present
        desc = quippy.descriptors.Descriptor(
            "soap central_weight=" + str(cw) + " covariance_sigma0=0.0 atom_sigma=" + str(gs) +
            " cutoff=" + str(coff) + " cutoff_transition_width=" + str(cotw) +
            " n_max=" + str(nmax) + " l_max=" + str(lmax) + ' ' + lspecies + ' Z=' + str(sp))
        try:
            psp = np.asarray(desc.calc(at, desc.dimensions(), self.species[sp])).T
        except TypeError:
            psp = quippy.fzeros((desc.dimensions(), desc.descriptor_sizes(at)[0]))
            desc.calc(at, descriptor_out=psp)
            psp = np.array(psp.T)

        # now repartitions soaps in environment descriptors
        lenv = []
        for p in psp:
            nenv = environ(nmax, lmax, self.alchem)
            nenv.convert(sp, self.zspecies, p)
            lenv.append(nenv)
        self.env[sp] = lenv
        self.nenv += self.species[sp]

    # adds kit data
    if kit is None:
        kit = {}
    for sp in kit:
        if sp not in self.species:
            self.species[sp] = 0
            self.env[sp] = []
        for k in range(self.species[sp], kit[sp]):
            self.env[sp].append(environ(self.nmax, self.lmax, self.alchem, sp))
            self.nenv += 1
        self.species[sp] = kit[sp]

    self.zspecies = self.species.keys()
    self.zspecies.sort()

    # also compute the global (flattened) fingerprint
    self.globenv = environ(nmax, lmax, self.alchem)
    for k, se in self.env.items():
        for e in se:
            self.globenv.add(e)
    # divides by the number of atoms in the structure
    for sij in self.globenv.soaps:
        self.globenv.soaps[sij] *= 1.0 / self.nenv
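# Hypothetical usage sketch for parse. The class providing it (with `alchem`, `env`, etc.)
# is not part of this excerpt; `struct` stands in for such an instance:
#   struct.parse(frame, coff=5.0, nmax=8, lmax=6, noatom=[1])   # drop hydrogens entirely
#   print struct.nenv, sorted(struct.zspecies)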