Code Example #1
File: gaussian.py Project: puckvg/chemhelp
def calculate(atoms,
              coordinates,
              parameters=DEFAULT_PARAMETERS,
              label=None,
              write_only=True,
              n_threads=1,
              mem=1):
    """

    """

    if label is None:
        filename = "_tmp_" + "gaussian"
    else:
        filename = label

    comfile = filename + ".com"

    method = parameters["method"]
    basis = parameters["basis"]

    if parameters["opt"] is not None:
        calculator = GaussianCalculator(method=method,
                                        basis=basis,
                                        opt=parameters["opt"],
                                        Freq="()",
                                        label=filename)
    else:
        calculator = GaussianCalculator(method=method,
                                        basis=basis,
                                        label=filename)

    if parameters['force'] is None:
        del calculator.parameters['force']

    molecule = ase.Atoms(atoms, coordinates)
    molecule.set_calculator(calculator)

    calculator.write_input(molecule)

    if n_threads > 1:
        jstr = r"%nprocshared={:}\n%mem={:}GB".format(n_threads, mem)
        cmd = ["sed", "-i", "' 1 s/.*/" + jstr + r"\n&/'", comfile]
        cmd = " ".join(cmd)
        subprocess.call(cmd, shell=True)

    if write_only:
        return True

    # Calculate
    command = calculator.command
    command = command.replace("PREFIX", filename)
    run_gaussian(command)

    # calculator = molecule.get_calculator()
    # calculator.calculate()

    # molecule.get_potential_energy()

    help(calculator)

    # TODO Get All Properties
    properties = read_properties(filename + ".log")

    return properties
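A minimal usage sketch, not part of the original file: the water geometry, the "water" label and the thread/memory settings are illustrative, and DEFAULT_PARAMETERS is assumed to provide the 'method', 'basis', 'opt' and 'force' entries read above.

atoms = ["O", "H", "H"]
coordinates = [[0.00, 0.00, 0.12], [0.00, 0.76, -0.48], [0.00, -0.76, -0.48]]
# write_only=True only creates water.com; set it to False to also run Gaussian
calculate(atoms, coordinates, label="water", n_threads=4, mem=8, write_only=True)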
Code Example #2
File: build.py Project: SINGROUP/cluskit
    def get_unique_clusters(self, eAA,eAB,eBB,cEA,cEB, typeA = None, typeB = None, ntypeB = None, n_clus = 1):
        """Gets n_clus clusters all constructed with the given parameters. It uses the Clusterer
        class to generate the clusters. First, 5 times as many nanoclusters are created,
        then they are reduced to n_clus, keeping the most dissimilar structures.

        The Scaffold.descriptor_setup attribute is used for the similarity metric;
        the Scaffold.bond_matrix attribute defines the connectivity, and hence the
        interactions (eAA, eAB, eBB), used to find configurations consistent with the pseudo-energies.

        Args:
            eAA (float): pseudo-energy of A-A interaction
            eAB (float): pseudo-energy of A-B interaction
            eBB (float): pseudo-energy of B-B interaction
            cEA (float): pseudo-energy of segregation of A into the core.
            cEB (float): pseudo-energy of segregation of B into the core.
            typeA (int): element of type A in atomic number of PSE.
            typeB (int): element of type B in atomic number of PSE.
            ntypeB (int): number of atoms of type B in cluster. This argument controls the composition.
            n_clus (int): number of clusters to be returned.

        Returns:
            list :  Most dissimilar clusters (cluskit.Cluster objects) at the given Pseudo-energy
                    parameters.         
        """

        # get default values where needed.

        if not typeA:
            typeA = self.default_A
        if  not typeB:
            typeB = self.default_B
        if  not ntypeB:
            ntypeB = self.default_n_B


        atoms = self.ase_object
        bond_matrix = self.bond_matrix
        desc = self.descriptor_setup
        # making sure atomic numbers are adapted by descriptor
        desc.atomic_numbers = [typeA, typeB]

        final_atoms_list = []
        
        positions = atoms.get_positions()
        coreEnergies = [ cEA, cEB ]
        
        atoms_list = []
        for i in range(0,n_clus*5):
            cluster = Clusterer(bond_matrix, positions, ntypeB, eAA, eAB, eBB, com=None, coreEnergies=coreEnergies)
        
            kT = self.evolve_temperature
            nsteps =  self.evolve_n_steps

            cluster.Evolve(kT, nsteps)
            actual_types = cluster.atomTypes.copy()
            actual_types[actual_types == 0] = typeA
            actual_types[actual_types == 1] = typeB
        
            atoms.set_atomic_numbers(actual_types)
            new_atoms = ase.Atoms(numbers=actual_types, positions=positions)
            new_atoms.info = {"eAA" : eAA, "eAB" : eAB, "eBB" : eBB, "cEA" : cEA, "cEB" : cEB}
            atoms_list.append(new_atoms)


        x = desc.create(atoms_list, n_jobs = 1,  positions=None, verbose=False)

        ranks = cluskit.cluster._rank_fps(x, K = None, greedy =False)
        for i in range(0,n_clus):
            cluskit_atoms = Cluster(atoms_list[ranks[i]])
            final_atoms_list.append(cluskit_atoms)

        cluster.Reset()
        return final_atoms_list
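A hedged usage sketch, not from the original file: it assumes a configured cluskit Scaffold instance named scaffold (with descriptor_setup, bond_matrix, evolve_temperature and evolve_n_steps already set up); the pseudo-energies and the Ni/Pt element numbers (28/78) are purely illustrative.

clusters = scaffold.get_unique_clusters(-1.0, -1.2, -1.0, -0.5, -0.3,
                                        typeA=28, typeB=78, ntypeB=13, n_clus=3)
for cluster in clusters:
    print(len(cluster))   # each result wraps the evolved ase.Atoms configuration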
Code Example #3
    assert len(chemical_symbols) == coordinates.shape[0]
    atomic_numbers = {'H': 0, 'C': 6}
    mass = coordinates.new_tensor(
        [atomic_masses[atomic_numbers[s]] for s in chemical_symbols])
    inv_r = 1 / coordinates.norm(dim=1)
    potential_energies = -mass * inv_r
    return potential_energies.sum()


###############################################################################
# with this potential, we could define an ASE calculator:
calculator = md.Calculator(gravity)

###############################################################################
# Now let's create two atoms, initially at (1, 0, 0) and (2, 0, 0)
planets = ase.Atoms('CH', [[1, 0, 0], [2, 0, 0]])

###############################################################################
# For the purpose of demonstration, we make these two atoms' trajectories perfect
# circles in the XY plane. To do so, we need to carefully set the initial velocity.
# It is not hard to show that the required orbital speed is :math:`v = \sqrt{\frac{GM}{r}}`.
planets.set_velocities([[0, 1, 0], [0, 1 / math.sqrt(2), 0]])
planets.set_calculator(calculator)

###############################################################################
# Now we can start the dynamics:
X1 = []
Y1 = []
Z1 = []
X2 = []
Y2 = []
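###############################################################################
# (The original snippet is truncated here. Below is a sketch of how the
# propagation loop might continue, assuming ASE's VelocityVerlet integrator;
# the timestep and step count are illustrative, not from the original tutorial.)
from ase.md.verlet import VelocityVerlet

dyn = VelocityVerlet(planets, timestep=1e-3)
for _ in range(10000):
    dyn.run(1)
    (x1, y1, z1), (x2, y2, _) = planets.get_positions()
    X1.append(x1); Y1.append(y1); Z1.append(z1)
    X2.append(x2); Y2.append(y2)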
Code Example #4
import numpy
import ase
import tsase
import time

head = "%5s %16s" % ("Natom", "ljocl speedup")
print(head)
print("=" * len(head))

ljocl = tsase.calculators.ljocl()
lj = tsase.calculators.lj()

for N in range(10, 400, 20):
    a = ase.Atoms(["H"]*N, positions=numpy.random.normal(0, 1, (N,3)))
    a.center(100)
    b = a.copy()
    a.set_calculator(ljocl)
    b.set_calculator(lj)

    Q = 10
    tcl = []
    t = []
    for i in range(Q):
        r = numpy.random.normal(0, 1, (N,3))
        a.set_positions(r.copy())
        b.set_positions(r.copy())
        t0 = time.time()
        fa = a.get_forces()
        tcl.append(time.time() - t0)
        t0 = time.time()
        fb = b.get_forces()
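        # --- continuation sketch, not part of the original snippet: finish timing
        # --- the plain LJ calculator and print one table row (speedup = t_lj / t_ljocl)
        t.append(time.time() - t0)

    print("%5d %16.2f" % (N, numpy.mean(t) / numpy.mean(tcl)))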
Code Example #5
#!/usr/bin/env python
import ase
import numpy as np
import tsase
from random import random
from sys import argv

cell_size = 5.0

atoms = ase.Atoms(argv[1])
atoms.set_cell((cell_size, cell_size, cell_size))
atoms.set_positions(np.random.random((len(atoms), 3)) * 10.0)

tsase.io.write_con('matter1.con', atoms)

atoms.positions += random() * 3.0
atoms.rattle(stdev=.915, seed=int(random() * 2**30))
atoms.rotate_euler(center='COM',
                   phi=random() * np.pi * 2,
                   theta=random() * np.pi,
                   psi=random() * np.pi / 2)

tsase.io.write_con('matter2.con', atoms)
Code Example #6
    def write_conformer_file(self, conformer, include_rotors=True):
        """
        A method to write Arkane files for a single Conformer object

        Parameters:
        - conformer (Conformer): a Conformer object that you want to write an Arkane file for
        - include_rotors (bool): whether to include hindered-rotor sections; files are written
          under self.directory, which should contain a 'species/SMILES/' subdirectory

        Returns:
        - bool: True if the Arkane file was written, False if no lowest-energy conformer log exists
        """
        label = conformer.smiles

        if not os.path.exists(os.path.join(self.directory, "species", label, label + ".log")):
            logging.info("There is no lowest energy conformer file...")
            return False

        if os.path.exists(os.path.join(self.directory, "species", label, label + '.py')):
            PATH = os.path.join(self.directory, "species", label, label + '.py')
            logging.info(f"Species input file already written... Renaming it {PATH} and creating a new one.")
            shutil.move(
                PATH,
                PATH.replace('py', 'old.py')
            )

        parser = cclib.io.ccread(os.path.join(
            self.directory, "species", label, label + ".log"), loglevel=logging.ERROR)
        symbol_dict = {
            35: "Br",
            17: "Cl",
            9:  "F",
            8:  "O",
            7:  "N",
            6:  "C",
            1:  "H",
        }

        atoms = []

        for atom_num, coords in zip(parser.atomnos, parser.atomcoords[-1]):
            atoms.append(ase.Atom(symbol=symbol_dict[atom_num], position=coords))

        conformer._ase_molecule = ase.Atoms(atoms)
        conformer.update_coords_from("ase")
        mol = conformer.rmg_molecule
        output = ['#!/usr/bin/env python',
                  '# -*- coding: utf-8 -*-', ]

        output += ["",
                   f"spinMultiplicity = {conformer.rmg_molecule.multiplicity}",
                   ""]

        output += ["energy = {", f"    '{self.model_chemistry}': Log('{label}.log'),", "}", ""]  # fix this

        output += [f"geometry = Log('{label}.log')", ""]

        output += [
            f"frequencies = Log('{label}.log')", ""]

        if include_rotors:
            output += ["rotors = ["]
            if len(conformer.torsions) == 0:
                conformer.get_molecules()
                conformer.get_geometries()
            for torsion in conformer.torsions:
                output += [self.get_rotor_info(conformer, torsion)]
            output += ["]"]
        
        input_string = ""

        for t in output:
            input_string += t + "\n"

        with open(os.path.join(self.directory, "species", label, label + '.py'), "w") as f:
            f.write(input_string)
        return True
Code Example #7
    my_comm.broadcast(cell, 0)
    my_comm.broadcast(pbc, 0)

    if pbc == 1:
        pbc = True
    else:
        pbc = False

    if first_time:
        cell.shape = (3, 3)
        positions.shape = (natoms, 3)

        atomic_symbols = ''.join(
            [ase.chemical_symbols[int(i)] for i in atomic_numbers])
        atoms = ase.Atoms(atomic_symbols,
                          positions=positions,
                          cell=cell,
                          pbc=pbc)

        calc = create_gpaw(my_comm)
        atoms.set_calculator(calc)
    else:
        atoms.set_positions(positions)

    calculation_failed = numpy.array((0, ), 'i')
    try:
        f1 = atoms.get_forces()
        e1 = atoms.get_potential_energy()
        e1 = numpy.array([
            e1,
        ])
    except gpaw.KohnShamConvergenceError:
Code Example #8
File: castep.py Project: essil1/ase-laser
def read_castep_md(fd,
                   index=None,
                   return_scalars=False,
                   units=units_CODATA2002):
    """Reads a .md file written by a CASTEP MolecularDynamics task
    and returns the trajectory stored therein as a list of Atoms objects.

    Note that the index argument has no effect as of now."""

    from ase.calculators.singlepoint import SinglePointCalculator

    factors = {
        't': units['t0'] * 1E15,  # fs
        'E': units['Eh'],  # eV
        'T': units['Eh'] / units['kB'],
        'P': units['Eh'] / units['a0']**3 * units['Pascal'],
        'h': units['a0'],
        'hv': units['a0'] / units['t0'],
        'S': units['Eh'] / units['a0']**3,
        'R': units['a0'],
        'V': np.sqrt(units['Eh'] / units['me']),
        'F': units['Eh'] / units['a0']
    }

    # fd is closed by embracing read() routine
    lines = fd.readlines()

    l = 0
    while 'END header' not in lines[l]:
        l += 1
    l_end_header = l
    lines = lines[l_end_header + 1:]
    times = []
    energies = []
    temperatures = []
    pressures = []
    traj = []

    # Initialization
    time = None
    Epot = None
    Ekin = None
    EH = None
    temperature = None
    pressure = None
    symbols = None
    positions = None
    cell = None
    velocities = None
    symbols = []
    positions = []
    velocities = []
    forces = []
    cell = np.eye(3)
    cell_velocities = []
    stress = []

    for (l, line) in enumerate(lines):
        fields = line.split()
        if len(fields) == 0:
            if l != 0:
                times.append(time)
                energies.append([Epot, EH, Ekin])
                temperatures.append(temperature)
                pressures.append(pressure)
                atoms = ase.Atoms(symbols=symbols,
                                  positions=positions,
                                  cell=cell)
                atoms.set_velocities(velocities)
                if len(stress) == 0:
                    atoms.set_calculator(
                        SinglePointCalculator(atoms=atoms,
                                              energy=Epot,
                                              forces=forces))
                else:
                    atoms.set_calculator(
                        SinglePointCalculator(atoms=atoms,
                                              energy=Epot,
                                              forces=forces,
                                              stress=stress))
                traj.append(atoms)
            symbols = []
            positions = []
            velocities = []
            forces = []
            cell = []
            cell_velocities = []
            stress = []
            continue
        if len(fields) == 1:
            time = factors['t'] * float(fields[0])
            continue

        if fields[-1] == 'E':
            E = [float(x) for x in fields[0:3]]
            Epot, EH, Ekin = [factors['E'] * Ei for Ei in E]
            continue

        if fields[-1] == 'T':
            temperature = factors['T'] * float(fields[0])
            continue

        # only printed in case of variable cell calculation or calculate_stress
        # explicitly requested
        if fields[-1] == 'P':
            pressure = factors['P'] * float(fields[0])
            continue
        if fields[-1] == 'h':
            h = [float(x) for x in fields[0:3]]
            cell.append([factors['h'] * hi for hi in h])
            continue

        # only printed in case of variable cell calculation
        if fields[-1] == 'hv':
            hv = [float(x) for x in fields[0:3]]
            cell_velocities.append([factors['hv'] * hvi for hvi in hv])
            continue

        # only printed in case of variable cell calculation
        if fields[-1] == 'S':
            S = [float(x) for x in fields[0:3]]
            stress.append([factors['S'] * Si for Si in S])
            continue
        if fields[-1] == 'R':
            symbols.append(fields[0])
            R = [float(x) for x in fields[2:5]]
            positions.append([factors['R'] * Ri for Ri in R])
            continue
        if fields[-1] == 'V':
            V = [float(x) for x in fields[2:5]]
            velocities.append([factors['V'] * Vi for Vi in V])
            continue
        if fields[-1] == 'F':
            F = [float(x) for x in fields[2:5]]
            forces.append([factors['F'] * Fi for Fi in F])
            continue

    if index is None:
        pass
    else:
        traj = traj[index]

    if return_scalars:
        data = [times, energies, temperatures, pressures]
        return data, traj
    else:
        return traj
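A minimal usage sketch, not from the original file (the .md file name is hypothetical; the function expects an already opened file handle):

with open('run.md') as fd:
    frames = read_castep_md(fd)
print(len(frames), frames[-1].get_potential_energy())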
Code Example #9
File: castep.py Project: essil1/ase-laser
def read_castep_cell(fd,
                     index=None,
                     calculator_args={},
                     find_spg=False,
                     units=units_CODATA2002):
    """Read a .cell file and return an atoms object.
    Any value found that does not fit the atoms API
    will be stored in the atoms.calc attribute.

    By default, the Castep calculator will be tolerant and in the absence of a
    castep_keywords.json file it will just accept all keywords that aren't 
    automatically parsed.
    """

    from ase.calculators.castep import Castep

    cell_units = {  # Units specifiers for CASTEP
        'bohr': units_CODATA2002['a0'],
        'ang': 1.0,
        'm': 1e10,
        'cm': 1e8,
        'nm': 10,
        'pm': 1e-2
    }

    calc = Castep(**calculator_args)

    if calc.cell.castep_version == 0 and calc._kw_tol < 3:
        # No valid castep_keywords.json was found
        print('read_cell: Warning - Was not able to validate CASTEP input.')
        print('           This may be due to a non-existing '
              '"castep_keywords.json"')
        print('           file or a non-existing CASTEP installation.')
        print('           Parsing will go on but keywords will not be '
              'validated and may cause problems if incorrect during a CASTEP '
              'run.')

    celldict = read_freeform(fd)

    def parse_blockunit(line_tokens, blockname):
        u = 1.0
        if len(line_tokens[0]) == 1:
            usymb = line_tokens[0][0].lower()
            u = cell_units.get(usymb, 1)
            if usymb not in cell_units:
                warnings.warn(
                    ('read_cell: Warning - ignoring invalid '
                     'unit specifier in %BLOCK {0} '
                     '(assuming Angstrom instead)').format(blockname))
            line_tokens = line_tokens[1:]
        return u, line_tokens

    # Arguments to pass to the Atoms object at the end
    aargs = {'pbc': True}

    # Start by looking for the lattice
    lat_keywords = [w in celldict for w in ('lattice_cart', 'lattice_abc')]
    if all(lat_keywords):
        warnings.warn('read_cell: Warning - two lattice blocks present in the'
                      ' same file. LATTICE_ABC will be ignored')
    elif not any(lat_keywords):
        raise ValueError('Cell file must contain at least one of '
                         'LATTICE_ABC and LATTICE_CART')

    if 'lattice_abc' in celldict:

        lines = celldict.pop('lattice_abc').split('\n')
        line_tokens = [l.split() for l in lines]

        u, line_tokens = parse_blockunit(line_tokens, 'lattice_abc')

        if len(line_tokens) != 2:
            warnings.warn('read_cell: Warning - ignoring additional '
                          'lines in invalid %BLOCK LATTICE_ABC')

        abc = [float(p) * u for p in line_tokens[0][:3]]
        angles = [float(phi) for phi in line_tokens[1][:3]]

        aargs['cell'] = cellpar_to_cell(abc + angles)

    if 'lattice_cart' in celldict:

        lines = celldict.pop('lattice_cart').split('\n')
        line_tokens = [l.split() for l in lines]

        u, line_tokens = parse_blockunit(line_tokens, 'lattice_cart')

        if len(line_tokens) != 3:
            warnings.warn('read_cell: Warning - ignoring more than '
                          'three lattice vectors in invalid %BLOCK '
                          'LATTICE_CART')

        aargs['cell'] = [[float(x) * u for x in lt[:3]] for lt in line_tokens]

    # Now move on to the positions
    pos_keywords = [w in celldict for w in ('positions_abs', 'positions_frac')]

    if all(pos_keywords):
        warnings.warn('read_cell: Warning - two position blocks present in the'
                      ' same file. POSITIONS_FRAC will be ignored')
        del celldict['positions_frac']
    elif not any(pos_keywords):
        raise ValueError('Cell file must contain at least one of '
                         'POSITIONS_FRAC and POSITIONS_ABS')

    aargs['symbols'] = []
    pos_type = 'positions'
    pos_block = celldict.pop('positions_abs', None)
    if pos_block is None:
        pos_type = 'scaled_positions'
        pos_block = celldict.pop('positions_frac', None)
    aargs[pos_type] = []

    lines = pos_block.split('\n')
    line_tokens = [l.split() for l in lines]

    if 'scaled' not in pos_type:
        u, line_tokens = parse_blockunit(line_tokens, 'positions_abs')
    else:
        u = 1.0

    # Here we extract all the possible additional info
    # These are marked by their type

    add_info = {
        'SPIN': (float, 0.0),  # (type, default)
        'MAGMOM': (float, 0.0),
        'LABEL': (str, 'NULL')
    }
    add_info_arrays = dict((k, []) for k in add_info)

    def parse_info(raw_info):

        re_keys = (r'({0})\s*[=:\s]{{1}}\s'
                   r'*([^\s]*)').format('|'.join(add_info.keys()))
        # Capture all info groups
        info = re.findall(re_keys, raw_info)
        info = {g[0]: add_info[g[0]][0](g[1]) for g in info}
        return info

    # Array for custom species (a CASTEP special thing)
    # Usually left unused
    custom_species = None

    for tokens in line_tokens:
        # Now, process the whole 'species' thing
        spec_custom = tokens[0].split(':', 1)
        elem = spec_custom[0]
        if len(spec_custom) > 1 and custom_species is None:
            # Add it to the custom info!
            custom_species = list(aargs['symbols'])
        if custom_species is not None:
            custom_species.append(tokens[0])
        aargs['symbols'].append(elem)
        aargs[pos_type].append([float(p) * u for p in tokens[1:4]])
        # Now for the additional information
        info = ' '.join(tokens[4:])
        info = parse_info(info)
        for k in add_info:
            add_info_arrays[k] += [info.get(k, add_info[k][1])]

    # Now on to the species potentials...
    if 'species_pot' in celldict:
        lines = celldict.pop('species_pot').split('\n')
        line_tokens = [l.split() for l in lines]

        for tokens in line_tokens:
            if len(tokens) == 1:
                # It's a library
                all_spec = (set(custom_species) if custom_species is not None
                            else set(aargs['symbols']))
                for s in all_spec:
                    calc.cell.species_pot = (s, tokens[0])
            else:
                calc.cell.species_pot = tuple(tokens[:2])

    # Ionic constraints
    raw_constraints = {}

    if 'ionic_constraints' in celldict:
        lines = celldict.pop('ionic_constraints').split('\n')
        line_tokens = [l.split() for l in lines]

        for tokens in line_tokens:
            if not len(tokens) == 6:
                continue
            _, species, nic, x, y, z = tokens
            # convert xyz to floats
            x = float(x)
            y = float(y)
            z = float(z)

            nic = int(nic)
            if (species, nic) not in raw_constraints:
                raw_constraints[(species, nic)] = []
            raw_constraints[(species, nic)].append(np.array([x, y, z]))

    # Symmetry operations
    if 'symmetry_ops' in celldict:
        lines = celldict.pop('symmetry_ops').split('\n')
        line_tokens = [l.split() for l in lines]

        # Read them in blocks of four
        blocks = np.array(line_tokens).astype(float)
        if (len(blocks.shape) != 2 or blocks.shape[1] != 3
                or blocks.shape[0] % 4 != 0):
            warnings.warn('Warning: could not parse SYMMETRY_OPS'
                          ' block properly, skipping')
        else:
            blocks = blocks.reshape((-1, 4, 3))
            rotations = blocks[:, :3]
            translations = blocks[:, 3]

            # Regardless of whether we recognize them, store these
            calc.cell.symmetry_ops = (rotations, translations)

    # Anything else that remains, just add it to the cell object:
    for k, val in celldict.items():
        try:
            calc.cell.__setattr__(k, val)
        except Exception:
            raise RuntimeError('Problem setting calc.cell.%s = %s' % (k, val))

    # Get the relevant additional info
    aargs['magmoms'] = np.array(add_info_arrays['SPIN'])
    # SPIN or MAGMOM are alternative keywords
    aargs['magmoms'] = np.where(aargs['magmoms'] != 0, aargs['magmoms'],
                                add_info_arrays['MAGMOM'])
    labels = np.array(add_info_arrays['LABEL'])

    aargs['calculator'] = calc

    atoms = ase.Atoms(**aargs)

    # Spacegroup...
    if find_spg:
        # Try importing spglib
        try:
            import spglib
        except ImportError:
            try:
                from pyspglib import spglib
            except ImportError:
                # spglib is not present
                warnings.warn('spglib not found installed on this system - '
                              'automatic spacegroup detection is not possible')
                spglib = None

        if spglib is not None:
            symmd = spglib.get_symmetry_dataset(atoms)
            atoms_spg = Spacegroup(int(symmd['number']))
            atoms.info['spacegroup'] = atoms_spg

    atoms.new_array('castep_labels', labels)
    if custom_species is not None:
        atoms.new_array('castep_custom_species', np.array(custom_species))

    fixed_atoms = []
    constraints = []
    for (species, nic), value in raw_constraints.items():
        absolute_nr = atoms.calc._get_absolute_number(species, nic)
        if len(value) == 3:
            # Check if they are linearly independent
            if np.linalg.det(value) == 0:
                print('Error: Found linearly dependent constraints attached '
                      'to atoms %s' % (absolute_nr))
                continue
            fixed_atoms.append(absolute_nr)
        elif len(value) == 2:
            direction = np.cross(value[0], value[1])
            # Check if they are linearly independent
            if np.linalg.norm(direction) == 0:
                print('Error: Found linearly dependent constraints attached '
                      'to atoms %s' % (absolute_nr))
                continue
            constraint = ase.constraints.FixedLine(a=absolute_nr,
                                                   direction=direction)
            constraints.append(constraint)
        elif len(value) == 1:
            constraint = ase.constraints.FixedPlane(a=absolute_nr,
                                                    direction=np.array(
                                                        value[0],
                                                        dtype=np.float32))
            constraints.append(constraint)
        else:
            print('Error: Found %s statements attached to atoms %s' %
                  (len(value), absolute_nr))

    # we need to sort the fixed atoms list in order not to raise an assertion
    # error in FixAtoms
    if fixed_atoms:
        constraints.append(
            ase.constraints.FixAtoms(indices=sorted(fixed_atoms)))
    if constraints:
        atoms.set_constraint(constraints)

    atoms.calc.atoms = atoms
    atoms.calc.push_oldstate()

    return atoms
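A minimal usage sketch, not from the original file (the .cell file name is hypothetical; find_spg=True only adds spacegroup info if spglib is installed):

with open('structure.cell') as fd:
    atoms = read_castep_cell(fd, find_spg=True)
print(atoms.get_chemical_formula(), atoms.info.get('spacegroup'))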
Code Example #10
    def createDatabaseFromFeatureset(database, featureFile, length, threshold=20, mode=None,
                                     label_type=None, classes=None, n_classes=None, oversample=False, sample_factor=50,
                                     pbc=(1, 1, 1), noProtons=False):

        featureFile = h5py.File(featureFile)

        indexes = np.arange(length)
        np.random.shuffle(indexes)

        labels = []
        for i in indexes:
            labels.append(featureFile[str(i) + '/label'].value)

        hist = np.histogram(labels, 25)
        maxdist = []
        for i in indexes:
            print(i)
            # Add Ligand
            atom_list = []
            property_list = []
            ligcoords = featureFile[str(i) + '/ligcoords'].value

            ligAtNum = np.char.decode(featureFile[str(i) + '/lignum'].value)
            ligFeatures = featureFile[str(i) + '/lig'].value
            x = ligcoords[:, 0].mean()
            y = ligcoords[:, 1].mean()
            z = ligcoords[:, 2].mean()
            mean = np.array([x, y, z])
            for j in range(len(ligcoords)):
                maxdist.append(np.linalg.norm(ligcoords[j] - mean))
                if noProtons:
                    if ligAtNum[j] != 'H':
                        atom_list.append(ase.Atom(ligAtNum[j], ligcoords[j]))
                        property_list.append(ligFeatures[j])
                else:
                    atom_list.append(ase.Atom(ligAtNum[j], ligcoords[j]))
                    property_list.append(ligFeatures[j])

            #Add Protein-Atoms in Cutoff-Range
            protcoords = featureFile[str(i) + '/protcoords'].value
            protAtNum = np.char.decode(featureFile[str(i) + '/protnum'].value)
            protFeatures = featureFile[str(i) + '/prot'].value

            for j in range(len(protcoords)):
                dist = np.linalg.norm(protcoords[j] - mean)
                if dist <= threshold:
                    if noProtons:
                        if protAtNum[j] != 'H':
                            atom_list.append(ase.Atom(protAtNum[j], protcoords[j]))
                            property_list.append(protFeatures[j])
                    else:
                        atom_list.append(ase.Atom(protAtNum[j], protcoords[j]))
                        property_list.append(protFeatures[j])

            # Create Complex
            complexe = [ase.Atoms(atom_list, pbc=pbc)]

            label = featureFile[str(i) + '/label'].value

            affi = PreprocessingSchnet.classLabel(label, mode, label_type, classes=classes, n_classes=n_classes,
                                                  min_v=np.min(labels), max_v=np.max(labels))
            affi[0]['props'] = np.array(property_list)

            if not oversample:
                database.add_systems(complexe, affi)
            else:
                classn = np.zeros(25)
                for j in range(len(hist[1]) - 1):
                    if j == len(hist[1]) - 2:
                        if hist[1][j] <= labels[i] <= hist[1][j + 1]:
                            classn[j] = 1
                    else:
                        if hist[1][j] <= labels[i] < hist[1][j + 1]:
                            classn[j] = 1

                if np.unique(classn, return_counts=True)[1][1] != 1:
                    print('warning -> Onehot is more than one')
                    print(classn)

                ind = np.argmax(classn)
                if hist[0][ind] == 0:
                    print('Warning -> zero-sample')
                    continue

                n_sampling = int(np.ceil((1 / hist[0][ind]) * sample_factor * 25))
                print(i, len(indexes), n_sampling, ind)
                for _ in range(n_sampling):
                    database.add_systems(complexe, affi)
        print(np.max(maxdist))
Code Example #11
    def createDatabase(database, threshold=20, data_path='../Data/train/',
                       index_path='../Data/index/INDEX_refined_data.2016',
                       ligand_end='_ligand.sdf', alt_ligand_end='_ligand.pdb', prot_end='_pocket.pdb', mode=None,
                       label_type=None, classes=None, n_classes=None, oversample=False, sample_factor=50,
                       pbc=(1, 1, 1)):

        ligandPaths = PreprocessingSchnet.getAllMolPaths(data_path, ligand_end)
        ligandPaths2 = PreprocessingSchnet.getAllMolPaths(data_path, alt_ligand_end)
        proteinPaths = PreprocessingSchnet.getAllMolPaths(data_path, prot_end)
        labels = PreprocessingSchnet.getLabels(data_path, index_path)

        indexes = np.arange(len(proteinPaths[0]))
        np.random.shuffle(indexes)

        hist = np.histogram(labels, 25)

        for i in indexes:
            atom_list = []
            try:
                atoms2 = read(ligandPaths[0][i], format='sdf')
                for at in atoms2:
                    atom_list.append(at)
                x = atoms2.positions[:, 0].mean()
                y = atoms2.positions[:, 1].mean()
                z = atoms2.positions[:, 2].mean()
            except:
                try:
                    atoms3 = read(ligandPaths2[0][i], format='proteindatabank')
                    x = atoms3.positions[:, 0].mean()
                    y = atoms3.positions[:, 1].mean()
                    z = atoms3.positions[:, 2].mean()
                    for at in atoms3:
                        atom_list.append(at)
                except:
                    print('Does not work')
                    continue

            affi = PreprocessingSchnet.classLabel(labels[i], mode, label_type, classes=classes, n_classes=n_classes,
                                                  min_v=np.min(labels), max_v=np.max(labels))

            mean = np.array([x, y, z])

            atoms = read(proteinPaths[0][i], format='proteindatabank')

            for at in atoms:
                dist = np.linalg.norm(at.position - mean)
                if dist <= threshold:
                    atom_list.append(at)

            complexe = [ase.Atoms(atom_list, pbc=pbc)]

            if not oversample:
                database.add_systems(complexe, affi)
            else:
                classn = np.zeros(25)
                for j in range(len(hist[1]) - 1):
                    if j == len(hist[1]) - 2:
                        if hist[1][j] <= labels[i] <= hist[1][j + 1]:
                            classn[j] = 1
                    else:
                        if hist[1][j] <= labels[i] < hist[1][j + 1]:
                            classn[j] = 1

                if np.unique(classn, return_counts=True)[1][1] != 1:
                    print('warning -> Onehot is more than one')
                    print(classn)

                ind = np.argmax(classn)
                if hist[0][ind] == 0:
                    print('Warning -> zero-sample')
                    continue

                n_sampling = int(np.ceil((1 / hist[0][ind]) * sample_factor * 25))
                print(i, len(indexes), n_sampling, ind)
                for _ in range(n_sampling):
                    database.add_systems(complexe, affi)
Code Example #12
 def test_two_triangles(self):
     a = ase.Atoms('4Xe', [[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]])
     a.center(vacuum=10)
     c1, c2, c3 = find_triangles_2d(a, 1.1)
     self.assertArrayAlmostEqual(np.transpose(
         [c1, c2, c3]), [[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]])
Code Example #13
File: test_descriptor.py Project: juukaup/QUIP
    def setUp(self):
        at_h2 = ase.Atoms('H2', positions=[[0, 0, 0], [0, 0, 1]])
        self.quip_at_h2 = quippy.convert.ase_to_quip(at_h2, None)

        # set up the atoms object for descriptor
        at_C2H = ase.build.bulk('C', 'diamond', 3.5)
        at_C2H.append(ase.Atom('H', (0.2, 0.2, 0.1)))
        self.at_C2H = at_C2H

        self.ref_shapes = {
            'cutoff': (3, ),
            'grad_index_0based': (119, 2),
            'cutoff_grad': (119, 3),
            'descriptor_index_0based': (3, 1),
            'descriptor': (3, 51),
            'grad': (119, 3, 51)
        }

        self.grad_array_ref = np.array(
            [[[
                1.13442398e-01, -2.53763797e-04, 5.43671281e-06,
                -5.32119600e-04, -1.22629493e-04, -2.54495213e-02,
                -2.35215689e-03, 4.75523231e-05, -5.02800402e-03,
                -1.08561620e-03, -1.95363216e-01, -1.08721467e-02,
                1.99774343e-04, -2.37566525e-02, -4.73489941e-03,
                1.37877959e-01, 1.29099558e-03, -6.02464018e-05,
                1.60898786e-03, 1.20934568e-03, -6.20758739e-03,
                8.93220421e-03, -5.05352272e-04, 1.07208747e-02,
                8.71357869e-03, 8.31695444e-02, 5.34043119e-04,
                -7.42710259e-04, -2.67193323e-03, 3.30851049e-03,
                -1.26713676e-03, -1.17631427e-05, 5.17499647e-07,
                -1.41057631e-05, -1.10223791e-05, 5.22142751e-06,
                -8.09832380e-05, 4.52441394e-06, -9.39773456e-05,
                -7.95184362e-05, -1.07805976e-03, -2.24556230e-06,
                1.11276662e-05, 3.32533921e-05, -4.37921512e-05,
                6.98018370e-06, -1.28322312e-08, -8.24126629e-08,
                -2.07186884e-07, 2.89555893e-07, 0.00000000e+00
            ],
              [
                  1.13442398e-01, -2.53763797e-04, 5.43671281e-06,
                  -5.32119600e-04, -1.22629493e-04, -2.54495213e-02,
                  -2.35215689e-03, 4.75523231e-05, -5.02800402e-03,
                  -1.08561620e-03, -1.95363216e-01, -1.08721467e-02,
                  1.99774343e-04, -2.37566525e-02, -4.73489941e-03,
                  1.37877959e-01, 1.29099558e-03, -6.02464018e-05,
                  1.60898786e-03, 1.20934568e-03, -6.20758739e-03,
                  8.93220421e-03, -5.05352272e-04, 1.07208747e-02,
                  8.71357869e-03, 8.31695444e-02, 5.34043119e-04,
                  -7.42710259e-04, -2.67193323e-03, 3.30851049e-03,
                  -1.26713676e-03, -1.17631427e-05, 5.17499647e-07,
                  -1.41057631e-05, -1.10223791e-05, 5.22142751e-06,
                  -8.09832380e-05, 4.52441394e-06, -9.39773456e-05,
                  -7.95184362e-05, -1.07805976e-03, -2.24556230e-06,
                  1.11276662e-05, 3.32533921e-05, -4.37921512e-05,
                  6.98018370e-06, -1.28322312e-08, -8.24126629e-08,
                  -2.07186884e-07, 2.89555893e-07, 0.00000000e+00
              ],
              [
                  5.62089907e-02, -1.38457839e-04, 4.28026688e-05,
                  -5.04141229e-04, 4.35011295e-06, -1.88932655e-02,
                  -1.32007005e-03, 3.73714437e-04, -4.68615400e-03,
                  7.28182168e-05, -1.07136020e-01, -6.25813743e-03,
                  1.62136592e-03, -2.17410718e-02, 5.17941589e-04,
                  7.80934164e-02, 2.83529951e-04, -4.81867697e-04,
                  2.41304526e-03, 3.50601925e-04, 3.36108042e-03,
                  2.47429899e-03, -3.13883417e-03, 1.64861857e-02,
                  2.66528855e-03, 5.20708919e-02, 4.27468889e-03,
                  1.38348816e-03, -6.99894308e-04, 2.08750362e-03,
                  -7.13087093e-04, -2.60254817e-06, 4.33984399e-06,
                  -2.18638435e-05, -3.08862541e-06, -5.27388207e-05,
                  -2.24182986e-05, 2.84397065e-05, -1.49421137e-04,
                  -2.36711867e-05, -6.69317942e-04, -5.21238036e-05,
                  -1.56666324e-05, 8.45214919e-06, -2.82017984e-05,
                  4.29940247e-06, 3.18081205e-07, 8.57747996e-08,
                  -5.08881135e-08, 1.89987523e-07, 0.00000000e+00
              ]],
             [[
                 -2.14158073e-02, 3.79218050e-06, 7.23513681e-07,
                 -7.84788979e-05, -3.50583379e-05, -2.89216230e-02,
                 9.42399116e-05, 1.36864275e-05, -8.59430651e-04,
                 -3.89273192e-04, -1.85999243e-02, 6.81736736e-04,
                 8.43225045e-05, -4.63094307e-03, -2.14568411e-03,
                 2.44466455e-02, 6.58481915e-04, 7.63834099e-05,
                 -1.11484001e-03, -5.59330752e-04, 3.33924676e-02,
                 3.63485980e-03, 2.89678116e-04, -7.41765752e-03,
                 -4.14043902e-03, 4.03743618e-02, -7.19821888e-03,
                 -1.90522488e-03, 1.93777581e-03, -2.42525548e-03,
                 -1.95582360e-04, -5.54976344e-06, -6.34720740e-07,
                 9.35790219e-06, 4.82053557e-06, -2.71507207e-04,
                 -3.05150718e-05, -2.25446976e-06, 6.22363378e-05,
                 3.57353505e-05, -4.90105993e-04, 8.71726430e-05,
                 2.41405489e-05, -2.33132462e-05, 3.01491476e-05,
                 2.97118096e-06, -5.27727787e-07, -1.52394728e-07,
                 1.40208541e-07, -1.87271542e-07, 0.00000000e+00
             ],
              [
                  -7.13262538e-03, 1.21559754e-06, 2.15802281e-07,
                  -2.57498259e-05, -1.21494351e-05, -9.63070849e-03,
                  3.03991732e-05, 4.02761567e-06, -2.77321872e-04,
                  -1.41451530e-04, -6.19188729e-03, 2.20220478e-04,
                  2.46353751e-05, -1.47508784e-03, -8.10301233e-04,
                  8.15552197e-03, 2.13260519e-04, 2.21561296e-05,
                  -3.12256451e-04, -2.68959761e-04, 1.11385465e-02,
                  1.17755131e-03, 8.23436392e-05, -2.08160223e-03,
                  -2.00118852e-03, 1.34618090e-02, -2.32850330e-03,
                  -5.66277047e-04, 5.10435977e-04, -1.24908308e-03,
                  -6.52530791e-05, -1.79946963e-06, -1.85135523e-07,
                  2.64035551e-06, 2.27137694e-06, -9.05709849e-05,
                  -9.89870315e-06, -6.44420881e-07, 1.75971204e-05,
                  1.69397481e-05, -1.63414980e-04, 2.82144027e-05,
                  7.19229070e-06, -6.15369080e-06, 1.53724156e-05,
                  9.90684444e-07, -1.70902096e-07, -4.55229079e-08,
                  3.71006568e-08, -9.44582726e-08, 0.00000000e+00
              ],
              [
                  -7.13261775e-03, 1.25063479e-06, 2.30790120e-07,
                  -2.57290744e-05, -1.21030381e-05, -9.63069593e-03,
                  3.11208306e-05, 4.34616488e-06, -2.76819936e-04,
                  -1.40330434e-04, -6.19187696e-03, 2.25093240e-04,
                  2.67853396e-05, -1.47054829e-03, -8.02378204e-04,
                  8.15553045e-03, 2.17634857e-04, 2.41402926e-05,
                  -3.08810890e-04, -2.61269181e-04, 1.11385564e-02,
                  1.19974567e-03, 9.21255151e-05, -2.04750454e-03,
                  -1.96097972e-03, 1.34618137e-02, -2.39215608e-03,
                  -5.97462401e-04, 5.95062268e-04, -1.35135072e-03,
                  -6.52531544e-05, -1.83482509e-06, -2.01046527e-07,
                  2.61227166e-06, 2.20985030e-06, -9.05710725e-05,
                  -1.00752907e-05, -7.19750335e-07, 1.73150421e-05,
                  1.66247639e-05, -1.63415039e-04, 2.89740666e-05,
                  7.58164016e-06, -7.17685195e-06, 1.66062942e-05,
                  9.90684816e-07, -1.75428638e-07, -4.79369204e-08,
                  4.32667739e-08, -1.01876902e-07, 0.00000000e+00
              ]]])
Code Example #14
    def __getitem__(self, i=-1):
        self._open()

        if isinstance(i, slice):
            return [self[j] for j in range(*i.indices(self._len()))]

        N = self._len()
        if 0 <= i < N:
            # Non-periodic boundaries have cell_length == 0.0
            cell_lengths = \
                np.array(self.nc.variables[self._cell_lengths_var][i][:])
            pbc = np.abs(cell_lengths > 1e-6)

            # Do we have a cell origin?
            if self._has_variable(self._cell_origin_var):
                origin = np.array(
                    self.nc.variables[self._cell_origin_var][i][:])
            else:
                origin = np.zeros([3], dtype=float)

            # Do we have an index variable?
            if self._has_variable(self.index_var):
                index = np.array(self.nc.variables[self.index_var][i][:]) + \
                    self.index_offset
            else:
                index = np.arange(self.n_atoms)

            # Read element numbers
            self.numbers = self._get_data(self._numbers_var,
                                          i,
                                          index,
                                          exc=False)
            if self.numbers is None:
                self.numbers = np.ones(self.n_atoms, dtype=int)
            if self.types_to_numbers is not None:
                self.numbers = self.types_to_numbers[self.numbers]
            self.masses = atomic_masses[self.numbers]

            # Read positions
            positions = self._get_data(self._positions_var, i, index)

            # Determine cell size for non-periodic directions from shrink
            # wrapped cell.
            for dim in np.arange(3)[np.logical_not(pbc)]:
                origin[dim] = positions[:, dim].min()
                cell_lengths[dim] = positions[:, dim].max() - origin[dim]

            # Construct cell shape from cell lengths and angles
            cell = cellpar_to_cell(
                list(cell_lengths) +
                list(self.nc.variables[self._cell_angles_var][i]))

            # Compute momenta from velocities (if present)
            momenta = self._get_data(self._velocities_var, i, index, exc=False)
            if momenta is not None:
                momenta *= self.masses.reshape(-1, 1)

            # Fill info dict with additional data found in the NetCDF file
            info = {}
            for name in self.extra_per_frame_atts:
                info[name] = np.array(self.nc.variables[name][i])

            # Create atoms object
            atoms = ase.Atoms(positions=positions,
                              numbers=self.numbers,
                              cell=cell,
                              celldisp=origin,
                              momenta=momenta,
                              masses=self.masses,
                              pbc=pbc,
                              info=info)

            # Attach additional arrays found in the NetCDF file
            for name in self.extra_per_frame_vars:
                atoms.set_array(name, self._get_data(name, i, index))
            for name in self.extra_per_file_vars:
                atoms.set_array(name, self._get_data(name, i, index))
            self._close()
            return atoms

        i = N + i
        if i < 0 or i >= N:
            self._close()
            raise IndexError('Trajectory index out of range.')
        return self[i]
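A usage sketch, assuming this __getitem__ belongs to an ASE NetCDFTrajectory-style reader; the trajectory file name is hypothetical:

from ase.io.netcdftrajectory import NetCDFTrajectory

traj = NetCDFTrajectory('md.nc')
last = traj[-1]        # negative indices wrap around via i = N + i
window = traj[10:20]   # slices return a plain list of Atoms objects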
Code Example #15
def example_dft_atomic_kinds(cp2k_code):
    """Run DFT calculation with different atomic kinds."""

    print("Testing CP2K GEOP_OPT on Si with different atomic kinds (DFT)...")

    thisdir = os.path.dirname(os.path.realpath(__file__))

    # Structure.
    pos = [[0., 0., 0.], [1.90598, 1.10041807, 0.77811308]]
    cell = [[3.81196, 0.0, 0.0], [1.90598, 3.3012541982101, 0.0],
            [1.90598, 1.10041806607, 3.1124523066333]]
    tags = [0, 1]
    atoms = ase.Atoms(symbols='Si2',
                      pbc=True,
                      cell=cell,
                      positions=pos,
                      tags=tags)
    structure = StructureData(ase=atoms)

    # Basis set.
    basis_file = SinglefileData(
        file=os.path.join(thisdir, "..", "files", "BASIS_MOLOPT"))

    # Pseudopotentials.
    pseudo_file = SinglefileData(
        file=os.path.join(thisdir, "..", "files", "GTH_POTENTIALS"))

    # Parameters.
    parameters = Dict(
        dict={
            'FORCE_EVAL': {
                'METHOD': 'Quickstep',
                'DFT': {
                    'BASIS_SET_FILE_NAME': 'BASIS_MOLOPT',
                    'POTENTIAL_FILE_NAME': 'GTH_POTENTIALS',
                    'QS': {
                        'EPS_DEFAULT': 1.0e-12,
                        'WF_INTERPOLATION': 'ps',
                        'EXTRAPOLATION_ORDER': 3,
                    },
                    'MGRID': {
                        'NGRIDS': 4,
                        'CUTOFF': 280,
                        'REL_CUTOFF': 30,
                    },
                    'XC': {
                        'XC_FUNCTIONAL': {
                            '_': 'LDA',
                        },
                    },
                    'POISSON': {
                        'PERIODIC': 'none',
                        'PSOLVER': 'MT',
                    },
                },
                'SUBSYS': {
                    'KIND': [
                        {
                            '_': 'Si',
                            'ELEMENT': 'Si',
                            'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',
                            'POTENTIAL': 'GTH-LDA-q4'
                        },
                        {
                            '_': 'Si1',
                            'ELEMENT': 'Si',
                            'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',
                            'POTENTIAL': 'GTH-LDA-q4'
                        },
                    ],
                },
            },
            'MOTION': {
                'GEO_OPT': {
                    'MAX_FORCE': 1e-4,
                    'MAX_ITER': '3',
                    'OPTIMIZER': 'BFGS',
                    'BFGS': {
                        'TRUST_RADIUS': '[bohr] 0.1',
                    },
                },
            },
            'GLOBAL': {
                'RUN_TYPE': 'GEO_OPT',
            }
        })

    # Construct process builder.
    builder = cp2k_code.get_builder()
    builder.structure = structure
    builder.parameters = parameters
    builder.code = cp2k_code
    builder.file = {
        'basis': basis_file,
        'pseudo': pseudo_file,
    }
    builder.metadata.options.resources = {
        "num_machines": 1,
        "num_mpiprocs_per_machine": 1,
    }
    builder.metadata.options.max_wallclock_seconds = 1 * 3 * 60

    print("Submitted calculation...")
    run(builder)
Code Example #16
File: castep.py Project: essil1/ase-laser
def read_castep_castep_old(fd, index=None):
    """
    DEPRECATED
    Now replaced by ase.calculators.castep.Castep.read(). Left in for future
    reference and backwards compatibility needs, as well as a fallback for
    when castep_keywords.py can't be created.

    Reads a .castep file and returns an Atoms object.
    The calculator information will be stored in the calc attribute.
    If more than one SCF step is found, a list of all steps
    will be stored in the traj attribute.

    Note that the index argument has no effect as of now.

    Please note that this routine will return an atom ordering as found
    within the castep file. This means that the species will be ordered by
    ascending atomic numbers. The atoms within a species are ordered as given
    in the original cell file.
    """
    from ase.calculators.singlepoint import SinglePointCalculator

    lines = fd.readlines()

    traj = []
    energy_total = None
    energy_0K = None
    for i, line in enumerate(lines):
        if 'NB est. 0K energy' in line:
            energy_0K = float(line.split()[6])
        # support also for dispersion correction
        elif 'NB dispersion corrected est. 0K energy*' in line:
            energy_0K = float(line.split()[-2])
        elif 'Final energy, E' in line:
            energy_total = float(line.split()[4])
        elif 'Dispersion corrected final energy' in line:
            pass
            # dispcorr_energy_total = float(line.split()[-2])
            # sedc_apply = True
        elif 'Dispersion corrected final free energy' in line:
            pass  # dispcorr_energy_free = float(line.split()[-2])
        elif 'dispersion corrected est. 0K energy' in line:
            pass  # dispcorr_energy_0K = float(line.split()[-2])
        elif 'Unit Cell' in line:
            cell = [x.split()[0:3] for x in lines[i + 3:i + 6]]
            cell = np.array([[float(col) for col in row] for row in cell])
        elif 'Cell Contents' in line:
            geom_starts = i
            start_found = False
            for j, jline in enumerate(lines[geom_starts:]):
                if jline.find('xxxxx') > 0 and start_found:
                    geom_stop = j + geom_starts
                    break
                if jline.find('xxxx') > 0 and not start_found:
                    geom_start = j + geom_starts + 4
                    start_found = True
            species = [line.split()[1] for line in lines[geom_start:geom_stop]]
            geom = np.dot(
                np.array([[float(col) for col in line.split()[3:6]]
                          for line in lines[geom_start:geom_stop]]), cell)
        elif 'Writing model to' in line:
            atoms = ase.Atoms(cell=cell,
                              pbc=True,
                              positions=geom,
                              symbols=''.join(species))
            # take 0K energy where available, else total energy
            if energy_0K:
                energy = energy_0K
            else:
                energy = energy_total
            # generate a minimal single-point calculator
            sp_calc = SinglePointCalculator(atoms=atoms,
                                            energy=energy,
                                            forces=None,
                                            magmoms=None,
                                            stress=None)
            atoms.set_calculator(sp_calc)
            traj.append(atoms)
    if index is None:
        return traj
    else:
        return traj[index]
Code Example #17
    def write_ts_input(self, transitionstate):
        """
        A method to write Arkane files for a single TS object

        Parameters:
        - transitionstate (TS): a TS object that you want to write an Arkane file for; the file
          is written under self.directory, which should contain a 'ts/REACTION_LABEL/' subdirectory

        Returns:
        - bool: True if the Arkane TS file was written, False if no lowest-energy TS log exists
        """

        label = transitionstate.reaction_label

        if os.path.exists(os.path.join(self.directory, "ts", label, label + '.py')):
            PATH = os.path.join(self.directory, "ts", label, label + '.py')
            logging.info(f"TS input file already written... Renaming it {PATH} and creating a new one.")
            shutil.move(
                PATH,
                PATH.replace('py', 'old.py')
            )
            
        if not os.path.exists(os.path.join(self.directory, "ts", label, label + ".log")):
            logging.info("There is no lowest energy conformer file...")
            return False

        parser = cclib.io.ccread(os.path.join(self.directory, "ts", label, label + ".log"), loglevel=logging.ERROR)
        symbol_dict = {
            17: "Cl",
            9:  "F",
            8:  "O",
            7:  "N",
            6:  "C",
            1:  "H",
        }

        atoms = []
        for atom_num, coords in zip(parser.atomnos, parser.atomcoords[-1]):
            atoms.append(ase.Atom(symbol=symbol_dict[atom_num], position=coords))

        transitionstate._ase_molecule = ase.Atoms(atoms)
        transitionstate.update_coords_from("ase")

        output = ['#!/usr/bin/env python',
                  '# -*- coding: utf-8 -*-']

        transitionstate.rmg_molecule.update_multiplicity()

        output += ["",
                   f"spinMultiplicity = {transitionstate.rmg_molecule.multiplicity}",
                   ""]

        output += ["energy = {", f"    '{self.model_chemistry}': Log('{label}.log'),", "}", ""]  # fix this

        output += [f"geometry = Log('{label}.log')", ""]

        output += [
            f"frequencies = Log('{label}.log')", ""]

        output += ["rotors = []", ""]  # TODO: Fix this

        input_string = ""

        for t in output:
            input_string += t + "\n"

        with open(os.path.join(self.directory, "ts", label, label + '.py'), "w") as f:
            f.write(input_string)
        return True
Code Example #18
File: castep.py Project: essil1/ase-laser
def read_castep_phonon(fd,
                       index=None,
                       read_vib_data=False,
                       gamma_only=True,
                       frequency_factor=None,
                       units=units_CODATA2002):
    """
    Reads a .phonon file written by a CASTEP Phonon task and returns an atoms
    object, as well as the calculated vibrational data if requested.

    Note that the index argument has no effect as of now.
    """

    # fd is closed by embracing read() routine
    lines = fd.readlines()

    atoms = None
    cell = []
    N = Nb = Nq = 0
    scaled_positions = []
    symbols = []
    masses = []

    # header
    l = 0
    while l < len(lines):

        line = lines[l]

        if 'Number of ions' in line:
            N = int(line.split()[3])
        elif 'Number of branches' in line:
            Nb = int(line.split()[3])
        elif 'Number of wavevectors' in line:
            Nq = int(line.split()[3])
        elif 'Unit cell vectors (A)' in line:
            for ll in range(3):
                l += 1
                fields = lines[l].split()
                cell.append([float(x) for x in fields[0:3]])
        elif 'Fractional Co-ordinates' in line:
            for ll in range(N):
                l += 1
                fields = lines[l].split()
                scaled_positions.append([float(x) for x in fields[1:4]])
                symbols.append(fields[4])
                masses.append(float(fields[5]))
        elif 'END header' in line:
            l += 1
            atoms = ase.Atoms(symbols=symbols,
                              scaled_positions=scaled_positions,
                              cell=cell)
            break

        l += 1

    # Eigenmodes and -vectors
    if frequency_factor is None:
        Kayser_to_eV = 1E2 * 2 * np.pi * units['hbar'] * units['c']
        # N.B. the "fixed default" unit for frequencies in .phonon files is
        # "cm-1" (i.e. it is unaffected by the internal unit conversion system
        # of CASTEP!), so by default set the conversion factor from cm-1 to eV.
        frequency_factor = Kayser_to_eV
    qpoints = []
    weights = []
    frequencies = []
    displacements = []
    for nq in range(Nq):
        fields = lines[l].split()
        qpoints.append([float(x) for x in fields[2:5]])
        weights.append(float(fields[5]))

        freqs = []
        for ll in range(Nb):
            l += 1
            fields = lines[l].split()
            freqs.append(frequency_factor * float(fields[1]))
        frequencies.append(np.array(freqs))

        # skip the two Phonon Eigenvectors header lines
        l += 2

        # generate a list of displacements with a structure that is identical
        # to what is stored internally in the Vibrations class (see in
        # ase.vibrations.Vibrations.modes):
        #      np.array(displacements).shape == (Nb,3*N)

        disps = []
        for ll in range(Nb):
            disp_coords = []
            for lll in range(N):
                l += 1
                fields = lines[l].split()
                disp_x = float(fields[2]) + float(fields[3]) * 1.0j
                disp_y = float(fields[4]) + float(fields[5]) * 1.0j
                disp_z = float(fields[6]) + float(fields[7]) * 1.0j
                disp_coords.extend([disp_x, disp_y, disp_z])
            disps.append(np.array(disp_coords))
        displacements.append(np.array(disps))

        # advance to the q-point line of the next block
        l += 1

    if read_vib_data:
        if gamma_only:
            vibdata = [frequencies[0], displacements[0]]
        else:
            vibdata = [qpoints, weights, frequencies, displacements]
        return vibdata, atoms
    else:
        return atoms
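A minimal usage sketch, assuming a completed CASTEP Phonon run has left a file named forsterite.phonon in the working directory (the filename is hypothetical); with the default gamma_only=True only the first q-point block is returned:

with open('forsterite.phonon') as fd:
    (freqs, disps), atoms = read_castep_phonon(fd, read_vib_data=True)
print(len(atoms), 'atoms,', len(freqs), 'branches (frequencies in eV)')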
Code example #19
0
def view(sdict, rep=(1,1,1)):
    ase_str = ase.Atoms(symbols=sdict['species'],
                        positions=sdict['positions'],
                        cell=sdict['cell'])
    return ase_view(ase_str * rep)
Code example #20
0
    def as_format(self, format):
        with io.StringIO() as fout:
            mol = ase.Atoms(self.species, self.coords)
            ase.io.write(fout, mol, format=format)
            content = fout.getvalue()
        return content
Code example #21
0
def example_precision(cp2k_code):
    """Test structure roundtrip precision ase->aiida->cp2k->aiida->ase."""

    print("Testing structure roundtrip precision ase->aiida->cp2k->aiida->ase...")

    thisdir = os.path.dirname(os.path.realpath(__file__))

    # Structure.
    epsilon = 1e-10  # expected precision in Angstrom
    dist = 0.74 + epsilon
    positions = [(0, 0, 0), (0, 0, dist)]
    cell = np.diag([4, -4, 4 + epsilon])
    atoms = ase.Atoms('H2', positions=positions, cell=cell)
    structure = StructureData(ase=atoms)

    # Basis set.
    basis_file = SinglefileData(file=os.path.join(thisdir, "..", "files", "BASIS_MOLOPT"))

    # Pseudopotentials.
    pseudo_file = SinglefileData(file=os.path.join(thisdir, "..", "files", "GTH_POTENTIALS"))

    # Parameters.
    parameters = Dict(
        dict={
            'GLOBAL': {
                'RUN_TYPE': 'MD',
            },
            'MOTION': {
                'MD': {
                    'TIMESTEP': 0.0,  # do not move atoms
                    'STEPS': 1,
                },
            },
            'FORCE_EVAL': {
                'METHOD': 'Quickstep',
                'DFT': {
                    'BASIS_SET_FILE_NAME': 'BASIS_MOLOPT',
                    'POTENTIAL_FILE_NAME': 'GTH_POTENTIALS',
                    'SCF': {
                        'MAX_SCF': 1,
                    },
                    'XC': {
                        'XC_FUNCTIONAL': {
                            '_': 'LDA',
                        },
                    },
                },
                'SUBSYS': {
                    'KIND': {
                        '_': 'DEFAULT',
                        'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',
                        'POTENTIAL': 'GTH-LDA',
                    },
                },
            },
        })

    # Construct process builder.
    builder = cp2k_code.get_builder()
    builder.structure = structure
    builder.parameters = parameters
    builder.code = cp2k_code
    builder.file = {
        'basis': basis_file,
        'pseudo': pseudo_file,
    }
    builder.metadata.options.resources = {
        "num_machines": 1,
        "num_mpiprocs_per_machine": 1,
    }
    builder.metadata.options.max_wallclock_seconds = 1 * 60 * 60

    print("Submitted calculation...")
    calc = run(builder)

    # Check structure preservation.
    atoms2 = calc['output_structure'].get_ase()

    # Zeros should be preserved exactly.
    if np.all(atoms2.positions[0] == 0.0):
        print("OK, zeros in structure were preserved exactly.")
    else:
        print("ERROR!")
        print("Zeros in structure changed: ", atoms2.positions[0])
        sys.exit(3)

    # Other values should be preserved with epsilon precision.
    dist2 = atoms2.get_distance(0, 1)
    if abs(dist2 - dist) < epsilon:
        print("OK, structure preserved with %.1e Angstrom precision" % epsilon)
    else:
        print("ERROR!")
        print("Structure changed by %e Angstrom" % abs(dist - dist2))
        sys.exit(3)

    # Check cell preservation.
    cell_diff = np.amax(np.abs(atoms2.cell - cell))
    if cell_diff < epsilon:
        print("OK, cell preserved with %.1e Angstrom precision" % epsilon)
    else:
        print("ERROR!")
        print("Cell changed by %e Angstrom" % cell_diff)
        sys.exit(3)
Code example #22
0
File: xtal.py Project: yfyh2013/ase
def crystal(symbols=None,
            basis=None,
            spacegroup=1,
            setting=1,
            cell=None,
            cellpar=None,
            ab_normal=(0, 0, 1),
            a_direction=None,
            size=(1, 1, 1),
            onduplicates='warn',
            symprec=0.001,
            pbc=True,
            primitive_cell=False,
            **kwargs):
    """Create an Atoms instance for a conventional unit cell of a
    space group.

    Parameters:

    symbols : str | sequence of str | sequence of Atom | Atoms
        Element symbols of the unique sites.  Can either be a string
        formula or a sequence of element symbols. E.g. ('Na', 'Cl')
        and 'NaCl' are equivalent.  Can also be given as a sequence of
        Atom objects or an Atoms object.
    basis : list of scaled coordinates
        Positions of the unique sites corresponding to symbols given
        either as scaled positions or through an atoms instance.  Not
        needed if *symbols* is a sequence of Atom objects or an Atoms
        object.
    spacegroup : int | string | Spacegroup instance
        Space group given either as its number in International Tables
        or as its Hermann-Mauguin symbol.
    setting : 1 | 2
        Space group setting.
    cell : 3x3 matrix
        Unit cell vectors.
    cellpar : [a, b, c, alpha, beta, gamma]
        Cell parameters with angles in degree. Is not used when `cell`
        is given.
    ab_normal : vector
        Is used to define the orientation of the unit cell relative
        to the Cartesian system when `cell` is not given. It is the
        normal vector of the plane spanned by a and b.
    a_direction : vector
        Defines the orientation of the unit cell a vector. a will be
        parallel to the projection of `a_direction` onto the a-b plane.
    size : 3 positive integers
        How many times the conventional unit cell should be repeated
        in each direction.
    onduplicates : 'keep' | 'replace' | 'warn' | 'error'
        Action if `basis` contain symmetry-equivalent positions:
            'keep'    - ignore additional symmetry-equivalent positions
            'replace' - replace
            'warn'    - like 'keep', but issue an UserWarning
            'error'   - raises a SpacegroupValueError
    symprec : float
        Minimum "distance" betweed two sites in scaled coordinates
        before they are counted as the same site.
    pbc : one or three bools
        Periodic boundary conditions flags.  Examples: True,
        False, 0, 1, (1, 1, 0), (True, False, False).  Default
        is True.
    primitive_cell : bool
        Whether to return the primitive instead of the conventional
        unit cell.

    Keyword arguments:

    All additional keyword arguments are passed on to the Atoms
    constructor.  Currently, probably the most useful additional
    keyword arguments are `info`, `constraint` and `calculator`.

    Examples:

    Two diamond unit cells (space group number 227)

    >>> diamond = crystal('C', [(0,0,0)], spacegroup=227,
    ...     cellpar=[3.57, 3.57, 3.57, 90, 90, 90], size=(2,1,1))
    >>> ase.visualize.view(diamond)  # doctest: +SKIP

    A CoSb3 skutterudite unit cell containing 32 atoms

    >>> skutterudite = crystal(('Co', 'Sb'),
    ...     basis=[(0.25,0.25,0.25), (0.0, 0.335, 0.158)],
    ...     spacegroup=204, cellpar=[9.04, 9.04, 9.04, 90, 90, 90])
    >>> len(skutterudite)
    32
    """
    sg = Spacegroup(spacegroup, setting)
    if (not isinstance(symbols, str)
            and hasattr(symbols, '__getitem__') and len(symbols) > 0
            and isinstance(symbols[0], ase.Atom)):
        symbols = ase.Atoms(symbols)
    if isinstance(symbols, ase.Atoms):
        basis = symbols
        symbols = basis.get_chemical_symbols()
    if isinstance(basis, ase.Atoms):
        basis_coords = basis.get_scaled_positions()
        if cell is None and cellpar is None:
            cell = basis.cell
        if symbols is None:
            symbols = basis.get_chemical_symbols()
    else:
        basis_coords = np.array(basis, dtype=float, copy=False, ndmin=2)
    sites, kinds = sg.equivalent_sites(basis_coords,
                                       onduplicates=onduplicates,
                                       symprec=symprec)
    symbols = parse_symbols(symbols)
    symbols = [symbols[i] for i in kinds]
    if cell is None:
        cell = cellpar_to_cell(cellpar, ab_normal, a_direction)

    info = dict(spacegroup=sg)
    if primitive_cell:
        info['unit_cell'] = 'primitive'
    else:
        info['unit_cell'] = 'conventional'

    if 'info' in kwargs:
        info.update(kwargs['info'])
    kwargs['info'] = info

    atoms = ase.Atoms(symbols,
                      scaled_positions=sites,
                      cell=cell,
                      pbc=pbc,
                      **kwargs)

    if isinstance(basis, ase.Atoms):
        for name in basis.arrays:
            if not atoms.has(name):
                array = basis.get_array(name)
                atoms.new_array(name, [array[i] for i in kinds],
                                dtype=array.dtype,
                                shape=array.shape[1:])

    if primitive_cell:
        from ase.build import cut
        prim_cell = sg.scaled_primitive_cell
        atoms = cut(atoms, a=prim_cell[0], b=prim_cell[1], c=prim_cell[2])

    if size != (1, 1, 1):
        atoms = atoms.repeat(size)
    return atoms
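To complement the docstring examples, the primitive_cell flag can be checked against the same diamond structure; this is a sketch relying only on the fact that space group 227 has an 8-atom conventional and a 2-atom primitive cell:

prim = crystal('C', [(0, 0, 0)], spacegroup=227,
               cellpar=[3.57, 3.57, 3.57, 90, 90, 90],
               primitive_cell=True)
assert len(prim) == 2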
Code example #23
0
    def pre_dft_iten_data(self, strain=0.02, tag="tpath"):
        ##### we expand the box to be 2 by 2 by 2 ####

        msize = [2, 2, 2]
        #  atom_num = 4 * msize[0] * msize[1] * msize[2];

        (a, b, c) = [1. / msize[i] for i in range(3)]
        prelist = [2, 6, 10, 14, 18]

        #  if tag == "opath":
        for j in prelist:
            dirname = "dir-%.3f-%.3d" % (strain, j)
            dirnamecnt = "cnt-%.3f-%.3d" % (strain, j)

            if (not os.path.isdir(dirnamecnt)):
                os.mkdir(dirnamecnt)

            ### the returned base already has strain ###
            os.chdir(dirname)
            (energy, stress, base) = self.read_Strain_stress()

            ### expand the box ###
            for i in range(3):
                base[i] = base[i] * msize[i]

            ### assign atoms positions ###
            temp_pos = [0, 0, 0]

            ### initialize atoms ###
            atoms = ase.Atoms(cell=base, pbc=[1, 1, 1])

            if tag == "opath":
                for z in range(msize[0]):
                    for y in range(msize[1]):
                        for x in range(msize[2]):
                            temp_pos[0] = x * a
                            temp_pos[1] = y * b
                            temp_pos[2] = z * c
                            pos = self.relative_to_cell(temp_pos, base)
                            atoms.append(ase.Atom("Nb", pos))

                            temp_pos[0] = (x + 0.5) * a
                            temp_pos[1] = (y + 0.5) * b
                            temp_pos[2] = z * c
                            pos = self.relative_to_cell(temp_pos, base)
                            atoms.append(ase.Atom("Nb", pos))

                            temp_pos[0] = x * a
                            temp_pos[1] = (y + 0.5) * b
                            temp_pos[2] = (z + 0.5) * c
                            pos = self.relative_to_cell(temp_pos, base)
                            atoms.append(ase.Atom("Nb", pos))

                            temp_pos[0] = (x + 0.5) * a
                            temp_pos[1] = y * b
                            temp_pos[2] = (z + 0.5) * c
                            pos = self.relative_to_cell(temp_pos, base)
                            atoms.append(ase.Atom("Nb", pos))

            if tag == "tpath":
                for z in range(msize[0]):
                    for y in range(msize[1]):
                        for x in range(msize[2]):

                            temp_pos[0] = x * a
                            temp_pos[1] = y * b
                            temp_pos[2] = z * c
                            pos = self.relative_to_cell(temp_pos, base)
                            atoms.append(ase.Atom("Nb", pos))

                            temp_pos[0] = (x + 0.5) * a
                            temp_pos[1] = (y + 0.5) * b
                            temp_pos[2] = (z + 0.5) * c
                            pos = self.relative_to_cell(temp_pos, base)
                            atoms.append(ase.Atom("Nb", pos))

                os.chdir(self.root_dir)
                os.chdir(dirnamecnt)

                ase.io.write("POSCAR", images=atoms, format='vasp')
                os.system("cp POSCAR POSCAR.vasp")
                self.prepare_dislocation_vasp_infiles()

                os.chdir(self.root_dir)
        return
Code example #24
0
    def __init__(self,
                 structures=[],
                 info={},
                 cell_reduce=False,
                 progress=False,
                 suppress_ase_warnings=True):
        """
        Initialize the AtomsCollection

        | Args:
        |    structures (list[str] or list[ase.Atoms]): list of file names or
        |                                               Atoms that will form
        |                                               the collection
        |    info (dict): dictionary of general information to attach
        |                 to this collection
        |    cell_reduce (bool): if True, perform a Niggli cell reduction on
        |                        all loaded structures
        |    progress (bool): visualize a progress bar for the loading process
        |    suppress_ase_warnings (bool): suppress annoying ASE warnings when
        |                                  loading files (default is True)
        """

        # Start by parsing out the structures
        self.structures = []

        if isinstance(structures, ase.Atoms):
            # Well, it's just one...
            structures = [structures]
        elif inspect.isgenerator(structures):
            # Let's unravel it
            iter_structs = structures
            structures = []
            for s in iter_structs:
                structures.append(s)

        if progress:
            sys.stdout.write("Loading collection...\n")
        s_n = len(structures)
        for s_i, struct in enumerate(structures):
            if progress:
                # Progress bar
                sys.stdout.write("\rLoading: {0}".format(
                    utils.progbar(s_i + 1, s_n)))
            # Is it an Atoms object?
            if type(struct) is ase.Atoms:
                self.structures.append(ase.Atoms(struct))
                # Copy all arrays
                for k in struct.arrays.keys():
                    if not self.structures[-1].has(k):
                        self.structures[-1].new_array(k, struct.get_array(k))
                if struct.calc is not None:
                    # Prevents pointless attempts at re-calculating
                    self.structures[-1].calc._old_atoms = self.structures[-1]
            # Or is it a string?
            elif utils.is_string(struct):
                with utils.silence_stdio(suppress_ase_warnings,
                                         suppress_ase_warnings):
                    self.structures.append(ase_io.read(str(struct)))
                # If there's no name, give it the filename
                if 'name' not in self.structures[-1].info:
                    self.structures[-1].info['name'] = utils.seedname(struct)
            else:
                raise TypeError('Structures must be Atoms objects or valid '
                                'file names,'
                                ' not {0}'.format(type(struct).__name__))
            if cell_reduce:
                # Here we must keep the energy if it was present
                # We do this by hand because ASE has its good reasons
                # for severing the atoms-calculator connection when changing
                # the unit cell.
                try:
                    _E = self.structures[-1].calc.results['energy']
                except (KeyError, AttributeError):
                    _E = None
                niggli_reduce(self.structures[-1])
                if _E is not None:
                    _calc = SinglePointCalculator(self.structures[-1],
                                                  energy=_E)
                    self.structures[-1].set_calculator(_calc)

        if progress:
            sys.stdout.write('\nLoaded {0} structures\n'.format(s_n))

        self._all = _AllCaller(self.structures, ase.Atoms)

        self._arrays = {}

        # Now assign the info
        if type(info) is not dict:
            raise TypeError('Info must be dict,'
                            ' not {0}'.format(type(info).__name__))
        else:
            self.info = info.copy()
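A minimal usage sketch for the constructor above, assuming two structure files ethanol.xyz and benzene.cif exist on disk (both filenames are hypothetical):

coll = AtomsCollection(['ethanol.xyz', 'benzene.cif'],
                       info={'project': 'demo'},
                       progress=True)
print('Loaded', len(coll.structures), 'structures')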
Code example #25
0
File: build.py Project: SINGROUP/cluskit
    def __init__(self, symbols=None,
                positions=None, numbers=None,
                tags=None, momenta=None, masses=None,
                magmoms=None, charges=None,
                scaled_positions=None,
                cell=None, pbc=None, celldisp=None,
                constraint=None,
                calculator=None,
                info=None,
                max_bondlength=None):

        # TODO: solve inheritance elegantly
        super(Scaffold, self).__init__(symbols=symbols,
                    positions=positions, numbers=numbers,
                    tags=tags, momenta=momenta, masses=masses,
                    magmoms=magmoms, charges=charges,
                    scaled_positions=scaled_positions,
                    cell=cell, pbc=pbc, celldisp=celldisp,
                    constraint=constraint,
                    calculator=calculator,
                    info=info)
        self.ase_object = ase.Atoms(symbols=symbols,
                    positions=positions, numbers=numbers,
                    tags=tags, momenta=momenta, masses=masses,
                    magmoms=magmoms, charges=charges,
                    scaled_positions=scaled_positions,
                    cell=cell, pbc=pbc, celldisp=celldisp,
                    constraint=constraint,
                    calculator=calculator,
                    info=info)

        if max_bondlength is None:
            self.bond_matrix = _get_voronoi_connectivity(self.ase_object.get_positions())
        else:
            self.bond_matrix = _get_connectivity(self.ase_object.get_positions(), 
                max_bondlength = max_bondlength)

        # setting default values for atomic types
        atomic_numbers = sorted(list(set(self.ase_object.get_atomic_numbers())))

        self.default_A = atomic_numbers[0] 
        if len(atomic_numbers) > 1:
            self.default_B = atomic_numbers[1]
        else:
            self.default_B = 0

        self.default_n_A = np.sum(self.ase_object.get_atomic_numbers() == self.default_A)
        self.default_n_B = len(self.ase_object.get_atomic_numbers()) - self.default_n_A 

        self.com = self.ase_object.get_center_of_mass()
        self.distances_to_com = _get_distances_to_com(self.ase_object)

        if 0 in atomic_numbers:
            # dscribe does not allow 0 as atomic index
            atomic_numbers = np.array(atomic_numbers) + 1

        self.descriptor_setup = dscribe.descriptors.SOAP(
            species=atomic_numbers,
            periodic=False,
            rcut=5.0,
            nmax=8,
            lmax=6,
            sparse=False,
            average=True
            )

        self.evolve_temperature = 0.2
        self.evolve_n_steps = 1000

        return
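A usage sketch for the constructor above, assuming a 55-atom copper icosahedron from ase.cluster as the parent geometry; with max_bondlength left at None the connectivity comes from the Voronoi analysis:

from ase.cluster import Icosahedron

parent = Icosahedron('Cu', noshells=3)  # 55-atom cluster
scaffold = Scaffold(parent.get_chemical_symbols(),
                    positions=parent.get_positions())
print(scaffold.default_A, scaffold.default_n_A)  # 29 and 55 for pure Cu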
Code example #26
0
def MakeParallelAtoms(atoms, nCells, cell=None, pbc=None, distribute=True):
    """Build parallel simulation from serial lists of atoms.

    Call simultaneously on all processors.  Each processor having
    atoms should pass a list of atoms as the first argument, or None
    if this processor does not contribute with any atoms.  If the
    cell and/or pbc arguments are given, they must be given on
    all processors, and be identical.  If it is not given, a supercell
    is attempted to be extracted from the atoms on the processor with
    lowest rank.

    This is the preferred method for creating parallel simulations.
    """
    import pickle

    mpi = asap3.mpi
    #comm = mpi.world.duplicate()
    comm = mpi.world

    # Sanity check: is the node layout reasonable
    nNodes = nCells[0] * nCells[1] * nCells[2]
    if nNodes != comm.size:
        raise RuntimeError("Wrong number of CPUs: %d != %d*%d*%d" %
                           (comm.size, nCells[0], nCells[1], nCells[2]))
    t1 = np.zeros((3, ))
    t2 = np.zeros((3, ))
    comm.min(t1)
    comm.max(t2)
    if (t1[0] != t2[0] or t1[1] != t2[1] or t1[2] != t2[2]):
        raise RuntimeError, "CPU layout inconsistent."

    # If pbc and/or cell are given, they may be shorthands in need of
    # expansion.
    if pbc:
        try:
            plen = len(pbc)
        except TypeError:
            # It is a scalar, interpret as a boolean.
            if pbc:
                pbc = (1, 1, 1)
            else:
                pbc = (0, 0, 0)
        else:
            if plen != 3:
                raise ValueError, "pbc must be a scalar or a 3-sequence."
    if cell:
        cell = np.array(cell)  # Make sure it is a numeric array.
        if cell.shape == (3, ):
            cell = np.array([[cell[0], 0, 0], [0, cell[1], 0],
                             [0, 0, cell[2]]])
        elif cell.shape != (3, 3):
            raise ValueError("Unit cell must be a 3x3 matrix or a 3-vector.")

    # Find the lowest CPU with atoms, and let that one distribute
    # which data it has.  All other CPUs check for consistency.
    if atoms is None:
        hasdata = None
        mynum = comm.size
    else:
        hasdata = {}
        for name in atoms.arrays.keys():
            datatype = np.sctype2char(atoms.arrays[name])
            shape = atoms.arrays[name].shape[1:]
            hasdata[name] = (datatype, shape)
        mynum = comm.rank
        if pbc is None:
            pbc = atoms.get_pbc()
        if cell is None:
            cell = atoms.get_cell()
    root = comm.min(mynum)  # The first CPU with atoms
    # Now send hasdata, cell and pbc to all other CPUs
    package = pickle.dumps((hasdata, cell, pbc), 2)
    package = comm.broadcast_string(package, root)
    rootdata, rootcell, rootpbc = pickle.loads(package)
    if rootdata is None or len(rootdata) == 0:
        raise ValueError("No data from 'root' atoms.  Empty atoms?!?")

    # Check for consistent cell and pbc arguments
    if cell is not None:
        if rootcell is None:
            raise TypeError("Cell given on another processor than the atoms.")
        if (cell.ravel() - rootcell.ravel()).max() > 1e-12:
            raise ValueError("Inconsistent cell specification.")
    else:
        cell = rootcell  # May still be None
    if pbc is not None:
        if rootpbc is None:
            raise TypeError("PBC given on another processor than the atoms.")
        if (pbc != rootpbc).any():
            raise ValueError("Inconsistent pbc specification.")
    else:
        pbc = rootpbc

    # Check for consistent atoms data
    if hasdata is not None:
        if hasdata != rootdata:
            raise ValueError("Atoms do not contain the same data "
                             "on different processors.")
    if "positions" not in rootdata:
        raise ValueError("Atoms do not have positions!")

    # Create empty atoms
    if atoms is None:
        atoms = ase.Atoms(cell=cell, pbc=pbc)
        for name in rootdata.keys():
            if name in atoms.arrays:
                assert np.sctype2char(atoms.arrays[name]) == rootdata[name][0]
                assert len(atoms.arrays[name]) == 0
            else:
                shape = (0, ) + rootdata[name][1]
                atoms.arrays[name] = np.zeros(shape, rootdata[name][0])

    return ParallelAtoms(nCells,
                         comm,
                         atoms,
                         cell=cell,
                         pbc=pbc,
                         distribute=distribute)
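A usage sketch following the docstring: every rank calls the function simultaneously, but only the lowest rank with data passes an Atoms object (here a hypothetical 4000-atom FCC copper block) for a 2x2x2 domain decomposition on 8 MPI ranks:

import asap3
from ase.lattice.cubic import FaceCenteredCubic

if asap3.mpi.world.rank == 0:
    serial_atoms = FaceCenteredCubic('Cu', size=(10, 10, 10))
else:
    serial_atoms = None

atoms = MakeParallelAtoms(serial_atoms, nCells=(2, 2, 2))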
Code example #27
0
File: build.py Project: SINGROUP/cluskit
    def get_unique_clusters_in_range(self,
        eAA = [-1,1], eAB = [-1,1], eBB = [-1,1], cEA = [-1,1], cEB = [-1,1],
        typeA = None, typeB = None, ntypeB = None, n_clus = 1):
        """Similar method to get_unique_clusters with an additional loop.
        A parameter grid is generated on which nanoclusters are configured
        at each grid point. The most dissimilar structures are chosen based
        on a similarity metric (given by the self.descriptor_setup attribute).

        The grid is chosen as small as possible. In order to get a finer grid, increase
        n_clus. The nanoclusters are returned in an ordered list such that the most
        dissimilar clusters come first.

        Args:
            eAA (list of 2 floats): pseudo-energy of A-A interaction
            eAB (list of 2 floats): pseudo-energy of A-B interaction
            eBB (list of 2 floats): pseudo-energy of B-B interaction
            cEA (list of 2 floats): pseudo-energy of segregation of A into the core.
            cEB (list of 2 floats): pseudo-energy of segregation of B into the core.
            typeA (int): element of type A in atomic number of PSE.
            typeB (int): element of type B in atomic number of PSE.
            ntypeB (int): number of atoms of type B in cluster. This argument controls the composition.
            n_clus (int):   number of clusters to be returned. Affects the internal coarseness of the
                            parameter grid.

        Returns:
            list :  Most dissimilar clusters (cluskit.Cluster objects) in the given Pseudo-energy
                    range. 
        """

        # get default values where needed.

        if not typeA:
            typeA = self.default_A
        if not typeB:
            typeB = self.default_B
        if not ntypeB:
            ntypeB = self.default_n_B


        atoms = self.ase_object
        bond_matrix = self.bond_matrix
        desc = self.descriptor_setup
        # making sure atomic numbers are adapted by descriptor
        desc.atomic_numbers = [typeA, typeB]

        final_atoms_list = []
        atoms_list = []

        positions = atoms.get_positions()
        

        # discretizing pseudo-energy search space
        steps = [2,2,2,2,2]
        ranges = np.array([
            eAA[1] - eAA[0],
            eAB[1] - eAB[0],
            eBB[1] - eBB[0],
            cEA[1] - cEA[0],
            cEB[1] - cEB[0],
            ], dtype='float')

        step_sizes = ranges.copy()

        for i in range(100):
            # internal numpy use of complex number, see np.mgrid 
            grid = np.mgrid[
                eAA[0]:eAA[1]:complex(0, steps[0]),
                eAB[0]:eAB[1]:complex(0, steps[1]),
                eBB[0]:eBB[1]:complex(0, steps[2]),
                cEA[0]:cEA[1]:complex(0, steps[3]),
                cEB[0]:cEB[1]:complex(0, steps[4]),
            ]

            # check size
            size = grid[0].ravel().shape[0]

            if size < n_clus:
                idx = np.argmax(step_sizes)
                steps[idx] +=1
                step_sizes[idx] = ranges[idx] / (steps[idx] - 1.0)
            else:
                break

        # looping over different pseudo-energies

        grid_1, grid_2, grid_3, grid_4, grid_5 = grid
        grid_points = np.vstack([grid_1.ravel(), grid_2.ravel(),
            grid_3.ravel(), grid_4.ravel(), grid_5.ravel()]).transpose()

        # 5 floats per grid_point: pseudo-energies eAA, eAB, eBB, cEA and cEB
        #print('shape grid points', grid_points.shape)
        for count, grid_point in enumerate(grid_points):
            coreEnergies = [ grid_point[3], grid_point[4] ]
        
            cluster = Clusterer(bond_matrix, positions, ntypeB, grid_point[0], grid_point[1], grid_point[2], com=None, coreEnergies=coreEnergies)
        
            kT = self.evolve_temperature
            nsteps =  self.evolve_n_steps

            cluster.Evolve(kT, nsteps)
            actual_types = cluster.atomTypes.copy()
            actual_types[actual_types == 0] = typeA
            actual_types[actual_types == 1] = typeB
        
            atoms.set_atomic_numbers(actual_types)
            new_atoms = ase.Atoms(numbers=actual_types, positions=positions)
            new_atoms.info = {"eAA" : grid_point[0], "eAB" : grid_point[1], "eBB" : grid_point[2], "cEA" : grid_point[3], "cEB" : grid_point[4]}
            atoms_list.append(new_atoms)


        x = desc.create(atoms_list, n_jobs = 1,  positions=None, verbose=False)

        ranks = cluskit.cluster._rank_fps(x, K = None, greedy =False)
        for i in range(0,n_clus):
            cluskit_atoms = Cluster(atoms_list[ranks[i]])
            final_atoms_list.append(cluskit_atoms)

        cluster.Reset()
        return final_atoms_list
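A usage sketch, assuming `scaffold` is an instance of the Scaffold class shown earlier and that nickel and copper (atomic numbers 28 and 29) are the two species; the pseudo-energy ranges keep their defaults:

clusters = scaffold.get_unique_clusters_in_range(typeA=28, typeB=29,
                                                 ntypeB=13, n_clus=5)
print(len(clusters), 'clusters, most dissimilar first')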
Code example #28
0
    def generate_rand_given_distance(self, r_min=2.0, r_max=2.3, dirname=None):
        ####### Spherical coordinates to Cartesian coordinates ########
        # theta = [0, 2pi);   phi = [0, pi)
        # x = r * cos theta * sin phi
        # y = r * sin theta * sin phi
        # z = r * cos phi
        ###############################################################
        # put a point in the center => put second => put third => ...

        #### initialize the atoms class and the supercell ####
        size = [4, 4, 4]
        lx, ly, lz = size[0] * self.lat, \
            size[1] * self.lat, \
            size[2] * self.lat

        supercell = np.array([[lx, 0, 0], [0, ly, 0], [0, 0, lz]])

        pcub = []

        ##### all periodicities  6 share face      #####
        ##### 12 share length   and 8 shear corner #####
        pcub.append(np.array([lx, 0, 0]))
        pcub.append(np.array([0, ly, 0]))
        pcub.append(np.array([0, 0, lz]))

        pcub.append(np.array([lx, ly, 0]))
        pcub.append(np.array([lx, 0, lz]))
        pcub.append(np.array([0, ly, lz]))

        pcub.append(np.array([lx, -ly, 0]))
        pcub.append(np.array([lx, 0, -lz]))
        pcub.append(np.array([0, ly, -lz]))

        pcub.append(np.array([lx, ly, lz]))
        pcub.append(np.array([-lx, ly, lz]))
        pcub.append(np.array([lx, -ly, lz]))
        pcub.append(np.array([lx, ly, -lz]))

        for i in range((3 + 6 + 4)):
            pcub.append(-1 * pcub[i])

        atoms = ase.Atoms(cell=supercell, pbc=[1, 1, 1])
        print(atoms.get_cell())

        atom_num = 210
        m_pi = np.pi
        m_2pi = 2 * np.pi

        ### put the first atom in it ###
        atoms.append(
            ase.Atom(symbol="Nb", position=(0.5 * lx, 0.5 * ly, 0.5 * lz)))

        r_lim = 0.5 * r_min + 0.2

        if r_lim <= 2.0:
            r_lim = 2.0

        r_shl = r_max - r_min

        for i in range(atom_num - 1):
            cnt = 0
            while (True):
                theta = np.random.rand() * m_2pi
                phi = np.random.rand() * m_pi
                r = r_min + np.random.rand() * r_shl

                dx = r * np.cos(theta) * np.sin(phi)
                dy = r * np.sin(theta) * np.sin(phi)
                dz = r * np.cos(phi)

                pos_new = atoms[i - int(cnt / 30)].position + np.array(
                    [dx, dy, dz])
                cnt += 1
                if cnt >= 30 * i:
                    cnt = 0

                ###### add periodic  #######
                for k in range(3):
                    if (pos_new[k] > supercell[k, k]):
                        pos_new[k] -= supercell[k, k]
                    elif (pos_new[k] < 0):
                        pos_new[k] += supercell[k, k]

                ###### see whether larger than rmin #######
                a = [np.linalg.norm(atom.position - pos_new) > r_lim
                     for atom in atoms]

                if all(a):
                    #### check the distance with images #####
                    b = []
                    for image in pcub:
                        b.append(all(
                            np.linalg.norm(atom.position + image - pos_new) > r_lim
                            for atom in atoms))

                    if all(b):
                        ####  break the while loop ####
                        break

            atoms.append(ase.Atom(symbol="Nb", position=pos_new))
            print(len(atoms))
        self.write_poscar(atoms)
        self.prepare_dislocation_vasp_infiles(dirname)
        return
Code example #29
0
def diffMap2(input_filename,
             Ro,
             b,
             co,
             dx,
             dy,
             dz,
             output_filename,
             anion='O',
             anion_number=8):

    # Read in the CIF file
    a = ase.io.read("{}".format(input_filename))

    # Take only the anion positions and unit cell vectors
    a = ase.Atoms([anion for i in range(len(a[a.numbers == anion_number]))],
                  cell=a.cell,
                  positions=a.positions[a.numbers == anion_number],
                  pbc=True)

    # Make a mesh over the resolution defined by dx,dy,dz
    x, y, z = np.mgrid[0:1 + dx:dx, 0:1 + dy:dy, 0:1 + dz:dz]
    r_scaled = np.stack([x, y, z])  # fractional (scaled) coordinates of each voxel

    # Transform the unit cell to access each voxel by real lengths
    r = np.dot(r_scaled.reshape((3, r_scaled.shape[1]**3)).T,
               a.cell).T.reshape(r_scaled.shape)

    # Make an empty array to calculate the Valence over
    V = np.ones(x.shape)

    # Define oxygen positions
    O = a.get_positions()
    # Add coordinates of all anions in adjacent cells
    # (important if the interaction length is larger than the unit cell)
    permutations = [[-1, 0, 1] for i in range(3)]
    for i in itr.product(*permutations):
        if i != (0, 0, 0):
            shift = a.cell[0] * i[0] + a.cell[1] * i[1] + a.cell[2] * i[2]
            a.translate(shift)
            O = np.concatenate((O, a.get_positions()), axis=0)
            a.translate(-shift)

    # optionally, time this section because it is the time critical step
    start_time = time.time()

    # Iterate through each volume element in the unit cell
    lens = [range(r.shape[1]), range(r.shape[2]), range(r.shape[3])]
    for i in itr.product(*lens):

        # define upper and lower bounds for the anion coordinates as defined by the cutoff radius (co)
        top = r[:, i[0], i[1], i[2]] + co
        bottom = r[:, i[0], i[1], i[2]] - co

        # Store all oxygen within a box around the sphere defined by the cutoff radius
        O2 = O[np.all(((O < top) == (O > bottom)), axis=1)]

        # Calculate the distance to each anion in the box
        Ri = np.array([
            np.sqrt((r[0, i[0], i[1], i[2]] -
                     O2[k, 0])**2 +  # This way worked faster
                    (r[1, i[0], i[1], i[2]] -
                     O2[k, 1])**2 +  # than with linalg.norm()
                    (r[2, i[0], i[1], i[2]] - O2[k, 2])**2)
            for k in range(len(O2[:, 0]))
        ])

        # Calculate the valence sum and apply the cutoff
        V[i[0], i[1],
          i[2]] = np.abs(np.sum(np.exp((Ro - Ri[Ri < co]) / b)) - 1)

    # Save the information
    with open('{0}.grd'.format(output_filename), "w") as savefile:

        savefile.write("Bond Valence Sum Difference\r")  # Title
        from ase.geometry import cell_to_cellpar
        cellParams = cell_to_cellpar(a.cell)  # get ABC alpha, beta, gamma
        savefile.write(" ".join([str(k) for k in cellParams]) + "\r")
        savefile.write(" ".join([str(k) for k in V.shape]) + "\r")
        for i in np.nditer(V.flatten()):
            savefile.write("%.6f  " %
                           (i))  # Write each valence difference value

    #print "Total time taken = %.4f s" % (time.time()-start_time)
    return V
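A usage sketch with the Li-O bond-valence parameters from the Brese-O'Keeffe tabulation (R0 ≈ 1.466 Å, b = 0.37 Å); the CIF file name, cutoff and voxel sizes are illustrative assumptions:

V = diffMap2('LiCoO2.cif', Ro=1.466, b=0.37, co=6.0,
             dx=0.05, dy=0.05, dz=0.05,
             output_filename='LiCoO2_bvs_diff')
print(V.shape)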
Code example #30
0
TOL_ROT = 1e-9

debug = False

###

for run in range(NRUNS):
    r0 = np.array([SX / 2, SY / 2, SZ / 2])
    r0c = np.array([SX, SY, SZ])

    # Random atoms and charges (charges between -1 and 1)
    a = []
    for i in range(8):
        a += [
            ase.Atoms("%iH" % NAT,
                      positions=np.random.random([NAT, 3]) * SX,
                      charges=(2 * np.random.random([NAT]) - 1) * CHARGE,
                      cell=[SX, SY, SZ])
        ]

    # Compute moments
    M = []
    for i in range(8):
        M += [
            get_moments(a[i].get_positions(), a[i].get_initial_charges(),
                        L_MAX, r0)
        ]

    # Construct a composite atoms object
    # and compute the corresponding multipole
    # expansion
    b = ase.Atoms(cell=[2 * SX, 2 * SY, 2 * SZ])