Example #1
    def test_frac2cart(self):
        lattice_cart = [[2, 0, 0], [0, 2, 0], [0, 0, 2]]
        positions_frac = [0.5, 0.5, 0.5]
        self.assertEqual(frac2cart(lattice_cart, positions_frac), [1, 1, 1])

        lattice_cart = [[2, 0, 0], [0, 2, 0], [0, 0, 2]]
        positions_frac = [[0.5, 0.5, 0.5]]
        self.assertEqual(frac2cart(lattice_cart, positions_frac), [[1, 1, 1]])

        lattice_cart = [[2, 0, 0], [0, 2, 0], [0, 0, 2]]
        positions_frac = [[1, 1, 1], [0.5, 0.5, 0.5]]
        self.assertEqual(frac2cart(lattice_cart, positions_frac),
                         [[2, 2, 2], [1, 1, 1]])
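The assertions above pin down the conversion itself: Cartesian positions are the fractional coordinates multiplied into the row-vector lattice matrix. Below is a minimal NumPy sketch of that mapping; frac2cart_sketch is a stand-in written for illustration, not the matador implementation, and it assumes the same row-vector lattice convention as the test data.

import numpy as np

def frac2cart_sketch(lattice_cart, positions_frac):
    """Illustrative fractional-to-Cartesian conversion for a row-vector lattice."""
    lattice = np.asarray(lattice_cart, dtype=float)            # rows are the a, b, c vectors
    frac = np.asarray(positions_frac, dtype=float)
    single_site = frac.ndim == 1                               # a bare [u, v, w] rather than a list of sites
    cart = np.atleast_2d(frac) @ lattice                       # (N, 3) @ (3, 3) -> (N, 3)
    return cart[0].tolist() if single_site else cart.tolist()

# frac2cart_sketch([[2, 0, 0], [0, 2, 0], [0, 0, 2]], [0.5, 0.5, 0.5]) -> [1.0, 1.0, 1.0]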
Example #2
def minseps_feasible(mutant, minsep_dict=None, debug=False):
    """ Check if minimum separations between species of atom are satisfied by mutant.

    Parameters:
        mutant (dict): trial mutated structure
        minsep_dict (dict): dictionary containing element-specific minimum separations, e.g.
            {('K', 'K'): 2.5, ('K', 'P'): 2.0}.

    Returns:
        bool: True if every interatomic separation is at least the desired minimum value, else False.

    """
    elems = set(mutant["atom_types"])
    elem_pairs = set()
    for elem in elems:
        for _elem in elems:
            elem_key = tuple(sorted([elem, _elem]))
            elem_pairs.add(elem_key)

    if minsep_dict is None:
        minsep_dict = dict()
    else:
        marked_for_del = []
        for key in minsep_dict:
            if tuple(sorted(key)) != tuple(key):
                minsep_dict[tuple(sorted(key))] = minsep_dict[key]
                marked_for_del.append(key)
        for key in marked_for_del:
            del minsep_dict[key]

    # default to the mean covalent radius of each pair, 0.5 * (r_A + r_B), as a rough minsep guess
    for elem_key in elem_pairs:
        if elem_key not in minsep_dict:
            minsep_dict[elem_key] = 0.5 * sum([
                periodictable.elements.symbol(elem).covalent_radius
                for elem in elem_key
            ])

    if "positions_abs" not in mutant:
        mutant["positions_abs"] = frac2cart(mutant["lattice_cart"],
                                            mutant["positions_frac"])
    poscart = mutant["positions_abs"]

    for prod in product(range(-1, 2), repeat=3):
        trans = np.zeros((3))
        for ind, multi in enumerate(prod):
            trans += np.asarray(mutant["lattice_cart"][ind]) * multi
        distances = cdist(poscart + trans, poscart)
        distances = np.ma.masked_where(distances < 1e-12, distances)
        for i, dists in enumerate(distances):
            for j, dist in enumerate(dists):
                min_dist = minsep_dict[tuple(
                    sorted([mutant["atom_types"][i],
                            mutant["atom_types"][j]]))]
                if dist < min_dist:
                    message = "Mutant with {} failed minsep check.".format(
                        ", ".join(mutant["mutations"]))
                    LOG.debug(message)
                    return False
    return True
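A hypothetical call, assuming minseps_feasible and the imports it relies on (numpy, scipy's cdist, frac2cart, periodictable) are in scope; the trial structure and separation values below are made up for illustration and carry only the keys the function reads.

mutant = {
    "atom_types": ["K", "P"],
    "lattice_cart": [[4.0, 0.0, 0.0], [0.0, 4.0, 0.0], [0.0, 0.0, 4.0]],
    "positions_frac": [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],
    "mutations": ["random_strain"],
}
# reject the trial structure if any K-K, K-P or P-P contact is shorter than these values (Angstrom);
# pairs left out of minsep_dict fall back to the mean covalent radius of the pair
feasible = minseps_feasible(mutant, minsep_dict={("K", "K"): 2.5, ("K", "P"): 2.0, ("P", "P"): 1.8})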
Example #3
    @property
    def positions_abs(self):
        """ Return list of absolute Cartesian positions. """
        from matador.utils.cell_utils import frac2cart
        if 'positions_abs' not in self._data:
            self._data['positions_abs'] = frac2cart(self.cell.lattice_cart,
                                                    self.positions_frac)
        return self._data['positions_abs']
Example #4
def viz_site(doc, targ_substruc, elem, rmax=6):
    if 'positions_abs' not in doc:
        doc['positions_abs'] = frac2cart(doc['lattice_cart'], doc['positions_frac'])
    for ind, substruc in enumerate(doc['substruc_dict'][elem]):
        if substruc == targ_substruc:
            elem_ind = ind
            elem_count = find_site_index(doc, elem, elem_ind)
            if elem_count is not False:
                break
    targ_site = doc['positions_frac'][elem_count]
    targ_pos = doc['positions_abs'][elem_count]
    print(elem_count)
    from itertools import product
    from collections import defaultdict
    doc['lattice_cart'] = np.asarray(doc['lattice_cart'])
    neighbour_doc = defaultdict(list)
    neighbour_doc['positions_abs'].append(targ_pos)
    neighbour_doc['positions_frac'].append(targ_site)
    neighbour_doc['atom_types'].append('U')
    neighbour_doc['lattice_cart'] = doc['lattice_cart']
    for j, pos in enumerate(doc['positions_abs']):
        for prod in product(range(-1, 2), repeat=3):
            trans = np.zeros((3))
            for ind, multi in enumerate(prod):
                trans += doc['lattice_cart'][ind] * multi
            dist2 = 0
            for i in range(3):
                dist2 += (targ_pos[i]-(pos+trans)[i])**2
            if dist2 < rmax**2:
                neighbour_doc['positions_abs'].append(pos + trans)
                neighbour_doc['positions_frac'].append((np.asarray(doc['positions_frac'][j]) + prod).tolist())
                neighbour_doc['atom_types'].append(doc['atom_types'][j])
    print(targ_site)
    return elem_count, neighbour_doc
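viz_site gathers periodic-image neighbours with three nested loops; the short sketch below shows the same idea in vectorized form. neighbours_within is a hypothetical helper written for illustration, assuming a row-vector lattice and Cartesian inputs.

import numpy as np
from itertools import product

def neighbours_within(lattice_cart, positions_abs, centre, rmax=6.0):
    """Collect every periodic image of the given sites that lies within rmax of centre."""
    lattice = np.asarray(lattice_cart, dtype=float)
    pos = np.asarray(positions_abs, dtype=float)
    centre = np.asarray(centre, dtype=float)
    neighbours = []
    for image in product(range(-1, 2), repeat=3):
        shifted = pos + np.asarray(image, dtype=float) @ lattice   # translate all sites by this image vector
        dists = np.linalg.norm(shifted - centre, axis=1)
        neighbours.extend(shifted[dists < rmax])
    return neighbours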
Example #5
def doc2xsf(doc, path, write_energy=False, write_forces=False, overwrite=False):
    """ Write an .xsf file for a matador document, with positions in
    Cartesian coordinates. Optionally, write the energy in a comment
    at the top of the file for use with aenet.

    Parameters:
        doc (dict): matador document containing structure.
        path (str): desired path of xsf file.

    Keyword arguments:
        write_energy (bool): whether or not to write total energy in a comment
            as the first line of the file.
        write_forces (bool): whether or not to write the forces on each atom.
        overwrite (bool): overwrite if file exists.

    """
    if path.endswith('.xsf'):
        path = path.replace('.xsf', '')

    flines = []
    if write_energy:
        if 'total_energy' in doc:
            flines.append('# total energy = {:10.8f} eV\n'.format(doc['total_energy']))
        else:
            raise RuntimeError("Failed to write energy in xsf file: key 'total_energy' missing from input.")

    flines.append('CRYSTAL')
    flines.append('PRIMVEC')
    if 'lattice_cart' in doc:
        for i in range(3):
            flines.append('\t\t{lat[0]: 10.8f}\t{lat[1]: 10.8f}\t{lat[2]: 10.8f}'.format(lat=doc['lattice_cart'][i]))
    else:
        raise RuntimeError("Failed to write lattice in xsf file: key 'lattice_cart' missing from input.")
    flines.append('PRIMCOORD')
    flines.append('{} {}'.format(doc['num_atoms'], 1))
    if 'positions_abs' not in doc:
        doc['positions_abs'] = frac2cart(doc['lattice_cart'], doc['positions_frac'])
    for ind, (atom, position) in enumerate(zip(doc['atom_types'], doc['positions_abs'])):
        flines.append('{:2}\t{pos[0]: 16.8f}\t{pos[1]: 16.8f}\t{pos[2]: 16.8f}'.format(atom, pos=position))
        if write_forces:
            if 'forces' in doc:
                flines[-1] += ('\t{f[0]: 16.8f}\t{f[1]: 16.8f}\t{f[2]: 16.8f}'.format(f=doc['forces'][ind]))
            else:
                raise RuntimeError("Failed to write forces in xsf file: key 'forces' missing from input.")

    if os.path.isfile(path + '.xsf'):
        if overwrite:
            os.remove(path + '.xsf')
        else:
            print('File name already exists! Skipping!')
            raise RuntimeError('Duplicate file!')

    with open(path + '.xsf', 'w') as f:
        for line in flines:
            f.write(line + '\n')
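A hypothetical call to doc2xsf, assuming the function and its module-level imports are in scope; the document below is illustrative and carries only the keys the writer reads (lattice_cart, positions_frac or positions_abs, atom_types, num_atoms, and total_energy when write_energy is requested).

doc = {
    "lattice_cart": [[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]],
    "positions_frac": [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],
    "atom_types": ["Na", "Cl"],
    "num_atoms": 2,
    "total_energy": -1234.56789012,
}
doc2xsf(doc, "NaCl_example.xsf", write_energy=True, overwrite=True)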
Example #6
    def test_conversion_transitivity(self):
        """ Test that cart2frac(frac2cart(A)) == A. """
        castep_fname = REAL_PATH + "data/Na3Zn4-swap-ReOs-OQMD_759599.castep"
        test_doc, s = castep2dict(castep_fname, db=True, verbosity=VERBOSITY)
        lattice_cart = test_doc["lattice_cart"]
        positions_frac = test_doc["positions_frac"]
        np.testing.assert_almost_equal(
            cart2frac(lattice_cart, frac2cart(lattice_cart, positions_frac)),
            positions_frac,
            decimal=10,
        )
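The round trip above relies on the inverse mapping. A minimal sketch of it, again assuming a row-vector lattice and not reproducing the matador implementation, solves frac @ L = cart for the fractional coordinates.

import numpy as np

def cart2frac_sketch(lattice_cart, positions_cart):
    """Illustrative Cartesian-to-fractional conversion for a row-vector lattice."""
    lattice = np.asarray(lattice_cart, dtype=float)
    cart = np.atleast_2d(np.asarray(positions_cart, dtype=float))
    # frac @ lattice = cart  <=>  lattice.T @ frac.T = cart.T
    frac = np.linalg.solve(lattice.T, cart.T).T
    return frac.tolist()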
Example #7
    def test_convert_positions(self):
        doc = res2dict(REAL_PATH + "data/structures/Li7Sn-Fmmm.res")[0]
        crystal = res2dict(REAL_PATH + "data/structures/Li7Sn-Fmmm.res", as_model=True)[
            0
        ]

        doc["positions_abs"] = frac2cart(doc["lattice_cart"], doc["positions_frac"])

        np.testing.assert_array_equal(doc["positions_abs"], crystal.positions_abs)
        print(crystal.positions_abs)
        for site in crystal:
            print(site, site.get_coords("cartesian"))
Example #8
    def test_convert_positions(self):
        doc = res2dict(REAL_PATH + "data/structures/Li7Sn-Fmmm.res")[0]
        crystal = res2dict(REAL_PATH + "data/structures/Li7Sn-Fmmm.res",
                           as_model=True)[0]

        doc["positions_abs"] = frac2cart(doc["lattice_cart"],
                                         doc["positions_frac"])

        np.testing.assert_array_almost_equal(doc["positions_abs"],
                                             crystal.positions_abs)
        for ind, site in enumerate(crystal):
            np.testing.assert_array_almost_equal(doc["positions_abs"][ind],
                                                 site.coords_cartesian)

        crystal.cell.lengths = np.asarray(crystal.cell.lengths) * 10

        rescaled_pos = frac2cart(
            np.asarray(doc["lattice_cart"]) * 10, doc["positions_frac"])

        for ind, site in enumerate(crystal):
            np.testing.assert_array_almost_equal(doc["positions_frac"][ind],
                                                 site.coords)
            np.testing.assert_array_almost_equal(rescaled_pos[ind],
                                                 site.coords_cartesian)
Example #9
    def test_cart2abc(self):
        castep_fname = REAL_PATH + "data/Na3Zn4-swap-ReOs-OQMD_759599.castep"
        self.assertTrue(os.path.isfile(castep_fname))
        test_doc, s = castep2dict(castep_fname, db=True, verbosity=VERBOSITY)
        try:
            self.assertTrue(
                np.allclose(test_doc["lattice_abc"],
                            cart2abc(test_doc["lattice_cart"])),
                msg="Conversion cart2abc failed.",
            )
            self.assertTrue(
                np.allclose(
                    cart2abc(test_doc["lattice_cart"]),
                    cart2abc(abc2cart(test_doc["lattice_abc"])),
                ),
                msg="Conversion abc2cart failed.",
            )
            self.assertAlmostEqual(
                test_doc["cell_volume"],
                cart2volume(test_doc["lattice_cart"]),
                msg="Failed to calculate volume from lattice vectors.",
                places=5,
            )
            self.assertIsInstance(test_doc["lattice_abc"],
                                  list,
                                  msg="Failed abc numpy cast to list")
            self.assertIsInstance(
                test_doc["lattice_cart"],
                list,
                msg="Failed cartesian numpy cast to list",
            )
            cart_pos = frac2cart(test_doc["lattice_cart"],
                                 test_doc["positions_frac"])
            back2frac = cart2frac(test_doc["lattice_cart"], cart_pos)
            np.testing.assert_array_almost_equal(back2frac,
                                                 test_doc["positions_frac"])
        except AssertionError:
            print("cart:", test_doc["lattice_cart"],
                  abc2cart(test_doc["lattice_abc"]))
            print("abc:", test_doc["lattice_abc"],
                  cart2abc(test_doc["lattice_cart"]))
            print(
                "volume:",
                test_doc["cell_volume"],
                cart2volume(test_doc["lattice_cart"]),
            )
            raise AssertionError
Example #10
def doc2pdb(doc, path, info=True, hash_dupe=True):
    """ Write a simple .pdb for single doc.

    Parameters:
        doc (dict): matador document containing structure.
        path (str): desired path of xsf file.

    Keyword arguments:
        info (bool): write info string to HEADER.
        hash_dupe (bool): hash duplicate file names or skip?

    """
    if path.endswith('.pdb'):
        path = path.replace('.pdb', '')
    try:
        if os.path.isfile(path + '.pdb'):
            if hash_dupe:
                print('File already exists, generating hash...')
                path += '-' + generate_hash()
            else:
                raise RuntimeError('Skipping duplicate structure...')
        with open(path + '.pdb', 'w') as f:
            try:
                header = 'HEADER    {} {}'.format(doc['text_id'][0],
                                                  doc['text_id'][1])
            except Exception:
                header = 'HEADER    Generated with matador.'
            try:
                # write res file header if info
                title = 'TITLE     '
                title += path.split('/')[-1] + ' '
                if not doc.get('pressure'):
                    title += '0.00 '
                else:
                    title += str(doc['pressure']) + ' '
                title += str(doc['cell_volume']) + ' '
                title += str(doc['enthalpy']) + ' '
                title += '0 0 '  # spin
                title += str(doc['num_atoms']) + ' '
                try:
                    if 'x' in doc['space_group']:
                        title += '(P1) '
                    else:
                        title += '(' + str(doc['space_group']) + ')' + ' '
                except Exception:
                    title += '(P1) '
                title += 'n - 1'
            except Exception:
                if not info:
                    title = 'TITLE\t' + path.split('/')[-1]
                raise RuntimeError(
                    'Failed to get info for res file, turn info off.')
            author = 'AUTHOR    Generated with matador (Matthew Evans, 2016)'
            f.write(header + '\n')
            f.write(author + '\n')
            f.write(title + '\n')
            # use dummy SG for CRYST1, shouldn't matter
            cryst = (
                'CRYST1 {v[0][0]:9.3f} {v[0][1]:9.3f} {v[0][2]:9.3f} {v[1][0]:7.2f} {v[1][1]:7.2f} {v[1][2]:7.2f} P 1'
                .format(v=doc['lattice_abc']))
            f.write(cryst + '\n')
            scale_n = cart2abcstar(doc['lattice_cart'])
            f.write(
                'SCALE1    {v[0][0]:10.6f} {v[0][1]:10.6f} {v[0][2]:10.6f}      {:10.5f}\n'
                .format(0.0, v=scale_n))
            f.write(
                'SCALE2    {v[1][0]:10.6f} {v[1][1]:10.6f} {v[1][2]:10.6f}      {:10.5f}\n'
                .format(0.0, v=scale_n))
            f.write(
                'SCALE3    {v[2][0]:10.6f} {v[2][1]:10.6f} {v[2][2]:10.6f}      {:10.5f}\n'
                .format(0.0, v=scale_n))
            if 'positions_abs' not in doc:
                doc['positions_abs'] = frac2cart(doc['lattice_cart'],
                                                 doc['positions_frac'])
            for ind, atom in enumerate(doc['atom_types']):
                try:
                    hetatm = 'HETATM '
                    # append 00 to atom type, a la cell2pdb...
                    hetatm += '{:4d} {:.4} NON A   1     '.format(
                        ind + 1, atom + '00')
                    hetatm += (
                        '{v[0]:7.3f} {v[1]:7.3f} {v[2]:7.3f} {:5.2f} {:5.2f}          {:.2}'
                        .format(1.0, 0.0, atom, v=doc['positions_abs'][ind]))
                    f.write(hetatm + '\n')
                except Exception:
                    print_exc()
            ter = 'TER       {}       NON A   1'.format(len(doc['atom_types']))
            f.write(ter + '\n')
            f.write('END')
    except Exception:
        if hash_dupe:
            print_exc()
            print('Writing pdb file failed for ', doc['text_id'])
        else:
            print_exc()
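A hypothetical call to doc2pdb, assuming the function and the matador helpers it calls (cart2abcstar, generate_hash, frac2cart) are importable; the document below is illustrative, with lattice_abc given as [[a, b, c], [alpha, beta, gamma]].

doc = {
    "text_id": ["crimson", "badger"],
    "lattice_cart": [[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]],
    "lattice_abc": [[3.0, 3.0, 3.0], [90.0, 90.0, 90.0]],
    "positions_frac": [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],
    "atom_types": ["Na", "Cl"],
    "num_atoms": 2,
    "cell_volume": 27.0,
    "enthalpy": -1234.5,
    "space_group": "Pm-3m",
}
doc2pdb(doc, "NaCl_example", info=True, hash_dupe=True)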
Example #11
def _cif_set_unreduced_sites(doc):
    """ Expands sites by symmetry operations found under the key
    `_symmetry_equiv_pos_as_xyz` in the cif_dict.

    Parameters:
        doc (dict): matador document to modify. Must contain symops
            under doc['_cif']['_symmetry_equiv_pos_as_xyz']. This doc
            is updated with new `positions_frac`, `num_atoms`, `atom_types`
            and `site_occupancy`.

    """
    from matador.utils.cell_utils import wrap_frac_coords
    from matador.utils.cell_utils import calc_pairwise_distances_pbc
    from matador.fingerprints.pdf import PDF

    species_sites = dict()
    species_occ = dict()

    symmetry_ops = []
    symmetry_functions = []

    def _apply_sym_op(x=None, y=None, z=None, symmetry=None):
        """ Returns the site after the applied symmetry operation, in string representation. """
        # cannot use a listcomp here due to interplay with functools
        return [eval(symmetry[0]), eval(symmetry[1]), eval(symmetry[2])]

    for symmetry in doc['_cif']['_symmetry_equiv_pos_as_xyz']:
        symmetry = tuple(elem.strip() for elem in symmetry.strip('\'').split(','))
        # check the element before doing an eval, as it is so unsafe
        allowed_chars = ['x', 'y', 'z', '.', '/', '+', '-',
                         '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
        for element in symmetry:
            for character in element:
                if character not in allowed_chars:
                    raise RuntimeError('You are trying to do something naughty with the symmetry element {}'
                                       .format(element))

        symmetry_ops.append(symmetry)
        symmetry_functions.append(functools.partial(_apply_sym_op, symmetry=symmetry))

    for ind, site in enumerate(doc['positions_frac']):
        species = doc['atom_types'][ind]
        occupancy = doc['site_occupancy'][ind]
        if doc['atom_types'][ind] not in species_sites:
            species_sites[species] = []
            species_occ[species] = []
        for symmetry in symmetry_functions:
            x, y, z = site
            new_site = symmetry(x=x, y=y, z=z)
            new_site = wrap_frac_coords([new_site])[0]
            species_sites[species].append(new_site)
            species_occ[species].append(occupancy)

    unreduced_sites = []
    unreduced_occupancies = []
    unreduced_species = []

    # this loop assumes that no symmetry operation can map 2 unlike sites upon one another
    for species in species_sites:
        unreduced_sites.extend(species_sites[species])
        unreduced_occupancies.extend(species_occ[species])
        unreduced_species.extend(len(species_sites[species]) * [species])

    # check that the symmetry procedure has not generated overlapping atoms
    # this can happen for certain symmetries/cells if positions are not
    # reported to sufficient precision
    images = PDF._get_image_trans_vectors_auto(
        doc['lattice_cart'],
        0.1, 0.01, max_num_images=1,
    )

    poscarts = frac2cart(doc['lattice_cart'], unreduced_sites)
    distances = calc_pairwise_distances_pbc(
        poscarts,
        images,
        doc['lattice_cart'],
        0.01,
        compress=False,
        filter_zero=False,
        per_image=True
    )

    dupe_set = set()
    for img in distances:
        try:
            i_s, j_s = np.where(~img.mask)
        except ValueError:
            # ValueError will be raised if there is only one atom as i_s, j_s cannot be unpacked
            continue
        for i, j in zip(i_s, j_s):
            if i == j:
                continue
            else:
                # sites can overlap if they have partial occupancy
                if i not in dupe_set and unreduced_species[i] == unreduced_species[j]:
                    dupe_set.add(j)

    doc['positions_frac'] = unreduced_sites
    doc['site_occupancy'] = unreduced_occupancies
    doc['atom_types'] = unreduced_species

    doc['site_occupancy'] = [
        atom for ind, atom in enumerate(unreduced_occupancies) if ind not in dupe_set
    ]
    doc['atom_types'] = [
        atom for ind, atom in enumerate(unreduced_species) if ind not in dupe_set
    ]
    doc['positions_frac'] = [
        atom for ind, atom in enumerate(unreduced_sites) if ind not in dupe_set
    ]

    _num_atoms = np.sum(doc['site_occupancy'])
    if abs(_num_atoms - round(_num_atoms, 0)) < EPS:
        _num_atoms = int(round(_num_atoms, 0))
    doc['num_atoms'] = _num_atoms

    if len(doc['site_occupancy']) != len(doc['positions_frac']):
        raise RuntimeError('Size mismatch between positions and occs, {} vs {}'
                           .format(len(doc['site_occupancy']), len(doc['positions_frac'])))
    if len(doc['positions_frac']) != len(doc['atom_types']):
        raise RuntimeError('Size mismatch between positions and types')
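The eval-based expansion above is easier to see on a single site. The operation string and coordinates below are made up for illustration, mirroring one entry of '_symmetry_equiv_pos_as_xyz' after it has been split on commas and validated.

symmetry = ("-x", "y+1/2", "-z+1/2")     # one validated symmetry operation, as strings
x, y, z = 0.25, 0.10, 0.40               # an arbitrary fractional site
new_site = [eval(symmetry[0]), eval(symmetry[1]), eval(symmetry[2])]
# new_site == [-0.25, 0.6, ~0.1]; wrap_frac_coords then maps the first coordinate back into [0, 1)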
Example #12
def random_slice(parent_seeds,
                 standardize=True,
                 supercell=True,
                 shift=True,
                 debug=False):
    """ Simple cut-and-splice crossover of two parents.

    The overall size of the child can vary between 0.5 and 2 times the size of the
    parent structures. Both parent structures are cut and spliced along the
    same crystallographic axis.

    Parameters:

        parent_seeds (list(dict)) : parent structures to crossover,
        standardize (bool)   : use spglib to standardize parents pre-crossover,
        supercell (bool)     : make a random supercell to rescale parents,
        shift (bool)         : randomly shift atoms in parents to unbias.

    Returns:

        dict: newborn structure from parents.

    """
    parents = deepcopy(parent_seeds)
    child = dict()
    # child_size is a number between 0.5 and 2
    child_size = 0.5 + 1.5 * np.random.rand()
    # cut_val is a number between 0.25*child_size and 0.75*child_size
    # the slice position of one parent in fractional coordinates
    # (the other is (child_size-cut_val))
    cut_val = child_size * (0.25 + (np.random.rand() / 2.0))

    parent_densities = []
    for ind, parent in enumerate(parents):
        if "cell_volume" not in parent:
            parents[ind]["cell_volume"] = cart2volume(parent["lattice_cart"])
        parent_densities.append(parent["num_atoms"] / parent["cell_volume"])
    target_density = sum(parent_densities) / len(parent_densities)

    if standardize:
        parents = [standardize_doc_cell(parent) for parent in parents]

    if supercell:
        # check ratio of num atoms in parents and grow the smaller one
        parent_extent_ratio = parents[0]["cell_volume"] / parents[1][
            "cell_volume"]
        if debug:
            print(
                parent_extent_ratio,
                parents[0]["cell_volume"],
                "vs",
                parents[1]["cell_volume"],
            )
        if parent_extent_ratio < 1:
            supercell_factor = int(round(1 / parent_extent_ratio))
            supercell_target = 0
        elif parent_extent_ratio >= 1:
            supercell_factor = int(round(parent_extent_ratio))
            supercell_target = 1
        if debug:
            print(supercell_target, supercell_factor)
        supercell_vector = [1, 1, 1]
        if supercell_factor > 1:
            for ind in range(supercell_factor):
                min_lat_vec_abs = 1e10
                min_lat_vec_ind = -1
                for i in range(3):
                    lat_vec_abs = np.sum(
                        np.asarray(
                            parents[supercell_target]["lattice_cart"][i])**2)
                    if lat_vec_abs < min_lat_vec_abs:
                        min_lat_vec_abs = lat_vec_abs
                        min_lat_vec_ind = i
                supercell_vector[min_lat_vec_ind] += 1
        if debug:
            print("Making supercell of {} with {}".format(
                parents[supercell_target]["source"][0], supercell_vector))
        if supercell_vector != [1, 1, 1]:
            parents[supercell_target] = create_simple_supercell(
                parents[supercell_target], supercell_vector, standardize=False)
    child["positions_frac"] = []
    child["atom_types"] = []
    child["lattice_cart"] = cut_val * np.asarray(
        parents[0]["lattice_cart"]) + (child_size - cut_val) * np.asarray(
            parents[1]["lattice_cart"])
    child["lattice_cart"] = child["lattice_cart"].tolist()

    # choose slice axis
    axis = np.random.randint(low=0, high=3)
    for ind, parent in enumerate(parents):
        if shift:
            # apply same random shift to all atoms in parents
            shift_vec = np.random.rand(3)
            for idx, _ in enumerate(parent["positions_frac"]):
                for k in range(3):
                    parent["positions_frac"][idx][k] += shift_vec[k]
                    if parent["positions_frac"][idx][k] >= 1:
                        parent["positions_frac"][idx][k] -= 1
                    elif parent["positions_frac"][idx][k] < 0:
                        parent["positions_frac"][idx][k] += 1
        # slice parent
        for atom, pos in zip(parent["atom_types"], parent["positions_frac"]):
            if ind == (pos[axis] <= cut_val):
                child["positions_frac"].append(pos)
                child["atom_types"].append(atom)
    # check child is sensible
    child["mutations"] = ["crossover"]
    child["stoichiometry"] = get_stoich(child["atom_types"])
    child["num_atoms"] = len(child["atom_types"])

    if "cell_volume" not in child:
        child["cell_volume"] = cart2volume(child["lattice_cart"])
    number_density = child["num_atoms"] / child["cell_volume"]

    # rescale cell based on number density of parents
    new_scale = np.cbrt(number_density / target_density)
    child["lattice_abc"] = np.asarray(cart2abc(child["lattice_cart"]))
    child["lattice_abc"][0] *= new_scale
    child["lattice_abc"] = child["lattice_abc"].tolist()
    child["lattice_cart"] = abc2cart(child["lattice_abc"])
    child["cell_volume"] = cart2volume(child["lattice_cart"])
    child["positions_abs"] = frac2cart(child["lattice_cart"],
                                       child["positions_frac"])

    return child
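A hypothetical call to random_slice, assuming the function and its dependencies (numpy and the matador cell utilities used above) are in scope; the parent documents are illustrative and carry only the keys the crossover reads, with standardization and supercell rescaling switched off to keep the example small.

parent_a = {
    "atom_types": ["K", "K", "P"],
    "positions_frac": [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5], [0.25, 0.25, 0.25]],
    "lattice_cart": [[5.0, 0.0, 0.0], [0.0, 5.0, 0.0], [0.0, 0.0, 5.0]],
    "num_atoms": 3,
    "source": ["parent_a.res"],
}
parent_b = {
    "atom_types": ["K", "P"],
    "positions_frac": [[0.1, 0.2, 0.3], [0.6, 0.7, 0.8]],
    "lattice_cart": [[4.0, 0.0, 0.0], [0.0, 4.0, 0.0], [0.0, 0.0, 4.0]],
    "num_atoms": 2,
    "source": ["parent_b.res"],
}
child = random_slice([parent_a, parent_b], standardize=False, supercell=False, shift=True)
# child carries new positions_frac, atom_types, lattice_cart and the "crossover" mutation tag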
Example #13
    def __init__(self, doc, lazy=False, **kwargs):
        """ Initialise parameters and run PDF (unless lazy=True).

        Parameters:

            doc (dict) : matador document to calculate PDF of

        Keyword Arguments:

            dr (float) : bin width for PDF (Angstrom) (DEFAULT: 0.01)
            gaussian_width (float) : width of Gaussian smearing (Angstrom) (DEFAULT: 0.1)
            num_images (int/str) : number of unit cell images included in the PDF calculation (DEFAULT: 'auto')
            max_num_images (int) : cutoff number of unit cells before crashing (DEFAULT: 50)
            rmax (float) : maximum distance cutoff for PDF (Angstrom) (DEFAULT: 15)
            projected (bool) : optionally calculate the element-projected PDF
            standardize (bool) : standardize cell before calculating PDF
            lazy (bool) : if True, calculator is not called when initializing PDF object
            timing (bool) : if True, print the total time taken to calculate the PDF

        """

        prop_defaults = {
            'dr': 0.01,
            'gaussian_width': 0.1,
            'rmax': 15,
            'num_images': 'auto',
            'style': 'smear',
            'debug': False,
            'timing': False,
            'low_mem': False,
            'projected': True,
            'max_num_images': 50,
            'standardize': True
        }

        # read and store kwargs
        self.kwargs = prop_defaults
        self.kwargs.update(
            {key: kwargs[key]
             for key in kwargs if kwargs[key] is not None})

        # useful data for labelling
        self.spg = None
        structure = copy.deepcopy(doc)
        if self.kwargs.get('standardize'):
            structure = standardize_doc_cell(structure)
            self.spg = structure['space_group']
        self.stoichiometry = structure.get('stoichiometry',
                                           get_stoich(structure['atom_types']))

        # private variables
        self._num_images = self.kwargs.get('num_images')
        self._lattice = np.asarray(structure['lattice_cart'])
        self._poscart = np.asarray(
            frac2cart(structure['lattice_cart'],
                      structure['positions_frac'])).reshape(-1, 3)
        self._types = structure['atom_types']
        self._num_atoms = len(self._poscart)
        self._volume = cart2volume(self._lattice)
        self._image_vec = None

        # public variables
        self.rmax = self.kwargs.get('rmax')
        self.number_density = self._num_atoms / self._volume
        self.dr = self.kwargs.get('dr')
        self.r_space = None
        self.gr = None
        self.elem_gr = None

        self.label = None
        if self.kwargs.get('label'):
            self.label = self.kwargs["label"]
        elif 'text_id' in structure:
            self.label = ' '.join(structure['text_id'])

        if not lazy:
            if self.kwargs.get('timing'):
                start = time.time()
            self.calc_pdf()
            if self.kwargs.get('timing'):
                end = time.time()
                print('PDF calculated in {:.3f} s'.format(end - start))
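A hypothetical construction of a PDF from a matador-style document; the structure below is illustrative and carries the keys the constructor reads, with the remaining keyword arguments left at their defaults.

doc = {
    "lattice_cart": [[4.0, 0.0, 0.0], [0.0, 4.0, 0.0], [0.0, 0.0, 4.0]],
    "positions_frac": [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],
    "atom_types": ["Na", "Cl"],
}
pdf = PDF(doc, dr=0.01, gaussian_width=0.1, rmax=10, timing=True)
# after calc_pdf has run, pdf.r_space and pdf.gr hold the radial grid and the total g(r)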
Example #14
    def kpoint_path_cartesian(self):
        """ The reciprocal space sampling path in Cartesian coordinates. """
        return np.asarray(
            frac2cart(real2recip(self.lattice_cart), self.kpoint_path))
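kpoint_path_cartesian leans on real2recip. The sketch below is a stand-in written for illustration, not the matador function, assuming the convention that the reciprocal row vectors b_j satisfy a_i . b_j = 2*pi*delta_ij for a row-vector real-space lattice.

import numpy as np

def real2recip_sketch(lattice_cart):
    """Illustrative reciprocal lattice: rows b_j with a_i . b_j = 2*pi*delta_ij."""
    real = np.asarray(lattice_cart, dtype=float)
    return (2 * np.pi * np.linalg.inv(real).T).tolist()

# fractional k-points are then mapped to Cartesian exactly as atomic positions are:
# frac2cart(real2recip_sketch(lattice_cart), kpoint_path)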
Example #15
    def test_kpt_path(self):

        cell, s = castep2dict(REAL_PATH +
                              "data/Na3Zn4-swap-ReOs-OQMD_759599.castep")
        std_cell, path, seekpath_results = get_seekpath_kpoint_path(
            cell, spacing=0.01, debug=False)
        self.assertEqual(539, len(path))

        self.assertLess(pdf_sim_dist(cell, std_cell), 0.05)

        import glob
        from os import remove
        from matador.utils.cell_utils import frac2cart

        fnames = glob.glob(REAL_PATH + "data/bs_test/*.res")
        spacing = 0.01
        for fname in fnames:
            doc, s = res2dict(fname, db=False)
            doc["cell_volume"] = cart2volume(doc["lattice_cart"])

            std_doc, path, seekpath_results = get_seekpath_kpoint_path(
                doc, spacing=spacing, debug=False)
            seekpath_results_path = get_path(doc2spg(doc))

            rel_path = seekpath_results["explicit_kpoints_rel"]
            abs_path = seekpath_results["explicit_kpoints_abs"]

            cart_kpts = np.asarray(
                frac2cart(real2recip(std_doc["lattice_cart"]), path))
            diffs = np.zeros((len(cart_kpts[:-1])))
            np.testing.assert_array_almost_equal(cart_kpts, abs_path)
            np.testing.assert_array_almost_equal(path, rel_path)
            for ind, kpt in enumerate(cart_kpts[:-1]):
                diffs[ind] = np.sqrt(np.sum((kpt - cart_kpts[ind + 1])**2))
            self.assertLess(
                len(np.where(diffs > 1.1 * spacing)[0]),
                len(seekpath_results["explicit_segments"]),
            )

            if "flrys4-1x109" in fname:
                bs, s = bands2dict(fname.replace(".res", ".bands"))
                np.testing.assert_array_almost_equal(bs["kpoint_path"],
                                                     rel_path)
                np.testing.assert_array_almost_equal(bs["lattice_cart"],
                                                     std_doc["lattice_cart"])
            self.assertLess(
                len(np.where(diffs > 1.1 * spacing)[0]),
                len(seekpath_results["explicit_segments"]),
            )

            cell_path = fname.replace(".res", ".cell")
            doc2cell(std_doc, cell_path)
            new_doc, s = cell2dict(cell_path,
                                   lattice=True,
                                   positions=True,
                                   db=False)
            assert "positions_frac" in new_doc
            remove(cell_path)
            seekpath_new_results = get_path(doc2spg(new_doc))
            self.assertEqual(
                seekpath_new_results["bravais_lattice_extended"],
                seekpath_results_path["bravais_lattice_extended"],
            )

            dist = pdf_sim_dist(doc, std_doc)
            self.assertLess(dist, 0.01)
            dist = pdf_sim_dist(doc, new_doc)
            self.assertLess(dist, 0.01)
Example #16
    @property
    def coords_cartesian(self):
        return np.asarray(frac2cart(self.lattice, self.coords))