Example #1
    def _read_vol_data(filename, normalize=True):
        """
        Parses the Sphinx volumetric data files (rho.sxb and vElStat-eV.sxb).

        Args:
            filename (str): File to be parsed
            normalize (bool): Normalize the data with respect to the volume
                (probably sensible for rho)

        Returns:
            list: A list of the volumetric data (length >1 for density
                files with spin)

        """
        if not os.path.getsize(filename) > 0:
            s = Settings()
            s.logger.warning("File:" + filename + "seems to be empty! ")
            return None

        with netcdf_file(filename, mmap=False) as f:
            dim = [int(d) for d in f.variables["dim"]]
            volume = 1.0
            if normalize:
                cell = f.variables["cell"].data * BOHR_TO_ANGSTROM
                volume = np.abs(np.linalg.det(cell))
            if "mesh" in f.variables:
                # non-spin polarized
                total_data_list = [
                    np.array(f.variables["mesh"][:]).reshape(dim) / volume
                ]
            elif "mesh-0" in f.variables and "mesh-1" in f.variables:
                # spin-polarized
                total_data_list = [
                    np.array(f.variables["mesh-0"][:]).reshape(dim) / volume,
                    np.array(f.variables["mesh-1"][:]).reshape(dim) / volume
                ]
            else:
                raise ValueError(
                    "Unexpected keys in the netcdf file's variables: neither "
                    f"'mesh' nor 'mesh-0' and 'mesh-1' found in {f.variables}."
                )

        if len(total_data_list) == 0:
            s = Settings()
            s.logger.warning(
                "File:"
                + filename
                + "seems to be corrupted/empty even after parsing!"
            )
            return None

        return total_data_list
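A minimal usage sketch of the parser above (the file name "rho.sxb" is illustrative, and it is assumed that _read_vol_data is available in the calling scope, e.g. as a static method of the surrounding class):

    data_list = _read_vol_data("rho.sxb", normalize=True)
    if data_list is not None:
        print(len(data_list))      # 1 entry without spin, 2 for spin-polarized densities
        print(data_list[0].shape)  # matches the "dim" variable stored in the netCDF file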
Example #2
File: vasprun.py Project: t-brink/pyiron
    def get_initial_structure(self):
        """
        Gets the initial structure from the simulation

        Returns:
            pyiron.atomistics.structure.atoms.Atoms: The initial structure

        """
        try:
            el_list = self.vasprun_dict["atominfo"]["species_list"]
            cell = self.vasprun_dict["init_structure"]["cell"]
            positions = self.vasprun_dict["init_structure"]["positions"]
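            # If any coordinate exceeds ~1.0 it cannot be a fractional coordinate, so the
            # positions are treated as Cartesian; otherwise they are passed as scaled positions.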
            if len(positions[positions > 1.01]) > 0:
                basis = Atoms(el_list,
                              positions=positions,
                              cell=cell,
                              pbc=True)
            else:
                basis = Atoms(el_list,
                              scaled_positions=positions,
                              cell=cell,
                              pbc=True)
            if "selective_dynamics" in self.vasprun_dict[
                    "init_structure"].keys():
                basis.add_tag(selective_dynamics=[True, True, True])
                for i, val in enumerate(self.vasprun_dict["init_structure"]
                                        ["selective_dynamics"]):
                    basis[i].selective_dynamics = val
            return basis
        except KeyError:
            s = Settings()
            s.logger.warning(
                "The initial structure could not be extracted from vasprun properly"
            )
            return
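A minimal usage sketch (the import path, the Vasprun class name and its from_file method are assumptions inferred from the snippet's origin in pyiron's vasprun.py, not verified API):

    from pyiron.vasp.vasprun import Vasprun

    parser = Vasprun()
    parser.from_file("vasprun.xml")              # assumed to populate parser.vasprun_dict
    structure = parser.get_initial_structure()   # pyiron Atoms object, or None on failure
    if structure is not None:
        print(structure.get_chemical_formula())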
Example #3
import shutil
import os
from pyiron_base import Settings, GenericParameters
from pyiron.atomistics.job.potentials import PotentialAbstract, find_potential_file_base

__author__ = "Joerg Neugebauer, Sudarsan Surendralal, Jan Janssen"
__copyright__ = (
    "Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - "
    "Computational Materials Design (CM) Department")
__version__ = "1.0"
__maintainer__ = "Sudarsan Surendralal"
__email__ = "*****@*****.**"
__status__ = "production"
__date__ = "Sep 1, 2017"

s = Settings()


class LammpsPotential(GenericParameters):
    """
    This module helps write commands which help in the control of parameters related to the potential used in LAMMPS
    simulations
    """
    def __init__(self, input_file_name=None):
        super(LammpsPotential, self).__init__(
            input_file_name=input_file_name,
            table_name="potential_inp",
            comment_char="#",
        )
        self._potential = None
        self._attributes = {}
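A minimal sketch of how the class above might be filled with a pair-style block (it is assumed that the load_string and get_string_lst helpers are inherited from pyiron_base's GenericParameters; the EAM file name is illustrative):

    potential = LammpsPotential()
    potential.load_string(
        "pair_style eam/alloy\n"
        "pair_coeff * * Al_mm.eam.alloy Al\n"
    )
    print(potential.get_string_lst())  # the potential lines written to the LAMMPS input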
Example #4
    def _read_vol_data_old(filename, normalize=True):
        """
        Convenience method to parse a generic volumetric static file in the vasp like format.
        Used by subclasses for parsing the file. This routine is adapted from the pymatgen vasp VolumetricData
        class with very minor modifications. The new parser is faster

        http://pymatgen.org/_modules/pymatgen/io/vasp/outputs.html#VolumetricData.

        Args:
            filename (str): Path of file to parse
            normalize (boolean): Flag to normalize by the volume of the cell

        """
        if os.stat(filename).st_size == 0:
            s = Settings()
            s.logger.warning("File:" + filename +
                             "seems to be corrupted/empty")
            return None, None
        poscar_read = False
        poscar_string = list()
        dataset = list()
        all_dataset = list()
        dim = None
        dimline = None
        read_dataset = False
        ngrid_pts = 0
        data_count = 0
        atoms = None
        volume = None
        with open(filename, "r") as f:
            for line in f:
                line = line.strip()
                if read_dataset:
                    toks = line.split()
                    for tok in toks:
                        if data_count < ngrid_pts:
                            # This complicated procedure is necessary because
                            # vasp outputs x as the fastest index, followed by y
                            # then z.
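                            # Worked example: with dim = [2, 3, 4] and data_count = 7,
                            # x = 7 % 2 = 1, y = (7 // 2) % 3 = 0, z = 7 // (2 * 3) = 1,
                            # i.e. the flat index is x + y*dim[0] + z*dim[0]*dim[1] = 7.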
                            x = data_count % dim[0]
                            y = int(math.floor(data_count / dim[0])) % dim[1]
                            z = int(math.floor(data_count / dim[0] / dim[1]))
                            dataset[x, y, z] = float(tok)
                            data_count += 1
                    if data_count >= ngrid_pts:
                        read_dataset = False
                        data_count = 0
                        all_dataset.append(dataset)
                elif not poscar_read:
                    if line != "" or len(poscar_string) == 0:
                        poscar_string.append(line)
                    elif line == "":
                        try:
                            atoms = atoms_from_string(poscar_string)
                        except ValueError:
                            pot_str = filename.split("/")
                            pot_str[-1] = "POTCAR"
                            potcar_file = "/".join(pot_str)
                            species = get_species_list_from_potcar(potcar_file)
                            atoms = atoms_from_string(poscar_string,
                                                      species_list=species)
                        volume = atoms.get_volume()
                        poscar_read = True
                elif not dim:
                    dim = [int(i) for i in line.split()]
                    ngrid_pts = dim[0] * dim[1] * dim[2]
                    dimline = line
                    read_dataset = True
                    dataset = np.zeros(dim)
                elif line == dimline:
                    read_dataset = True
                    dataset = np.zeros(dim)
            if not normalize:
                volume = 1.0
            if len(all_dataset) == 0:
                s = Settings()
                s.logger.warning("File:" + filename +
                                 "seems to be corrupted/empty")
                return None, None
            if len(all_dataset) == 2:
                data = {
                    "total": all_dataset[0] / volume,
                    "diff": all_dataset[1] / volume,
                }
                return atoms, [data["total"], data["diff"]]
            else:
                data = {"total": all_dataset[0] / volume}
                return atoms, [data["total"]]
Example #5
    def _read_vol_data(self, filename, normalize=True):
        """
        Parses the VASP volumetric data files (CHGCAR, LOCPOT, PARCHG, etc.). Rather than looping over
        individual values, this function uses numpy indexing, making parsing at least 10% more efficient.

        Args:
            filename (str): File to be parsed
            normalize (bool): Normalize the data with respect to the volume (Recommended for CHGCAR files)

        Returns:
            pyiron.atomistics.structure.atoms.Atoms: The structure of the volumetric snapshot
            list: A list of the volumetric data (length >1 for CHGCAR files with spin)

        """
        if not os.path.getsize(filename) > 0:
            s = Settings()
            s.logger.warning("File:" + filename + "seems to be empty! ")
            return None, None
        with open(filename, "r") as f:
            struct_lines = list()
            get_grid = False
            n_x = 0
            n_y = 0
            n_z = 0
            n_grid = 0
            n_grid_str = None
            total_data_list = list()
            atoms = None
            for line in f:
                strip_line = line.strip()
                if not get_grid:
                    if strip_line == "":
                        get_grid = True
                    struct_lines.append(strip_line)
                elif n_grid_str is None:
                    n_x, n_y, n_z = [int(val) for val in strip_line.split()]
                    n_grid = n_x * n_y * n_z
                    n_grid_str = " ".join(
                        [str(val) for val in [n_x, n_y, n_z]])
                    load_txt = np.genfromtxt(f, max_rows=int(n_grid / 5))
                    load_txt = np.hstack(load_txt)
                    if n_grid % 5 != 0:
                        add_line = np.genfromtxt(f, max_rows=1)
                        load_txt = np.append(load_txt, np.hstack(add_line))
                    total_data = self._fastest_index_reshape(
                        load_txt, [n_x, n_y, n_z])
                    try:
                        atoms = atoms_from_string(struct_lines)
                    except ValueError:
                        pot_str = filename.split("/")
                        pot_str[-1] = "POTCAR"
                        potcar_file = "/".join(pot_str)
                        species = get_species_list_from_potcar(potcar_file)
                        atoms = atoms_from_string(struct_lines,
                                                  species_list=species)
                    if normalize:
                        total_data /= atoms.get_volume()
                    total_data_list.append(total_data)
                elif atoms is not None:
                    grid_str = n_grid_str.replace(" ", "")
                    if grid_str == strip_line.replace(" ", ""):
                        load_txt = np.genfromtxt(f, max_rows=int(n_grid / 5))
                        load_txt = np.hstack(load_txt)
                        if n_grid % 5 != 0:
                            add_line = np.genfromtxt(f, max_rows=1)
                            load_txt = np.hstack(
                                np.append(load_txt, np.hstack(add_line)))
                        total_data = self._fastest_index_reshape(
                            load_txt, [n_x, n_y, n_z])
                        if normalize:
                            total_data /= atoms.get_volume()
                        total_data_list.append(total_data)
            if len(total_data_list) == 0:
                s = Settings()
                s.logger.warning(
                    "File:" + filename +
                    "seems to be corrupted/empty even after parsing!")
                return None, None
            return atoms, total_data_list
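The helper _fastest_index_reshape is not shown in this snippet. A plausible stand-in (a hypothetical sketch, not the pyiron implementation) is a Fortran-order reshape, since VASP writes x as the fastest-running index:

    import numpy as np

    def fastest_index_reshape(flat_data, grid):
        """Reshape a flat value list so that the first axis (x) is the fastest index."""
        n_x, n_y, n_z = grid
        # order="F" lets the first axis vary fastest, matching the VASP file layout
        return np.asarray(flat_data).reshape((n_x, n_y, n_z), order="F")

    # quick consistency check against the index arithmetic used in the old parser above
    grid = [2, 3, 4]
    flat = np.arange(np.prod(grid), dtype=float)
    assert fastest_index_reshape(flat, grid)[1, 0, 1] == 7.0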