コード例 #1
0
 def setUp(self):
     """Prepare POSIX-style resource/project paths and a Settings instance."""
     here = os.path.dirname(os.path.abspath(__file__))
     static_dir = os.path.abspath(os.path.join(here, '../../static'))
     # Normalize Windows separators so path comparisons work cross-platform.
     self.resource_path = static_dir.replace('\\', '/')
     self.project_path = here.replace('\\', '/')
     self.file_config = Settings()
コード例 #2
0
ファイル: volumetric_data.py プロジェクト: samk07/pyiron
    def _read_vol_data(filename, normalize=True):
        """
        Parses the Sphinx volumetric data files (rho.sxb and vElStat-eV.sxb).

        Args:
            filename (str): File to be parsed
            normalize (bool): Normalize the data with respect to the volume
                (probably sensible for rho)

        Returns:
            list: A list of the volumetric data (length >1 for density
                files with spin)

        """
        if not os.path.getsize(filename) > 0:
            s = Settings()
            s.logger.warning("File:" + filename + "seems to be empty! ")
            return None, None

        with netcdf_file(filename, mmap=False) as f:
            dim = [int(d) for d in f.variables["dim"]]
            volume = 1.0
            if normalize:
                cell = f.variables["cell"].data * BOHR_TO_ANGSTROM
                volume = np.abs(np.linalg.det(cell))
            if "mesh" in f.variables:
                # non-spin polarized
                total_data_list = [
                    np.array(f.variables["mesh"][:]).reshape(dim) / volume
                ]
            elif "mesh-0" in f.variables and "mesh-1" in f.variables:
                # spin-polarized
                total_data_list = [
                    np.array(f.variables["mesh-0"][:]).reshape(dim) / volume,
                    np.array(f.variables["mesh-1"][:]).reshape(dim) / volume
                ]
            else:
                raise ValueError(
                    "Unexpected keys in the netcdf file's variables: neither "
                    f"'mesh' nor 'mesh-0' and 'mesh-1' found in {f.variables}."
                )

        if len(total_data_list) == 0:
            s = Settings()
            s.logger.warning("File:" + filename +
                             "seems to be corrupted/empty even after parsing!")
            return None

        return total_data_list
コード例 #3
0
ファイル: parameters.py プロジェクト: obaica/pyiron
    def read_input(self, file_name, ignore_trigger=None):
        """
        Read input file and store the data in GenericParameters - this
        overwrites the current parameter settings.

        Args:
            file_name (str): absolute path to the input file
            ignore_trigger (str): comment marker; lines starting with it are
                dropped entirely, and inline occurrences truncate the line
                from the marker onwards

        Raises:
            ValueError: if file_name does not point to an existing file
        """
        Settings().logger.debug("file: %s %s", file_name, os.path.isfile(file_name))
        if not os.path.isfile(file_name):
            raise ValueError("file does not exist: " + file_name)
        with open(file_name, "r") as f:
            lines = f.readlines()
            # plain copy; the former np.array(...).tolist() round-trip was a no-op
            new_lines = list(lines)
            if ignore_trigger is not None:
                del_ind = list()
                for i, line in enumerate(lines):
                    line = line.strip()
                    if len(line.split()) > 0:
                        if ignore_trigger == line.strip()[0]:
                            # whole line is a comment -> drop it
                            del_ind.append(i)
                        elif ignore_trigger in line:
                            # BUGFIX: truncate at the trigger character itself;
                            # previously this searched for a hard-coded "!",
                            # which chopped the last character (find -> -1)
                            # whenever the trigger differed from "!"
                            lines[i] = line[: line.find(ignore_trigger)]
                lines = np.array(lines)
                new_lines = lines[np.setdiff1d(np.arange(len(lines)), del_ind)]
        new_dict = self._lines_to_dict(new_lines)
        self._read_only_check_dict(new_dict=new_dict)
        self._dataset = new_dict
コード例 #4
0
    def get_initial_structure(self):
        """
        Extract the initial structure from the parsed vasprun data.

        Returns:
            pyiron.atomistics.structure.atoms.Atoms: The initial structure,
                or None when it cannot be extracted

        """
        try:
            species = self.vasprun_dict["atominfo"]["species_list"]
            cell = self.vasprun_dict["init_structure"]["cell"]
            positions = self.vasprun_dict["init_structure"]["positions"]
            # Coordinates above ~1 indicate Cartesian rather than fractional
            # positions -- TODO confirm this heuristic against the writer.
            cartesian = len(positions[positions > 1.01]) > 0
            if cartesian:
                basis = Atoms(species, positions=positions, cell=cell)
            else:
                basis = Atoms(species, scaled_positions=positions, cell=cell)
            if "selective_dynamics" in self.vasprun_dict[
                    "init_structure"].keys():
                basis.add_tag(selective_dynamics=[True, True, True])
                flags = self.vasprun_dict["init_structure"]["selective_dynamics"]
                for index, flag in enumerate(flags):
                    basis[index].selective_dynamics = flag
            return basis
        except KeyError:
            Settings().logger.warning(
                "The initial structure could not be extracted from vasprun properly"
            )
            return
コード例 #5
0
 def setUpClass(cls):
     """Resolve static-resource and project paths once for the test class."""
     file_dir = os.path.dirname(os.path.abspath(__file__))
     # Forward slashes keep the paths comparable on Windows as well.
     cls.resource_path = os.path.abspath(
         os.path.join(file_dir, "../../static")).replace("\\", "/")
     cls.project_path = file_dir.replace("\\", "/")
     cls.file_config = Settings()
コード例 #6
0
 def setUpClass(cls):
     """Compute POSIX-style test paths and instantiate Settings."""
     base_dir = os.path.dirname(os.path.abspath(__file__))
     static_path = os.path.abspath(os.path.join(base_dir, '../../static'))
     # Replace backslashes so assertions work identically on Windows.
     cls.resource_path = static_path.replace('\\', '/')
     cls.project_path = base_dir.replace('\\', '/')
     cls.file_config = Settings()
コード例 #7
0
ファイル: path.py プロジェクト: ares201005/pyiron
    def _get_project_from_path(full_path):
        """
        Split an absolute path into root_path and project_path using the
        top_path() lookup provided by Settings().

        Args:
            full_path (str): absolute path

        Returns:
            str, str: root_path, project_path (relative to the root)
        """
        root_path = Settings().top_path(full_path)
        project_path = posixpath.relpath(full_path, root_path)
        return root_path, project_path
コード例 #8
0
 def setUpClass(cls):
     """Build a Settings object backed by an sqlite file for these tests."""
     # as_posix() already yields forward slashes; the replace is a belt-and-
     # braces normalization kept from the original behavior.
     cls.resource_path = (
         Path(__file__).expanduser().resolve().absolute().as_posix().replace("\\", "/")
     )
     cls.test_config = Settings(config={
         "sql_file": "sqlite.db",
         "project_paths": os.path.join(cls.resource_path, "../../../../.."),
         "resource_paths": os.path.join(cls.resource_path, "../../../../static"),
     })
コード例 #9
0
def set_logging_level(level, channel=None):
    """
    Set the level for the pyiron logger handlers.

    Args:
        level (str): 'DEBUG, INFO, WARN'
        channel (int): 0: file_log, 1: stream, None: both
    """
    from pyiron.base.settings.generic import Settings
    s = Settings()
    # BUGFIX: the previous truthiness test ("if channel:") treated channel=0
    # (the file_log handler, per the docstring) like None and reset BOTH
    # handlers instead of only the requested one.
    if channel is not None:
        s.logger.handlers[channel].setLevel(level)
    else:
        s.logger.handlers[0].setLevel(level)
        s.logger.handlers[1].setLevel(level)
コード例 #10
0
ファイル: update.py プロジェクト: ares201005/pyiron
def database():
    """
    Convenience function to update an existing (older) version of the database
    to the latest version by modifying the database columns. This is only
    possible if no other pyiron session is accessing the database, therefore
    the script might take some time to be executed successfully.
    """
    settings = Settings()
    settings.open_connection()
    db = settings.database
    try:
        # Add columns missing from older schemas; widen column types that
        # changed between versions. Headings are re-read before each check
        # because the preceding statements may alter the table.
        if "projectPath".lower() not in db.get_table_headings(db.table_name):
            print("add missing column: " + "projectPath")
            db.add_column(col_name="projectPath", col_type="varchar(255)")
        if "subJob".lower() not in db.get_table_headings(db.table_name):
            print("add missing column: " + "subJob")
            db.add_column(col_name="subJob", col_type="varchar(255)")
        else:
            print("change data type of subJob")
            db.change_column_type(col_name="subJob", col_type="varchar(255)")
        if "masterID".lower() not in db.get_table_headings(db.table_name):
            print("add missing column: " + "masterid")
            db.add_column(col_name="masterid", col_type="bigint")
        if "hamversion" in db.get_table_headings(db.table_name):
            print("change data type hamversion")
            db.change_column_type(col_name="hamversion", col_type="varchar(50)")
        if "job" in db.get_table_headings(db.table_name):
            print("change data type job")
            db.change_column_type(col_name="job", col_type="varchar(50)")
        print(db.table_name, " - database successful updated")
    except ValueError:
        print(db.table_name, " - database failed")
    print("database update done")
コード例 #11
0
class TestConfigSettingsStatic(unittest.TestCase):
    """Static checks of the file-based Settings configuration."""

    def setUp(self):
        file_dir = os.path.dirname(os.path.abspath(__file__))
        # Forward slashes keep path comparisons platform independent.
        self.resource_path = os.path.abspath(
            os.path.join(file_dir, '../../static')).replace('\\', '/')
        self.project_path = file_dir.replace('\\', '/')
        self.file_config = Settings()

    # The database-connection assertions below are currently disabled:
    # def test_file_db_connection_name(self):
    #     self.assertEqual(self.file_config.db_connection_name, 'DEFAULT')
    #
    # def test_file_db_connection_string(self):
    #     self.assertEqual(self.file_config.db_connection_string, 'sqlite:///' + self.resource_path + '/sqlite.db')
    #
    # def test_file_db_connection_table(self):
    #     self.assertEqual(self.file_config.db_connection_table, 'jobs_pyiron')

    # def test_file_db_translate_dict(self):
    #     self.assertEqual(self.file_config.db_translate_dict,
    #                      {'DEFAULT': {self.project_path: self.project_path}})

    # def test_file_db_name(self):
    #     self.assertEqual(self.file_config.db_name, 'DEFAULT')

    def test_file_top_path(self):
        # top_path of a sub-directory must resolve to a prefix of the project.
        found_root = self.file_config.top_path(self.project_path + '/test')
        self.assertTrue(found_root in self.project_path)

    def test_file_resource_paths(self):
        # At least one configured resource path must match the static dir.
        self.assertTrue(
            any(path for path in self.file_config.resource_paths
                if path in self.resource_path))

    def test_file_login_user(self):
        self.assertEqual(self.file_config.login_user, 'pyiron')
コード例 #12
0
import tarfile
import shutil
# NOTE(review): this string sits AFTER the imports, so Python does not treat
# it as the module docstring -- it is just an unused string expression.
"""
The JobCore the most fundamental pyiron job class.
"""

# Module metadata, kept in dunder variables per pyiron convention.
__author__ = "Jan Janssen"
__copyright__ = "Copyright 2019, Max-Planck-Institut für Eisenforschung GmbH - " \
                "Computational Materials Design (CM) Department"
__version__ = "1.0"
__maintainer__ = "Jan Janssen"
__email__ = "*****@*****.**"
__status__ = "production"
__date__ = "Sep 1, 2017"

# Module-level Settings singleton shared by the classes defined below.
s = Settings()

class JobCore(PyironObject):
    """
    The JobCore the most fundamental pyiron job class. From this class the GenericJob as well as the reduced JobPath
    class are derived. While JobPath only provides access to the HDF5 file it is about one order faster.

    Args:
        project (ProjectHDFio): ProjectHDFio instance which points to the HDF5 file the job is stored in
        job_name (str): name of the job, which has to be unique within the project

    Attributes:

        .. attribute:: job_name
コード例 #13
0
    def _read_vol_data_old(filename, normalize=True):
        """
        Convenience method to parse a generic volumetric static file in the vasp like format.
        Used by subclasses for parsing the file. This routine is adapted from the pymatgen vasp VolumetricData
        class with very minor modifications. The new parser is faster

        http://pymatgen.org/_modules/pymatgen/io/vasp/outputs.html#VolumetricData.

        Args:
            filename (str): Path of file to parse
            normalize (boolean): Flag to normalize by the volume of the cell

        Returns:
            tuple: (atoms, data_list) where atoms is the structure parsed from
                the POSCAR-like header and data_list holds one 3d array (or
                two for spin-polarized files: total and difference density);
                (None, None) when the file is empty or no grid was found
        """
        # Empty file: warn and bail out instead of raising.
        if os.stat(filename).st_size == 0:
            s = Settings()
            s.logger.warning("File:" + filename +
                             "seems to be corrupted/empty")
            return None, None
        poscar_read = False
        poscar_string = list()  # header lines collected until the first blank line
        dataset = list()  # grid currently being filled
        all_dataset = list()  # one completed grid per data block (2 for spin)
        dim = None  # grid dimensions [nx, ny, nz] once parsed
        dimline = None  # raw dimension line; a repeat of it starts a new block
        read_dataset = False
        ngrid_pts = 0
        data_count = 0
        atoms = None
        volume = None
        with open(filename, "r") as f:
            for line in f:
                line = line.strip()
                if read_dataset:
                    toks = line.split()
                    for tok in toks:
                        if data_count < ngrid_pts:
                            # This complicated procedure is necessary because
                            # vasp outputs x as the fastest index, followed by y
                            # then z.
                            x = data_count % dim[0]
                            y = int(math.floor(data_count / dim[0])) % dim[1]
                            z = int(math.floor(data_count / dim[0] / dim[1]))
                            dataset[x, y, z] = float(tok)
                            data_count += 1
                    if data_count >= ngrid_pts:
                        # Grid complete: stash it and reset counters for a
                        # possible second (spin) data block.
                        read_dataset = False
                        data_count = 0
                        all_dataset.append(dataset)
                elif not poscar_read:
                    # Accumulate the POSCAR-like header until a blank line.
                    if line != "" or len(poscar_string) == 0:
                        poscar_string.append(line)
                    elif line == "":
                        try:
                            atoms = atoms_from_string(poscar_string)
                        except ValueError:
                            # Header lacks species names; fall back to the
                            # POTCAR file located next to the parsed file.
                            pot_str = filename.split("/")
                            pot_str[-1] = "POTCAR"
                            potcar_file = "/".join(pot_str)
                            species = get_species_list_from_potcar(potcar_file)
                            atoms = atoms_from_string(poscar_string,
                                                      species_list=species)
                        volume = atoms.get_volume()
                        poscar_read = True
                elif not dim:
                    # First line after the header holds the grid dimensions.
                    dim = [int(i) for i in line.split()]
                    ngrid_pts = dim[0] * dim[1] * dim[2]
                    dimline = line
                    read_dataset = True
                    dataset = np.zeros(dim)
                elif line == dimline:
                    # A repeated dimension line marks the next data block.
                    read_dataset = True
                    dataset = np.zeros(dim)
            if not normalize:
                # Divide by 1.0 below, i.e. leave the raw values untouched.
                volume = 1.0
            if len(all_dataset) == 0:
                s = Settings()
                s.logger.warning("File:" + filename +
                                 "seems to be corrupted/empty")
                return None, None
            if len(all_dataset) == 2:
                # Spin-polarized file: total density plus spin difference.
                data = {
                    "total": all_dataset[0] / volume,
                    "diff": all_dataset[1] / volume
                }
                return atoms, [data["total"], data["diff"]]
            else:
                data = {"total": all_dataset[0] / volume}
                return atoms, [data["total"]]
コード例 #14
0
    def _read_vol_data(self, filename, normalize=True):
        """
        Parses the VASP volumetric type files (CHGCAR, LOCPOT, PARCHG etc). Rather than looping over individual values,
        this function utilizes numpy indexing resulting in a parsing efficiency of at least 10%.

        Args:
            filename (str): File to be parsed
            normalize (bool): Normalize the data with respect to the volume (Recommended for CHGCAR files)

        Returns:
            pyiron.atomistics.structure.atoms.Atoms: The structure of the volumetric snapshot
            list: A list of the volumetric data (length >1 for CHGCAR files with spin)

        """
        # Empty file: warn and return sentinel values instead of raising.
        if not os.path.getsize(filename) > 0:
            s = Settings()
            s.logger.warning("File:" + filename + "seems to be empty! ")
            return None, None
        with open(filename, "r") as f:
            struct_lines = list()  # POSCAR-like header lines
            get_grid = False  # set once the blank line ending the header is seen
            n_x = 0
            n_y = 0
            n_z = 0
            n_grid = 0  # total number of grid points n_x * n_y * n_z
            n_grid_str = None  # dimension line; its repetition starts a spin block
            total_data_list = list()
            atoms = None
            for line in f:
                strip_line = line.strip()
                if not get_grid:
                    # Still inside the header; a blank line terminates it.
                    if strip_line == "":
                        get_grid = True
                    struct_lines.append(strip_line)
                elif n_grid_str is None:
                    # First line after the header: the grid dimensions.
                    n_x, n_y, n_z = [int(val) for val in strip_line.split()]
                    n_grid = n_x * n_y * n_z
                    n_grid_str = " ".join(
                        [str(val) for val in [n_x, n_y, n_z]])
                    # VASP writes 5 values per row; read the full rows in one
                    # vectorized call, then pick up the ragged last row.
                    load_txt = np.genfromtxt(f, max_rows=int(n_grid / 5))
                    load_txt = np.hstack(load_txt)
                    if n_grid % 5 != 0:
                        add_line = np.genfromtxt(f, max_rows=1)
                        load_txt = np.append(load_txt, np.hstack(add_line))
                    # Reorder from VASP's x-fastest layout into the 3d grid.
                    total_data = self._fastest_index_reshape(
                        load_txt, [n_x, n_y, n_z])
                    try:
                        atoms = atoms_from_string(struct_lines)
                    except ValueError:
                        # Header lacks species names; fall back to the POTCAR
                        # file located next to the parsed file.
                        pot_str = filename.split("/")
                        pot_str[-1] = "POTCAR"
                        potcar_file = "/".join(pot_str)
                        species = get_species_list_from_potcar(potcar_file)
                        atoms = atoms_from_string(struct_lines,
                                                  species_list=species)
                    if normalize:
                        total_data /= atoms.get_volume()
                    total_data_list.append(total_data)
                elif atoms is not None:
                    # Look for a repeated dimension line, which marks the
                    # start of an additional (spin) data block.
                    grid_str = n_grid_str.replace(" ", "")
                    if grid_str == strip_line.replace(" ", ""):
                        load_txt = np.genfromtxt(f, max_rows=int(n_grid / 5))
                        load_txt = np.hstack(load_txt)
                        if n_grid % 5 != 0:
                            add_line = np.genfromtxt(f, max_rows=1)
                            load_txt = np.hstack(
                                np.append(load_txt, np.hstack(add_line)))
                        total_data = self._fastest_index_reshape(
                            load_txt, [n_x, n_y, n_z])
                        if normalize:
                            total_data /= atoms.get_volume()
                        total_data_list.append(total_data)
            if len(total_data_list) == 0:
                # Header parsed but no grid data found.
                s = Settings()
                s.logger.warning(
                    "File:" + filename +
                    "seems to be corrupted/empty even after parsing!")
                return None, None
            return atoms, total_data_list
コード例 #15
0
 def setUpClass(cls):
     """Create a Settings instance configured relative to this test file."""
     cls.resource_path = (
         Path(__file__).expanduser().resolve().absolute().as_posix().replace('\\', '/')
     )
     config = {
         'sql_file': 'sqlite.db',
         'project_paths': os.path.join(cls.resource_path, '../../../../..'),
         'resource_paths': os.path.join(cls.resource_path, '../../../../static'),
     }
     cls.test_config = Settings(config=config)