Code Example #1
class TestVerdiDataArray(AiidaTestCase):
    """Testing verdi data array."""
    @classmethod
    def setUpClass(cls):  # pylint: disable=arguments-differ
        super().setUpClass()

    def setUp(self):
        self.arr = ArrayData()
        self.arr.set_array('test_array', np.array([0, 1, 3]))
        self.arr.store()

        self.cli_runner = CliRunner()

    def test_arrayshowhelp(self):
        output = sp.check_output(['verdi', 'data', 'array', 'show', '--help'])
        self.assertIn(b'Usage:', output,
                      'Sub-command verdi data array show --help failed.')

    def test_arrayshow(self):
        options = [str(self.arr.id)]
        res = self.cli_runner.invoke(cmd_array.array_show,
                                     options,
                                     catch_exceptions=False)
        self.assertEqual(res.exit_code, 0,
                         'The command did not finish correctly')
Code Example #2
def get_forces(parameters):
    """Return the forces array [eV/ang] from the output parameters node."""
    # cclib parser keeps forces in au
    forces_au = np.array(parameters['grads'][-1])
    forces_arr = ArrayData()
    forces_arr.set_array(name='forces', array=forces_au * ANG_TO_BOHR / EV_TO_EH)
    return forces_arr
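
A minimal usage sketch of the function above, assuming the standard conversion factors (in the plugin they are defined elsewhere) and with a plain dict standing in for the output parameters node. Creating ArrayData nodes requires a loaded AiiDA profile (e.g. inside `verdi shell`).

import numpy as np
from aiida.orm import ArrayData

ANG_TO_BOHR = 1.8897259886  # assumed value: Bohr per Angstrom
EV_TO_EH = 0.0367493221     # assumed value: Hartree per eV

parameters = {'grads': [[[0.0, 0.0, 0.01]]]}  # hypothetical cclib-style gradients (au)
forces_node = get_forces(parameters)
print(forces_node.get_array('forces'))  # -> [[0. 0. ~0.514]] in eV/Angstrom
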
Code Example #3
    def parse_sqw(self, **kwargs):
        """Parse the spin-spin correlation function file
        """
        import numpy as np
        import pandas as pd
        from scipy import signal
        # Check that the retrieved folder is present
        try:
            self.retrieved
        except NotExistent:
            return self.exit_codes.ERROR_NO_RETRIEVED_FOLDER

        sqw_filename = self.node.get_option('sqw_filename')

        files_retrieved = self.retrieved.list_object_names()

        files_expected = [sqw_filename]

        if not set(files_expected) <= set(files_retrieved):
            self.logger.error("Found files '{}', expected to find '{}'".format(
                files_retrieved, files_expected))
            return self.exit_codes.ERROR_MISSING_OUTPUT_FILES

        self.logger.info("Parsing '{}'".format(sqw_filename))

        try:
            # Read the file from the retrieved folder, not the working directory
            with self.retrieved.open(sqw_filename, 'r') as handle:
                sqwa = pd.read_csv(handle,
                                   delim_whitespace=True,
                                   header=None).values
        except OSError:
            return self.exit_codes.ERROR_READING_OUTPUT_FILE

        qd = int(sqwa[sqwa.shape[0] - 1, 0])
        ed = int(sqwa[sqwa.shape[0] - 1, 4])
        sqw_data = ArrayData()
        sigma = 1.50
        gauss = signal.gaussian(ed, std=sigma)
        # AiiDA array names must be plain identifiers, so store the components
        # under simple names; keep LaTeX strings such as r'$S_x(q,\omega)$ [meV]'
        # as plot labels instead.
        sqw_labels = ['S_x', 'S_y', 'S_z', 'S2']
        # Perform a convolution with a windowing function for each q-point
        for iq in range(0, qd):
            indx = np.where(sqwa[:, 0] == (iq + 1))[0]
            for ii in range(0, 4):
                sqwa[indx, ii + 5] =\
                    signal.convolve(sqwa[indx, ii + 5], gauss, mode='same')
        # Find the peaks and normalize the data
        for ii in range(5, len(sqwa[0])):
            sqw = np.transpose((np.reshape(sqwa[:, ii],
                                           (qd, ed))[:, 0:int(ed / 2)]))
            normMat = np.diag(1.0 / np.amax(sqw, axis=0))
            sqw = np.matmul(sqw, normMat)
            sqw_data.set_array(sqw_labels[ii - 5], sqw)

        self.out("sqw", sqw_data)
        return ExitCode(0)
Code Example #4
    def _parse_folders(self, retrieved_folders, parser_params):

        if 'heights' in parser_params:
            heights = parser_params['heights']
        else:
            heights = [2.0]

        # By default, don't re-orient cube
        orient_cube = False
        if 'orient_cube' in parser_params:
            orient_cube = parser_params['orient_cube']

        out_array = ArrayData()

        add_suppl = True

        for retrieved_fd in retrieved_folders:
            for filename in retrieved_fd.list_object_names():
                if filename.endswith(".cube"):

                    with retrieved_fd.open(filename) as handle:
                        cube = Cube.from_file_handle(handle)

                    if orient_cube:
                        self._orient_cube(cube)

                    cube_data = None
                    h_added = []

                    for h in heights:
                        try:
                            cube_plane = cube.get_plane_above_topmost_atom(h)
                            cube_plane = np.expand_dims(cube_plane, axis=2)
                            if cube_data is None:
                                cube_data = cube_plane
                            else:
                                cube_data = np.concatenate(
                                    (cube_data, cube_plane), axis=2)
                            h_added.append(h)
                        except IndexError:
                            pass

                    if cube_data is None:
                        # None of the heights were inside the calculated box
                        return

                    arr_label = "cube_" + os.path.splitext(
                        filename)[0].replace('-', '').replace('+', '')

                    out_array.set_array(arr_label, cube_data)

                    if add_suppl:
                        out_array.set_array('x_arr', cube.x_arr_ang)
                        out_array.set_array('y_arr', cube.y_arr_ang)
                        out_array.set_array('h_arr', np.array(h_added))
                        add_suppl = False

        self.out('cube_planes_array', out_array)
Code Example #5
def calculate_invariant_with_parities(dimensionality: orm.Int,
                                      scf_out_params: orm.Dict,
                                      par_data: orm.ArrayData) -> orm.Dict:
    """Calculate the z2 invariant from the parities using the output of a BandsxCalculation."""
    dim = dimensionality.value

    parities = par_data.get_array('par')

    n_el = int(scf_out_params.get_dict()['number_of_electrons'])
    if dim == 2:
        x = 1
        for p in parities:
            delta = 1
            for i in range(0, n_el, 2):
                delta *= p[i]

            x *= delta

        if x == 1:
            res = {'nu': 0}
        elif x == -1:
            res = {'nu': 1}
        else:
            res = {'nu': -1}
            # raise exceptions.OutputParsingError(
            #     'Invalid result for z2 using parities')

    elif dim == 3:
        raise NotImplementedError('dimensionality = 3 not implemented.')
    else:
        raise exceptions.InputValidationError(
            'dimensionality must be either 2 or 3')

    return orm.Dict(dict=res)
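
The loop above implements the Fu-Kane parity criterion: at each time-reversal-invariant momentum the parity eigenvalues of the occupied Kramers pairs are multiplied together, and the product over all momenta gives (-1)^nu. A small self-contained illustration with hypothetical eigenvalues:

# Hypothetical parity eigenvalues: one row per time-reversal-invariant
# momentum, one entry per band; Kramers partners are adjacent columns,
# so only every second eigenvalue is counted.
parities = [[+1, +1, -1, -1],
            [+1, +1, +1, +1],
            [-1, -1, +1, +1],
            [+1, +1, +1, +1]]
n_el = 4  # four electrons -> two occupied Kramers pairs

x = 1
for p in parities:
    delta = 1
    for i in range(0, n_el, 2):
        delta *= p[i]
    x *= delta

print('nu =', 0 if x == 1 else 1)  # here x = +1, so nu = 0 (trivial)
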
Code Example #6
def create_non_coll_array(**arrays):
    arraydata = ArrayData()
    arraydata.set_array('grid_X', arrays["q"].get_array("grid_X"))
    arraydata.set_array('grid_Y', arrays["q"].get_array("grid_Y"))
    arraydata.set_array('STM_q', arrays["q"].get_array("STM"))
    for spinmod in ("x", "y", "z"):
        arraydata.set_array('STM_s{}'.format(spinmod),
                            arrays[spinmod].get_array("STM"))

    return arraydata
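
Each keyword argument to create_non_coll_array is expected to be an ArrayData node: the node passed as q must carry 'grid_X', 'grid_Y' and 'STM' arrays, while the x, y and z nodes only need an 'STM' array. A hypothetical call (input node names assumed):

combined = create_non_coll_array(q=stm_q, x=stm_x, y=stm_y, z=stm_z)
print(combined.get_arraynames())
# grid_X, grid_Y, STM_q, STM_sx, STM_sy, STM_sz
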
Code Example #7
File: kpoint_grids.py Project: Crivella/mypyutils
def kpt_crop(kpoints: orm.KpointsData, centers: orm.ArrayData,
             radii: orm.ArrayData, anticrop: orm.Bool) -> orm.KpointsData:
    kpt_cryst = kpoints.get_kpoints_mesh(print_list=True)
    cell = kpoints.cell
    recipr = recipr_base(cell)

    centers = centers.get_array('centers')
    centers = centers.dot(recipr)
    radii = radii.get_array('radii')

    kpt, wgt = _kpt_crop(kpt_cryst,
                         recipr,
                         centers=centers,
                         radii=radii,
                         anticrop=anticrop.value)

    res = orm.KpointsData()
    res.set_cell(cell)
    res.set_kpoints(kpt, cartesian=True, weights=wgt)

    return res
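
A hypothetical invocation (input values assumed; recipr_base and _kpt_crop are helpers from the same module): crop a uniform 4x4x4 mesh to a sphere of radius 0.3 around Gamma.

import numpy as np
from aiida import orm

kpoints = orm.KpointsData()
kpoints.set_cell(np.eye(3))  # simple cubic cell, a = 1 Angstrom
kpoints.set_kpoints_mesh([4, 4, 4])

centers = orm.ArrayData()
centers.set_array('centers', np.array([[0.0, 0.0, 0.0]]))  # Gamma, crystal coords
radii = orm.ArrayData()
radii.set_array('radii', np.array([0.3]))  # assumed units: 1/Angstrom

cropped = kpt_crop(kpoints, centers, radii, anticrop=orm.Bool(False))
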
Code Example #8
    def parse(self, **kwargs):
        try:
            output_folder = self.retrieved
        except exceptions.NotExistent:
            return self.exit_codes.ERROR_NO_RETRIEVED_FOLDER
        try:
            with output_folder.open(self.node.get_attribute('input_filename'),
                                    'r') as handle:
                params = json.load(handle)
            with output_folder.open(params['disp_file'], 'r') as handle:
                headers = self.get_headers(handle)
                result = self.parse_lcurve(handle)
        except (OSError, IOError):
            return self.exit_codes.ERROR_READING_OUTPUT_FILE

        if result is None:
            return self.exit_codes.ERROR_INVALID_OUTPUT

        arraydata = ArrayData()
        if headers is None or len(headers) != result.shape[1]:
            arraydata.set_array('lcurve.out', result)
        else:
            for i, h in enumerate(headers):
                arraydata.set_array(h, result[:, i])
        self.out('lcurve', arraydata)
        self.out('param', Dict(dict=params))
Code Example #9
    def _aiida_ndb_hf(self, data):
        """Save the data from ndb.HF_and_locXC

        """
        pdata = ArrayData()
        pdata.set_array('Sx', numpy.array(data['Sx']))
        pdata.set_array('Vxc', numpy.array(data['Vxc']))
        return pdata
Code Example #10
    def _parse_dos(self):
        """Parse DosMain DOS output."""
        if self.node.inputs.dos_method == 'tetrahedron':
            filename = 'aiida.DOS.Tetrahedron'
        else:
            filename = 'aiida.DOS.Gaussian'

        if filename not in self.retrieved.list_object_names():
            return self.exit_codes.ERROR_DOS_OUTPUT_MISSING

        try:
            with self.retrieved.open(filename, 'r') as stream:
                dos = np.loadtxt(stream)
        except FileNotFoundError:
            return self.exit_codes.ERROR_DOS_OUTPUT_MISSING
        except OSError:
            return self.exit_codes.ERROR_DOS_OUTPUT_READ

        dos_ad = ArrayData()
        dos_ad.set_array('dos', dos)
        self.out('output_dos', dos_ad)

        return ExitCode(0)
Code Example #11
def test_calcfunction_band_gap_with_spin(db_test_app, data_regression):
    data = get_test_data("edge_at_fermi")
    array = ArrayData()
    array.set_array("energies", np.array(data.energies))
    array.set_array("total_alpha", np.array(data.densities))
    array.set_array("total_beta", np.array(data.densities))
    outputs, node = calcfunction_band_gap.run_get_node(
        doss_array=array,
        doss_results=Dict(dict={
            "fermi_energy": data.fermi,
            "units": {
                "energy": "eV"
            }
        }),
        dtol=Float(1e-6),
        try_fshifts=List(list=data.try_fshifts),
        metadata={"store_provenance": True},
    )
    assert node.is_finished_ok, node.exit_status
    assert "results" in node.outputs
    data_regression.check(recursive_round(node.outputs.results.attributes, 4))
Code Example #12
def get_force_constants(force_constants: orm.ArrayData) -> str:
    """Get the force constants in text format

    :param force_constants: Array with the information needed for the force constants
    :type force_constants: orm.ArrayData
    :return: force constants in text
    :rtype: str
    """
    force_constants = force_constants.get_array('force_constants')

    fc_shape = force_constants.shape
    fc_txt = '%4d\n' % (fc_shape[0])
    for i in range(fc_shape[0]):
        for j in range(fc_shape[1]):
            fc_txt += '%4d%4d\n' % (i + 1, j + 1)
            for vec in force_constants[i][j]:
                fc_txt += ('%22.15f' * 3 + '\n') % tuple(vec)

    return fc_txt
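
A quick sketch of the layout this produces, using a toy single-atom array (requires a loaded AiiDA profile to create the node):

import numpy as np
from aiida import orm

fc = orm.ArrayData()
fc.set_array('force_constants', np.eye(3).reshape(1, 1, 3, 3))
print(get_force_constants(fc))
# prints the atom count ("   1"), an "i j" header per atom pair ("   1   1"),
# then one line per row of the 3x3 block
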
Code Example #13
    def parse_traj_file(self, trajectory_filename: str) -> ArrayData:
        """Parse the trajectory file.

        :param trajectory_filename: trajectory file for the single point calculation
        :type trajectory_filename: str
        :raises IOError: if the file is empty
        :raises IOError: if the file has multiple steps instead of only one
        :raises IOError: if a required field is not found
        :return: array with the forces and charges (if present) for the calculation
        :rtype: orm.ArrayData
        """
        with self.retrieved.open(trajectory_filename, 'r') as handle:
            traj_steps = list(iter_trajectories(handle))
        if not traj_steps:
            raise IOError('trajectory file empty')
        if len(traj_steps) > 1:
            raise IOError(
                'trajectory file has multiple steps (expecting only one)')

        traj_step = traj_steps[0]  # type: TRAJ_BLOCK

        for field in ['fx', 'fy', 'fz']:
            if field not in traj_step.atom_fields:
                raise IOError(
                    f'trajectory file does not contain field {field}')

        array_data = ArrayData()

        array_data.set_array(
            'forces',
            np.array(
                [
                    traj_step.atom_fields['fx'],
                    traj_step.atom_fields['fy'],
                    traj_step.atom_fields['fz'],
                ],
                dtype=float,
            ).T,
        )

        if 'q' in traj_step.atom_fields:
            array_data.set_array(
                'charges', np.array(traj_step.atom_fields['q'], dtype=float))

        return array_data
Code Example #14
    def parse(self, **kwargs):
        """
        Parses the datafolder, stores results.
        """
        # retrieve resources
        resources, exit_code = self.get_parsing_resources(kwargs)
        if exit_code is not None:
            return exit_code
        trajectory_filename, trajectory_filepath, info_filepath = resources

        # parse log file
        log_data, exit_code = self.parse_log_file()
        if exit_code is not None:
            return exit_code

        # parse trajectory file
        trajectory_txt = self.retrieved.get_object_content(trajectory_filename)
        if not trajectory_txt:
            self.logger.error("trajectory file empty")
            return self.exit_codes.ERROR_TRAJ_PARSING
        positions, forces, charges, symbols, cell2 = read_lammps_positions_and_forces_txt(
            trajectory_txt)

        # save forces and charges into node
        array_data = ArrayData()
        array_data.set_array('forces', forces)
        if charges is not None:
            array_data.set_array('charges', charges)
        self.out('arrays', array_data)

        # save results into node
        output_data = log_data["data"]
        if 'units_style' in output_data:
            output_data.update(get_units_dict(output_data['units_style'],
                                              ["energy", "force", "distance"]))
        else:
            self.logger.warning("units missing in log")
        self.add_warnings_and_errors(output_data)
        self.add_standard_info(output_data)
        parameters_data = Dict(dict=output_data)
        self.out('results', parameters_data)

        if output_data["errors"]:
            return self.exit_codes.ERROR_LAMMPS_RUN
Code Example #15
    def parse(self, **kwargs):
        """Parse outputs, store results in database."""
        try:
            output_folder = self.retrieved
        except exceptions.NotExistent:
            return self.exit_codes.ERROR_NO_RETRIEVED_FOLDER

        # parse stderr
        pbs_error = None
        stderr_file = self.node.get_option("scheduler_stderr")
        if stderr_file in output_folder.list_object_names():
            with output_folder.open(stderr_file) as fileobj:
                pbs_exit_code = parse_pbs_stderr(fileobj)
            if pbs_exit_code:
                pbs_error = self.exit_codes[pbs_exit_code]

        # parse stdout file
        stdout_error = None
        stdout_data = {}
        stdout_fname = self.node.get_option("stdout_file_name")
        if stdout_fname not in self.retrieved.list_object_names():
            stdout_error = self.exit_codes.ERROR_OUTPUT_FILE_MISSING
        else:
            with output_folder.open(stdout_fname) as handle:
                stdout_data = read_properties_stdout(handle.read())
            stdout_exit_code = stdout_data.pop("exit_code", None)
            if stdout_exit_code:
                stdout_error = self.exit_codes[stdout_exit_code]

        # parse iso file
        iso_error = None
        iso_data = {}
        iso_arrays = None
        output_isovalue_fname = self.node.get_option("output_isovalue_fname")
        if output_isovalue_fname not in output_folder.list_object_names():
            iso_error = self.exit_codes.ERROR_ISOVALUE_FILE_MISSING
        else:
            try:
                with output_folder.open(output_isovalue_fname) as handle:
                    iso_data, iso_arrays = parse_crystal_fort25_aiida(handle)
            except Exception:
                traceback.print_exc()
                iso_error = self.exit_codes.ERROR_PARSING_ISOVALUE_FILE

        final_data = self.merge_output_dicts(stdout_data, iso_data)

        # log errors
        errors = final_data.get("errors", [])
        parser_errors = final_data.get("parser_errors", [])
        if parser_errors:
            self.logger.warning(
                "the parser raised the following errors:\n{}".format(
                    "\n\t".join(parser_errors)
                )
            )
        if errors:
            self.logger.warning(
                "the calculation raised the following errors:\n{}".format(
                    "\n\t".join(errors)
                )
            )

        # make output nodes
        self.out("results", Dict(dict=final_data))
        if iso_arrays is not None:
            array_data = ArrayData()
            for name, array in iso_arrays.items():
                array_data.set_array(name, np.array(array))
            self.out("arrays", array_data)

        if pbs_error is not None:
            return pbs_error

        if stdout_error is not None:
            return stdout_error

        if iso_error is not None:
            return iso_error

        return ExitCode()
Code Example #16
    def _aiida_bands_data(self, data, cell, kpoints_dict):
        if not data:
            return False
        kpt_idx = sorted(data.keys())  # list of kpoint indices
        try:
            k_list = [kpoints_dict[i]
                      for i in kpt_idx]  # list of k-point triplets
        except KeyError:
            # kpoint triplets are not present (true for .qp and so on; we can not use BandsData).
            # We use the internal Yambo format [[Eo_1, Eo_2, ...], ..., [So_1, So_2, ...]]
            # with QP_TABLE [[ib_1, ik_1, isp_1], ..., [ib_n, ik_n, isp_n]].
            # Each entry in DATA has a corresponding legend in QP_TABLE that defines its details,
            # like ib = band index, ik = kpoint index, isp = spin polarization index,
            # e.g. Eo_1 => at ib_1, ik_1, isp_1.
            pdata = ArrayData()
            QP_TABLE = []
            ORD = []
            Eo = []
            E_minus_Eo = []
            So = []
            Z = []
            for ky in data.keys():  # ky == kpoint index as a string: 1, 2, ...
                for ind in range(len(data[ky]['Band'])):
                    try:
                        Eo.append(data[ky]['Eo'][ind])
                    except KeyError:
                        pass
                    try:
                        E_minus_Eo.append(data[ky]['E-Eo'][ind])
                    except KeyError:
                        pass
                    try:
                        So.append(data[ky]['Sc|Eo'][ind])
                    except KeyError:
                        pass
                    try:
                        Z.append(data[ky]['Z'][ind])
                    except KeyError:
                        pass
                    ik = int(ky)
                    ib = data[ky]['Band'][ind]
                    isp = 0
                    if 'Spin_Pol' in list(data[ky].keys()):
                        isp = data[ky]['Spin_Pol'][ind]
                    QP_TABLE.append([ik, ib, isp])
            pdata.set_array('Eo', numpy.array(Eo))
            pdata.set_array('E_minus_Eo', numpy.array(E_minus_Eo))
            pdata.set_array('So', numpy.array(So))
            pdata.set_array('Z', numpy.array(Z))
            pdata.set_array('qp_table', numpy.array(QP_TABLE))
            return pdata
        quasiparticle_bands = BandsData()
        quasiparticle_bands.set_cell(cell)
        quasiparticle_bands.set_kpoints(k_list, cartesian=True)
        # Labels come from the keys of the nested k-point data; there is a
        # uniform set of observables for each k-point, i.e. Band, Eo, ...
        # ***FIXME BUG: does not seem to handle spin polarization at all when constructing bandsdata***
        bands_labels = sorted(data[list(data.keys())[0]].keys())
        append_list = [[] for i in bands_labels]
        for kp in kpt_idx:
            for i in range(len(bands_labels)):
                append_list[i].append(data[kp][bands_labels[i]])
        generalised_bands = [numpy.array(it) for it in append_list]
        quasiparticle_bands.set_bands(bands=generalised_bands,
                                      units='eV',
                                      labels=bands_labels)
        return quasiparticle_bands
Code Example #17
    def parse(self, **kwargs):
        """Parse the retrieved folder and store results."""
        # pylint: disable=too-many-locals, too-many-branches, too-many-return-statements
        # retrieve resources
        resources = self.get_parsing_resources(kwargs, traj_in_temp=True)
        if resources.exit_code is not None:
            return resources.exit_code

        # parse log file
        log_data, exit_code = self.parse_log_file()
        if exit_code is not None:
            return exit_code

        traj_error = None
        if not resources.traj_paths:
            traj_error = self.exit_codes.ERROR_TRAJ_FILE_MISSING
        else:
            try:
                trajectory_data = LammpsTrajectory(resources.traj_paths[0])
                self.out('trajectory_data', trajectory_data)
            except Exception as err:  # pylint: disable=broad-except
                traceback.print_exc()
                self.logger.error(str(err))
                traj_error = self.exit_codes.ERROR_TRAJ_PARSING

        # save results into node
        output_data = log_data['data']
        if 'units_style' in output_data:
            output_data.update(
                get_units_dict(output_data['units_style'],
                               ['distance', 'time', 'energy']))
        else:
            self.logger.warning('units missing in log')
        self.add_warnings_and_errors(output_data)
        self.add_standard_info(output_data)
        if 'parameters' in self.node.get_incoming().all_link_labels():
            output_data['timestep_picoseconds'] = convert_units(
                self.node.inputs.parameters.dict.timestep,
                output_data['units_style'],
                'time',
                'picoseconds',
            )
        parameters_data = Dict(dict=output_data)
        self.out('results', parameters_data)

        # parse the system data file
        sys_data_error = None
        if resources.sys_paths:
            sys_data = ArrayData()
            try:
                with open(resources.sys_paths[0]) as handle:
                    names = handle.readline().strip().split()
                for i, col in enumerate(
                        np.loadtxt(resources.sys_paths[0],
                                   skiprows=1,
                                   unpack=True,
                                   ndmin=2)):
                    sys_data.set_array(names[i], col)
            except Exception:  # pylint: disable=broad-except
                traceback.print_exc()
                sys_data_error = self.exit_codes.ERROR_INFO_PARSING
            sys_data.set_attribute('units_style',
                                   output_data.get('units_style', None))
            self.out('system_data', sys_data)

        if output_data['errors']:
            return self.exit_codes.ERROR_LAMMPS_RUN

        if traj_error:
            return traj_error

        if sys_data_error:
            return sys_data_error

        if not log_data.get('found_end', False):
            return self.exit_codes.ERROR_RUN_INCOMPLETE
        return None
Code Example #18
    def _aiida_ndb_qp(self, data):
        """
        Save the data from ndb.QP to the db
        """
        pdata = ArrayData()
        pdata.set_array('Eo', numpy.array(data['Eo']))
        pdata.set_array('E_minus_Eo', numpy.array(data['E-Eo']))
        pdata.set_array('Z', numpy.array(data['Z']))
        pdata.set_array('qp_table', numpy.array(data['qp_table']))
        try:
            pdata.set_array('So', numpy.array(data['So']))
        except KeyError:
            pass
        return pdata
Code Example #19
    def test_valid_node():
        """Test that the correct exceptions are thrown for incompatible nodes."""
        from aiida.orm import ArrayData, BandsData

        # Invalid node type
        node = ArrayData().store()
        with pytest.raises(ValueError):
            get_highest_occupied_band(node)

        # The `occupations` array is missing
        node = BandsData()
        node.set_array('not_occupations', numpy.array([]))
        node.store()
        with pytest.raises(ValueError):
            get_highest_occupied_band(node)

        # The `occupations` array has incorrect shape
        node = BandsData()
        node.set_array('occupations', numpy.array([1., 1.]))
        node.store()
        with pytest.raises(ValueError):
            get_highest_occupied_band(node)
Code Example #20
    def parse(self, **kwargs):
        """Parse the retrieved files of a completed `NebCalculation` into output nodes.

        Two nodes that are expected are the default 'retrieved' `FolderData` node which will store the retrieved files
        permanently in the repository. The second required node is a filepath under the key `retrieved_temporary_files`
        which should contain the temporary retrieved files.
        """
        from aiida.orm import TrajectoryData, ArrayData
        import os
        import numpy

        PREFIX = self.node.process_class._PREFIX

        # Check that the retrieved folder is there
        try:
            out_folder = self.retrieved
        except NotExistent:
            return self.exit(self.exit_codes.ERROR_NO_RETRIEVED_FOLDER)

        list_of_files = out_folder.list_object_names(
        )  # Note: this includes folders, but not the files they contain.

        # The stdout is required for parsing
        filename_stdout = self.node.get_attribute('output_filename')

        if filename_stdout not in list_of_files:
            return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)

        # Look for optional settings input node and potential 'parser_options' dictionary within it
        # Note that we look for both NEB and PW parser options under "inputs.settings.parser_options";
        # we don't even have a namespace "inputs.pw.settings".
        try:
            settings = self.node.inputs.settings.get_dict()
            parser_options = settings[self.get_parser_settings_key()]
        except (AttributeError, KeyError, NotExistent):
            settings = {}
            parser_options = {}

        # load the pw input parameters dictionary
        pw_input_dict = self.node.inputs.pw__parameters.get_dict()

        # load the neb input parameters dictionary
        neb_input_dict = self.node.inputs.parameters.get_dict()

        # First parse the Neb output
        try:
            stdout = out_folder.get_object_content(filename_stdout)
            neb_out_dict, iteration_data, raw_successful = parse_raw_output_neb(
                stdout, neb_input_dict)
            # TODO: why do we ignore raw_successful ?
        except (OSError, QEOutputParsingError):
            return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)

        for warn_type in ['warnings', 'parser_warnings']:
            for message in neb_out_dict[warn_type]:
                self.logger.warning('parsing NEB output: {}'.format(message))

        if 'QE neb run did not reach the end of the execution.' in neb_out_dict[
                'parser_warnings']:
            return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE)

        # Retrieve the number of images
        try:
            num_images = neb_input_dict['num_of_images']
        except KeyError:
            try:
                num_images = neb_out_dict['num_of_images']
            except KeyError:
                return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_PARSE)
        if num_images < 2:
            return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_PARSE)

        # Now parse the information from the individual pw calculations for the different images
        image_data = {}
        positions = []
        cells = []
        # for each image...
        for i in range(num_images):
            # check if any of the known XML output file names are present, and parse the first that we find
            relative_output_folder = os.path.join(
                '{}_{}'.format(PREFIX, i + 1), '{}.save'.format(PREFIX))
            retrieved_files = self.retrieved.list_object_names(
                relative_output_folder)
            for xml_filename in PwCalculation.xml_filenames:
                if xml_filename in retrieved_files:
                    xml_file_path = os.path.join(relative_output_folder,
                                                 xml_filename)
                    try:
                        with out_folder.open(xml_file_path) as xml_file:
                            parsed_data_xml, logs_xml = parse_pw_xml(
                                xml_file, None)
                    except IOError:
                        return self.exit(self.exit_codes.ERROR_OUTPUT_XML_READ)
                    except XMLParseError:
                        return self.exit(
                            self.exit_codes.ERROR_OUTPUT_XML_PARSE)
                    except XMLUnsupportedFormatError:
                        return self.exit(
                            self.exit_codes.ERROR_OUTPUT_XML_FORMAT)
                    except Exception:
                        import traceback
                        traceback.print_exc()
                        return self.exit(
                            self.exit_codes.ERROR_UNEXPECTED_PARSER_EXCEPTION)
                    # this image is dealt with, so break the inner loop and go to the next image
                    break
            # otherwise, if none of the filenames we tried exists, exit with an error
            else:
                return self.exit(self.exit_codes.ERROR_MISSING_XML_FILE)

            # look for pw output and parse it
            pw_out_file = os.path.join('{}_{}'.format(PREFIX, i + 1), 'PW.out')
            try:
                with out_folder.open(pw_out_file, 'r') as f:
                    pw_out_text = f.read()  # Note: read() and not readlines()
            except IOError:
                return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)

            try:
                parsed_data_stdout, logs_stdout = parse_pw_stdout(
                    pw_out_text, pw_input_dict, parser_options,
                    parsed_data_xml)
            except Exception:
                return self.exit(
                    self.exit_codes.ERROR_UNEXPECTED_PARSER_EXCEPTION)

            parsed_structure = parsed_data_stdout.pop('structure', {})
            parsed_trajectory = parsed_data_stdout.pop('trajectory', {})
            parsed_parameters = PwParser.build_output_parameters(
                parsed_data_xml, parsed_data_stdout)

            # Explicit information about k-points does not need to be queryable so we remove it from the parameters
            parsed_parameters.pop('k_points', None)
            parsed_parameters.pop('k_points_units', None)
            parsed_parameters.pop('k_points_weights', None)

            # Delete bands # TODO: this is just to make pytest happy; do we want to keep them instead?
            parsed_parameters.pop('bands', None)

            # Append the last frame of some of the smaller trajectory arrays to the parameters for easy querying
            PwParser.final_trajectory_frame_to_parameters(
                parsed_parameters, parsed_trajectory)

            # If the parser option 'all_symmetries' is False, we reduce the raw parsed symmetries to save space
            all_symmetries = False if parser_options is None else parser_options.get(
                'all_symmetries', False)
            if not all_symmetries and 'cell' in parsed_structure:
                reduce_symmetries(parsed_parameters, parsed_structure,
                                  self.logger)

            structure_data = convert_qe2aiida_structure(parsed_structure)

            key = 'pw_output_image_{}'.format(i + 1)
            image_data[key] = parsed_parameters

            positions.append([site.position for site in structure_data.sites])
            cells.append(structure_data.cell)

            # Add also PW warnings and errors to the neb output data, avoiding repetitions.
            for log_type in ['warning', 'error']:
                for message in logs_stdout[log_type]:
                    formatted_message = '{}: {}'.format(log_type, message)
                    if formatted_message not in neb_out_dict['warnings']:
                        neb_out_dict['warnings'].append(formatted_message)

        # Symbols can be obtained simply from the last image
        symbols = [str(site.kind_name) for site in structure_data.sites]

        output_params = Dict(
            dict=dict(list(neb_out_dict.items()) + list(image_data.items())))
        self.out('output_parameters', output_params)

        trajectory = TrajectoryData()
        trajectory.set_trajectory(
            stepids=numpy.arange(1, num_images + 1),
            cells=numpy.array(cells),
            symbols=symbols,
            positions=numpy.array(positions),
        )
        self.out('output_trajectory', trajectory)

        if parser_options is not None and parser_options.get(
                'all_iterations', False):
            if iteration_data:
                arraydata = ArrayData()
                for k, v in iteration_data.items():
                    arraydata.set_array(k, numpy.array(v))
                self.out('iteration_array', arraydata)

        # Load the original and interpolated energy profile along the minimum-energy path (mep)
        try:
            filename = PREFIX + '.dat'
            with out_folder.open(filename, 'r') as handle:
                mep = numpy.loadtxt(handle)
        except Exception:
            self.logger.warning(
                'could not open expected output file `{}`.'.format(filename))
            mep = numpy.array([[]])

        try:
            filename = PREFIX + '.int'
            with out_folder.open(filename, 'r') as handle:
                interp_mep = numpy.loadtxt(handle)
        except Exception:
            self.logger.warning(
                'could not open expected output file `{}`.'.format(filename))
            interp_mep = numpy.array([[]])

        # Create an ArrayData with the energy profiles
        mep_arraydata = ArrayData()
        mep_arraydata.set_array('mep', mep)
        mep_arraydata.set_array('interpolated_mep', interp_mep)
        self.out('output_mep', mep_arraydata)

        return
Code Example #21
File: core_parser.py Project: MXJK851/AiiDA_UppASD
    def parse(self, **kwargs):
        """
        In this version of API we parse :
        totenergy.SCsurf_T.out coord.SCsurf_T.out  qpoints.out  averages.SCsurf_T.out  qm_sweep.SCsurf_T.out  qm_minima.SCsurf_T.out

        """
        output_folder = self.retrieved

        retrived_file_name_list = output_folder.list_object_names()
        for name in retrived_file_name_list:
            if 'coord' in name:
                coord_filename = name
            if 'qpoints' in name:
                qpoints_filename = name
            if 'averages' in name:
                averages_filename = name
            if 'qm_sweep' in name:
                qm_sweep_filename = name
            if 'qm_minima' in name:
                qm_minima_filename = name
            if 'totenergy' in name:
                totenergy_filename = name
        # parse totenergy.xx.out
        self.logger.info("Parsing '{}'".format(totenergy_filename))
        with output_folder.open(totenergy_filename, 'rb') as f:
            Iter_num, Tot, Exc, Ani, DM, PD, BiqDM, BQ, Dip, Zeeman, LSF, Chir = total_energy_file_paser(
                f)
            output_totenergy = ArrayData()
            output_totenergy.set_array('Iter_num', Iter_num)
            output_totenergy.set_array('Tot', Tot)
            output_totenergy.set_array('Exc', Exc)
            output_totenergy.set_array('Ani', Ani)
            output_totenergy.set_array('DM', DM)
            output_totenergy.set_array('PD', PD)
            output_totenergy.set_array('BiqDM', BiqDM)
            output_totenergy.set_array('BQ', BQ)
            output_totenergy.set_array('Dip', Dip)
            output_totenergy.set_array('Zeeman', Zeeman)
            output_totenergy.set_array('LSF', LSF)
            output_totenergy.set_array('Chir', Chir)
        self.out('totenergy', output_totenergy)

        # parse coord.xx.out
        self.logger.info("Parsing '{}'".format(coord_filename))
        with output_folder.open(coord_filename, 'rb') as f:
            coord = coord_file_paser(f)
            output_coord = ArrayData()
            output_coord.set_array('coord', coord)
        self.out('coord', output_coord)

        # parse qpoints.xx.out
        self.logger.info("Parsing '{}'".format(qpoints_filename))
        with output_folder.open(qpoints_filename, 'rb') as f:
            qpoints = qpoints_file_paser(f)
            output_qpoints = ArrayData()
            output_qpoints.set_array('qpoints', qpoints)
        self.out('qpoints', output_qpoints)

        # parse averages.xx.out
        self.logger.info("Parsing '{}'".format(averages_filename))
        with output_folder.open(averages_filename, 'rb') as f:
            M_x, M_y, M_z, M, M_stdv = averages_file_paser(f)
            output_averages = ArrayData()
            output_averages.set_array('M_x', M_x)
            output_averages.set_array('M_y', M_y)
            output_averages.set_array('M_z', M_z)
            output_averages.set_array('M', M)
            output_averages.set_array('M_stdv', M_stdv)
        self.out('averages', output_averages)

        # parse qm_sweep.xx.out
        self.logger.info("Parsing '{}'".format(qm_sweep_filename))
        with output_folder.open(qm_sweep_filename, 'rb') as f:
            Q_vector, Energy_mRy = qm_sweep_file_paser(f)
            output_qm_sweep = ArrayData()
            output_qm_sweep.set_array('Q_vector', Q_vector)
            output_qm_sweep.set_array('Energy_mRy', Energy_mRy)
        self.out('qm_sweep', output_qm_sweep)

        # parse qm_minima.xx.out
        self.logger.info("Parsing '{}'".format(qm_minima_filename))
        with output_folder.open(qm_minima_filename, 'rb') as f:
            Q_vector, Energy_mRy = qm_minima_file_paser(f)
            output_qm_minima = ArrayData()
            output_qm_minima.set_array('Q_vector', Q_vector)
            output_qm_minima.set_array('Energy_mRy', Energy_mRy)
        self.out('qm_minima', output_qm_minima)

        return ExitCode(0)
Code Example #22
    def _sigma_c(self, ndbqp, ndbhf):
        """Calculate S_c if missing from  information parsed from the  ndb.*

         Sc = 1/Z[ E-Eo] -S_x + Vxc
        """
        Eo = numpy.array(ndbqp['Eo'])
        Z = numpy.array(ndbqp['Z'])
        E_minus_Eo = numpy.array(ndbqp['E-Eo'])
        Sx = numpy.array(ndbhf['Sx'])
        Vxc = numpy.array(ndbhf['Vxc'])
        try:
            Sc = numpy.array(ndbqp['So'])
        except KeyError:
            Sc = 1 / Z * E_minus_Eo - Sx + Vxc
        pdata = ArrayData()
        pdata.set_array('Eo', Eo)
        pdata.set_array('E_minus_Eo', E_minus_Eo)
        pdata.set_array('Z', Z)
        pdata.set_array('Sx', Sx)
        pdata.set_array('Sc', Sc)
        pdata.set_array('Vxc', Vxc)
        pdata.set_array('qp_table', numpy.array(ndbqp['qp_table']))
        return pdata
Code Example #23
File: test_1.py Project: MXJK851/AiiDA_UppASD
def test_process():
    """Test running a calculation
    note this does not test that the expected outputs are created of output parsing"""
    from aiida.plugins import DataFactory, CalculationFactory
    from aiida.engine import run
    from aiida.orm import Code, SinglefileData, Int, Float, Str, Bool, List, Dict, ArrayData, XyData, FolderData, RemoteData
    import numpy as np
    import aiida
    import os
    aiida.load_profile()
    #pre-prepared files
    dmdata = SinglefileData(
        file=os.path.join(os.getcwd(), "input_files", 'dmdata'))
    jij = SinglefileData(
        file=os.path.join(os.getcwd(), "input_files", 'jij'))
    momfile = SinglefileData(
        file=os.path.join(os.getcwd(), "input_files", 'momfile'))
    posfile = SinglefileData(
        file=os.path.join(os.getcwd(), "input_files", 'posfile'))
    qfile = SinglefileData(
        file=os.path.join(os.getcwd(), "input_files", 'qfile'))
    # inpsd.dat file selection
    simid = Str('SCsurf_T')

    ncell = ArrayData()
    ncell.set_array('matrix', np.array([128, 128, 1]))

    BC = Str('P         P         0 ')

    cell = ArrayData()
    cell.set_array('matrix', np.array([[1.00000, 0.00000, 0.00000],
                                       [0.00000, 1.00000, 0.00000],
                                       [0.00000, 0.00000, 1.00000]]))

    do_prnstruct = Int(2)
    maptype = Int(2)
    SDEalgh = Int(1)
    Initmag = Int(3)
    ip_mode = Str('Q')
    qm_svec = ArrayData()
    qm_svec.set_array('matrix', np.array([1, -1, 0]))

    qm_nvec = ArrayData()
    qm_nvec.set_array('matrix', np.array([0, 0, 1]))

    mode = Str('S')
    temp = Float(0.000)
    damping = Float(0.500)
    Nstep = Int(5000)
    timestep = Str('1.000d-15')
    qpoints = Str('F')
    plotenergy = Int(1)
    do_avrg = Str('Y')

    code = Code.get_from_string('uppasd_dev@uppasd_local')

    r_l = List(list=[f'coord.{simid.value}.out',
                     f'qm_minima.{simid.value}.out',
                     f'qm_sweep.{simid.value}.out',
                     'qpoints.out',
                     f'totenergy.{simid.value}.out',
                     f'averages.{simid.value}.out',
                     'fort.2000',
                     'inp.SCsurf_T.yaml',
                     'qm_restart.SCsurf_T.out',
                     'restart.SCsurf_T.out'])
    # set up calculation
    inputs = {
        'code': code,
        'dmdata': dmdata,
        'jij': jij,
        'momfile': momfile,
        'posfile': posfile,
        'qfile': qfile,
        'simid': simid,
        'ncell': ncell,
        'BC': BC,
        'cell': cell,
        'do_prnstruct': do_prnstruct,
        'maptype': maptype,
        'SDEalgh': SDEalgh,
        'Initmag': Initmag,
        'ip_mode': ip_mode,
        'qm_svec': qm_svec,
        'qm_nvec': qm_nvec,
        'mode': mode,
        'temp': temp,
        'damping': damping,
        'Nstep': Nstep,
        'timestep': timestep,
        'qpoints': qpoints,
        'plotenergy': plotenergy,
        'do_avrg': do_avrg,
        'retrieve_list_name': r_l,
        'metadata': {
            'options': {
                'max_wallclock_seconds': 60,
                'resources': {'num_machines': 1},
                'input_filename': 'inpsd.dat',
                'parser_name': 'UppASD_core_parsers',
            },
        },
    }

    result = run(CalculationFactory('UppASD_core_calculations'), **inputs)
    computed_diff = result['UppASD_core_calculations'].get_content()

    assert 'content1' in computed_diff
    assert 'content2' in computed_diff
Code Example #24
    def get_stm_data(self, plot_contents):
        """
        Parses the STM plot file to get an Array object with
        X, Y, and Z arrays in the 'meshgrid'
        setting, as in the example code:

        import numpy as np
        xlist = np.linspace(-3.0, 3.0, 3)
        ylist = np.linspace(-3.0, 3.0, 4)
        X, Y = np.meshgrid(xlist, ylist)
        Z = np.sqrt(X**2 + Y**2)

        X:
        [[-3.  0.  3.]
        [-3.  0.  3.]
        [-3.  0.  3.]
        [-3.  0.  3.]]

        Y:
        [[-3. -3. -3.]
        [-1. -1. -1.]
        [ 1.  1.  1.]
        [ 3.  3.  3.]]

        Z:
        [[ 4.24264069  3.          4.24264069]
        [ 3.16227766  1.          3.16227766]
        [ 3.16227766  1.          3.16227766]
        [ 4.24264069  3.          4.24264069]]

        These can then be used in matplotlib to get a contour plot.

        :param plot_contents: the contents of the aiida.CH.STM file as a string
        :return: `aiida.orm.ArrayData` instance representing the STM contour.
        """

        import numpy as np
        from itertools import groupby
        from aiida.orm import ArrayData

        # aiida.CH.STM or aiida.CC.STM...
        data = plot_contents.split('\n')
        data = [i.split() for i in data]

        # The data in the file is organized in "lines" parallel to the Y axes
        # (that is, for constant X) separated by blank lines.
        # In the following we use the 'groupby' function to get at the individual
        # blocks one by one, and set the appropriate arrays.

        # groupby returns single-use sub-iterators tied to one pass over the
        # data, so we simply make a separate pass for each column.
        xx = []
        yy = []
        zz = []
        #
        # Function to separate the blocks
        h = lambda x: len(x) == 0
        #
        for k, g in groupby(data, h):
            if not k:
                xx.append([i[0] for i in g])
        for k, g in groupby(data, h):
            if not k:
                yy.append([i[1] for i in g])
        for k, g in groupby(data, h):
            if not k:
                zz.append([i[2] for i in g])

        # Now, transpose, since x runs fastest in our fortran code,
        # the opposite convention of the meshgrid paradigm.

        X = np.array(xx, dtype=float).transpose()
        Y = np.array(yy, dtype=float).transpose()
        Z = np.array(zz, dtype=float).transpose()

        arraydata = ArrayData()
        arraydata.set_array('X', np.array(X))
        arraydata.set_array('Y', np.array(Y))
        arraydata.set_array('Z', np.array(Z))

        return arraydata
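
As the docstring notes, the returned meshgrid-style arrays can be fed straight to matplotlib for a contour plot. A minimal follow-up sketch (stm_node stands for the node returned by get_stm_data):

import matplotlib.pyplot as plt

X = stm_node.get_array('X')
Y = stm_node.get_array('Y')
Z = stm_node.get_array('Z')
plt.contourf(X, Y, Z)
plt.colorbar()
plt.savefig('stm_contour.png')
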
Code Example #25
    def parse(self, **kwargs):  # noqa: MC0001  - is mccabe too complex funct -
        """
        Receives in input a dictionary of retrieved nodes. Does all the logic here.
        """
        from aiida.engine import ExitCode

        parser_info = {}
        parser_info['parser_info'] = 'AiiDA Siesta Parser V. {}'.format(
            self._version)

        try:
            output_folder = self.retrieved
        except exceptions.NotExistent:
            raise OutputParsingError("Folder not retrieved")

        output_path, messages_path, xml_path, json_path, bands_path, basis_enthalpy_path = \
            self._fetch_output_files(output_folder)

        if xml_path is None:
            raise OutputParsingError("Xml file not retrieved")
        xmldoc = get_parsed_xml_doc(xml_path)
        result_dict = get_dict_from_xml_doc(xmldoc)

        if output_path is None:
            raise OutputParsingError("output file not retrieved")

        output_dict = dict(
            list(result_dict.items()) + list(parser_info.items()))

        warnings_list = []

        if json_path is not None:
            from .json_time import get_timing_info
            global_time, timing_decomp = get_timing_info(json_path)
            if global_time is None:
                warnings_list.append(["Cannot fully parse the time.json file"])
            else:
                output_dict["global_time"] = global_time
                output_dict["timing_decomposition"] = timing_decomp

        if basis_enthalpy_path is not None:
            with open(basis_enthalpy_path) as the_file:
                bas_enthalpy = float(the_file.read().split()[0])
            output_dict["basis_enthalpy"] = bas_enthalpy
            output_dict["basis_enthalpy_units"] = "eV"
        else:
            warnings_list.append(["BASIS_ENTHALPY file not retrieved"])

        have_errors_to_analyse = False
        if messages_path is None:
            # Perhaps using an old version of Siesta
            warnings_list.append([
                'WARNING: No MESSAGES file, could not check if calculation terminated correctly'
            ])
        else:
            have_errors_to_analyse = True
            # successful when "INFO: Job completed" is present in the MESSAGES file
            succesful, from_message = self._get_warnings_from_file(
                messages_path)
            warnings_list.append(from_message)
        output_dict["warnings"] = warnings_list

        # An output_parameters port is always returned, even if it only contains the parser info
        output_data = Dict(dict=output_dict)
        self.out('output_parameters', output_data)

        #
        # When using floating sites, Siesta associates 'atomic positions' to them, and
        # the structure and forces in the XML file include these fake atoms.
        # In order to return physical structures and forces, we need to remove them.
        # Recall that the input structure is the physical one, and the floating sites
        # are specified in the 'basis' input
        #
        physical_structure = self.node.inputs.structure
        number_of_real_atoms = len(physical_structure.sites)

        # If the structure has changed, save it
        #
        if output_dict['variable_geometry']:
            in_struc = self.node.inputs.structure
            # The next function never fails. If problems arise, the initial structure is
            # returned. The input structure is also necessary because the CML file
            # traditionally contains only the atomic symbols and not the site names.
            # The returned structure does not have any floating atoms, they are removed
            # in the `get_last_structure` call.
            success, out_struc = get_last_structure(xmldoc, in_struc)
            if not success:
                self.logger.warning(
                    "Problem in parsing final structure, returning input structure in output_structure"
                )

            self.out('output_structure', out_struc)

        # Attempt to parse forces and stresses. In case of failure "None" is returned.
        # Therefore the function never crashes
        forces, stress = get_final_forces_and_stress(xmldoc)
        if forces is not None and stress is not None:
            from aiida.orm import ArrayData
            arraydata = ArrayData()
            arraydata.set_array('forces',
                                np.array(forces[0:number_of_real_atoms]))
            arraydata.set_array('stress', np.array(stress))
            self.out('forces_and_stress', arraydata)

        # Attempt to parse the ion files. Files ".ion.xml" are not produced by Siesta if ion files are used
        # in input (`user-basis = T`); this explains the first "if" statement. The calculation input is called
        # `ions__El` (El is the element label), therefore we look for the string "ions" in any of the input names.
        if not any(["ions" in inp for inp in self.node.inputs]):  #pylint: disable=too-many-nested-blocks
            from aiida_siesta.data.ion import IonData
            ions = {}
            #Ions from the structure
            in_struc = self.node.inputs.structure
            for kind in in_struc.get_kind_names():
                ion_file_name = kind + ".ion.xml"
                if ion_file_name in output_folder._repository.list_object_names(
                ):
                    ion_file_path = os.path.join(
                        output_folder._repository._get_base_folder().abspath,
                        ion_file_name)
                    ions[kind] = IonData(ion_file_path)
                else:
                    self.logger.warning(f"no ion file retrieved for {kind}")
            #Ions from floating_sites
            if "basis" in self.node.inputs:
                basis_dict = self.node.inputs.basis.get_dict()
                if "floating_sites" in basis_dict:
                    floating_kinds = []
                    for orb in basis_dict["floating_sites"]:
                        if orb["name"] not in floating_kinds:
                            floating_kinds.append(orb["name"])
                            ion_file_name = orb["name"] + ".ion.xml"
                            if ion_file_name in output_folder._repository.list_object_names():
                                ion_file_path = os.path.join(
                                    output_folder._repository._get_base_folder().abspath,
                                    ion_file_name)
                                ions[orb["name"]] = IonData(ion_file_path)
                            else:
                                self.logger.warning(
                                    f"no ion file retrieved for {orb['name']}")
            #Return the outputs
            if ions:
                self.out('ion_files', ions)

        # Error analysis
        if have_errors_to_analyse:
            # No matter whether "INFO: Job completed" is present (successful run) or not, we check
            # for known errors. They might appear as WARNING (therefore with `succesful` True) or
            # FATAL (`succesful` False)
            for line in from_message:
                if u'split options' in line:
                    min_split = get_min_split(output_path)
                    if min_split:
                        self.logger.error(
                            "Error in split_norm option. Minimum value is {}".
                            format(min_split))
                        return self.exit_codes.SPLIT_NORM
                if u'sys::die' in line:
                    # This is the situation when Siesta dies with no specific error reported
                    # in "MESSAGES". Unfortunately some interesting cases are treated this
                    # way, so we explore the .out file for more insight.
                    if is_polarization_problem(output_path):
                        return self.exit_codes.BASIS_POLARIZ
                if u'SCF_NOT_CONV' in line:
                    return self.exit_codes.SCF_NOT_CONV
                if u'GEOM_NOT_CONV' in line:
                    return self.exit_codes.GEOM_NOT_CONV

        # Since no known error has been found, attempt to parse the bands if requested
        if bands_path is None:
            if "bandskpoints" in self.node.inputs:
                return self.exit_codes.BANDS_FILE_NOT_PRODUCED
        else:
            #bands, coords = self._get_bands(bands_path)
            try:
                bands = self._get_bands(bands_path)
            except (ValueError, IndexError):
                return self.exit_codes.BANDS_PARSE_FAIL
            from aiida.orm import BandsData
            arraybands = BandsData()
            # Reset the cell of the bands KpointsData; necessary when bandskpoints
            # was given without a cell and when the structure has changed
            bkp = self.node.inputs.bandskpoints.clone()
            if output_dict['variable_geometry']:
                bkp.set_cell_from_structure(out_struc)
            else:
                bkp.set_cell_from_structure(self.node.inputs.structure)
            arraybands.set_kpointsdata(bkp)
            arraybands.set_bands(bands, units="eV")
            self.out('bands', arraybands)
            #bandsparameters = Dict(dict={"kp_coordinates": coords})
            #self.out('bands_parameters', bandsparameters)

        # At the very end, return a particular exit code if "INFO: Job completed"
        # was not present in the MESSAGES file but no known error was detected.
        if have_errors_to_analyse:
            if not succesful:
                self.logger.error(
                    'The calculation finished without "INFO: Job completed", but no '
                    'known error could be identified. The calculation might have been killed externally'
                )
                return self.exit_codes.UNEXPECTED_TERMINATION

        return ExitCode(0)
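
The 'forces_and_stress' output emitted above is a plain ArrayData node, so its arrays can be read back with `get_array`. A minimal usage sketch (not part of the parser), assuming `calc` is a hypothetical finished calculation node that exposes this output:

def get_forces_and_stress(calc):
    """Return (forces, stress) as numpy arrays from a finished calculation."""
    arraydata = calc.outputs.forces_and_stress
    # 'forces' holds only the physical atoms; floating sites were stripped by the parser
    forces = arraydata.get_array('forces')
    stress = arraydata.get_array('stress')
    return forces, stress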
Code example #29
    def parse(self, **kwargs):
        """
        Parses the datafolder, stores results.
        """
        # retrieve resources
        resources, exit_code = self.get_parsing_resources(
            kwargs, traj_in_temp=True, sys_info=True)
        if exit_code is not None:
            return exit_code
        trajectory_filename, trajectory_filepath, info_filepath = resources

        # parse log file
        log_data, exit_code = self.parse_log_file()
        if exit_code is not None:
            return exit_code

        # parse trajectory file
        try:
            timestep = self.node.inputs.parameters.dict.timestep
            positions, charges, step_ids, cells, symbols, time = read_lammps_trajectory(
                trajectory_filepath, timestep=timestep,
                log_warning_func=self.logger.warning)
        except Exception:
            traceback.print_exc()
            return self.exit_codes.ERROR_TRAJ_PARSING

        # save results into node
        output_data = log_data["data"]
        if 'units_style' in output_data:
            output_data.update(get_units_dict(output_data['units_style'],
                                              ["distance", "time", "energy"]))
        else:
            self.logger.warning("units missing in log")
        self.add_warnings_and_errors(output_data)
        self.add_standard_info(output_data)
        parameters_data = Dict(dict=output_data)
        self.out('results', parameters_data)

        # save trajectories into node
        trajectory_data = TrajectoryData()
        trajectory_data.set_trajectory(
            symbols, positions, stepids=step_ids, cells=cells, times=time)
        if charges is not None:
            trajectory_data.set_array('charges', charges)
        self.out('trajectory_data', trajectory_data)

        # parse the system data file
        if info_filepath:
            sys_data = ArrayData()
            try:
                with open(info_filepath) as handle:
                    names = handle.readline().strip().split()
                # ndmin=2 keeps one array per column even for a single data row
                for i, col in enumerate(np.loadtxt(info_filepath, skiprows=1, unpack=True, ndmin=2)):
                    sys_data.set_array(names[i], col)
            except Exception:
                traceback.print_exc()
                return self.exit_codes.ERROR_INFO_PARSING
            sys_data.set_attribute('units_style', output_data.get('units_style', None))
            self.out('system_data', sys_data)

        if output_data["errors"]:
            return self.exit_codes.ERROR_LAMMPS_RUN
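
The system-data parsing in the example above follows a simple file convention: the first line holds whitespace-separated column names and the remaining lines form a numeric table. A standalone sketch of that pattern (the file path and its contents are hypothetical, not parser code):

import numpy as np

def read_named_columns(path):
    """Return a dict mapping each column name to a 1D numpy array."""
    with open(path) as handle:
        names = handle.readline().strip().split()
    # unpack=True returns the data column-wise, one array per named column;
    # ndmin=2 preserves that structure even when the file has a single data row
    columns = np.loadtxt(path, skiprows=1, unpack=True, ndmin=2)
    return dict(zip(names, columns))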
Code example #30
    def parse(self, **kwargs):
        """Parse the retrieved folder and store results."""
        # pylint: disable= too-many-locals, too-many-branches, too-many-statements, too-many-return-statements
        # retrieve resources
        resources = self.get_parsing_resources(kwargs, traj_in_temp=True)
        if resources.exit_code is not None:
            return resources.exit_code

        # parse log file
        log_data, exit_code = self.parse_log_file()
        if exit_code is not None:
            return exit_code

        traj_error = None
        if not resources.traj_paths:
            traj_error = self.exit_codes.ERROR_TRAJ_FILE_MISSING
        else:
            try:
                trajectories = {
                    os.path.basename(traj_path).split('-')[0]:
                    LammpsTrajectory(traj_path)
                    for traj_path in resources.traj_paths
                }
                self.out('trajectory', trajectories)
            except Exception as err:  # pylint: disable=broad-except
                traceback.print_exc()
                self.logger.error(str(err))
                traj_error = self.exit_codes.ERROR_TRAJ_PARSING

        # save results into node
        output_data = log_data['data']
        if 'units_style' in output_data:
            output_data.update(
                get_units_dict(output_data['units_style'],
                               ['distance', 'time', 'energy']))
        else:
            self.logger.warning('units missing in log')
        self.add_warnings_and_errors(output_data)
        self.add_standard_info(output_data)
        if 'parameters' in self.node.get_incoming().all_link_labels():
            output_data['timestep_picoseconds'] = convert_units(
                self.node.inputs.parameters.dict.timestep,
                output_data['units_style'],
                'time',
                'picoseconds',
            )
            output_data['stage_names'] = [
                s['name'] for s in self.node.inputs.parameters.dict.stages
            ]
        parameters_data = Dict(dict=output_data)
        self.out('results', parameters_data)

        # parse the system data file
        sys_data_error = None
        arrays = {}
        for sys_path in resources.sys_paths:
            stage_name = os.path.basename(sys_path).split('-')[0]
            sys_data = ArrayData()
            sys_data.set_attribute('units_style',
                                   output_data.get('units_style', None))
            try:
                with open(sys_path) as handle:
                    names = handle.readline().strip().split()
                for i, col in enumerate(
                        np.loadtxt(sys_path, skiprows=1, unpack=True,
                                   ndmin=2)):
                    sys_data.set_array(names[i], col)
                arrays[stage_name] = sys_data
            except Exception:  # pylint: disable=broad-except
                traceback.print_exc()
                sys_data_error = self.exit_codes.ERROR_INFO_PARSING
        if arrays:
            self.out('system', arrays)

        # retrieve the last restart file, per stage
        restart_map = {}
        for rpath in resources.restart_paths:
            rpath_base = os.path.basename(rpath)
            match = re.match(r'([^\-]*)\-.*\.([\d]+)', rpath_base)
            if match:
                stage, step = match.groups()
                if int(step) > restart_map.get(stage, (-1, None))[0]:
                    restart_map[stage] = (int(step), rpath)

        for stage, (step, rpath) in restart_map.items():
            with io.open(rpath, 'rb') as handle:
                self.retrieved.put_object_from_filelike(
                    handle, os.path.basename(rpath), 'wb', force=True)

        if output_data['errors']:
            return self.exit_codes.ERROR_LAMMPS_RUN

        if traj_error:
            return traj_error

        if sys_data_error:
            return sys_data_error

        if not log_data.get('found_end', False):
            return self.exit_codes.ERROR_RUN_INCOMPLETE
        return None
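
The restart-file bookkeeping in the last example keeps, per stage, only the restart file with the highest step number. A standalone sketch of that selection logic, with hypothetical file names matching the '<stage>-<name>.<step>' pattern assumed by the regex:

import os
import re

def latest_restarts(paths):
    """Map each stage name to (highest step, path of the matching restart file)."""
    restart_map = {}
    for rpath in paths:
        match = re.match(r'([^\-]*)\-.*\.([\d]+)', os.path.basename(rpath))
        if match:
            stage, step = match.groups()
            # keep only the entry with the highest step seen so far for this stage
            if int(step) > restart_map.get(stage, (-1, None))[0]:
                restart_map[stage] = (int(step), rpath)
    return restart_map

# Example: latest_restarts(['equil-md.restart.50', 'equil-md.restart.100'])
# gives {'equil': (100, 'equil-md.restart.100')}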