Example #1
# but the standard for AiiDA structures is Cartesian coordinates in Angstrom
structure = StructureData(cell=cell)
structure.append_atom(position=(0.000 * alat, 0.000 * alat, 0.000 * alat),
                      symbols=['Si'],
                      name="Si_one")
structure.append_atom(position=(0.250 * alat, 0.250 * alat, 0.250 * alat),
                      symbols='Si',
                      name="Si_two")

#The parameters
parameters = Dict(
    dict={
        'xc-functional': 'LDA',
        'xc-authors': 'CA',
        'max-scfiterations': 50,
        'dm-numberpulay': 4,
        'dm-mixingweight': 0.3,
        'dm-tolerance': 1.e-3,
        'Solution-method': 'diagon',
        'electronic-temperature': '25 meV',
        'write-forces': True,
    })
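
# A hedged aside, since the snippet does not show k-point sampling: a minimal
# sketch using the standard AiiDA KpointsData class (mesh values are
# illustrative, not taken from the original script):
from aiida.orm import KpointsData
kpoints = KpointsData()
kpoints.set_kpoints_mesh([4, 4, 4])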

#The basis set

basis = Dict(
    dict={
        'floating_sites': [{
            "name": 'Si_bond',
            "symbols": 'Si',
            "position": (0.125 * alat, 0.125 * alat, 0.125 * alat),
        }],
    })
#
options = {
    "queue_name": "debug",
    "max_wallclock_seconds": 1700,
    "resources": {
        "num_machines": 1,
        "num_mpiprocs_per_machine": 1,
    }
}
#
# Parameters ---------------------------------------------------
params_dict = {
    'z': height  # In Angstrom
}
parameters = Dict(dict=params_dict)
#
#-------------------------- Settings ---------------------------------
#
settings_dict = {}
settings = Dict(dict=settings_dict)
#
#--All the inputs of a Siesta calculation are listed in a dictionary--
#
inputs = {
    'settings': settings,
    'parameters': parameters,
    'code': code,
    'ldos_folder': remotedata,
    'metadata': {
        'options': options,
    },
}
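
# A hedged sketch of actually launching with these inputs; the
# 'siesta.siesta' entry point name is assumed from the aiida-siesta plugin:
from aiida.engine import run
from aiida.plugins import CalculationFactory
results = run(CalculationFactory('siesta.siesta'), **inputs)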
Example #3
    def parse(self, **kwargs):
        """Parse outputs, store results in database."""
        try:
            output_folder = self.retrieved
        except exceptions.NotExistent:
            return self.exit_codes.ERROR_NO_RETRIEVED_FOLDER

        # parse stderr
        pbs_error = None
        stderr_file = self.node.get_option("scheduler_stderr")
        if stderr_file in output_folder.list_object_names():
            with output_folder.open(stderr_file) as fileobj:
                pbs_exit_code = parse_pbs_stderr(fileobj)
            if pbs_exit_code:
                pbs_error = self.exit_codes[pbs_exit_code]

        # parse stdout file
        stdout_error = None
        stdout_data = {}
        stdout_fname = self.node.get_option("stdout_file_name")
        if stdout_fname not in self.retrieved.list_object_names():
            stdout_error = self.exit_codes.ERROR_OUTPUT_FILE_MISSING
        else:
            with output_folder.open(stdout_fname) as handle:
                stdout_data = read_properties_stdout(handle.read())
            stdout_exit_code = stdout_data.pop("exit_code", None)
            if stdout_exit_code:
                stdout_error = self.exit_codes[stdout_exit_code]

        # parse iso file
        iso_error = None
        iso_data = {}
        iso_arrays = None
        output_isovalue_fname = self.node.get_option("output_isovalue_fname")
        if output_isovalue_fname not in output_folder.list_object_names():
            iso_error = self.exit_codes.ERROR_ISOVALUE_FILE_MISSING
        else:
            try:
                with output_folder.open(output_isovalue_fname) as handle:
                    iso_data, iso_arrays = parse_crystal_fort25_aiida(handle)
            except Exception:
                traceback.print_exc()
                iso_error = self.exit_codes.ERROR_PARSING_ISOVALUE_FILE

        final_data = self.merge_output_dicts(stdout_data, iso_data)

        # log errors
        errors = final_data.get("errors", [])
        parser_errors = final_data.get("parser_errors", [])
        if parser_errors:
            self.logger.warning(
                "the parser raised the following errors:\n{}".format(
                    "\n\t".join(parser_errors)
                )
            )
        if errors:
            self.logger.warning(
                "the calculation raised the following errors:\n{}".format(
                    "\n\t".join(errors)
                )
            )

        # make output nodes
        self.out("results", Dict(dict=final_data))
        if iso_arrays is not None:
            array_data = ArrayData()
            for name, array in iso_arrays.items():
                array_data.set_array(name, np.array(array))
            self.out("arrays", array_data)

        if pbs_error is not None:
            return pbs_error

        if stdout_error is not None:
            return stdout_error

        if iso_error is not None:
            return iso_error

        return ExitCode()
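
# For context: exit codes like self.exit_codes.ERROR_NO_RETRIEVED_FOLDER above
# are declared on the calculation spec. A hedged, illustrative sketch (the
# numeric code and message are placeholders, not the plugin's actual values):
from aiida.engine import CalcJob

class IllustrativeCalculation(CalcJob):
    @classmethod
    def define(cls, spec):
        super().define(spec)
        spec.exit_code(300, 'ERROR_NO_RETRIEVED_FOLDER',
                       message='The retrieved folder could not be accessed.')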
Example #4
# import aiida_lsmo.calcfunctions.ff_builder_module as FFBuilder

from aiida_matdis.aide_de_camp import (get_molecule_dict, get_ff_parameters,
                                       get_temperature_points, get_vlcc_output,
                                       update_workchain_params)

RaspaBaseWorkChain = WorkflowFactory('raspa.base')  #pylint: disable=invalid-name
FFBuilder = CalculationFactory('matdis.ff_builder')

VLCPARAMETERS_DEFAULT = Dict(
    dict={  #TODO: create IsothermParameters instead of Dict # pylint: disable=fixme
        "ff_framework": None,  # str, Forcefield of the structure (used also as a definition of ff.rad for zeopp)
        "ff_shifted": False,  # bool, Shift or truncate at cutoff
        "ff_tail_corrections": True,  # bool, Apply tail corrections
        "ff_mixing_rule": 'Lorentz-Berthelot',  # str, Mixing rule for the forcefield
        "ff_separate_interactions": False,  # bool, if true use only ff_framework for framework-molecule interactions
        "ff_cutoff": 12.0,  # float, CutOff truncation for the VdW interactions (Angstrom)
        "temperature": 300,  # float, Temperature of the simulation
        "raspa_verbosity": 10,  # int, Print stats every: number of cycles / raspa_verbosity
        "raspa_init_cycles": int(1e3),  # int, Number of GCMC initialization cycles
        "raspa_prod_cycles": int(1e4),  # int, Number of GCMC production cycles
        "temperature_list": None,
    })
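
# A hedged sketch of how user settings are typically merged over these
# defaults (the work chain presumably does this via update_workchain_params,
# whose implementation is not shown here):
def merged_parameters(user_params):
    merged = VLCPARAMETERS_DEFAULT.get_dict()
    merged.update(user_params.get_dict())
    return Dict(dict=merged)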


class VLCCWorkChain(WorkChain):
    """This Worchain is designed to construct Vapor-Liquid Coexistence Curve through GEMC simulation"""
    @classmethod
    def define(cls, spec):
        super(VLCCWorkChain, cls).define(spec)

        spec.expose_inputs(RaspaBaseWorkChain,
def example_no_struct(cp2k_code):
    """Run DFT calculation with structure specified in the input file"""

    print("Testing CP2K ENERGY on H2 (DFT) without StructureData...")

    pwd = os.path.dirname(os.path.realpath(__file__))

    # basis set
    basis_file = SinglefileData(
        file=os.path.join(pwd, "..", "files", "BASIS_MOLOPT"))

    # pseudopotentials
    pseudo_file = SinglefileData(
        file=os.path.join(pwd, "..", "files", "GTH_POTENTIALS"))

    # parameters
    parameters = Dict(
        dict={
            'FORCE_EVAL': {
                'METHOD': 'Quickstep',
                'DFT': {
                    'BASIS_SET_FILE_NAME': 'BASIS_MOLOPT',
                    'POTENTIAL_FILE_NAME': 'GTH_POTENTIALS',
                    'QS': {
                        'EPS_DEFAULT': 1.0e-12,
                        'WF_INTERPOLATION': 'ps',
                        'EXTRAPOLATION_ORDER': 3,
                    },
                    'MGRID': {
                        'NGRIDS': 4,
                        'CUTOFF': 280,
                        'REL_CUTOFF': 30,
                    },
                    'XC': {
                        'XC_FUNCTIONAL': {
                            '_': 'LDA',
                        },
                    },
                    'POISSON': {
                        'PERIODIC': 'none',
                        'PSOLVER': 'MT',
                    },
                },
                'SUBSYS': {
                    # structure directly included in parameters
                    'CELL': {
                        'ABC': '4.0   4.0   4.75'
                    },
                    'COORD': {
                        ' ': [
                            'H    2.0   2.0   2.737166',
                            'H    2.0   2.0   2.000000'
                        ]
                    },
                    'KIND': [
                        {
                            '_': 'O',
                            'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',
                            'POTENTIAL': 'GTH-LDA-q6'
                        },
                        {
                            '_': 'H',
                            'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',
                            'POTENTIAL': 'GTH-LDA-q1'
                        },
                    ],
                },
            }
        })

    # Construct process builder
    builder = Cp2kCalculation.get_builder()
    builder.parameters = parameters
    builder.code = cp2k_code
    builder.file = {
        'basis': basis_file,
        'pseudo': pseudo_file,
    }
    builder.metadata.options.resources = {
        "num_machines": 1,
        "num_mpiprocs_per_machine": 1,
    }
    builder.metadata.options.max_wallclock_seconds = 1 * 3 * 60

    print("submitted calculation...")
    calc = run(builder)

    # check energy
    expected_energy = -1.14005678487
    if abs(calc['output_parameters'].dict.energy - expected_energy) < 1e-10:
        print("OK, energy has the expected value")
    else:
        print("ERROR!")
        print("Expected energy value: {}".format(expected_energy))
        print("Actual energy value: {}".format(
            calc['output_parameters'].dict.energy))
        sys.exit(3)
def example_gemc_single_comp(raspa_code,
                             gemc_single_comp_calc_pk=None,
                             submit=True):
    """Prepare and submit RASPA calculation with components mixture."""

    # This line is needed for tests only
    if gemc_single_comp_calc_pk is None:
        gemc_single_comp_calc_pk = pytest.gemc_single_comp_calc_pk  # pylint: disable=no-member

    # parameters
    parameters = Dict(
        dict={
            "GeneralSettings": {
                "SimulationType": "MonteCarlo",
                "NumberOfCycles": 50,
                "NumberOfInitializationCycles": 50,
                "PrintEvery": 10,
                "Forcefield": "GenericMOFs",
                "EwaldPrecision": 1e-6,
                "CutOff": 12.0,
                "GibbsVolumeChangeProbability": 0.1,
            },
            "System": {
                "box_one": {
                    "type": "Box",
                    "BoxLengths": "25 25 25",
                    "BoxAngles": "90 90 90",
                    "ExternalTemperature": 200.0,
                },
                "box_two": {
                    "type": "Box",
                    "BoxLengths": "25 25 25",
                    "BoxAngles": "90 90 90",
                    "ExternalTemperature": 200.0,
                }
            },
            "Component": {
                "methane": {
                    "MoleculeDefinition": "TraPPE",
                    "TranslationProbability": 1.0,
                    "ReinsertionProbability": 1.0,
                    "GibbsSwapProbability": 1.0,
                    "CreateNumberOfMolecules": {
                        "box_one": 50,
                        "box_two": 50,
                    },
                },
            },
        })

    # restart file
    retrieved_parent_folder = load_node(
        gemc_single_comp_calc_pk).outputs.retrieved

    # Constructing the builder
    builder = raspa_code.get_builder()
    builder.parameters = parameters
    builder.retrieved_parent_folder = retrieved_parent_folder
    builder.metadata.options = {
        "resources": {
            "num_machines": 1,
            "num_mpiprocs_per_machine": 1,
        },
        "max_wallclock_seconds": 1 * 30 * 60,  # 30 min
        "withmpi": False,
    }
    builder.metadata.dry_run = False
    builder.metadata.store_provenance = True

    if submit:
        print("Testing RASPA GEMC with methane (Restart)...")
        res, pk = run_get_pk(builder)
        print("calculation pk: ", pk)
        print(
            "Average number of methane molecules/uc (box_one):",
            res['output_parameters'].dict.box_one['components']['methane']
            ['loading_absolute_average'])
        print(
            "Average number of methane molecules/uc (box_two):",
            res['output_parameters'].dict.box_two['components']['methane']
            ['loading_absolute_average'])
        print("OK, calculation has completed successfully")
    else:
        print("Generating test input ...")
        builder.metadata.dry_run = True
        builder.metadata.store_provenance = False
        run(builder)
        print("Submission test successful")
        print("In order to actually submit, add '--submit'")
    print("-----")
Example #7
def launch_calculation(code, structure, pseudo_family, kpoints_mesh, ecutwfc,
                       ecutrho, hubbard_u, hubbard_v, hubbard_file_pk,
                       starting_magnetization, smearing, max_num_machines,
                       max_wallclock_seconds, with_mpi, daemon, parent_folder,
                       dry_run, mode, unfolded_kpoints):
    """Run a PwCalculation."""
    from aiida.orm import Dict
    from aiida.orm.nodes.data.upf import get_pseudos_from_structure
    from aiida.plugins import CalculationFactory
    from aiida_quantumespresso.utils.resources import get_default_options

    parameters = {
        'CONTROL': {
            'calculation': mode,
        },
        'SYSTEM': {
            'ecutwfc': ecutwfc,
            'ecutrho': ecutrho,
        }
    }

    if mode in CALCS_REQUIRING_PARENT and not parent_folder:
        raise click.BadParameter(
            "calculation '{}' requires a parent folder".format(mode),
            param_hint='--parent-folder')

    try:
        hubbard_file = validate.validate_hubbard_parameters(
            structure, parameters, hubbard_u, hubbard_v, hubbard_file_pk)
    except ValueError as exception:
        raise click.BadParameter(str(exception))

    try:
        validate.validate_starting_magnetization(structure, parameters,
                                                 starting_magnetization)
    except ValueError as exception:
        raise click.BadParameter(str(exception))

    try:
        validate.validate_smearing(parameters, smearing)
    except ValueError as exception:
        raise click.BadParameter(str(exception))

    if unfolded_kpoints:
        from aiida.orm import KpointsData
        unfolded_list = kpoints_mesh.get_kpoints_mesh(print_list=True)
        kpoints_mesh = KpointsData()
        kpoints_mesh.set_kpoints(unfolded_list)

    inputs = {
        'code': code,
        'structure': structure,
        'pseudos': get_pseudos_from_structure(structure, pseudo_family),
        'kpoints': kpoints_mesh,
        'parameters': Dict(dict=parameters),
        'metadata': {
            'options':
            get_default_options(max_num_machines, max_wallclock_seconds,
                                with_mpi),
        }
    }

    if parent_folder:
        inputs['parent_folder'] = parent_folder

    if hubbard_file:
        inputs['hubbard_file'] = hubbard_file

    if dry_run:
        if daemon:
            # .submit() would forward to .run(), but it's better to stop here,
            # since it's a bit unexpected and the log messages output to screen
            # would be confusing ("Submitted PwCalculation<None> to the daemon")
            raise click.BadParameter(
                'cannot send to the daemon if in dry_run mode',
                param_hint='--daemon')
        inputs['metadata']['store_provenance'] = False
        inputs['metadata']['dry_run'] = True

    launch.launch_process(CalculationFactory('quantumespresso.pw'), daemon,
                          **inputs)
Example #8
    def parse(self, **kwargs):
        """Parse the retrieved folder and store results."""
        # pylint: disable= too-many-locals, too-many-branches, too-many-statements, too-many-return-statements
        # retrieve resources
        resources = self.get_parsing_resources(kwargs, traj_in_temp=True)
        if resources.exit_code is not None:
            return resources.exit_code

        # parse log file
        log_data, exit_code = self.parse_log_file()
        if exit_code is not None:
            return exit_code

        traj_error = None
        if not resources.traj_paths:
            traj_error = self.exit_codes.ERROR_TRAJ_FILE_MISSING
        else:
            try:
                trajectories = {
                    os.path.basename(traj_path).split('-')[0]:
                    LammpsTrajectory(traj_path)
                    for traj_path in resources.traj_paths
                }
                self.out('trajectory', trajectories)
            except Exception as err:  # pylint: disable=broad-except
                traceback.print_exc()
                self.logger.error(str(err))
                traj_error = self.exit_codes.ERROR_TRAJ_PARSING

        # save results into node
        output_data = log_data['data']
        if 'units_style' in output_data:
            output_data.update(
                get_units_dict(output_data['units_style'],
                               ['distance', 'time', 'energy']))
        else:
            self.logger.warning('units missing in log')
        self.add_warnings_and_errors(output_data)
        self.add_standard_info(output_data)
        if 'parameters' in self.node.get_incoming().all_link_labels():
            output_data['timestep_picoseconds'] = convert_units(
                self.node.inputs.parameters.dict.timestep,
                output_data['units_style'],
                'time',
                'picoseconds',
            )
            output_data['stage_names'] = [
                s['name'] for s in self.node.inputs.parameters.dict.stages
            ]
        parameters_data = Dict(dict=output_data)
        self.out('results', parameters_data)

        # parse the system data file
        sys_data_error = None
        arrays = {}
        for sys_path in resources.sys_paths:
            stage_name = os.path.basename(sys_path).split('-')[0]
            sys_data = ArrayData()
            sys_data.set_attribute('units_style',
                                   output_data.get('units_style', None))
            try:
                with open(sys_path) as handle:
                    names = handle.readline().strip().split()
                for i, col in enumerate(
                        np.loadtxt(sys_path, skiprows=1, unpack=True,
                                   ndmin=2)):
                    sys_data.set_array(names[i], col)
                arrays[stage_name] = sys_data
            except Exception:  # pylint: disable=broad-except
                traceback.print_exc()
                sys_data_error = self.exit_codes.ERROR_INFO_PARSING
        if arrays:
            self.out('system', arrays)

        # retrieve the last restart file, per stage
        restart_map = {}
        for rpath in resources.restart_paths:
            rpath_base = os.path.basename(rpath)
            match = re.match(r'([^\-]*)\-.*\.([\d]+)', rpath_base)
            if match:
                stage, step = match.groups()
                if int(step) > restart_map.get(stage, (-1, None))[0]:
                    restart_map[stage] = (int(step), rpath)

        for stage, (step, rpath) in restart_map.items():
            with io.open(rpath, 'rb') as handle:
                self.retrieved.put_object_from_filelike(
                    handle, os.path.basename(rpath), 'wb', force=True)

        if output_data['errors']:
            return self.exit_codes.ERROR_LAMMPS_RUN

        if traj_error:
            return traj_error

        if sys_data_error:
            return sys_data_error

        if not log_data.get('found_end', False):
            return self.exit_codes.ERROR_RUN_INCOMPLETE
        return None
Example #9
    @classmethod
    def parse_aiida(cls, string):
        """Return the parsed string as an AiiDA Dict."""
        return Dict(dict=cls.parse(string))
def add_write_binary_restart(input_dict, write_every):
    """Return a Dict with GeneralSettings.WriteBinaryRestartFileEvery set to `write_every`."""
    final_dict = input_dict.get_dict()
    final_dict["GeneralSettings"]["WriteBinaryRestartFileEvery"] = write_every
    # Reuse the original node if the value was already set; otherwise
    # return a new (unstored) Dict node with the updated settings.
    return input_dict if input_dict.get_dict() == final_dict else Dict(
        dict=final_dict)
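
# Hedged usage sketch ('old_params' is a placeholder node): a new Dict is
# created only when the setting actually changes, so unchanged inputs keep
# their original node and provenance.
old_params = Dict(dict={"GeneralSettings": {"WriteBinaryRestartFileEvery": 200}})
new_params = add_write_binary_restart(old_params, 200)
assert new_params is old_params  # value unchanged, node reused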
        "num_machines": 1,
        "num_mpiprocs_per_machine": 1,
        "parallel_env": "localmpi",
        "tot_num_mpiprocs": 1,
    }
    # options.queue_name = 'iqtc04.q'
    options.max_wallclock_seconds = 3600
    inputs.metadata.options = options

    # Setup code
    inputs.code = Code.get_from_string(codename)

    # setup nodes
    inputs.structure = structure
    inputs.potential = EmpiricalPotential(type=potential["pair_style"],
                                          data=potential["data"])

    inputs.parameters = Dict(dict=parameters_opt)

    print(inputs.potential.get_potential_file())
    print(inputs.potential.atom_style)
    print(inputs.potential.default_units)

    # run calculation
    result, node = run_get_node(LammpsOptimizeCalculation, **inputs)
    print("results:", result)
    print("node:", node)

    # submit to daemon
    # submit(LammpsOptimizeCalculation, **inputs)
Example #12
def test_cp2k_geo_opt_on_H2(new_workdir):
    """Test CP2K GEO_OPT on H2 (DFT)."""

    import ase.build

    from aiida.engine import run
    from aiida.plugins import CalculationFactory
    from aiida.orm import Dict, StructureData

    computer = get_computer(workdir=new_workdir)
    code = get_code(entry_point="cp2k", computer=computer)

    # structure
    atoms = ase.build.molecule("H2")
    atoms.center(vacuum=2.0)
    structure = StructureData(ase=atoms)

    # parameters
    parameters = Dict(
        dict={
            "GLOBAL": {
                "RUN_TYPE": "GEO_OPT"
            },
            "FORCE_EVAL": {
                "METHOD": "Quickstep",
                "DFT": {
                    "BASIS_SET_FILE_NAME": "BASIS_MOLOPT",
                    "QS": {
                        "EPS_DEFAULT": 1.0e-12,
                        "WF_INTERPOLATION": "ps",
                        "EXTRAPOLATION_ORDER": 3,
                    },
                    "MGRID": {
                        "NGRIDS": 4,
                        "CUTOFF": 280,
                        "REL_CUTOFF": 30
                    },
                    "XC": {
                        "XC_FUNCTIONAL": {
                            "_": "LDA"
                        }
                    },
                    "POISSON": {
                        "PERIODIC": "none",
                        "PSOLVER": "MT"
                    },
                },
                "SUBSYS": {
                    "KIND": [
                        {
                            "_": "O",
                            "BASIS_SET": "DZVP-MOLOPT-SR-GTH",
                            "POTENTIAL": "GTH-LDA-q6",
                        },
                        {
                            "_": "H",
                            "BASIS_SET": "DZVP-MOLOPT-SR-GTH",
                            "POTENTIAL": "GTH-LDA-q1",
                        },
                    ]
                },
            },
        })

    options = {
        "resources": {
            "num_machines": 1,
            "num_mpiprocs_per_machine": 1
        },
        "max_wallclock_seconds": 1 * 3 * 60,
    }

    inputs = {
        "structure": structure,
        "parameters": parameters,
        "code": code,
        "metadata": {
            "options": options
        },
    }

    result = run(CalculationFactory("cp2k"), **inputs)

    assert result["output_parameters"].dict.exceeded_walltime is False

    expected_energy = -1.14009973178
    assert abs(result["output_parameters"].dict.energy -
               expected_energy) < 1e-10

    # check geometry
    expected_dist = 0.736103879818
    dist = result["output_structure"].get_ase().get_distance(0, 1)
    assert abs(dist - expected_dist) < 1e-7
Example #13
    def define(cls, spec):
        super().define(spec)

        spec.input("formchk_code", valid_type=Code)
        spec.input("cubegen_code", valid_type=Code)

        spec.input('gaussian_calc_folder',
                   valid_type=RemoteData,
                   required=True,
                   help='The gaussian calculation output folder.')

        spec.input('gaussian_output_params',
                   valid_type=Dict,
                   required=True,
                   help='The gaussian calculation output parameters.')

        spec.input('orbital_indexes',
                   valid_type=List,
                   required=False,
                   default=lambda: List(list=[0, 1]),
                   help='Indexes of the orbital cubes to generate.')

        spec.input(
            'orbital_index_ref',
            valid_type=Str,
            required=False,
            default=lambda: Str('half_num_el'),
            help="Reference index, possible choices: 'half_num_el', 'abs'.")

        spec.input('natural_orbitals',
                   valid_type=Bool,
                   required=False,
                   default=lambda: Bool(False),
                   help="The cube files are natural orbitals.")

        spec.input('generate_density',
                   valid_type=Bool,
                   required=False,
                   default=lambda: Bool(True),
                   help="Generate density cube.")

        spec.input('generate_spin_density',
                   valid_type=Bool,
                   required=False,
                   default=lambda: Bool(True),
                   help="Generate spin density cube (if applicable).")

        spec.input(
            'edge_space',
            valid_type=Float,
            required=False,
            default=lambda: Float(3.0),
            help='Extra cube space in addition to molecule bounding box [ang].'
        )

        spec.input('dx',
                   valid_type=Float,
                   required=False,
                   default=lambda: Float(0.15),
                   help='Cube file spacing [ang].')

        spec.input('retrieve_cubes',
                   valid_type=Bool,
                   required=False,
                   default=lambda: Bool(False),
                   help='Should the cube files be retrieved?')

        spec.input(
            "cubegen_parser_name",
            valid_type=str,
            default=CubegenCalculation.DEFAULT_PARSER,
            non_db=True,
        )

        spec.input("cubegen_parser_params",
                   valid_type=Dict,
                   required=False,
                   default=lambda: Dict(dict={}),
                   help='Additional parameters to cubegen parser.')

        spec.outline(cls.check_input, cls.formchk_step, cls.cubegen_step,
                     cls.finalize)

        spec.outputs.dynamic = True

        spec.exit_code(
            302,
            "ERROR_INPUT",
            message="Input options are invalid.",
        )

        spec.exit_code(
            390,
            "ERROR_TERMINATION",
            message="One or more steps of the work chain failed.",
        )
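
# A hedged sketch of assembling inputs for the work chain defined above
# (the class name GaussianCubesWorkChain, the code labels, and the
# 'gaussian_calc' node are placeholders, not taken from the snippet):
from aiida.engine import submit
from aiida.orm import load_code

builder = GaussianCubesWorkChain.get_builder()
builder.formchk_code = load_code('formchk@localhost')  # placeholder label
builder.cubegen_code = load_code('cubegen@localhost')  # placeholder label
builder.gaussian_calc_folder = gaussian_calc.outputs.remote_folder
builder.gaussian_output_params = gaussian_calc.outputs.output_parameters
builder.orbital_indexes = List(list=[-1, 0, 1, 2])  # HOMO-1 ... LUMO+1
submit(builder)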
Example #14
    def cubegen_step(self):

        if not self._check_if_previous_calc_ok(self.ctx.formchk_node):
            return self.exit_codes.ERROR_TERMINATION  # pylint: disable=no-member

        self.report("Running Cubegen")

        gout_params = dict(self.inputs.gaussian_output_params)

        # --------------------------------------------------------------
        # Create the stencil

        ase_atoms = ase.Atoms(gout_params['atomnos'],
                              positions=gout_params['atomcoords'][0])

        es = self.inputs.edge_space.value + self.inputs.dx.value

        xmin = np.min(ase_atoms.positions[:, 0]) - es
        xmax = np.max(ase_atoms.positions[:, 0]) + es
        ymin = np.min(ase_atoms.positions[:, 1]) - es
        ymax = np.max(ase_atoms.positions[:, 1]) + es
        zmin = np.min(ase_atoms.positions[:, 2]) - es
        zmax = np.max(ase_atoms.positions[:, 2]) + es

        geom_center = np.array([xmin + xmax, ymin + ymax, zmin + zmax]) / 2.0

        cell = np.array([xmax - xmin, ymax - ymin, zmax - zmin])

        cell_n = (np.round(cell / self.inputs.dx.value)).astype(int)

        stencil = b"-1 %f %f %f\n" % tuple(geom_center - cell / 2)
        stencil += b"%d %f 0.0 0.0\n" % (cell_n[0], self.inputs.dx.value)
        stencil += b"%d 0.0 %f 0.0\n" % (cell_n[1], self.inputs.dx.value)
        stencil += b"%d 0.0 0.0 %f\n" % (cell_n[2], self.inputs.dx.value)

        # --------------------------------------------------------------
        # Create the parameters dict

        params_dict = {}

        orb_indexes = list(self.inputs.orbital_indexes)
        abs_orb_indexes = []

        if self.inputs.orbital_index_ref == 'half_num_el':

            total_num_electrons = sum(gout_params['num_electrons'])
            ref_index = total_num_electrons // 2

            for i_orb in orb_indexes:
                abs_orb_indexes.append(i_orb + ref_index)

        elif self.inputs.orbital_index_ref == 'abs':
            abs_orb_indexes = orb_indexes

        # remove negative and 0 indexes
        abs_orb_indexes = [i for i in abs_orb_indexes if i >= 1]

        for i_orb in abs_orb_indexes:

            if self.inputs.natural_orbitals:
                params_dict[f"{i_orb}_no"] = {
                    "kind": "MO=%d" % i_orb,
                    "npts": -1,
                }
            else:
                homos = gout_params['homos']
                # use the cubegen convention, where counting starts from 1
                homos = [h + 1 for h in homos]

                for i_spin, h in enumerate(homos):
                    label = self._get_orbital_label(i_orb - h)
                    if len(homos) == 1:
                        params_dict["%d_%s" % (i_orb, label)] = {
                            "kind": "MO=%d" % i_orb,
                            "npts": -1,
                        }
                    else:
                        spin_letter = "a" if i_spin == 0 else "b"
                        params_dict["%d_%s_%s" %
                                    (i_orb, spin_letter, label)] = {
                                        "kind": "%sMO=%d" %
                                        (spin_letter.upper(), i_orb),
                                        "npts": -1,
                                    }

        if not self.inputs.natural_orbitals:
            if self.inputs.generate_density:
                params_dict['density'] = {
                    "kind": "Density=SCF",
                    "npts": -1,
                }
            if self.inputs.generate_spin_density:
                if 'homos' in gout_params and len(gout_params['homos']) == 2:
                    params_dict['spin'] = {
                        "kind": "Spin=SCF",
                        "npts": -1,
                    }

        # --------------------------------------------------------------
        # Create the builder and submit!

        builder = CubegenCalculation.get_builder()
        builder.parent_calc_folder = self.ctx.formchk_node.outputs.remote_folder
        builder.code = self.inputs.cubegen_code
        builder.stencil = SinglefileData(io.BytesIO(stencil))
        builder.parameters = Dict(dict=params_dict)
        builder.retrieve_cubes = self.inputs.retrieve_cubes

        builder.parser_params = self.inputs.cubegen_parser_params

        builder.metadata.options.resources = self._set_resources()

        builder.metadata.options.max_wallclock_seconds = 2 * 60 * 60

        builder.metadata.options.parser_name = self.inputs.cubegen_parser_name

        future = self.submit(builder)
        return ToContext(cubegen_node=future)
def create_builder_from_file(input_folder,
                             input_file_name,
                             code,
                             metadata,
                             pseudo_folder_path=None,
                             use_first=False):
    """Create a populated process builder for a `PwCalculation` from a standard QE input file and pseudo (upf) files.

    :param input_folder: the folder containing the input file
    :type input_folder: aiida.common.folders.Folder or str
    :param input_file_name: the name of the input file
    :type input_file_name: str
    :param code: the code associated with the calculation
    :type code: aiida.orm.Code or str
    :param metadata: metadata values for the calculation (e.g. resources)
    :type metadata: dict
    :param pseudo_folder_path: the folder containing the upf files (if None, then input_folder is used)
    :type pseudo_folder_path: aiida.common.folders.Folder or str or None
    :param use_first: passed to UpfData.get_or_create
    :type use_first: bool
    :raises NotImplementedError: if the structure is not ibrav=0
    :return: a builder instance for PwCalculation
    """
    PwCalculation = CalculationFactory('quantumespresso.pw')

    builder = PwCalculation.get_builder()
    builder.metadata = metadata

    if isinstance(code, str):
        code = Code.get_from_string(code)
    builder.code = code

    # read input_file
    if isinstance(input_folder, str):
        input_folder = Folder(input_folder)

    with input_folder.open(input_file_name) as input_file:
        parsed_file = PwInputFile(input_file)

    builder.structure = parsed_file.get_structuredata()
    builder.kpoints = parsed_file.get_kpointsdata()

    if parsed_file.namelists['SYSTEM']['ibrav'] != 0:
        raise NotImplementedError(
            'Found ibrav != 0: `aiida-quantumespresso` currently only supports ibrav = 0.'
        )

    # Then, strip the namelist items that the plugin doesn't allow or sets later.
    # NOTE: If any of the position or cell units are in alat or crystal
    # units, that will be taken care of by the input parsing tools, and
    # we are safe to fake that they were never there in the first place.
    parameters_dict = copy.deepcopy(parsed_file.namelists)
    for namelist, blocked_key in PwCalculation._blocked_keywords:  # pylint: disable=protected-access
        for key in list(parameters_dict[namelist].keys()):
            # take into account that celldm and celldm(*) must be blocked
            if re.sub('[(0-9)]', '', key) == blocked_key:
                parameters_dict[namelist].pop(key, None)
    builder.parameters = Dict(dict=parameters_dict)

    # Get or create a UpfData node for the pseudopotentials used for the calculation.
    pseudos_map = {}
    if pseudo_folder_path is None:
        pseudo_folder_path = input_folder
    if isinstance(pseudo_folder_path, str):
        pseudo_folder_path = Folder(pseudo_folder_path)
    names = parsed_file.atomic_species['names']
    pseudo_file_names = parsed_file.atomic_species['pseudo_file_names']
    pseudo_file_map = {}
    for name, fname in zip(names, pseudo_file_names):
        if fname not in pseudo_file_map:
            local_path = pseudo_folder_path.get_abs_path(fname)
            upf_node, _ = UpfData.get_or_create(local_path,
                                                use_first=use_first,
                                                store_upf=False)
            pseudo_file_map[fname] = upf_node
        pseudos_map[name] = pseudo_file_map[fname]
    builder.pseudos = pseudos_map

    settings_dict = {}
    if parsed_file.k_points['type'] == 'gamma':
        settings_dict['gamma_only'] = True

    # If there are any fixed coordinates (i.e. force modification) present in the input file, specify in settings
    fixed_coords = parsed_file.atomic_positions['fixed_coords']
    # Function ``any()`` only works for 1-dimensional lists so we have to call it twice manually.
    if any((any(fc_xyz) for fc_xyz in fixed_coords)):
        settings_dict['FIXED_COORDS'] = fixed_coords

    if settings_dict:
        builder.settings = settings_dict

    return builder
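
# Hedged usage sketch (folder path, file name, and code label are placeholders):
from aiida.engine import run

builder = create_builder_from_file(
    '/path/to/calc_dir',  # folder containing the QE input file and UPF files
    'pw.in',
    'pw@localhost',  # code label, placeholder
    {'options': {'resources': {'num_machines': 1},
                 'max_wallclock_seconds': 1800}},
)
run(builder)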
Example #16
    @classmethod
    def parse_aiida(cls, string):
        """Parse a string and return an AiiDA Dict."""
        return Dict(dict=cls.parse(string))
def example_block_pockets(raspa_code, submit=True):
    """Prepare and submit RASPA calculation with blocked pockets."""

    # parameters
    parameters = Dict(
        dict={
            "GeneralSettings": {
                "SimulationType": "MonteCarlo",
                "NumberOfCycles": 50,
                "NumberOfInitializationCycles": 50,
                "PrintEvery": 10,
                "Forcefield": "GenericMOFs",
                "RemoveAtomNumberCodeFromLabel": True,
                "EwaldPrecision": 1e-6,
                "CutOff": 12.0,
            },
            "System": {
                "irmof_1": {
                    "type": "Framework",
                    "UnitCells": "1 1 1",
                    "HeliumVoidFraction": 0.149,
                    "ExternalTemperature": 300.0,
                    "ExternalPressure": 1e5,
                },
                "irmof_10": {
                    "type": "Framework",
                    "UnitCells": "1 1 1",
                    "HeliumVoidFraction": 0.149,
                    "ExternalTemperature": 300.0,
                    "ExternalPressure": 1e5,
                }
            },
            "Component": {
                "methane": {
                    "MoleculeDefinition": "TraPPE",
                    "TranslationProbability": 0.5,
                    "ReinsertionProbability": 0.5,
                    "SwapProbability": 1.0,
                    "CreateNumberOfMolecules": {
                        "irmof_1": 1,
                        "irmof_10": 2,
                    },
                    "BlockPocketsFileName": {
                        "irmof_1": "irmof_1_test",
                        "irmof_10": "irmof_10_test",
                    },
                },
                "xenon": {
                    "MoleculeDefinition": "TraPPE",
                    "TranslationProbability": 0.5,
                    "ReinsertionProbability": 0.5,
                    "SwapProbability": 1.0,
                    "CreateNumberOfMolecules": {
                        "irmof_1": 3,
                        "irmof_10": 4,
                    },
                    "BlockPocketsFileName": {
                        "irmof_1": "irmof_1_test",
                        "irmof_10": "irmof_10_test",
                    },
                },
            },
        })

    # frameworks
    pwd = os.path.dirname(os.path.realpath(__file__))
    framework_1 = CifData(file=os.path.join(pwd, '..', 'files', 'IRMOF-1.cif'))
    framework_10 = CifData(file=os.path.join(pwd, '..', 'files', 'IRMOF-10.cif'))

    # block pocket
    block_pocket_1 = SinglefileData(file=os.path.join(pwd, '..', 'files', 'IRMOF-1_test.block')).store()
    block_pocket_10 = SinglefileData(file=os.path.join(pwd, '..', 'files', 'IRMOF-10_test.block')).store()

    # Constructing the builder
    builder = raspa_code.get_builder()
    builder.framework = {
        "irmof_1": framework_1,
        "irmof_10": framework_10,
    }
    builder.block_pocket = {
        "irmof_1_test": block_pocket_1,
        "irmof_10_test": block_pocket_10,
    }
    builder.parameters = parameters
    builder.metadata.options = {
        "resources": {
            "num_machines": 1,
            "num_mpiprocs_per_machine": 1,
        },
        "max_wallclock_seconds": 1 * 30 * 60,  # 30 min
        "withmpi": False,
    }
    builder.metadata.dry_run = False
    builder.metadata.store_provenance = True

    if submit:
        print("Testing RASPA calculation with two frameworks each one "
              "containing 2 molecules (metahne/xenon) and block pockets ...")
        res, pk = run_get_pk(builder)
        print("calculation pk: ", pk)
        print("Average number of methane molecules/uc (irmof-1):",
              res['output_parameters'].dict.irmof_1['components']['methane']['loading_absolute_average'])
        print("Average number of methane molecules/uc (irmof-10):",
              res['output_parameters'].dict.irmof_1['components']['methane']['loading_absolute_average'])
        print("OK, calculation has completed successfully")
    else:
        print("Generating test input ...")
        builder.metadata.dry_run = True
        builder.metadata.store_provenance = False
        run(builder)
        print("Submission test successful")
        print("In order to actually submit, add '--submit'")
def example_binary_mixture(raspa_code, submit=True):
    """Prepare and submit a RASPA calculation with a binary mixture of components."""

    # parameters
    parameters = Dict(
        dict={
            "GeneralSettings": {
                "SimulationType": "MonteCarlo",
                "NumberOfCycles": 400,
                "NumberOfInitializationCycles": 200,
                "PrintEvery": 200,
                "Forcefield": "GenericMOFs",
                "EwaldPrecision": 1e-6,
                "CutOff": 12.0,
            },
            "System": {
                "box_25_angstrom": {
                    "type": "Box",
                    "BoxLengths": "25 25 25",
                    "ExternalTemperature": 300.0,
                    "ExternalPressure": 5e5,
                },
            },
            "Component": {
                "propane": {
                    "MoleculeDefinition": "TraPPE",
                    "TranslationProbability": 1.0,
                    "RotationProbability": 1.0,
                    "ReinsertionProbability": 1.0,
                    "SwapProbability": 1.0,
                    "CreateNumberOfMolecules": 30,
                },
                "butane": {
                    "MoleculeDefinition": "TraPPE",
                    "TranslationProbability": 1.0,
                    "RotationProbability": 1.0,
                    "ReinsertionProbability": 1.0,
                    "SwapProbability": 1.0,
                    "CreateNumberOfMolecules": 30,
                },
            },
        })

    # Constructing the builder
    builder = raspa_code.get_builder()
    builder.parameters = parameters
    builder.metadata.options = {
        "resources": {
            "num_machines": 1,
            "num_mpiprocs_per_machine": 1,
        },
        "max_wallclock_seconds": 1 * 30 * 60,  # 30 min
        "withmpi": False,
    }
    builder.metadata.dry_run = False
    builder.metadata.store_provenance = True

    if submit:
        print("Testing RASPA with binary mixture (propane/butane) ...")
        res, pk = run_get_pk(builder)
        print("calculation pk: ", pk)
        print(
            "Total Energy average (box_25_angstrom):",
            res['output_parameters'].dict.box_25_angstrom['general']
            ['total_energy_average'])
        print("OK, calculation has completed successfully")
    else:
        print("Generating test input ...")
        builder.metadata.dry_run = True
        builder.metadata.store_provenance = False
        run(builder)
        print("Submission test successful")
        print("In order to actually submit, add '--submit'")
    print("-----")
Example #19
# Computer options
options = AttributeDict()
options.account = ''
options.qos = ''
options.resources = {
    'num_machines': 1,
    'num_mpiprocs_per_machine': 1,
    'parallel_env': 'localmpi',
    'tot_num_mpiprocs': 1
}
#options.queue_name = 'iqtc04.q'
options.max_wallclock_seconds = 3600
inputs.metadata.options = options

# Setup code
inputs.code = Code.get_from_string(codename)

# setup nodes
inputs.structure = structure
inputs.potential = Dict(dict=potential)
inputs.force_constants = force_constants
inputs.parameters_dynaphopy = Dict(dict=dynaphopy_parameters)

# run calculation
result, node = run_get_node(CombinateCalculation, **inputs)
print('results:', result)
print('node:', node)

# submit to daemon
#submit(CombinateCalculation, **inputs)
Example #20
    def parse(self, **kwargs):
        """Receives in input a dictionary of retrieved nodes.

        Does all the logic here.
        """
        try:
            out_folder = self.retrieved
        except NotExistent:
            return self.exit(self.exit_codes.ERROR_NO_RETRIEVED_FOLDER)

        # check what is inside the folder
        list_of_files = out_folder.list_object_names()

        # options.metadata become attributes like this:
        stdout_filename = self.node.get_attribute('output_filename')
        # at least the stdout should exist
        if stdout_filename not in list_of_files:
            return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)

        # This should match 1 file
        xml_files = [
            xml_file for xml_file in self.node.process_class.xml_filenames
            if xml_file in list_of_files
        ]
        if not xml_files:
            return self.exit(self.exit_codes.ERROR_MISSING_XML_FILE)
        elif len(xml_files) > 1:
            return self.exit(self.exit_codes.ERROR_OUTPUT_XML_MULTIPLE)

        if self.node.process_class._FILE_XML_PRINT_COUNTER_BASENAME not in list_of_files:
            self.logger.error(
                'We could not find the print counter file in the output')
            # TODO: Add an error for this counter
            return self.exit(self.exit_codes.ERROR_MISSING_XML_FILE)

        output_stdout = out_folder.get_object_content(stdout_filename)
        output_xml = out_folder.get_object_content(xml_files[0])
        output_xml_counter = out_folder.get_object_content(
            self.node.process_class._FILE_XML_PRINT_COUNTER_BASENAME)
        out_dict, _raw_successful = parse_cp_raw_output(
            output_stdout, output_xml, output_xml_counter)

        # parse the trajectory. Units in Angstrom, picoseconds and eV.
        # append everything to the temporary dictionary raw_trajectory
        raw_trajectory = {}
        evp_keys = [
            'electronic_kinetic_energy', 'cell_temperature',
            'ionic_temperature', 'scf_total_energy', 'enthalpy',
            'enthalpy_plus_kinetic', 'energy_constant_motion', 'volume',
            'pressure'
        ]

        # Now prepare the reordering, as the atoms in the output files are
        # ordered by species, differently from the input structure
        reordering = self._generate_sites_ordering(out_dict['species'],
                                                   out_dict['atoms'])

        pos_filename = '{}.{}'.format(self.node.process_class._PREFIX, 'pos')
        if pos_filename not in list_of_files:
            return self.exit(self.exit_codes.ERROR_READING_POS_FILE)

        trajectories = [
            ('positions', 'pos', CONSTANTS.bohr_to_ang,
             out_dict['number_of_atoms']),
            ('cells', 'cel', CONSTANTS.bohr_to_ang, 3),
            ('velocities', 'vel',
             CONSTANTS.bohr_to_ang / CONSTANTS.timeau_to_sec * 10**12,
             out_dict['number_of_atoms']),
        ]

        for name, extension, scale, elements in trajectories:
            try:
                with out_folder.open('{}.{}'.format(
                        self.node.process_class._PREFIX,
                        extension)) as datafile:
                    data = [l.split() for l in datafile]
                    # POSITIONS stored in angstrom
                traj_data = parse_cp_traj_stanzas(
                    num_elements=elements,
                    splitlines=data,
                    prepend_name='{}_traj'.format(name),
                    rescale=scale)
                # here initialize the dictionary. If the parsing of positions fails, though, I don't have anything
                # out of the CP dynamics. Therefore, the calculation status is set to FAILED.
                if extension != 'cel':
                    raw_trajectory['{}_ordered'.format(
                        name)] = self._get_reordered_array(
                            traj_data['{}_traj_data'.format(name)], reordering)
                else:
                    raw_trajectory['cells'] = numpy.array(
                        traj_data['cells_traj_data'])
                if extension == 'pos':
                    raw_trajectory['times'] = numpy.array(
                        traj_data['{}_traj_times'.format(name)])
            except IOError:
                out_dict['warnings'].append(
                    'Unable to open the {} file... skipping.'.format(
                        extension.upper()))

        # =============== EVP trajectory ============================
        try:
            with out_folder.open('{}.evp'.format(
                    self.node.process_class._PREFIX)) as handle:
                matrix = numpy.genfromtxt(handle)
            # genfromtxt returns a 1-D array if the file has a single row;
            # normalize to 2-D so the column slicing below always works
            matrix = numpy.atleast_2d(matrix)

            if LooseVersion(out_dict['creator_version']) > LooseVersion('5.1'):
                # Between version 5.1 and 5.1.1, someone decided to change
                # the .evp output format, without any way to know that this
                # happened... SVN commit 11158.
                # I here use the version number to parse, plus some
                # heuristics to check that I'm doing the right thing
                #print "New version"
                raw_trajectory['steps'] = numpy.array(matrix[:, 0], dtype=int)
                raw_trajectory['evp_times'] = matrix[:, 1]  # TPS, ps
                raw_trajectory[
                    'electronic_kinetic_energy'] = matrix[:,
                                                          2] * CONSTANTS.hartree_to_ev  # EKINC, eV
                raw_trajectory['cell_temperature'] = matrix[:, 3]  # TEMPH, K
                raw_trajectory['ionic_temperature'] = matrix[:, 4]  # TEMPP, K
                raw_trajectory[
                    'scf_total_energy'] = matrix[:,
                                                 5] * CONSTANTS.hartree_to_ev  # ETOT, eV
                raw_trajectory[
                    'enthalpy'] = matrix[:,
                                         6] * CONSTANTS.hartree_to_ev  # ENTHAL, eV
                raw_trajectory[
                    'enthalpy_plus_kinetic'] = matrix[:,
                                                      7] * CONSTANTS.hartree_to_ev  # ECONS, eV
                raw_trajectory[
                    'energy_constant_motion'] = matrix[:,
                                                       8] * CONSTANTS.hartree_to_ev  # ECONT, eV
                raw_trajectory['volume'] = matrix[:, 9] * (
                    CONSTANTS.bohr_to_ang**3)  # volume, angstrom^3
                raw_trajectory['pressure'] = matrix[:, 10]  # out_press, GPa
            else:
                raw_trajectory['steps'] = numpy.array(matrix[:, 0], dtype=int)
                raw_trajectory['electronic_kinetic_energy'] = matrix[:, 1] * CONSTANTS.hartree_to_ev  # EKINC, eV
                raw_trajectory['cell_temperature'] = matrix[:, 2]  # TEMPH, K
                raw_trajectory['ionic_temperature'] = matrix[:, 3]  # TEMPP, K
                raw_trajectory['scf_total_energy'] = matrix[:, 4] * CONSTANTS.hartree_to_ev  # ETOT, eV
                raw_trajectory['enthalpy'] = matrix[:, 5] * CONSTANTS.hartree_to_ev  # ENTHAL, eV
                raw_trajectory['enthalpy_plus_kinetic'] = matrix[:, 6] * CONSTANTS.hartree_to_ev  # ECONS, eV
                raw_trajectory['energy_constant_motion'] = matrix[:, 7] * CONSTANTS.hartree_to_ev  # ECONT, eV
                raw_trajectory['volume'] = matrix[:, 8] * (CONSTANTS.bohr_to_ang**3)  # volume, angstrom^3
                raw_trajectory['pressure'] = matrix[:, 9]  # out_press, GPa
                raw_trajectory['evp_times'] = matrix[:, 10]  # TPS, ps

            # Heuristic check that the format detection is correct.
            # A better heuristic could also try to fix possible issues
            # (in new versions of QE, it's possible to recompile it with
            # the __OLD_FORMAT flag to get back the old version format...)
            # but I won't do it, as there may be also other columns swapped.
            # Better to stop and ask the user to check what's going on.
            max_time_difference = abs(
                numpy.array(raw_trajectory['times']) -
                numpy.array(raw_trajectory['evp_times'])).max()
            if max_time_difference > 1.e-4:  # It is typically ~1.e-7 due to roundoff errors
                # If there is a large discrepancy
                # it means there is something very weird going on...
                return self.exit(self.exit_codes.ERROR_READING_TRAJECTORY_DATA)

            # Delete evp_times in any case, it's a duplicate of 'times'
            del raw_trajectory['evp_times']
        except IOError:
            out_dict['warnings'].append(
                'Unable to open the EVP file... skipping.')

        # get the symbols from the input
        # TODO: I should have kinds in TrajectoryData
        input_structure = self.node.inputs.structure
        raw_trajectory['symbols'] = [
            str(i.kind_name) for i in input_structure.sites
        ]

        traj = TrajectoryData()
        traj.set_trajectory(
            stepids=raw_trajectory['steps'],
            cells=raw_trajectory['cells'],
            symbols=raw_trajectory['symbols'],
            positions=raw_trajectory['positions_ordered'],
            times=raw_trajectory['times'],
            velocities=raw_trajectory['velocities_ordered'],
        )

        for this_name in evp_keys:
            try:
                traj.set_array(this_name, raw_trajectory[this_name])
            except KeyError:
                # Some columns may have not been parsed, skip
                pass

        self.out('output_trajectory', traj)

        # Remove big entries that would be redundant in the output dictionary.
        # For 'atoms' and 'cell' there is a small chance that nothing was
        # parsed, but then probably nothing moved. 'atoms_index_list' should
        # not be needed, and 'atoms_if_pos_list' is already in the input.
        for key in ('atoms', 'cell', 'ions_positions_stau',
                    'ions_positions_svel', 'ions_positions_taui',
                    'atoms_index_list', 'atoms_if_pos_list',
                    'ions_positions_force'):
            out_dict.pop(key, None)

        # convert the dictionary into an AiiDA object
        output_params = Dict(dict=out_dict)
        self.out('output_parameters', output_params)
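
# Hedged sketch of inspecting these outputs on a finished calculation node
# (the pk is a placeholder):
from aiida.orm import load_node
node = load_node(1234)
traj = node.outputs.output_trajectory
positions = traj.get_array('positions')  # reordered positions, Angstrom
times = traj.get_array('times')  # ps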
def main(cp2k_code_string, ddec_code_string, ddec_atdens_path):
    """Example usage:
    ATDENS_PATH='/home/daniele/aiida-lsmo-codes/data/chargemol/atomic_densities/'
    verdi run run_Cp2kMultistageDdecWorkChain_h2o.py cp2k@localhost ddec@localhost $ATDENS_PATH
    """
    print('Testing CP2K-Multistage calculation + DDEC on H2O...')

    cp2k_code = Code.get_from_string(cp2k_code_string)
    ddec_code = Code.get_from_string(ddec_code_string)

    atoms = ase.build.molecule('H2O')
    atoms.center(vacuum=2.0)
    structure = StructureData(ase=atoms)

    cp2k_options = {
        'resources': {
            'num_machines': 1
        },
        'max_wallclock_seconds': 10 * 60,
        'withmpi': True,
    }

    ddec_options = {
        'resources': {
            'num_machines': 1
        },
        'max_wallclock_seconds': 10 * 60,
        'withmpi': False,
    }

    ddec_params = Dict(
        dict={
            'net charge': 0.0,
            'charge type': 'DDEC6',
            'periodicity along A, B, and C vectors': [True, True, True],
            'compute BOs': False,
            'atomic densities directory complete path': ddec_atdens_path,
            'input filename': 'valence_density',
        })

    inputs = {
        'structure': structure,
        'metadata': {
            'label': 'test-h2o'
        },
        'protocol_tag': Str('test'),
        'cp2k_base': {
            'cp2k': {
                'code': cp2k_code,
                'metadata': {
                    'options': cp2k_options,
                }
            }
        },
        'ddec': {
            'parameters': ddec_params,
            'code': ddec_code,
            'metadata': {
                'options': ddec_options,
            }
        }
    }

    run(MultistageDdecWorkChain, **inputs)
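
# Hedged sketch of an entry point matching the `verdi run` usage shown in the
# docstring of main() above; the argument order follows that example.
if __name__ == '__main__':
    import sys
    main(sys.argv[1], sys.argv[2], sys.argv[3])
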
def example_precision(cp2k_code):
    """Test structure roundtrip precision ase->aiida->cp2k->aiida->ase."""

    print(
        "Testing structure roundtrip precision ase->aiida->cp2k->aiida->ase..."
    )

    thisdir = os.path.dirname(os.path.realpath(__file__))

    # Structure.
    epsilon = 1e-10  # expected precision in Angstrom
    dist = 0.74 + epsilon
    positions = [(0, 0, 0), (0, 0, dist)]
    cell = np.diag([4, -4, 4 + epsilon])
    atoms = ase.Atoms('H2', positions=positions, cell=cell)
    structure = StructureData(ase=atoms)

    # Basis set.
    basis_file = SinglefileData(
        file=os.path.join(thisdir, "..", "files", "BASIS_MOLOPT"))

    # Pseudopotentials.
    pseudo_file = SinglefileData(
        file=os.path.join(thisdir, "..", "files", "GTH_POTENTIALS"))

    # Parameters.
    parameters = Dict(
        dict={
            'GLOBAL': {
                'RUN_TYPE': 'MD',
            },
            'MOTION': {
                'MD': {
                    'TIMESTEP': 0.0,  # do not move atoms
                    'STEPS': 1,
                },
            },
            'FORCE_EVAL': {
                'METHOD': 'Quickstep',
                'DFT': {
                    'BASIS_SET_FILE_NAME': 'BASIS_MOLOPT',
                    'POTENTIAL_FILE_NAME': 'GTH_POTENTIALS',
                    'SCF': {
                        'MAX_SCF': 1,
                    },
                    'XC': {
                        'XC_FUNCTIONAL': {
                            '_': 'LDA',
                        },
                    },
                },
                'SUBSYS': {
                    'KIND': {
                        '_': 'DEFAULT',
                        'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',
                        'POTENTIAL': 'GTH-LDA',
                    },
                },
            },
        })

    # Construct process builder.
    builder = cp2k_code.get_builder()
    builder.structure = structure
    builder.parameters = parameters
    builder.code = cp2k_code
    builder.file = {
        'basis': basis_file,
        'pseudo': pseudo_file,
    }
    builder.metadata.options.resources = {
        "num_machines": 1,
        "num_mpiprocs_per_machine": 1,
    }
    builder.metadata.options.max_wallclock_seconds = 1 * 60 * 60

    print("Submitted calculation...")
    calc = run(builder)

    # Check structure preservation.
    atoms2 = calc['output_structure'].get_ase()

    # Zeros should be preserved exactly.
    if np.all(atoms2.positions[0] == 0.0):
        print("OK, zeros in structure were preserved exactly.")
    else:
        print("ERROR!")
        print("Zeros in structure changed: ", atoms2.positions[0])
        sys.exit(3)

    # Other values should be preserved with epsilon precision.
    dist2 = atoms2.get_distance(0, 1)
    if abs(dist2 - dist) < epsilon:
        print("OK, structure preserved with %.1e Angstrom precision" % epsilon)
    else:
        print("ERROR!")
        print("Structure changed by %e Angstrom" % abs(dist - dist2))
        sys.exit(3)

    # Check cell preservation.
    cell_diff = np.amax(np.abs(atoms2.cell - cell))
    if cell_diff < epsilon:
        print("OK, cell preserved with %.1e Angstrom precision" % epsilon)
    else:
        print("ERROR!")
        print("Cell changed by %e Angstrom" % cell_diff)
        sys.exit(3)
def main(zeopp_code_label, raspa_code_label):
    """
    Prepare inputs and submit the Isotherm workchain.
    Usage: verdi run run_HTSEvWorkChain_KAXQIL_2comp.py zeopp@teslin raspa37@teslin
    """

    builder = HTSEvWorkChain.get_builder()

    builder.metadata.label = "test_ev"

    builder.structure = CifData(
        file=os.path.abspath('../aiida_matdis/data/KAXQIL_clean_P1.cif'),
        label="kaxqil")

    builder.mixture = Dict(
        dict={
            'comp1': {
                'name': 'xenon',
                'molfraction': 0.20
            },
            'comp2': {
                'name': 'krypton',
                'molfraction': 0.80
            }
        })

    builder.ev_output = load_node(21064)

    builder.zeopp.code = Code.get_from_string(zeopp_code_label)
    builder.zeopp.atomic_radii = SinglefileData(
        file=os.path.abspath('../aiida_matdis/data/UFF.rad'))

    builder.raspa_base.raspa.code = Code.get_from_string(raspa_code_label)

    options = {
        "resources": {
            "num_machines": 1,
            "tot_num_mpiprocs": 1,
        },
        "max_wallclock_seconds": 1 * 60 * 60,
        "withmpi": False,
    }
    builder.raspa_base.raspa.metadata.options = options
    builder.zeopp.metadata.options = options

    builder.parameters = Dict(
        dict={
            'ff_framework': 'UFF',  # Default: UFF
            'ff_cutoff': 12.5,
            'temperature': 298,  # (K) Note: at higher temperature less adsorbate is loaded, so the run is faster
            'ff_tail_corrections': False,
            'zeopp_volpo_samples': 1000,  # Default: 1e5 (the default suits real production runs)
            'zeopp_sa_samples': 1000,  # Default: 1e5 (the default suits real production runs)
            'zeopp_block_samples': 100,  # Default: 100
            'raspa_widom_cycles': 500,  # Default: 1e5
            'raspa_gcmc_init_cycles': 500,  # Default: 1e3
            'raspa_gcmc_prod_cycles': 500,  # Default: 1e4
            'pressure_list': [0.1, 1.0],
            'probe_based': True,
        })

    run(builder)
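
# A hedged alternative to the blocking run(builder) call above: for long
# high-throughput screenings one would typically hand the work chain to the
# daemon instead. A sketch, assuming the same builder as in main():
from aiida.engine import submit

def submit_hts(builder):
    """Non-blocking variant: submit the work chain to the AiiDA daemon."""
    node = submit(builder)
    print('Submitted HTSEvWorkChain<{}>'.format(node.pk))
    return node
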
Example #24
:input prop: the quantity required for the calculation
"""
# workfunction to process the incoming JSON dictionary;
# this step is always required, and the request needs to be validated here
prop = 'band_gap'
load_profile()
with open('config.json') as f:
    CALCULATION_OPTIONS = json.load(f)
with open(sys.argv[1]) as f:
    request = f.read()

# run_get_node (from aiida.engine) returns both the outputs and the process
# node; the node attributes (is_finished_ok, uuid, ...) are used below
results, wf = run_get_node(
    ProcessInputs,
    request=Dict(dict=json.loads(request)),
    predefined=Dict(dict=CALCULATION_OPTIONS),
    property=Str(prop),
)

if not wf.is_finished_ok:
    msg = 'Structure retrieval error. See node uuid={} for a more detailed report'.format(
        wf.uuid
    )
    print({
        'error': wf.exit_message,
        'message': msg,
        'stored_request': wf.inputs.request.get_dict(),
    })
Example #25
def example_henry(raspa_code, submit=True):
    """Prepare and submit simple RASPA calculation to compute Henry coefficient."""

    # parameters
    parameters = Dict(
        dict={
            "GeneralSettings": {
                "SimulationType": "MonteCarlo",
                "NumberOfCycles": 400,
                "PrintEvery": 200,
                "Forcefield": "GenericMOFs",
                "EwaldPrecision": 1e-6,
                "CutOff": 12.0,
            },
            "System": {
                "tcc1rs": {
                    "type": "Framework",
                    "UnitCells": "1 1 1",
                    "HeliumVoidFraction": 0.149,
                    "ExternalTemperature": 300.0,
                }
            },
            "Component": {
                "methane": {
                    "MoleculeDefinition": "TraPPE",
                    "WidomProbability": 1.0,
                    "CreateNumberOfMolecules": 0,
                }
            },
        })

    # framework
    pwd = os.path.dirname(os.path.realpath(__file__))
    framework = CifData(file=os.path.join(pwd, '..', 'files', 'TCC1RS.cif'))

    # Constructing the builder
    builder = raspa_code.get_builder()
    builder.framework = {
        "tcc1rs": framework,
    }
    builder.parameters = parameters
    builder.metadata.options = {
        "resources": {
            "num_machines": 1,
            "num_mpiprocs_per_machine": 1,
        },
        "max_wallclock_seconds": 1 * 30 * 60,  # 30 min
        "withmpi": False,
    }
    builder.metadata.dry_run = False
    builder.metadata.store_provenance = True

    if submit:
        print("Testing RASPA on computing Henry coefficient ...")
        res, pk = run_get_pk(builder)
        print("calculation pk: ", pk)
        print("Average Henry coefficient (methane in tcc1rs):",
              res['output_parameters'].dict.tcc1rs['components']['methane']['henry_coefficient_average'])
        print("OK, calculation has completed successfully")
    else:
        print("Generating test input ...")
        builder.metadata.dry_run = True
        builder.metadata.store_provenance = False
        run(builder)
        print("submission test successful")
        print("In order to actually submit, add '--submit'")
    print("-----")
def cli(code_node, structure_group_name, workchain_group_name,
        base_parameter_node, pseudo_familyname, kptper_recipang,
        nume2bnd_ratio, calc_method, max_wallclock_seconds,
        max_active_calculations, number_of_nodes, memory_gb, ndiag, npools,
        sleep_interval, z_movement_only, keep_workdir, dryrun, run_debug):

    valid_calc_methods = ['scf', 'relax', 'vc-relax']
    if calc_method not in valid_calc_methods:
        raise Exception("Invalid calc_method: {}".format(calc_method))

    # setup parameters
    code = load_node(code_node)
    structure_group = Group.get_from_string(structure_group_name)
    workchain_group = Group.objects.get_or_create(name=workchain_group_name)[0]
    base_parameter = load_node(base_parameter_node)
    # announce if running in debug mode
    if run_debug:
        print("Running in debug mode!")

    # Load all the structures in the structure group, not-yet run in workchain_group_name
    uncalculated_structures = retrieve_alluncalculated_structures(
        structure_group_name, workchain_group_name=workchain_group_name)
    if len(uncalculated_structures) == 0:
        print(("All structures in {} already have associated workchains in "
               "the group {}".format(structure_group_name,
                                     workchain_group_name)))
        sys.exit()

    # determine number of calculations to submit
    running_calculations = retrieve_numactive_calculations()
    calcs_to_submit = max_active_calculations - running_calculations

    # submit calculations
    for structure in uncalculated_structures:
        print("Preparing to cli {}".format(structure))

        # ensure no more than the max number of calcs are submitted
        while calcs_to_submit <= 0:
            running_calculations = retrieve_numactive_calculations()
            calcs_to_submit = max_active_calculations - running_calculations
            if calcs_to_submit <= 0:  # in case jobs finished during submission
                print(("{} calcs running,"
                       "max num calcs {} waiting....".format(
                           running_calculations, max_active_calculations)))
                time.sleep(sleep_interval)

        # start timer to inspect job submission times
        from timeit import default_timer as timer
        start = timer()

        # determine number of bands & setup the parameters
        parameters = wf_setupparams(base_parameter, structure,
                                    Str(pseudo_familyname),
                                    Float(nume2bnd_ratio))

        # determine kpoint mesh & setup kpoints
        kpoints = wf_getkpoints(structure, Int(kptper_recipang))

        # determine parallelization & resources (setup the settings & options)
        if number_of_nodes:
            num_machines = int(number_of_nodes)
        else:
            num_machines = get_nummachines_forcalc(structure,
                                                   pseudo_familyname)
            max_nodes_to_submit = 20
            if num_machines > max_nodes_to_submit:
                print("{} nodes requested, maximum is {}".format(
                    num_machines, max_nodes_to_submit))
                print(
                    "If you wish to submit, choose the number of nodes manually with --number_of_nodes"
                )
                continue
        options_dict = {
            'max_wallclock_seconds': max_wallclock_seconds,
            'resources': {
                'num_machines': num_machines
            },
        }
        if memory_gb:
            options_dict['max_memory_kb'] = int(int(memory_gb) * 1024 * 1024)
        if run_debug:
            num_machines = 2
            options_dict['resources']['num_machines'] = num_machines
            options_dict['max_wallclock_seconds'] = int(30 * 60)
            options_dict['queue_name'] = 'debug'
        workchain_options = Dict(dict=options_dict)

        if npools:
            nk = npools
        else:
            nk = get_qe_nk(num_machines, code)
        # cast cmdline values to str so integer pool counts serialize cleanly
        settings_dict = {'cmdline': ['-nk', str(nk)], 'no_bands': True}
        if ndiag:
            settings_dict['cmdline'] += ['-ndiag', str(ndiag)]
        if z_movement_only:
            num_atoms = len(structure.get_ase())
            coordinate_fix = [[True, True, False]] * num_atoms
            settings_dict['fixed_coords'] = coordinate_fix
        settings = Dict(dict=settings_dict)

        # setup inputs & submit workchain
        clean_workdir = not keep_workdir
        inputs = {
            'structure': structure,
            'settings': settings,
            'clean_workdir': Bool(clean_workdir)
        }
        base_inputs = {
            'code': code,
            'pseudo_family': Str(pseudo_familyname),
            'kpoints': kpoints,
            'parameters': parameters,
            'options': workchain_options,
            'settings': settings,
        }
        if calc_method == 'scf':
            workchain = WorkflowFactory('quantumespresso.pw.base')
            inputs.update(base_inputs)
        elif calc_method == 'relax':
            workchain = WorkflowFactory('quantumespresso.pw.relax')
            inputs['base'] = base_inputs
            inputs['relaxation_scheme'] = Str('relax')
            inputs['final_scf'] = Bool(False)
            inputs['meta_convergence'] = Bool(False)
        elif calc_method == 'vc-relax':
            workchain = WorkflowFactory('quantumespresso.pw.relax')
            inputs['base'] = base_inputs
            inputs['relaxation_scheme'] = Str('vc-relax')
            inputs['final_scf'] = Bool(True)
            inputs['meta_convergence'] = Bool(True)
        else:
            raise Exception("Invalid calc_method: {}".format(calc_method))

        def print_timing(start):
            end = timer()
            time_elapsed = end - start
            print("timing: {}s".format(time_elapsed))

        if dryrun:
            print("ase_structure: {}".format(structure.get_ase()))
            print("aiida_settings: {}".format(settings.get_dict()))
            print("aiida_options: {}".format(workchain_options.get_dict()))
            print("aiida_inputs: {}".format(inputs))
            print_timing(start)
        else:
            node = submit(workchain, **inputs)
            workchain_group.add_nodes([node])
            print("WorkChain: {} submitted".format(node))
            print_timing(start)
        calcs_to_submit -= 1

        if run_debug:
            sys.exit()
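
# get_qe_nk() is used above but not shown; a plausible sketch, under the
# assumption that the -nk pool count should evenly divide the total number
# of MPI processes on the allocated machines.
def get_qe_nk(num_machines, code):
    """Pick a k-point pool count (-nk) that divides the total MPI procs."""
    procs_per_machine = code.computer.get_default_mpiprocs_per_machine() or 1
    total_procs = num_machines * procs_per_machine
    for nk in range(min(total_procs, 16), 0, -1):
        if total_procs % nk == 0:
            return nk
    return 1
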
Example #27
def example_base(raspa_code, submit=True):
    """Prepare and submit simple RASPA calculation."""

    # parameters
    parameters = Dict(
        dict={
            "GeneralSettings": {
                "SimulationType": "MonteCarlo",
                "NumberOfCycles": 50,
                "NumberOfInitializationCycles": 50,
                "PrintEvery": 10,
                "Forcefield": "GenericMOFs",
                "EwaldPrecision": 1e-6,
                "CutOff": 12.0,
                "WriteBinaryRestartFileEvery": 10,
            },
            "System": {
                "tcc1rs": {
                    "type": "Framework",
                    "UnitCells": "1 1 1",
                    "HeliumVoidFraction": 0.149,
                    "ExternalTemperature": 300.0,
                    "ExternalPressure": 5e5,
                },
            },
            "Component": {
                "methane": {
                    "MoleculeDefinition": "TraPPE",
                    "TranslationProbability": 0.5,
                    "ReinsertionProbability": 0.5,
                    "SwapProbability": 1.0,
                    "CreateNumberOfMolecules": 0,
                }
            },
        })

    # framework
    framework = CifData(
        file=os.path.join(os.path.dirname(os.path.realpath(__file__)), '..',
                          'files', 'TCC1RS.cif'))

    # Constructing the builder
    builder = raspa_code.get_builder()
    builder.framework = {
        "tcc1rs": framework,
    }
    builder.parameters = parameters
    builder.metadata.options = {
        "resources": {
            "num_machines": 1,
            "num_mpiprocs_per_machine": 1,
        },
        "max_wallclock_seconds": 1 * 30 * 60,  # 30 min
        "withmpi": False,
    }
    builder.metadata.dry_run = False
    builder.metadata.store_provenance = True

    if submit:
        print("Testing RASPA with simple input ...")
        res, pk = run_get_pk(builder)
        print("calculation pk: ", pk)
        print(
            "Average number of methane molecules/uc:",
            res['output_parameters'].dict.tcc1rs['components']['methane']
            ['loading_absolute_average'])
        print("OK, calculation has completed successfully")
        pytest.base_calc_pk = pk
    else:
        print("Generating test input ...")
        builder.metadata.dry_run = True
        builder.metadata.store_provenance = False
        run(builder)
        print("Submission test successful")
        print("In order to actually submit, add '--submit'")
    print("-----")
Example #28
    def parse(self, **kwargs):  #pylint: disable=too-many-branches
        """
        Parse the retrieved output folder and store results in the database.

        Output nodes are attached via ``self.out`` and an exit code is
        returned to report success or the specific failure mode.
        """
        # pylint: disable=too-many-locals

        # Check that the retrieved folder is there
        try:
            self.retrieved
        except exceptions.NotExistent:
            return self.exit_codes.ERROR_NO_RETRIEVED_FOLDER

        # Check the folder content is as expected
        list_of_files = self.retrieved.list_object_names()

        # pylint: disable=protected-access
        inp_params = self.node.inputs.parameters
        output_files = inp_params.output_files
        # Note: set(A) <= set(B) checks whether A is a subset of B
        if not set(output_files) <= set(list_of_files):
            msg = 'Expected output files {}; found only {}.'.format(
                output_files, list_of_files)
            self.logger.error(msg)
            return self.exit_codes.ERROR_OUTPUT_FILES_MISSING

        # Parse output files
        output_parsers = inp_params.output_parsers
        output_links = inp_params.output_links
        output_parameters = Dict(dict={})

        empty_block = False

        for fname, parser, link in zip(output_files, output_parsers,
                                       output_links):

            with self.retrieved.open(fname, 'rb') as handle:
                if parser is None:

                    # just add file, if no parser implemented
                    parsed = SinglefileData(file=handle)
                    self.out(link, parsed)

                    # workaround: if block pocket file is empty, raise an error
                    # (it indicates the calculation did not finish)
                    if link == 'block':
                        if not parsed.get_content().strip():
                            self.logger.error(
                                'Empty block file. This indicates the calculation of blocked pockets did not finish.'
                            )
                            empty_block = True
                        else:
                            output_parameters.update_dict({
                                'Number_of_blocking_spheres':
                                int(parsed.get_content().split()[0])
                            })

                else:
                    # else parse and merge the keys into output_parameters
                    try:
                        parsed_dict = parser.parse(
                            handle.read().decode('utf8'))
                    except ValueError:
                        self.logger.error(
                            'Error parsing file {} with parser {}'.format(
                                fname, parser))
                        # fall back to an empty dict so the update below
                        # does not raise a NameError
                        parsed_dict = {}

                    output_parameters.update_dict(parsed_dict)

        # add name of input structures as parameter
        output_parameters.set_attribute(
            'Input_structure_filename',
            inp_params.get_structure_file_name(self.node.inputs.structure))
        # add input parameters for convenience
        # note: should be added at top-level in order to allow tab completion
        # of <calcnode>.res.Input_...
        for k in inp_params.keys():
            output_parameters.set_attribute('Input_{}'.format(k),
                                            inp_params.get_attribute(k))
        self.out('output_parameters', output_parameters)

        if empty_block:
            return self.exit_codes.ERROR_EMPTY_BLOCK

        return self.exit_codes.SUCCESS
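
# The exit codes referenced above would be declared on the corresponding
# CalcJob spec; a minimal sketch with an assumed class name and status
# numbers, not the original plugin's definitions:
from aiida.engine import CalcJob

class SomeCalculation(CalcJob):
    """Hypothetical CalcJob owning the parser sketched above."""

    @classmethod
    def define(cls, spec):
        super().define(spec)
        # status numbers below are assumptions
        spec.exit_code(0, 'SUCCESS', message='Calculation completed.')
        spec.exit_code(300, 'ERROR_NO_RETRIEVED_FOLDER',
                       message='The retrieved folder could not be accessed.')
        spec.exit_code(301, 'ERROR_OUTPUT_FILES_MISSING',
                       message='Not all expected output files were retrieved.')
        spec.exit_code(302, 'ERROR_EMPTY_BLOCK',
                       message='A block pocket file was empty.')
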
import aiida
from aiida.orm import Code, Str, Dict
import pytest

import deliver
from deliver import deliver_stage, stage_solution

aiida.load_profile("<profile>")

deliver.GENERAL_INPUTS = {
    "code": Code.get_from_string('<code>'),
    "pseudo_family": Str("<pseudo-family>"),
    "options": Dict(dict={
        'withmpi': False,
        'max_wallclock_seconds': 3600 * 2
    }),
    "parameters": Dict(),
}


@pytest.mark.parametrize("stage", [1, 2, 3, 4])
def test_solution(stage):
    deliver_stage(stage, stage_solution(stage).deliverable)
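
# Usage note (assumption): these parametrized checks would be collected with
# something like `pytest -k test_solution`, after replacing the "<profile>",
# "<code>" and "<pseudo-family>" placeholders above.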
Example #30
output_parameters = Dict( # pylint: disable=invalid-name
    dict={
        "Density": 0.440527,
        "Density_unit": "g/cm^3",
        "Estimated_saturation_loading": 41.985376,
        "Estimated_saturation_loading_unit": "mol/kg",
        "Input_block": [1.865, 100],
        "Input_ha": "DEF",
        "Input_structure_filename": "tmp4a13iby3.cif",
        "Input_volpo": [1.865, 1.865, 100000],
        "Number_of_blocking_spheres": 0,
        "POAV_A^3": 8623.69,
        "POAV_A^3_unit": "A^3",
        "POAV_Volume_fraction": 0.67999,
        "POAV_Volume_fraction_unit": None,
        "POAV_cm^3/g": 1.54358,
        "POAV_cm^3/g_unit": "cm^3/g",
        "PONAV_A^3": 0.0,
        "PONAV_A^3_unit": "A^3",
        "PONAV_Volume_fraction": 0.0,
        "PONAV_Volume_fraction_unit": None,
        "PONAV_cm^3/g": 0.0,
        "PONAV_cm^3/g_unit": "cm^3/g",
        "Unitcell_volume": 12682.1,
        "Unitcell_volume_unit": "A^3",
        "adsorption_energy_widom_average": -11.1626207486,
        "adsorption_energy_widom_dev": 0.02083606,
        "adsorption_energy_widom_unit": "kJ/mol",
        "conversion_factor_molec_uc_to_cm3stp_cm3": 2.9347915768,
        "conversion_factor_molec_uc_to_gr_gr": 4.7676018308,
        "conversion_factor_molec_uc_to_mol_kg": 0.2972320343,
        "henry_coefficient_average": 7.71003e-06,
        "henry_coefficient_dev": 1.65115e-08,
        "henry_coefficient_unit": "mol/kg/Pa",
        "is_kh_enough": True,
        "is_porous": True,
        "isotherm": {
            "enthalpy_of_adsorption_average": [-13.510607783958, -10.787702310577],
            "enthalpy_of_adsorption_dev": [0.76886345231266, 1.0196832123586],
            "enthalpy_of_adsorption_unit": "kJ/mol",
            "loading_absolute_average": [3.6279844874624, 16.11968088498],
            "loading_absolute_dev": [0.15865715470393, 0.075109385284932],
            "loading_absolute_unit": "mol/kg",
            "pressure": [5.8, 65],
            "pressure_unit": "bar"
        },
        "temperature": 298,
        "temperature_unit": "K"
    })