Example #1
    def exposed_outputs(self,
                        node: orm.ProcessNode,
                        process_class: Type['Process'],
                        namespace: Optional[str] = None,
                        agglomerate: bool = True) -> AttributeDict:
        """Return the outputs which were exposed from the ``process_class`` and emitted by the specific ``node``

        :param node: process node whose outputs to try and retrieve
        :param namespace: Namespace in which to search for exposed outputs.
        :param agglomerate: If set to true, all parent namespaces of the given ``namespace`` will also
            be searched for outputs. Outputs in lower-lying namespaces take precedence.

        :returns: exposed outputs

        """
        namespace_separator = self.spec().namespace_separator

        output_key_map = {}
        # maps the exposed name to all outputs that belong to it
        top_namespace_map = collections.defaultdict(list)
        link_types = (LinkType.CREATE, LinkType.RETURN)
        process_outputs_dict = node.get_outgoing(link_type=link_types).nested()

        for port_name in process_outputs_dict:
            top_namespace = port_name.split(namespace_separator)[0]
            top_namespace_map[top_namespace].append(port_name)

        for port_namespace in self._get_namespace_list(
                namespace=namespace, agglomerate=agglomerate):
            # only the top-level key is stored in _exposed_outputs
            for top_name in top_namespace_map:
                exposed = self.spec()._exposed_outputs[port_namespace][process_class]  # pylint: disable=protected-access
                if top_name in exposed:
                    output_key_map[top_name] = port_namespace

        result = {}

        for top_name, port_namespace in output_key_map.items():
            # collect all outputs belonging to the given top_name
            for port_name in top_namespace_map[top_name]:
                if port_namespace is None:
                    result[port_name] = process_outputs_dict[port_name]
                else:
                    result[port_namespace + namespace_separator +
                           port_name] = process_outputs_dict[port_name]

        return AttributeDict(result)
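
A minimal usage sketch for the method above, as it might appear in a parent workchain step once a sub-workchain has finished. The ``PwBaseWorkChain`` class, the 'base' namespace and the ``workchain_base`` context key are illustrative assumptions borrowed from the later examples:

    def inspect_base(self):
        """Attach the outputs exposed from the finished sub-workchain to this workchain."""
        workchain = self.ctx.workchain_base  # child node stored earlier via ToContext
        # With agglomerate=True (the default), parent namespaces of 'base' are searched too
        outputs = self.exposed_outputs(workchain, PwBaseWorkChain, namespace='base')
        for name, node in outputs.items():
            self.out(name, node)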
Example #2
    def load_instance_state(self, saved_state, load_context):
        super(WorkChain, self).load_instance_state(saved_state, load_context)
        # Load the context
        self._context = AttributeDict(
            **deserialize_data(saved_state[self._CONTEXT]))

        # Recreate the stepper
        self._stepper = None
        stepper_state = saved_state.get(self._STEPPER_STATE, None)
        if stepper_state is not None:
            self._stepper = self.spec().get_outline().recreate_stepper(
                stepper_state, self)

        self.set_logger(self._calc.logger)

        if self._awaitables:
            self.action_awaitables()
Example #3
    def exposed_inputs(self,
                       process_class: Type['Process'],
                       namespace: Optional[str] = None,
                       agglomerate: bool = True) -> AttributeDict:
        """Gather a dictionary of the inputs that were exposed for a given Process class under an optional namespace.

        :param process_class: Process class whose inputs to try and retrieve
        :param namespace: PortNamespace in which to look for the inputs
        :param agglomerate: If set to true, all parent namespaces of the given ``namespace`` will also be
            searched for inputs. Inputs in lower-lying namespaces take precedence.

        :returns: exposed inputs

        """
        exposed_inputs = {}

        namespace_list = self._get_namespace_list(namespace=namespace,
                                                  agglomerate=agglomerate)
        for sub_namespace in namespace_list:

            # A sub_namespace of None indicates the base-level namespace
            if sub_namespace is None:
                inputs = self.inputs
                port_namespace = self.spec().inputs
            else:
                inputs = self.inputs
                for part in sub_namespace.split('.'):
                    inputs = inputs[part]  # type: ignore[index]
                try:
                    port_namespace = self.spec().inputs.get_port(
                        sub_namespace)  # type: ignore[assignment]
                except KeyError:
                    raise ValueError(
                        f'this process does not contain the "{sub_namespace}" input namespace'
                    )

            # Get the list of ports that were exposed for the given Process class in the current sub_namespace
            exposed_inputs_list = self.spec()._exposed_inputs[sub_namespace][
                process_class]  # pylint: disable=protected-access

            for name in port_namespace.ports.keys():
                if inputs and name in inputs and name in exposed_inputs_list:
                    exposed_inputs[name] = inputs[name]

        return AttributeDict(exposed_inputs)
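
The method above is the retrieval half of input exposure; a hedged sketch of the defining half and a typical call site, with ``PwBaseWorkChain`` and the 'base' namespace as illustrative assumptions:

    @classmethod
    def define(cls, spec):
        super().define(spec)
        # Expose the inputs of the sub-workchain under a dedicated namespace
        spec.expose_inputs(PwBaseWorkChain, namespace='base')

    def run_base(self):
        # Gather the exposed inputs back from self.inputs and relay them to the sub-workchain
        inputs = AttributeDict(self.exposed_inputs(PwBaseWorkChain, namespace='base'))
        return ToContext(workchain_base=self.submit(PwBaseWorkChain, **inputs))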
Example #4
    def validate_inputs(self):
        """
        A HpCalculation can be continued either from a completed PwCalculation in which case
        the parent_calculation input should be set, or it can be a restart from a previous HpCalculation
        as for example the final post-processing calculation when parallelizing over atoms and
        or q-points, in which case the parent_folder should be set. In either case, at least one
        of the two inputs has to be defined properly
        """
        self.ctx.inputs = AttributeDict({
            'code': self.inputs.code,
            'qpoints': self.inputs.qpoints,
        })

        if not ('parent_calculation' in self.inputs
                or 'parent_folder' in self.inputs):
            self.abort_nowait(
                'Neither the parent_calculation nor the parent_folder input was defined'
            )

        try:
            self.ctx.inputs.parent_folder = self.inputs.parent_calculation.out.remote_folder
        except AttributeError:
            self.ctx.inputs.parent_folder = self.inputs.parent_folder

        if 'parameters' in self.inputs:
            self.ctx.inputs.parameters = self.inputs.parameters.get_dict()
        else:
            self.ctx.inputs.parameters = {}

        if 'settings' in self.inputs:
            self.ctx.inputs.settings = self.inputs.settings.get_dict()
        else:
            self.ctx.inputs.settings = {}

        if 'options' in self.inputs:
            self.ctx.inputs.options = self.inputs.options.get_dict()
        else:
            self.ctx.inputs.options = get_default_options()

        if 'INPUTHP' not in self.ctx.inputs.parameters:
            self.ctx.inputs.parameters['INPUTHP'] = {}

        if self.inputs.only_initialization.value:
            self.ctx.inputs.parameters['INPUTHP'][
                'determine_num_pert_only'] = True
Example #5
def prepare_process_inputs(inputs):
    """
    Prepare the inputs dictionary for a calculation process. Any remaining bare dictionaries in the inputs
    dictionary will be wrapped in a ParameterData node, except for the '_options' key, which should remain
    a standard dictionary. Another exception is dictionaries whose keys are not strings, but for example tuples.
    This is the format used by input groups, as for example in the explicit pseudo dictionary, where the key is
    a tuple of the kinds to which the UpfData corresponds.
    """
    prepared_inputs = AttributeDict()

    for key, val in inputs.iteritems():
        if key != '_options' and isinstance(val, dict) and all(
                isinstance(k, basestring) for k in val):
            prepared_inputs[key] = ParameterData(dict=val)
        else:
            prepared_inputs[key] = val

    return prepared_inputs
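
A hedged illustration of the wrapping rules described in the docstring; ``upf_node`` stands in for any previously loaded UpfData node:

inputs = {
    'parameters': {'CONTROL': {'calculation': 'scf'}},  # bare dict with string keys: wrapped
    '_options': {'max_wallclock_seconds': 3600},        # '_options' is explicitly left alone
    'pseudos': {('Ba', 'Ti'): upf_node},                # tuple keys: left as a plain dict
}
prepared = prepare_process_inputs(inputs)
assert isinstance(prepared['parameters'], ParameterData)
assert isinstance(prepared['_options'], dict)
assert isinstance(prepared['pseudos'], dict)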
Example #6
    def _init_context(self):
        """Initialize context variables that are used during the."""
        # Set the exit code to error in case we forget to set it to NO_ERROR
        self.ctx.exit_code = self.exit_codes.ERROR_UNKNOWN  # pylint: disable=no-member

        # Copy structures to the context, since we will empty it as we go;
        # structures is a workchain input and cannot be modified, so we work on a copy.
        self.ctx.structures = dict(self.inputs.structures)

        # Continue to submit workchains until this is True
        self.ctx.is_finished = False

        # Define an iteration index
        self.ctx.iteration = 0

        # Define the context inputs
        self.ctx.inputs = AttributeDict()

        # Define a container to store the quantities extracted in each step
        self.ctx.quantities_container = []
Example #7
    def run_bands(self):
        """Run the PwBaseWorkChain in bands mode along the path of high-symmetry determined by Seekpath."""
        inputs = AttributeDict(
            self.exposed_inputs(PwBaseWorkChain, namespace='bands'))
        inputs.parameters = inputs.parameters.get_dict()
        inputs.parameters.setdefault('CONTROL', {})
        inputs.parameters['CONTROL']['restart_mode'] = 'restart'
        inputs.parameters['CONTROL']['calculation'] = 'bands'

        if 'kpoints' not in self.inputs.bands:
            inputs.kpoints = self.ctx.kpoints_path

        inputs.structure = self.ctx.current_structure
        inputs.parent_folder = self.ctx.current_folder

        inputs = prepare_process_inputs(PwBaseWorkChain, inputs)
        running = self.submit(PwBaseWorkChain, **inputs)

        self.report('launching PwBaseWorkChain<{}> in {} mode'.format(
            running.pk, 'bands'))

        return ToContext(workchain_bands=running)
Example #8
    def setup(self):
        """Store exposed inputs in the context."""
        self.ctx.current_structure = self.inputs.structure
        self.ctx.current_restart_folder = None
        self.ctx.is_converged = False
        self.ctx.iteration = 0
        self.ctx.workchains = []

        self.ctx.inputs = AttributeDict()
        self.ctx.inputs.incar = self._assemble_incar()
        self.ctx.inputs.code = self.inputs.code
        self.ctx.inputs.structure = self.inputs.structure
        self.ctx.inputs.potcar_family = self.inputs.potcar_family
        self.ctx.inputs.potcar_mapping = self.inputs.potcar_mapping
        self.ctx.inputs.options = self.inputs.options
        self.ctx.inputs.settings = {'parser_settings': {'add_structure': True}}
        if 'max_iterations' in self.inputs.restart:
            self.ctx.inputs.max_iterations = self.inputs.restart.max_iterations
        if 'clean_workdir' in self.inputs.restart:
            self.ctx.inputs.clean_workdir = self.inputs.restart.clean_workdir
Example #9
    def write(self, dst):
        """Write the parameter file for premod at dst."""
        with open(dst, 'w') as handler:
            data = AttributeDict(self.data.get_dict())
            handler.write('MODE_SIM=' + data.MODE_SIM + '\n')
            handler.write('MODE_ENTITY=' + data.MODE_ENTITY + '\n')
            handler.write('MODE_SD=' + data.MODE_SD + '\n')
            handler.write('FILE_OUT_PPT=' + data.FILE_OUT_PPT + '\n')
            handler.write('OUTPUT_TIMES=' + str(len(data.OUTPUT_TIMES)) + '\n')
            # Loop over the output times
            for time in data.OUTPUT_TIMES:
                handler.write(str(time) + ' ')
            handler.write('\n')
            #handler.write('!' + '\n')
            handler.write('MODE_IO=' + data.MODE_IO + '\n')
            handler.write('FILE_SOLVER=' + data.FILE_SOLVER + '\n')
            handler.write('FILE_ALLOY=' + data.FILE_ALLOY + '\n')
            handler.write('FILE_PROCESS=' + data.FILE_PROCESS + '\n')
            handler.write('FILE_PHASES=' + data.FILE_PHASES + '\n')
            handler.write('FILE_PPTLIB=' + data.FILE_PPTLIB + '\n')
            handler.write('FILE_PPTSIM=' + data.FILE_PPTSIM + '\n')
Example #10
    def run_final_scf(self):
        """Run the PwBaseWorkChain to run a final scf PwCalculation for the relaxed structure."""
        inputs = AttributeDict(
            self.exposed_inputs(PwBaseWorkChain, namespace='base'))
        inputs.structure = self.ctx.current_structure
        inputs.parent_folder = self.ctx.current_parent_folder
        inputs.parameters = inputs.parameters.get_dict()

        inputs.parameters.setdefault('CONTROL', {})
        inputs.parameters['CONTROL']['calculation'] = 'scf'
        inputs.parameters['CONTROL']['restart_mode'] = 'restart'

        inputs = prepare_process_inputs(PwBaseWorkChain, inputs)
        running = self.submit(PwBaseWorkChain, **inputs)

        self.report('launching PwBaseWorkChain<{}> for final scf'.format(
            running.pk))

        return ToContext(workchain_scf=running)
Example #11
    def init_inputs(self):
        self.ctx.running_calc = 0

        # prepare inputs
        self.ctx.inputs = AttributeDict()
        self.ctx.inputs.code = self.inputs.code
        self.ctx.inputs.parameters = self.inputs.parameters
        self.ctx.inputs.basis_family = self.inputs.basis_family
        label = self.inputs.metadata.get('label', 'CRYSTAL calc')

        # work with options
        options_dict = self.inputs.options.get_dict()

        # oxidation states
        self.ctx.try_oxi = options_dict.pop('try_oxi_if_fails', False)
        use_oxi = options_dict.pop('use_oxidation_states', None)
        if use_oxi is not None:
            self.report(f'{label}: Using oxidation states: {use_oxi}')
            self.ctx.inputs.use_oxistates = Dict(dict=use_oxi)
        self.ctx.high_spin_preferred = options_dict.pop(
            'high_spin_preferred', False)

        # magnetism
        is_magnetic = options_dict.pop('is_magnetic', False)
        if is_magnetic:
            self.report(f'{label}: is_magnetic is set, guessing magnetism')
            self.ctx.inputs.is_magnetic = Bool(True)
            self.ctx.inputs.spinlock_steps = Int(
                options_dict.pop('spinlock_steps', 5))

        # get calculation entry_point
        try:
            self.ctx.calculation = self._parallel_calculation \
                if (options_dict['resources']['num_machines'] > 1
                    or options_dict['resources']['num_mpiprocs_per_machine'] > 1) else self._serial_calculation
        except KeyError:
            self.ctx.calculation = self._parallel_calculation

        # remaining options are passed down to calculations
        self.ctx.options = options_dict
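
For reference, a hedged sketch of the options Dict this step consumes; the values are illustrative. Every key shown besides 'resources' is popped off above, and whatever remains is handed down to the calculations:

options = Dict(dict={
    'try_oxi_if_fails': True,   # popped into self.ctx.try_oxi
    'is_magnetic': True,        # triggers the magnetism branch
    'spinlock_steps': 10,       # popped only when is_magnetic is set
    'resources': {'num_machines': 1, 'num_mpiprocs_per_machine': 4},  # selects the parallel calculation
})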
Example #12
    def _set_extra_vasp_parameters(self):
        """
        Find out if there are any extra parameters, not part of the INCAR, that need to be set.

        One example is the dynamics namespace, which for instance handles the flags for selective dynamics.
        These flags are more connected to a calculation than to a StructureData, and thus it was necessary
        to make sure they are valid inputs to the VASP workchain.

        """
        # return if dynamics is not supplied
        if 'dynamics' not in self._parameters:
            return

        if self._parameters.dynamics:
            self._massage.dynamics = AttributeDict()

        for key, item in self._parameters.dynamics.items():
            key = key.lower()
            if key in ['positions_dof']:
                self._massage.dynamics[key] = item
            else:
                warn(f"Key {key} is not supported for 'dynamics' input.")
Example #13
    def prepare_calculation(self):
        """Prepare all the neccessary input links to run the calculation"""
        self.ctx.inputs = AttributeDict({
            'code': self.inputs.code,
            'structure': self.ctx.structure,
            '_options': self.ctx.options,
        })

        # restart from the previous calculation only if the necessary data are
        # provided
        if self.ctx.restart_calc:
            self.ctx.inputs['parent_folder'] = self.ctx.restart_calc
            self.ctx.parameters['FORCE_EVAL']['DFT']['SCF'][
                'SCF_GUESS'] = 'RESTART'
        else:
            self.ctx.parameters['FORCE_EVAL']['DFT']['SCF'][
                'SCF_GUESS'] = 'ATOMIC'

        # use the new parameters
        p = ParameterData(dict=self.ctx.parameters)
        p.store()
        self.ctx.inputs['parameters'] = p
Example #14
def deserialize_data(data):
    """
    Deserialize a single value or a collection that may contain serialized AiiDA nodes. This is
    essentially the inverse operation of serialize_data which will reload node instances from
    the serialized UUID data. Encoded tuples that are used as dictionary keys will be decoded.

    :param data: serialized data
    :return: the deserialized data with keys decoded and node instances loaded from UUID's
    """
    if isinstance(data, AttributeDict):
        return AttributeDict({
            decode_key(key): deserialize_data(value)
            for key, value in data.iteritems()
        })
    elif isinstance(data, AttributesFrozendict):
        return AttributesFrozendict({
            decode_key(key): deserialize_data(value)
            for key, value in data.iteritems()
        })
    elif isinstance(data, collections.Mapping):
        return {
            decode_key(key): deserialize_data(value)
            for key, value in data.iteritems()
        }
    elif isinstance(data, collections.Sequence) and not isinstance(
            data, (str, unicode)):
        return [deserialize_data(value) for value in data]
    elif isinstance(data,
                    (str, unicode)) and data.startswith(_PREFIX_VALUE_NODE):
        return load_node(uuid=data[len(_PREFIX_VALUE_NODE):])
    elif isinstance(data,
                    (str, unicode)) and data.startswith(_PREFIX_VALUE_GROUP):
        return load_group(uuid=data[len(_PREFIX_VALUE_GROUP):])
    elif isinstance(data,
                    (str, unicode)) and data.startswith(_PREFIX_VALUE_UUID):
        return uuid.UUID(data[len(_PREFIX_VALUE_UUID):])
    else:
        return data
Example #15
    def prepare_calculation(self):
        """Prepare all the neccessary input links to run the calculation"""
        self.ctx.inputs = AttributeDict({
            'code': self.inputs.code,
            'structure': self.ctx.structure,
            '_options': self.ctx.options,
        })

        if self.ctx.restart_calc is not None:
            self.ctx.inputs['retrieved_parent_folder'] = self.ctx.restart_calc

        if self.ctx.block_component_0 is not None:
            self.ctx.inputs['block_component_0'] = self.ctx.block_component_0

        # Reading the CutOff, compute the UnitCells expansion
        cutoff = self.ctx.parameters['GeneralSettings']['CutOff']
        ucs = multiply_unit_cell(self.inputs.structure, cutoff * 2)
        self.ctx.parameters['GeneralSettings'][
            'UnitCells'] = "{} {} {}".format(ucs[0], ucs[1], ucs[2])
        # use the new parameters
        p = ParameterData(dict=self.ctx.parameters)
        p.store()
        self.ctx.inputs['parameters'] = p
Example #16
    def _add_overrides(self, incar):
        """Add incar tag overrides, except the ones controlled by other inputs (for provenance)."""
        overrides = AttributeDict({
            k.lower(): v
            for k, v in self.inputs.incar_add.get_dict().items()
        })
        if 'ibrion' in overrides:
            raise ValueError(
                'overriding IBRION not allowed, use relax.xxx inputs to control'
            )
        if 'isif' in overrides:
            raise ValueError(
                'overriding ISIF not allowed, use relax.xxx inputs to control')
        if 'nsw' in overrides:
            if self.inputs.relax.positions.value and overrides.nsw < 1:
                raise ValueError(
                    'NSW (num ionic steps) was set to 0 but relaxing positions was requested'
                )
            elif not self.inputs.relax.positions.value and overrides.nsw > 0:
                self.report(
                    'NSW (num ionic steps) > 0 but relaxing positions was not requested '
                    '(ionic steps will be performed but ions will not move)')
        incar.update(overrides)
Example #17
def serialize_data(data):
    """
    Serialize a value or collection that may potentially contain AiiDA nodes, which
    will be serialized to their UUID. Keys encountered in any mappings, such as a dictionary,
    will also be encoded if necessary. An example is where tuples are used as keys in the
    pseudo potential input dictionaries. These operations will ensure that the returned data is
    JSON serializable.

    :param data: a single value or collection
    :return: the serialized data with the same internal structure
    """
    if isinstance(data, Node):
        return '{}{}'.format(_PREFIX_VALUE_NODE, data.uuid)
    elif isinstance(data, Group):
        return '{}{}'.format(_PREFIX_VALUE_GROUP, data.uuid)
    elif isinstance(data, uuid.UUID):
        return '{}{}'.format(_PREFIX_VALUE_UUID, data)
    elif isinstance(data, AttributeDict):
        return AttributeDict({
            encode_key(key): serialize_data(value)
            for key, value in data.iteritems()
        })
    elif isinstance(data, AttributesFrozendict):
        return AttributesFrozendict({
            encode_key(key): serialize_data(value)
            for key, value in data.iteritems()
        })
    elif isinstance(data, collections.Mapping):
        return {
            encode_key(key): serialize_data(value)
            for key, value in data.iteritems()
        }
    elif isinstance(data, collections.Sequence) and not isinstance(
            data, (str, unicode)):
        return [serialize_data(value) for value in data]
    else:
        return data
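
The two helpers above are designed as inverses. A minimal round-trip sketch, assuming ``node`` is any stored AiiDA node and that plain string keys pass through encode_key unchanged, as the docstring's "if necessary" suggests:

context = {'structure': node, 'options': {'resources': {'num_machines': 1}}}
serialized = serialize_data(context)      # the node becomes a prefixed UUID string
assert serialized['structure'].startswith(_PREFIX_VALUE_NODE)
roundtrip = deserialize_data(serialized)  # the prefix is detected and the node reloaded
assert roundtrip['structure'].uuid == node.uuid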
Example #18
    def run_relax(self):
        """
        Run the PwBaseWorkChain to run a relax PwCalculation
        """
        self.ctx.iteration += 1

        inputs = AttributeDict(
            self.exposed_inputs(PwBaseWorkChain, namespace='base'))
        inputs.structure = self.ctx.current_structure
        inputs.parameters = inputs.parameters.get_dict()

        inputs.parameters.setdefault('CONTROL', {})
        inputs.parameters['CONTROL'][
            'calculation'] = self.inputs.relaxation_scheme.value

        # Do not clean workdirs of sub workchains, because then we won't be able to restart from them
        inputs.pop('clean_workdir', None)

        inputs = prepare_process_inputs(PwBaseWorkChain, inputs)
        running = self.submit(PwBaseWorkChain, **inputs)

        self.report('launching PwBaseWorkChain<{}>'.format(running.pk))

        return ToContext(workchains=append_(running))
Example #19
    def validate_inputs(self):
        """Make sure all the required inputs are there and valid, create input dictionary for calculation."""
        self.ctx.inputs = AttributeDict()
        self.ctx.inputs.code = self.inputs.code
        self.ctx.inputs.structure = self.inputs.structure
        self.ctx.inputs.kpoints = self.inputs.kpoints
        self.ctx.inputs.parameters = self.inputs.incar
        if 'settings' in self.inputs:
            self.ctx.inputs.settings = self.inputs.settings

        ## Verify options
        options = AttributeDict()
        options.computer = self.inputs.code.get_computer()
        options.update(self.inputs.options.get_dict())
        expected_options = ['computer', 'resources']
        if options.computer.get_scheduler_type() != 'direct':
            expected_options.append('queue_name')
        for option in expected_options:
            if option not in options:
                self._fail_compat(exception=ValueError(
                    'option {} required but not passed!'.format(option)))
        if builder_interface(CalculationFactory(
                'vasp.vasp')):  ## aiida 1.0.0+ will use this
            self.ctx.inputs.options = options
        else:
            self.ctx.inputs._options = options  ## pylint: disable=protected-access

        ## Verify potcars
        try:
            self.ctx.inputs.potential = get_data_class(
                'vasp.potcar').get_potcars_from_structure(
                    structure=self.inputs.structure,
                    family_name=self.inputs.potcar_family.value,
                    mapping=self.inputs.potcar_mapping.get_dict())
        except ValueError as err:
            self._fail_compat(exception=err)
        except NotExistent as err:
            self._fail_compat(exception=err)
Example #20
def test_validate_input(test_properties_code, properties_calc_parameters,
                        test_wavefunction):
    from aiida.common.extendeddicts import AttributeDict
    from aiida_crystal_dft.calculations.properties import PropertiesCalculation
    inputs = AttributeDict()
    with pytest.raises(ValueError):
        PropertiesCalculation(inputs)
    inputs.metadata = {
        'options': {
            'resources': {
                'num_machines': 1,
                'num_mpiprocs_per_machine': 1
            }
        }
    }
    inputs.code = test_properties_code
    with pytest.raises(ValueError):
        PropertiesCalculation(inputs)
    inputs.wavefunction = test_wavefunction
    with pytest.raises(ValueError):
        PropertiesCalculation(inputs)
    inputs.parameters = properties_calc_parameters
    assert PropertiesCalculation(inputs)
Example #21
def compare_structures(structure_a, structure_b):
    """Compare two StructreData objects A, B and return a delta (A - B) of the relevant properties."""

    delta = AttributeDict()
    delta.absolute = AttributeDict()
    delta.relative = AttributeDict()
    volume_a = structure_a.get_cell_volume()
    volume_b = structure_b.get_cell_volume()
    delta.absolute.volume = np.absolute(volume_a - volume_b)
    delta.relative.volume = np.absolute(volume_a - volume_b) / volume_a

    pos_a = np.array([site.position for site in structure_a.sites])
    pos_b = np.array([site.position for site in structure_b.sites])
    delta.absolute.pos = pos_a - pos_b

    site_vectors = [
        delta.absolute.pos[i, :] for i in range(delta.absolute.pos.shape[0])
    ]
    a_lengths = np.linalg.norm(pos_a, axis=1)
    delta.absolute.pos_lengths = np.array(
        [np.linalg.norm(vector) for vector in site_vectors])
    delta.relative.pos_lengths = np.array(
        [np.linalg.norm(vector) for vector in site_vectors]) / a_lengths

    cell_lengths_a = np.array(structure_a.cell_lengths)
    delta.absolute.cell_lengths = np.absolute(
        cell_lengths_a - np.array(structure_b.cell_lengths))
    delta.relative.cell_lengths = np.absolute(
        cell_lengths_a - np.array(structure_b.cell_lengths)) / cell_lengths_a

    cell_angles_a = np.array(structure_a.cell_angles)
    delta.absolute.cell_angles = np.absolute(cell_angles_a -
                                             np.array(structure_b.cell_angles))
    delta.relative.cell_angles = np.absolute(
        cell_angles_a - np.array(structure_b.cell_angles)) / cell_angles_a

    return delta
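
A usage sketch: feed the returned delta into a convergence check, with purely illustrative thresholds:

delta = compare_structures(relaxed_structure, initial_structure)
converged = (delta.relative.volume < 0.01
             and np.all(delta.relative.pos_lengths < 0.01)
             and np.all(delta.relative.cell_lengths < 0.01))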
Example #22
    def validate_inputs(self):
        """
        Validate inputs that might depend on each other and cannot be validated by the spec. Also define
        dictionary `inputs` in the context, that will contain the inputs for the calculation that will be launched
        in the `run_calculation` step.
        """
        self.ctx.inputs = AttributeDict({
            'code': self.inputs.code,
            'structure': self.inputs.structure,
            'kpoints': self.inputs.kpoints,
            'parameters': self.inputs.parameters.get_dict()
        })

        if 'CONTROL' not in self.ctx.inputs.parameters:
            self.ctx.inputs.parameters['CONTROL'] = {}

        if 'calculation' not in self.ctx.inputs.parameters['CONTROL']:
            self.ctx.inputs.parameters['CONTROL']['calculation'] = 'scf'

        if 'parent_folder' in self.inputs:
            self.ctx.inputs.parent_folder = self.inputs.parent_folder
            self.ctx.inputs.parameters['CONTROL']['restart_mode'] = 'restart'
        else:
            self.ctx.inputs.parent_folder = None
            self.ctx.inputs.parameters['CONTROL']['restart_mode'] = 'from_scratch'

        if 'settings' in self.inputs:
            self.ctx.inputs.settings = self.inputs.settings.get_dict()
        else:
            self.ctx.inputs.settings = {}

        if 'options' in self.inputs:
            self.ctx.inputs._options = self.inputs.options.get_dict()
        else:
            self.ctx.inputs._options = {}

        if 'vdw_table' in self.inputs:
            self.ctx.inputs.vdw_table = self.inputs.vdw_table

        # Either automatic_parallelization or options has to be specified
        if not any([key in self.inputs for key in ['options', 'automatic_parallelization']]):
            self.abort_nowait('you have to specify either the options or automatic_parallelization input')
            return

        # If automatic parallelization is not enabled, we better make sure that the options satisfy minimum requirements
        if 'automatic_parallelization' not in self.inputs:
            num_machines = self.ctx.inputs['_options'].get('resources', {}).get('num_machines', None)
            max_wallclock_seconds = self.ctx.inputs['_options'].get('max_wallclock_seconds', None)

            if num_machines is None or max_wallclock_seconds is None:
                self.abort_nowait("no automatic_parallelization requested, but the options do not specify both '{}' and '{}'"
                    .format('num_machines', 'max_wallclock_seconds'))

        # Validate the inputs related to pseudopotentials
        structure = self.inputs.structure
        pseudos = self.inputs.get('pseudos', None)
        pseudo_family = self.inputs.get('pseudo_family', None)

        try:
            self.ctx.inputs.pseudo = validate_and_prepare_pseudos_inputs(structure, pseudos, pseudo_family)
        except ValueError as exception:
            self.abort_nowait('{}'.format(exception))
Example #23
class PwBaseWorkChain(BaseRestartWorkChain):
    """
    Base workchain to launch a Quantum Espresso pw.x calculation
    """
    _verbose = True
    _calculation_class = PwCalculation
    _error_handler_entry_point = 'aiida_quantumespresso.workflow_error_handlers.pw.base'

    defaults = AttributeDict({
        'qe': qe_defaults,
        'delta_threshold_degauss': 30,
        'delta_factor_degauss': 0.1,
        'delta_factor_mixing_beta': 0.8,
        'delta_factor_max_seconds': 0.95,
    })

    @classmethod
    def define(cls, spec):
        super(PwBaseWorkChain, cls).define(spec)
        spec.input('code', valid_type=Code)
        spec.input('structure', valid_type=StructureData)
        spec.input('kpoints', valid_type=KpointsData)
        spec.input('parameters', valid_type=ParameterData)
        spec.input_group('pseudos', required=False)
        spec.input('pseudo_family', valid_type=Str, required=False)
        spec.input('parent_folder', valid_type=RemoteData, required=False)
        spec.input('vdw_table', valid_type=SinglefileData, required=False)
        spec.input('settings', valid_type=ParameterData, required=False)
        spec.input('options', valid_type=ParameterData, required=False)
        spec.input('automatic_parallelization', valid_type=ParameterData, required=False)
        spec.outline(
            cls.setup,
            cls.validate_inputs,
            if_(cls.should_run_init)(
                cls.validate_init_inputs,
                cls.run_init,
                cls.inspect_init,
            ),
            while_(cls.should_run_calculation)(
                cls.prepare_calculation,
                cls.run_calculation,
                cls.inspect_calculation,
            ),
            cls.results,
        )
        spec.output('output_array', valid_type=ArrayData, required=False)
        spec.output('output_band', valid_type=BandsData, required=False)
        spec.output('output_structure', valid_type=StructureData, required=False)
        spec.output('output_parameters', valid_type=ParameterData)
        spec.output('remote_folder', valid_type=RemoteData)
        spec.output('retrieved', valid_type=FolderData)

    def validate_inputs(self):
        """
        Validate inputs that might depend on each other and cannot be validated by the spec. Also define
        dictionary `inputs` in the context, that will contain the inputs for the calculation that will be launched
        in the `run_calculation` step.
        """
        self.ctx.inputs = AttributeDict({
            'code': self.inputs.code,
            'structure': self.inputs.structure,
            'kpoints': self.inputs.kpoints,
            'parameters': self.inputs.parameters.get_dict()
        })

        if 'CONTROL' not in self.ctx.inputs.parameters:
            self.ctx.inputs.parameters['CONTROL'] = {}

        if 'calculation' not in self.ctx.inputs.parameters['CONTROL']:
            self.ctx.inputs.parameters['CONTROL']['calculation'] = 'scf'

        if 'parent_folder' in self.inputs:
            self.ctx.inputs.parent_folder = self.inputs.parent_folder
            self.ctx.inputs.parameters['CONTROL']['restart_mode'] = 'restart'
        else:
            self.ctx.inputs.parent_folder = None
            self.ctx.inputs.parameters['CONTROL']['restart_mode'] = 'from_scratch'

        if 'settings' in self.inputs:
            self.ctx.inputs.settings = self.inputs.settings.get_dict()
        else:
            self.ctx.inputs.settings = {}

        if 'options' in self.inputs:
            self.ctx.inputs._options = self.inputs.options.get_dict()
        else:
            self.ctx.inputs._options = {}

        if 'vdw_table' in self.inputs:
            self.ctx.inputs.vdw_table = self.inputs.vdw_table

        # Either automatic_parallelization or options has to be specified
        if not any([key in self.inputs for key in ['options', 'automatic_parallelization']]):
            self.abort_nowait('you have to specify either the options or automatic_parallelization input')
            return

        # If automatic parallelization is not enabled, we better make sure that the options satisfy minimum requirements
        if 'automatic_parallelization' not in self.inputs:
            num_machines = self.ctx.inputs['_options'].get('resources', {}).get('num_machines', None)
            max_wallclock_seconds = self.ctx.inputs['_options'].get('max_wallclock_seconds', None)

            if num_machines is None or max_wallclock_seconds is None:
                self.abort_nowait("no automatic_parallelization requested, but the options do not specify both '{}' and '{}'"
                    .format('num_machines', 'max_wallclock_seconds'))

        # Validate the inputs related to pseudopotentials
        structure = self.inputs.structure
        pseudos = self.inputs.get('pseudos', None)
        pseudo_family = self.inputs.get('pseudo_family', None)

        try:
            self.ctx.inputs.pseudo = validate_and_prepare_pseudos_inputs(structure, pseudos, pseudo_family)
        except ValueError as exception:
            self.abort_nowait('{}'.format(exception))

    def should_run_init(self):
        """
        Return whether an initialization calculation should be run, which is the case if the user wants
        to use automatic parallelization and has specified the ParameterData node in the inputs
        """
        return 'automatic_parallelization' in self.inputs

    def validate_init_inputs(self):
        """
        Validate the inputs that are required for the initialization calculation. The automatic_parallelization
        input expects a ParameterData node with the following keys:

            * max_wallclock_seconds
            * target_time_seconds
            * max_num_machines

        If any of these keys are not set or any superfluous keys are specified, the workchain will abort.
        """
        parallelization = self.inputs.automatic_parallelization.get_dict()

        expected_keys = ['max_wallclock_seconds', 'target_time_seconds', 'max_num_machines']
        received_keys = [(key, parallelization.get(key, None)) for key in expected_keys]
        remaining_keys = [key for key in parallelization.keys() if key not in expected_keys]

        for k, v in [(key, value) for key, value in received_keys if value is None]:
            self.abort_nowait('required key "{}" in automatic_parallelization input not found'.format(k))
            return

        if remaining_keys:
            self.abort_nowait('detected unrecognized keys in the automatic_parallelization input: {}'
                .format(' '.join(remaining_keys)))
            return

        # Add the calculation mode to the automatic parallelization dictionary
        self.ctx.automatic_parallelization = {
            'max_wallclock_seconds': parallelization['max_wallclock_seconds'],
            'target_time_seconds': parallelization['target_time_seconds'],
            'max_num_machines': parallelization['max_num_machines'],
            'calculation_mode': self.ctx.inputs.parameters['CONTROL']['calculation']
        }

        self.ctx.inputs._options.setdefault('resources', {})['num_machines'] = parallelization['max_num_machines']
        self.ctx.inputs._options['max_wallclock_seconds'] = parallelization['max_wallclock_seconds']

    def run_init(self):
        """
        Run a first dummy pw calculation that will exit straight after initialization. At that
        point it will have generated some general output pertaining to the dimensions of the
        calculation which we can use to distribute available computational resources
        """
        inputs = self.ctx.inputs

        # Set the initialization flag and the initial default options
        inputs.settings['ONLY_INITIALIZATION'] = True
        inputs._options = update_mapping(inputs['_options'], get_default_options())

        # Prepare the final input dictionary
        inputs = self._prepare_process_inputs(inputs)
        process = PwCalculation.process()
        running = submit(process, **inputs)

        self.report('launching initialization PwCalculation<{}>'.format(running.pid))

        return ToContext(calculation_init=running)

    def inspect_init(self):
        """
        Use the initialization PwCalculation to determine the required resource settings for the
        requested calculation based on the settings in the automatic_parallelization input
        """
        calculation = self.ctx.calculation_init

        if not calculation.has_finished_ok():
            self.abort_nowait('the initialization calculation did not finish successfully')
            return

        # Get automated parallelization settings
        parallelization = get_pw_parallelization_parameters(calculation, **self.ctx.automatic_parallelization)

        node = ParameterData(dict=parallelization)
        self.out('automatic_parallelization', node)
        self.report('results of automatic parallelization in {}<{}>'.format(node.__class__.__name__, node.pk))

        options = self.ctx.inputs._options
        base_resources = options.get('resources', {})
        goal_resources = parallelization['resources']

        scheduler = calculation.get_computer().get_scheduler()
        resources = create_scheduler_resources(scheduler, base_resources, goal_resources)

        cmdline = self.ctx.inputs.settings.get('cmdline', [])
        cmdline = cmdline_remove_npools(cmdline)
        cmdline.extend(['-nk', str(parallelization['npools'])])

        # Set the new cmdline setting and resource options
        self.ctx.inputs.settings['cmdline'] = cmdline
        self.ctx.inputs._options = update_mapping(options, {'resources': resources})

        # Remove the only initialization flag
        self.ctx.inputs.settings.pop('ONLY_INITIALIZATION')

        return

    def prepare_calculation(self):
        """
        Prepare the inputs for the next calculation
        """
        if self.ctx.restart_calc:
            self.ctx.inputs.parameters['CONTROL']['restart_mode'] = 'restart'
            self.ctx.inputs.parent_folder = self.ctx.restart_calc.out.remote_folder

    def _prepare_process_inputs(self, inputs):
        """
        The 'max_seconds' setting in the 'CONTROL' card of the parameters will be set to a fraction of the
        'max_wallclock_seconds' that will be given to the job via the '_options' dictionary. This will prevent the job
        from being prematurely terminated by the scheduler without getting the chance to exit cleanly.
        """
        max_wallclock_seconds = inputs._options['max_wallclock_seconds']
        max_seconds_factor = self.defaults.delta_factor_max_seconds
        max_seconds = max_wallclock_seconds * max_seconds_factor
        inputs.parameters['CONTROL']['max_seconds'] = max_seconds

        return super(PwBaseWorkChain, self)._prepare_process_inputs(inputs)
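
For reference, a sketch of the automatic_parallelization input that validate_init_inputs above expects: a ParameterData node with exactly the three documented keys (the values are illustrative):

automatic_parallelization = ParameterData(dict={
    'max_wallclock_seconds': 1800,  # hard limit handed to the scheduler
    'target_time_seconds': 1500,    # desired runtime used to size the resources
    'max_num_machines': 4,          # upper bound on the number of machines
})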
Example #24
    def __init__(self, config, profile, **kwargs):
        """Construct an instance and define the `obj` dictionary that is required by the `Context`."""
        super().__init__(**kwargs)
        self.obj = AttributeDict({'config': config, 'profile': profile})
Example #25
calc.use_structure(structure)
calc.use_parameters(Dict(dict=dynaphopy_parameters))
calc.use_force_constants(force_constants)
calc.use_trajectory(trajectory)

calc.store_all()

calc.submit()
print("submitted calculation with PK={}".format(calc.dbnode.pk))

LammpsOptimizeCalculation = CalculationFactory('lammps.optimize')
inputs = LammpsOptimizeCalculation.get_builder()

# Computer options
options = AttributeDict()
options.account = ''
options.qos = ''
options.resources = {
    'num_machines': 1,
    'num_mpiprocs_per_machine': 1,
    'parallel_env': 'localmpi',
    'tot_num_mpiprocs': 1
}
#options.queue_name = 'iqtc04.q'
options.max_wallclock_seconds = 3600
inputs.metadata.options = options

# Setup code
inputs.code = Code.get_from_string(codename)
Example #26
    def _init_context(self):
        """Initialize context variables that are used during the logical flow."""
        self.ctx.exit_code = self.exit_codes.ERROR_UNKNOWN  # pylint: disable=no-member
        self.ctx.is_finished = False
        self.ctx.iteration = 0
        self.ctx.inputs = AttributeDict()
Example #27
class PwBaseWorkChain(BaseRestartWorkChain):
    """Workchain to run a Quantum ESPRESSO pw.x calculation with automated error handling and restarts"""

    _calculation_class = PwCalculation
    _error_handler_entry_point = 'aiida_quantumespresso.workflow_error_handlers.pw.base'

    defaults = AttributeDict({
        'qe': qe_defaults,
        'delta_threshold_degauss': 30,
        'delta_factor_degauss': 0.1,
        'delta_factor_mixing_beta': 0.8,
        'delta_factor_max_seconds': 0.95,
    })

    @classmethod
    def define(cls, spec):
        super(PwBaseWorkChain, cls).define(spec)
        spec.input('code', valid_type=Code)
        spec.input('structure', valid_type=StructureData)
        spec.input('kpoints', valid_type=KpointsData, required=False)
        spec.input('kpoints_distance', valid_type=Float, required=False)
        spec.input('kpoints_force_parity', valid_type=Bool, required=False)
        spec.input('parameters', valid_type=ParameterData)
        spec.input_namespace('pseudos', required=False)
        spec.input('pseudo_family', valid_type=Str, required=False)
        spec.input('parent_folder', valid_type=RemoteData, required=False)
        spec.input('vdw_table', valid_type=SinglefileData, required=False)
        spec.input('settings', valid_type=ParameterData, required=False)
        spec.input('options', valid_type=ParameterData, required=False)
        spec.input('automatic_parallelization',
                   valid_type=ParameterData,
                   required=False)
        spec.outline(
            cls.setup,
            cls.validate_inputs,
            if_(cls.should_run_init)(
                cls.validate_init_inputs,
                cls.run_init,
                cls.inspect_init,
            ),
            while_(cls.should_run_calculation)(
                cls.prepare_calculation,
                cls.run_calculation,
                cls.inspect_calculation,
            ),
            cls.results,
        )
        spec.exit_code(301, 'ERROR_INVALID_INPUT_PSEUDO_POTENTIALS',
                       message="the explicitly passed 'pseudos' or 'pseudo_family' input could not be used to get the necessary potentials")
        spec.exit_code(302, 'ERROR_INVALID_INPUT_KPOINTS',
                       message="neither the 'kpoints' nor the 'kpoints_distance' input was specified")
        spec.exit_code(303, 'ERROR_INVALID_INPUT_RESOURCES',
                       message="neither the 'options' nor 'automatic_parallelization' input was specified")
        spec.exit_code(304, 'ERROR_INVALID_INPUT_RESOURCES_UNDERSPECIFIED',
                       message="the 'options' do not specify both 'num_machines' and 'max_wallclock_seconds'")
        spec.exit_code(310, 'ERROR_INVALID_INPUT_AUTOMATIC_PARALLELIZATION_MISSING_KEY',
                       message="required key for 'automatic_parallelization' was not specified")
        spec.exit_code(311, 'ERROR_INVALID_INPUT_AUTOMATIC_PARALLELIZATION_UNRECOGNIZED_KEY',
                       message="unrecognized keys were specified for 'automatic_parallelization'")
        spec.exit_code(401, 'ERROR_INITIALIZATION_CALCULATION_FAILED',
                       message='the initialization calculation failed')
        spec.exit_code(402, 'ERROR_CALCULATION_INVALID_INPUT_FILE',
                       message='the calculation failed because it had an invalid input file')
        spec.output('output_array', valid_type=ArrayData, required=False)
        spec.output('output_band', valid_type=BandsData, required=False)
        spec.output('output_structure',
                    valid_type=StructureData,
                    required=False)
        spec.output('output_parameters', valid_type=ParameterData)
        spec.output('remote_folder', valid_type=RemoteData)

    def validate_inputs(self):
        """
        Validate inputs that might depend on each other and cannot be validated by the spec. Also define
        dictionary `inputs` in the context, that will contain the inputs for the calculation that will be launched
        in the `run_calculation` step.
        """
        self.ctx.inputs = AttributeDict({
            'code': self.inputs.code,
            'structure': self.inputs.structure,
            'parameters': self.inputs.parameters.get_dict()
        })

        if 'CONTROL' not in self.ctx.inputs.parameters:
            self.ctx.inputs.parameters['CONTROL'] = {}

        if 'calculation' not in self.ctx.inputs.parameters['CONTROL']:
            self.ctx.inputs.parameters['CONTROL']['calculation'] = 'scf'

        if 'parent_folder' in self.inputs:
            self.ctx.inputs.parent_folder = self.inputs.parent_folder
            self.ctx.inputs.parameters['CONTROL']['restart_mode'] = 'restart'
        else:
            self.ctx.inputs.parameters['CONTROL'][
                'restart_mode'] = 'from_scratch'

        if 'settings' in self.inputs:
            self.ctx.inputs.settings = self.inputs.settings.get_dict()
        else:
            self.ctx.inputs.settings = {}

        if 'options' in self.inputs:
            self.ctx.inputs.options = self.inputs.options.get_dict()
        else:
            self.ctx.inputs.options = {}

        if 'vdw_table' in self.inputs:
            self.ctx.inputs.vdw_table = self.inputs.vdw_table

        # Either automatic_parallelization or options has to be specified
        if not any([
                key in self.inputs
                for key in ['options', 'automatic_parallelization']
        ]):
            return self.exit_codes.ERROR_INVALID_INPUT_RESOURCES

        # If automatic parallelization is not enabled, we better make sure that the options satisfy minimum requirements
        if 'automatic_parallelization' not in self.inputs:
            num_machines = self.ctx.inputs.options.get('resources', {}).get(
                'num_machines', None)
            max_wallclock_seconds = self.ctx.inputs.options.get(
                'max_wallclock_seconds', None)

            if num_machines is None or max_wallclock_seconds is None:
                return self.exit_codes.ERROR_INVALID_INPUT_RESOURCES_UNDERSPECIFIED

        # Either a KpointsData with given mesh/path, or a desired distance between k-points should be specified
        if all([
                key not in self.inputs
                for key in ['kpoints', 'kpoints_distance']
        ]):
            return self.exit_codes.ERROR_INVALID_INPUT_KPOINTS

        try:
            self.ctx.inputs.kpoints = self.inputs.kpoints
        except AttributeError:
            structure = self.inputs.structure
            distance = self.inputs.kpoints_distance
            force_parity = self.inputs.get('kpoints_force_parity', Bool(False))
            self.ctx.inputs.kpoints = create_kpoints_from_distance(
                structure, distance, force_parity)

        # Validate the inputs related to pseudopotentials
        structure = self.inputs.structure
        pseudos = self.inputs.get('pseudos', None)
        pseudo_family = self.inputs.get('pseudo_family', None)

        try:
            self.ctx.inputs.pseudo = validate_and_prepare_pseudos_inputs(
                structure, pseudos, pseudo_family)
        except ValueError as exception:
            self.report('{}'.format(exception))
            return self.exit_codes.ERROR_INVALID_INPUT_PSEUDO_POTENTIALS

    def should_run_init(self):
        """
        Return whether an initialization calculation should be run, which is the case if the user wants
        to use automatic parallelization and has specified the ParameterData node in the inputs
        """
        return 'automatic_parallelization' in self.inputs

    def validate_init_inputs(self):
        """
        Validate the inputs that are required for the initialization calculation. The automatic_parallelization
        input expects a ParameterData node with the following keys:

            * max_wallclock_seconds
            * target_time_seconds
            * max_num_machines

        If any of these keys are not set or any superfluous keys are specified, the workchain will abort.
        """
        parallelization = self.inputs.automatic_parallelization.get_dict()

        expected_keys = [
            'max_wallclock_seconds', 'target_time_seconds', 'max_num_machines'
        ]
        received_keys = [(key, parallelization.get(key, None))
                         for key in expected_keys]
        remaining_keys = [
            key for key in parallelization.keys() if key not in expected_keys
        ]

        for k, v in [(key, value) for key, value in received_keys
                     if value is None]:
            self.report(
                'required key "{}" in automatic_parallelization input not found'
                .format(k))
            return self.exit_codes.ERROR_INVALID_INPUT_AUTOMATIC_PARALLELIZATION_MISSING_KEY

        if remaining_keys:
            self.report(
                'detected unrecognized keys in the automatic_parallelization input: {}'
                .format(' '.join(remaining_keys)))
            return self.exit_codes.ERROR_INVALID_INPUT_AUTOMATIC_PARALLELIZATION_UNRECOGNIZED_KEY

        # Add the calculation mode to the automatic parallelization dictionary
        self.ctx.automatic_parallelization = {
            'max_wallclock_seconds': parallelization['max_wallclock_seconds'],
            'target_time_seconds': parallelization['target_time_seconds'],
            'max_num_machines': parallelization['max_num_machines'],
            'calculation_mode': self.ctx.inputs.parameters['CONTROL']['calculation']
        }

        self.ctx.inputs.options.setdefault(
            'resources',
            {})['num_machines'] = parallelization['max_num_machines']
        self.ctx.inputs.options['max_wallclock_seconds'] = parallelization[
            'max_wallclock_seconds']

    def run_init(self):
        """
        Run a first dummy pw calculation that will exit straight after initialization. At that
        point it will have generated some general output pertaining to the dimensions of the
        calculation which we can use to distribute available computational resources
        """
        inputs = self.ctx.inputs

        # Set the initialization flag and the initial default options
        inputs.settings['ONLY_INITIALIZATION'] = True
        inputs.options = update_mapping(inputs['options'],
                                        get_default_options())

        # Prepare the final input dictionary
        process = PwCalculation.process()
        inputs = self._prepare_process_inputs(process, inputs)
        running = self.submit(process, **inputs)

        self.report('launching initialization PwCalculation<{}>'.format(
            running.pk))

        return ToContext(calculation_init=running)

    def inspect_init(self):
        """
        Use the initialization PwCalculation to determine the required resource settings for the
        requested calculation based on the settings in the automatic_parallelization input
        """
        calculation = self.ctx.calculation_init

        if not calculation.is_finished_ok:
            return self.exit_codes.ERROR_INITIALIZATION_CALCULATION_FAILED

        # Get automated parallelization settings
        parallelization = get_pw_parallelization_parameters(
            calculation, **self.ctx.automatic_parallelization)

        node = ParameterData(dict=parallelization)
        self.out('automatic_parallelization', node)
        self.report('results of automatic parallelization in {}<{}>'.format(
            node.__class__.__name__, node.pk))

        options = self.ctx.inputs.options
        base_resources = options.get('resources', {})
        goal_resources = parallelization['resources']

        scheduler = calculation.get_computer().get_scheduler()
        resources = create_scheduler_resources(scheduler, base_resources,
                                               goal_resources)

        cmdline = self.ctx.inputs.settings.get('cmdline', [])
        cmdline = cmdline_remove_npools(cmdline)
        cmdline.extend(['-nk', str(parallelization['npools'])])

        # Set the new cmdline setting and resource options
        self.ctx.inputs.settings['cmdline'] = cmdline
        self.ctx.inputs.options = update_mapping(options,
                                                 {'resources': resources})

        # Remove the only initialization flag
        self.ctx.inputs.settings.pop('ONLY_INITIALIZATION')

        return

    def prepare_calculation(self):
        """
        Prepare the inputs for the next calculation
        """
        if self.ctx.restart_calc:
            self.ctx.inputs.parameters['CONTROL']['restart_mode'] = 'restart'
            self.ctx.inputs.parent_folder = self.ctx.restart_calc.out.remote_folder

    def _prepare_process_inputs(self, process, inputs):
        """
        The 'max_seconds' setting in the 'CONTROL' card of the parameters will be set to a fraction of the
        'max_wallclock_seconds' that will be given to the job via the 'options' dictionary. This will prevent the job
        from being prematurely terminated by the scheduler without getting the chance to exit cleanly.
        """
        max_wallclock_seconds = inputs.options['max_wallclock_seconds']
        max_seconds_factor = self.defaults.delta_factor_max_seconds
        max_seconds = max_wallclock_seconds * max_seconds_factor
        inputs.parameters['CONTROL']['max_seconds'] = max_seconds

        return super(PwBaseWorkChain,
                     self)._prepare_process_inputs(process, inputs)
Example #28
    def validate_inputs(self):
        """
        Validate inputs that might depend on each other and cannot be validated by the spec. Also define
        dictionary `inputs` in the context, that will contain the inputs for the calculation that will be launched
        in the `run_calculation` step.
        """
        self.ctx.inputs = AttributeDict({
            'code': self.inputs.code,
            'structure': self.inputs.structure,
            'parameters': self.inputs.parameters.get_dict()
        })

        if 'CONTROL' not in self.ctx.inputs.parameters:
            self.ctx.inputs.parameters['CONTROL'] = {}

        if 'calculation' not in self.ctx.inputs.parameters['CONTROL']:
            self.ctx.inputs.parameters['CONTROL']['calculation'] = 'scf'

        if 'parent_folder' in self.inputs:
            self.ctx.inputs.parent_folder = self.inputs.parent_folder
            self.ctx.inputs.parameters['CONTROL']['restart_mode'] = 'restart'
        else:
            self.ctx.inputs.parameters['CONTROL'][
                'restart_mode'] = 'from_scratch'

        if 'settings' in self.inputs:
            self.ctx.inputs.settings = self.inputs.settings.get_dict()
        else:
            self.ctx.inputs.settings = {}

        if 'options' in self.inputs:
            self.ctx.inputs.options = self.inputs.options.get_dict()
        else:
            self.ctx.inputs.options = {}

        if 'vdw_table' in self.inputs:
            self.ctx.inputs.vdw_table = self.inputs.vdw_table

        # Either automatic_parallelization or options has to be specified
        if not any([
                key in self.inputs
                for key in ['options', 'automatic_parallelization']
        ]):
            return self.exit_codes.ERROR_INVALID_INPUT_RESOURCES

        # If automatic parallelization is not enabled, we better make sure that the options satisfy minimum requirements
        if 'automatic_parallelization' not in self.inputs:
            num_machines = self.ctx.inputs.options.get('resources', {}).get(
                'num_machines', None)
            max_wallclock_seconds = self.ctx.inputs.options.get(
                'max_wallclock_seconds', None)

            if num_machines is None or max_wallclock_seconds is None:
                return self.exit_codes.ERROR_INVALID_INPUT_RESOURCES_UNDERSPECIFIED

        # Either a KpointsData with given mesh/path, or a desired distance between k-points should be specified
        if all([
                key not in self.inputs
                for key in ['kpoints', 'kpoints_distance']
        ]):
            return self.exit_codes.ERROR_INVALID_INPUT_KPOINTS

        try:
            self.ctx.inputs.kpoints = self.inputs.kpoints
        except AttributeError:
            structure = self.inputs.structure
            distance = self.inputs.kpoints_distance
            force_parity = self.inputs.get('kpoints_force_parity', Bool(False))
            self.ctx.inputs.kpoints = create_kpoints_from_distance(
                structure, distance, force_parity)

        # Validate the inputs related to pseudopotentials
        structure = self.inputs.structure
        pseudos = self.inputs.get('pseudos', None)
        pseudo_family = self.inputs.get('pseudo_family', None)

        try:
            self.ctx.inputs.pseudo = validate_and_prepare_pseudos_inputs(
                structure, pseudos, pseudo_family)
        except ValueError as exception:
            self.report('{}'.format(exception))
            return self.exit_codes.ERROR_INVALID_INPUT_PSEUDO_POTENTIALS
Example #29
def test_relax_wf(fresh_aiida_env, vasp_params, potentials, mock_vasp):
    """Test submitting only, not correctness, with mocked vasp code."""
    from aiida.orm import WorkflowFactory, Code
    from aiida import work

    rmq_config = None
    runner = work.Runner(poll_interval=0.,
                         rmq_config=rmq_config,
                         enable_persistence=True)
    work.set_runner(runner)

    base_wf_proc = WorkflowFactory('vasp.relax')

    mock_vasp.store()
    print(mock_vasp.get_remote_exec_path())
    comp = mock_vasp.get_computer()
    create_authinfo(computer=comp).store()

    structure = PoscarParser(
        file_path=data_path('test_relax_wf', 'inp', 'POSCAR')).get_quantity(
            'poscar-structure', {})['poscar-structure']
    kpoints = KpParser(
        file_path=data_path('test_relax_wf', 'inp', 'KPOINTS')).get_quantity(
            'kpoints-kpoints', {})['kpoints-kpoints']
    incar_add = IncarParser(
        file_path=data_path('test_relax_wf', 'inp', 'INCAR')).get_quantity(
            'incar', {})['incar'].get_dict()
    incar_add = {
        k: v
        for k, v in incar_add.items() if k not in ['isif', 'ibrion']
    }
    incar_add['system'] = 'test-case:test_relax_wf'

    restart_clean_workdir = get_data_node('bool', False)
    restart_clean_workdir.store()

    inputs = AttributeDict()
    inputs.code = Code.get_from_string('mock-vasp@localhost')
    inputs.structure = structure
    inputs.incar_add = get_data_node('parameter', dict=incar_add)
    inputs.kpoints = AttributeDict()
    inputs.kpoints.mesh = kpoints
    inputs.potcar_family = get_data_node('str', POTCAR_FAMILY_NAME)
    inputs.potcar_mapping = get_data_node('parameter', dict=POTCAR_MAP)
    inputs.options = get_data_node('parameter',
                                   dict={
                                       'queue_name': 'None',
                                       'resources': {
                                           'num_machines': 1,
                                           'num_mpiprocs_per_machine': 1
                                       }
                                   })
    inputs.max_iterations = get_data_node('int', 1)
    inputs.convergence = AttributeDict()
    inputs.convergence.shape = AttributeDict()
    inputs.convergence.on = get_data_node('bool', True)
    inputs.convergence.positions = get_data_node('float', 0.1)
    inputs.restart = AttributeDict()
    inputs.restart.clean_workdir = restart_clean_workdir
    inputs.relax = AttributeDict()

    results = work.run(base_wf_proc, **inputs)
    assert 'relaxed_structure' in results
Example #30
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
"""Module with default values of parameters used in Quantum ESPRESSO."""
from __future__ import absolute_import

from aiida.common.extendeddicts import AttributeDict

pw = AttributeDict({
    'conv_thr': 1e-6,
    'degauss': 0.,
    'diagonalization': 'david',
    'electron_maxstep': 100,
    'etot_conv_thr': 1e-4,
    'forc_conv_thr': 1e-3,
    'mixing_beta': 0.7,
    'mixing_mode': 'plain',
    'mixing_ndim': 8,
    'noncolin': False,
    'nspin': 1,
    'occupations': None,
    'press': 0.,
    'press_conv_thr': 0.5,
    'smearing': '',
    'startmag': 0.,
    'wf_collect': False,
})
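
Since ``pw`` is an AttributeDict, consumers can use attribute-style access interchangeably with regular item access; a minimal demonstration:

assert pw.conv_thr == 1e-6                  # attribute-style access
assert pw['mixing_beta'] == pw.mixing_beta  # item access still works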