示例#1
0
 def report_wf(self):
     """Collect and register the final outputs of the workflow.

     Exposes the remote folders from the PW and Yambo steps, optionally
     adds the Yambo calculation to a group, and emits the GW and PW
     result dictionaries as ParameterData outputs.
     """
     self.report('Final step.')
     from aiida.orm import DataFactory
     try:
         pw = self.ctx.pw_wf_res.out.pw.get_dict()
     except Exception:
         # PW sub-workflow results may be absent (e.g. restarted runs);
         # fall back to an empty dict rather than failing the report.
         pw = {}
     gw = self.ctx.yambo_res.out.gw.get_dict()
     self.out("yambo_remote_folder",
              self.ctx.yambo_res.out.yambo_remote_folder)
     self.out("scf_remote_folder", self.ctx.pw_wf_res.out.scf_remote_folder)
     self.out("nscf_remote_folder",
              self.ctx.pw_wf_res.out.nscf_remote_folder)
     if self.ctx.bands_groupname is not None:
         g_bands, _ = Group.get_or_create(name=self.ctx.bands_groupname)
         g_bands.add_nodes(self.ctx.yambo_res)
         # BUG FIX: 'yambo_res' was referenced as a bare, undefined name;
         # the node lives in the workflow context.
         self.report("Yambo calc (pk: {}) added to the group {}".format(
             self.ctx.yambo_res.pk, self.ctx.bands_groupname))
     else:
         # BUG FIX: message had an unbalanced parenthesis
         self.report("Yambo calc done (pk: {})".format(gw))
     self.out("gw", DataFactory('parameter')(dict=gw))
     self.out("pw", DataFactory('parameter')(dict=pw))
     self.report("workflow complete")
 def get_data():
     """Create, store and return a ParameterData node with test content."""
     from aiida.orm.utils import DataFactory
     ParameterData = DataFactory('parameter')
     node = ParameterData(dict={'data': 'test'})
     node.store()
     return node
示例#3
0
 def report_wf(self):
     """
     Output final quantities

     Collects the pks of the SCF/NSCF/Yambo calculations from the GW
     results dictionary, exposes the corresponding remote folders, and
     emits a summary ParameterData node under the "convergence" output.
     """
     nscf_pk = False
     scf_pk = False
     yambo_pk = False
     parameters = None
     from aiida.orm import DataFactory
     # Cache the results dict once; the original re-queried it on every access.
     gw_dict = self.ctx.r1.out.gw.get_dict()
     if 'nscf_pk' in gw_dict:
         nscf_pk = gw_dict['nscf_pk']
         self.out("nscf_remote_folder", self.ctx.r1.out.nscf_remote_folder)
     if 'scf_pk' in gw_dict:
         scf_pk = gw_dict['scf_pk']
         self.out("scf_remote_folder", self.ctx.r1.out.scf_remote_folder)
     if 'yambo_pk' in gw_dict:
         yambo_pk = gw_dict['yambo_pk']
         parameters = load_node(yambo_pk).inp.parameters.get_dict()
         self.out("yambo_remote_folder",
                  self.ctx.r1.out.yambo_remote_folder)
     self.out(
         "convergence",
         DataFactory('parameter')(
             dict={
                 "parameters": parameters,
                 # BUG FIX: previously an unguarded ['yambo_pk'] lookup
                 # that raised KeyError when the key was absent.
                 "yambo_pk": yambo_pk,
                 "convergence_space": self.ctx.conv_elem,
                 "energy_widths": self.ctx.en_diffs,
                 "nscf_pk": nscf_pk,
                 "scf_pk": scf_pk,
             }))
     self.report("completed 1-D convergence workflow")
示例#4
0
 def report(self):
     """Expose the step bookkeeping and the final structure as outputs."""
     from aiida.orm import DataFactory
     summary = {'steps': self.ctx.steps, 'step0': self.ctx.step0}
     self.out("steps", DataFactory('parameter')(dict=summary))
     self.out("structure", self.ctx.last_structure)
示例#5
0
    def test_statistics_default_class(self):
        """
        Test if the statistics query works properly.

        I try to implement it in a way that does not depend on the past state.
        """
        from aiida.orm import Node, DataFactory, Calculation
        from collections import defaultdict
        from aiida.backends.general.abstractqueries import AbstractQueryManager

        def store_and_add(n, statistics):
            # Store the node and mirror the bookkeeping the statistics
            # query is expected to perform.
            n.store()
            statistics['total'] += 1
            statistics['types'][n._plugin_type_string] += 1
            statistics['ctime_by_day'][n.ctime.strftime('%Y-%m-%d')] += 1

        class QueryManagerDefault(AbstractQueryManager):
            pass

        qmanager_default = QueryManagerDefault()

        # Start from the current DB state so the test does not assume an
        # empty database.
        current_db_statistics = qmanager_default.get_creation_statistics()
        types = defaultdict(int)
        types.update(current_db_statistics['types'])
        ctime_by_day = defaultdict(int)
        ctime_by_day.update(current_db_statistics['ctime_by_day'])

        expected_db_statistics = {
            'total': current_db_statistics['total'],
            'types': types,
            'ctime_by_day': ctime_by_day
        }

        ParameterData = DataFactory('parameter')

        store_and_add(Node(), expected_db_statistics)
        store_and_add(ParameterData(), expected_db_statistics)
        store_and_add(ParameterData(), expected_db_statistics)
        store_and_add(Calculation(), expected_db_statistics)

        new_db_statistics = qmanager_default.get_creation_statistics()
        # I only check a few fields
        # PORTABILITY FIX: dict.iteritems() does not exist on Python 3;
        # items() works on both interpreters (this file targets py2/3 via six).
        new_db_statistics = {
            k: v
            for k, v in new_db_statistics.items()
            if k in expected_db_statistics
        }

        expected_db_statistics = {
            k: dict(v) if isinstance(v, defaultdict) else v
            for k, v in expected_db_statistics.items()
        }

        # assertEquals is a deprecated alias of assertEqual
        self.assertEqual(new_db_statistics, expected_db_statistics)
def generate_scf_input_params(structure, codename, pseudo_family):
    """Build the inputs template for a PW SCF calculation on *structure*."""
    inputs = PwCalculation.process().get_inputs_template()

    # The structure
    inputs.structure = structure

    # Code and scheduler resources
    inputs.code = Code.get_from_string(codename.value)
    inputs._options.resources = {"num_machines": 1}
    inputs._options.max_wallclock_seconds = 30 * 60

    # Uniform 2x2x2 k-point mesh
    KpointsData = DataFactory("array.kpoints")
    mesh = KpointsData()
    mesh.set_kpoints_mesh([2, 2, 2])
    inputs.kpoints = mesh

    # Calculation parameters
    ParameterData = DataFactory("parameter")
    inputs.parameters = ParameterData(dict={
        "CONTROL": {
            "calculation": "scf",
            "tstress": True,  # Important that this stays to get stress
            "tprnfor": True,
        },
        "SYSTEM": {
            "ecutwfc": 30.,
            "ecutrho": 200.,
        },
        "ELECTRONS": {
            "conv_thr": 1.e-6,
        }
    })

    # Pseudopotentials
    inputs.pseudo = get_pseudos(structure, str(pseudo_family))

    return inputs
示例#7
0
 def report_wf(self):
     """Output final quantities"""
     from aiida.orm import DataFactory
     summary = {
         "kpoints": self.ctx.step0_res.out.convergence.get_dict(),
         "fft": self.ctx.step1_res.out.convergence.get_dict(),
         "bands": self.ctx.step2_res.out.convergence.get_dict(),
         "cutoff": self.ctx.step3_res.out.convergence.get_dict(),
         "ordered_step_output": self.ctx.ordered_outputs,
     }
     self.out("result", DataFactory('parameter')(dict=summary))
def rescale(structure, scale):
    """
    Workfunction to rescale a structure

    :param structure: An AiiDA structure to rescale
    :param scale: The scale factor
    :return: The rescaled structure
    """
    atoms = structure.get_ase().copy()
    atoms.set_cell(atoms.get_cell() * float(scale), scale_atoms=True)
    return DataFactory('structure')(ase=atoms)
示例#9
0
    def step_1(self, recheck=False):
        """Submit a 1-D FFTGvecs convergence sub-workflow.

        Seeds the sub-workflow with parameters converged in a preceding
        bands (step 2) or k-point (step 0) pass, when available.

        :param recheck: when True this is a consistency re-run; only the
            bookkeeping of which step produced the result differs.
        """
        self.report("converging  FFTGvecs")
        extra = {}
        if self.inputs.parent_scf_folder:
            extra['parent_scf_folder'] = self.inputs.parent_scf_folder
        if self.inputs.structure:
            extra['structure'] = self.inputs.structure
        if self.ctx.last_step == 'step_2_1':
            extra['parameters'] = ParameterData(
                dict=self.ctx.step2_res.out.convergence.get_dict()
                ['parameters'])
        # IDIOM FIX: truthiness test instead of '== True'
        if self.ctx.last_step == 'step_0_1' and self.ctx.step_0_done:
            extra['parameters'] = ParameterData(
                dict=self.ctx.step0_res.out.convergence.get_dict()
                ['parameters'])
        convergence_parameters = DataFactory('parameter')(
            dict={
                'variable_to_converge': 'FFT_cutoff',
                'conv_tol': 0.1,
                'start_value': 4,
                'step': 2,
                'max_value': 60
            })
        p2y_result = submit(
            YamboConvergenceWorkflow,
            pwcode=self.inputs.pwcode,
            precode=self.inputs.precode,
            yambocode=self.inputs.yambocode,
            calculation_set=self.inputs.calculation_set,
            calculation_set_pw=self.inputs.calculation_set_pw,
            parent_nscf_folder=load_node(self.ctx.nscf_calc).out.remote_folder,
            pseudo=self.inputs.pseudo,
            convergence_parameters=convergence_parameters,
            **extra)
        self.report("submitted 1-D FFT convergence workflow")
        # BUG FIX: a 'reslt' dict was filled here but never read (dead code)
        if not recheck:
            self.ctx.last_step = 'step_1_1'
            self.ctx.first_runs1 = False
            self.ctx.step_1_done = True
        else:
            self.ctx.last_step = 'step_1_2'

        # NOTE(review): stored on the instance, not self.ctx — other steps
        # in this file use self.ctx.<step>_res_; confirm this is intentional.
        self.step1_res_ = p2y_result
示例#10
0
 def report_wf(self):
     """
     Output final quantities
     return information that may be used to figure out
     the status of the calculation.
     """
     from aiida.orm import DataFactory
     last_calc = load_node(self.ctx.yambo_pks[-1])
     success = last_calc.get_state() == 'FINISHED'
     summary = {"yambo_pk": self.ctx.yambo_pks[-1], "success": success}
     self.out("gw", DataFactory('parameter')(dict=summary))
     self.out("yambo_remote_folder", last_calc.out.remote_folder)
     self.report("workflow completed ")
示例#11
0
 def report_wf(self):
     """
     Output final quantities
     return information that may be used to figure out
     the status of the calculation.
     """
     self.report("Workflow Complete : scf {}  nscf {} success {}".format(
         self.ctx.scf_pk, self.ctx.nscf_pk, self.ctx.success))
     from aiida.orm import DataFactory
     res = {}
     if self.ctx.scf_pk:
         res['scf_pk'] = self.ctx.scf_pk
     if self.ctx.nscf_pk:
         res['nscf_pk'] = self.ctx.nscf_pk
     res['success'] = self.ctx.success
     self.out("pw", DataFactory('parameter')(dict=res))
     # BUG FIX: the remote folders were emitted unconditionally, so a
     # missing scf_pk/nscf_pk (already guarded above) crashed load_node()
     if self.ctx.scf_pk:
         self.out("scf_remote_folder",
                  load_node(self.ctx.scf_pk).out.remote_folder)
     if self.ctx.nscf_pk:
         self.out("nscf_remote_folder",
                  load_node(self.ctx.nscf_pk).out.remote_folder)
示例#12
0
    def step_0(self, recheck=False):
        """Submit the K-point convergence sub-workflow.

        Seeds the sub-workflow with parameters converged in a preceding
        FFT (step 1) pass, when available.

        :param recheck: when True this is a consistency re-run; only the
            bookkeeping of which step produced the result differs.
        """
        self.report("Working on K-point convergence ")
        extra = {}
        if self.inputs.parent_scf_folder:
            extra['parent_scf_folder'] = self.inputs.parent_scf_folder
        if self.inputs.structure:
            extra['structure'] = self.inputs.structure
        if self.ctx.last_step == 'step_1_1':
            extra['parameters'] = self.ctx.step1_res.out.convergence.get_dict(
            )['parameters']

        self.report("converging K-points ")
        convergence_parameters = DataFactory('parameter')(
            dict={
                'variable_to_converge': 'kpoints',
                'conv_tol': 0.1,
                'start_value': .9,
                'step': .1,
                'max_value': 0.017
            })

        p2y_result = submit(YamboConvergenceWorkflow,
                            pwcode=self.inputs.pwcode,
                            precode=self.inputs.precode,
                            yambocode=self.inputs.yambocode,
                            calculation_set=self.inputs.calculation_set,
                            calculation_set_pw=self.inputs.calculation_set_pw,
                            pseudo=self.inputs.pseudo,
                            convergence_parameters=convergence_parameters,
                            **extra)
        self.report("Submitted the K-point convergence step ")
        # BUG FIX: a 'reslt' dict was filled here but never read (dead code)
        if not recheck:
            self.ctx.last_step = 'step_0_1'
            self.ctx.step_0_done = True
        else:
            self.ctx.last_step = 'step_0_2'
        # NOTE(review): stored on the instance, not self.ctx — confirm
        # downstream readers expect self.step0_res_.
        self.step0_res_ = p2y_result
        self.ctx.first_run = False
示例#13
0
    def report_wf(self):
        """Output final quantities

        this reports the parameters used in the converged calculation, the pk of that calculation,
        a listing of the space sampled for convergence and the gaps computed at those points
        in the convergence space.
        """
        nscf_pk = False
        scf_pk = False
        yambo_pk = False
        parameters = None
        from aiida.orm import DataFactory
        if 'pw' in self.ctx.zero_calc.out:
            # Cache the dict once; the original re-queried it on every access.
            pw_dict = self.ctx.zero_calc.out.pw.get_dict()
            if 'nscf_pk' in pw_dict:
                nscf_pk = pw_dict['nscf_pk']
                self.out("nscf_remote_folder",
                         self.ctx.zero_calc.out.nscf_remote_folder)
            if 'scf_pk' in pw_dict:
                scf_pk = pw_dict['scf_pk']
                self.out("scf_remote_folder",
                         self.ctx.zero_calc.out.scf_remote_folder)
        gw_dict = self.ctx.zero_calc.out.gw.get_dict()
        if 'yambo_pk' in gw_dict:
            yambo_pk = gw_dict['yambo_pk']
            parameters = load_node(yambo_pk).inp.parameters.get_dict()
            self.out("yambo_remote_folder",
                     self.ctx.zero_calc.out.yambo_remote_folder)
        self.out(
            "convergence",
            DataFactory('parameter')
            (dict={
                "parameters": parameters,
                # BUG FIX: previously an unguarded ['yambo_pk'] lookup
                # that raised KeyError when the key was absent.
                "yambo_pk": yambo_pk,
                "convergence_space": self.ctx.conv_elem,
                "energy_widths": self.ctx.en_diffs,
                "nscf_pk": nscf_pk,
                "scf_pk": scf_pk,
            }))
        self.report("completed 1-D convergence workflow")
示例#14
0
 def update_parameters(self, paging):
     """Advance every convergence variable by *paging* steps.

     Recomputes the starting point for this iteration, updates each field
     under convergence via ``update_parameter_field``, records the new
     values in ``self.ctx.conv_elem`` and stores the updated parameters
     back into the context as a fresh ParameterData node.

     :param paging: number of steps to advance within this iteration
     """
     params = self.ctx.parameters.get_dict()
     # PERF FIX: the starting point does not depend on the field, so it is
     # computed (and its error diagnostics reported) once, not per field.
     starting_point = self.ctx.start_value +\
                      self.ctx.step * self.ctx.loop_length * self.ctx.iteration
     if starting_point < 0:
         self.report(
             "*** ERROR:  updating in the negative direction, please check: starting_point {}"
             .format(starting_point))
         self.report("*** ERROR:  self.ctx.start_value  {}".format(
             self.ctx.start_value))
         self.report("*** ERROR:  self.ctx.step  {}".format(
             self.ctx.step))
         self.report("*** ERROR:  self.ctx.loop_length  {}".format(
             self.ctx.loop_length))
         self.report("*** ERROR:  self.ctx.iteration  {}".format(
             self.ctx.iteration))
     for field in self.ctx.conv_elem.keys():
         params[field] = update_parameter_field(field, starting_point,
                                                self.ctx.step * paging)
         self.ctx.conv_elem[field].append(params[field])
     self.report(" extended convergence points: {}".format(
         self.ctx.conv_elem))
     self.ctx.parameters = DataFactory('parameter')(dict=params)
示例#15
0
# -*- coding: utf-8 -*-
"""Raspa input plugin."""
from __future__ import absolute_import
from aiida.common.datastructures import CalcInfo, CodeInfo
from aiida.common.exceptions import InputValidationError
from aiida.common.utils import classproperty
from aiida.orm.calculation.job import JobCalculation
from aiida.orm.utils import DataFactory
from six.moves import map
from six.moves import range
from ase.io import read, write
from shutil import copyfile

# data objects
CifData = DataFactory('cif')
FolderData = DataFactory('folder')
ParameterData = DataFactory('parameter')
RemoteData = DataFactory('remote')
SinglefileData = DataFactory('singlefile')


class RaspaCalculation(JobCalculation):
    """This is a RaspaCalculation, subclass of JobCalculation,
    to prepare input for an RaspaCalculation.
    For information on RASPA, refer to: https://github.com/numat/RASPA2.
    """

    # --------------------------------------------------------------------------
    def _init_internal_params(self):
        """
        Set parameters of instance
示例#16
0
    def init_parameters(self, paging):
        """Initialize the settings and parameters needed for a convergence calculation.

        Stores every needed parameter in a ``self.ctx`` variable, generating
        defaults (via the ``yambo_utils`` helpers such as
        ``set_default_qp_param`` / ``set_default_pw_param``) for optional
        inputs that were not provided.  This function is called only once;
        afterwards only the update-parameter step runs.

        :param paging: number of steps to advance when pre-extending the
            convergence grid at the end of initialization
        """
        convergence_parameters_dict = self.inputs.convergence_parameters.get_dict(
        )
        # NOTE(review): convergence_parameters_dict is not referenced again
        # in this function — confirm whether it is still needed.
        # Fall back to the GW calculation_set when no PW-specific one is given.
        # NOTE(review): this writes into self.inputs rather than self.ctx —
        # confirm that mutating the inputs namespace is intended.
        if 'calculation_set_pw' not in self.inputs.keys():
            self.inputs.calculation_set_pw = DataFactory('parameter')(
                dict=self.inputs.calculation_set.get_dict())

        # The p2y step runs on a single MPI task unless an explicit
        # calculation_set_p2y was provided.
        if 'calculation_set_p2y' not in self.inputs.keys():
            main_set = self.inputs.calculation_set.get_dict()
            main_set['resources'] = {
                "num_machines": 1,
                "num_mpiprocs_per_machine": 1
            }
            self.ctx.calculation_set_p2y = DataFactory('parameter')(
                dict=main_set)
        else:
            self.ctx.calculation_set_p2y = self.inputs.calculation_set_p2y

        # Default QP parameters when none were supplied.
        if 'parameters' not in self.inputs.keys():
            self.ctx.parameters = set_default_qp_param()
        else:
            self.ctx.parameters = self.inputs.parameters

        if 'parent_scf_folder' in self.inputs.keys():
            self.report("parent_scf folder was set")
            # Resolve the calculation that created the given remote folder.
            parent_calc = self.inputs.parent_scf_folder.get_inputs_dict(
                link_type=LinkType.CREATE)['remote_folder']
            if isinstance(parent_calc, PwCalculation):
                if parent_calc.get_state() == 'FINISHED':
                    # Inherit settings/structure from the finished parent PW
                    # run when they were not given explicitly.
                    if 'settings_pw' not in self.inputs.keys():
                        self.ctx.settings_pw = parent_calc.inp.settings
                    if 'structure' not in self.inputs.keys():
                        self.inputs.structure = parent_calc.inp.structure
                    if 'pseudo' not in self.inputs.keys():
                        raise InputValidationError("Pseudo should be provided")
                    # Reuse the parent parameters under the matching key,
                    # depending on whether the parent was an SCF or NSCF run.
                    if parent_calc.get_inputs_dict(
                            link_type=LinkType.CREATE)['parameters'].get_dict(
                            )['CONTROL']['calculation'] == 'scf':
                        if 'parameters_pw' not in self.inputs.keys():
                            self.ctx.parameters_pw = parent_calc.inp.parameters
                    else:
                        if 'parameters_pw_nscf' not in self.inputs.keys():
                            self.ctx.parameters_pw_nscf = parent_calc.inp.parameters
                    # NOTE(review): if the nscf branch above was taken and no
                    # parameters_pw was ever set, self.ctx.parameters_pw may
                    # be missing here and this report would raise — confirm.
                    self.report("parent_scf_folder defined params {}".format(
                        self.ctx.parameters_pw.get_dict()))

            if isinstance(parent_calc, YamboCalculation):
                if parent_calc.get_state() == 'FINISHED':
                    if 'parameters' not in self.inputs.keys():
                        self.report(
                            "setting default parameters, parent was yambocalc "
                        )
                        self.ctx.parameters = set_default_qp_param()

            # K-point convergence needs full PW defaults even with a parent.
            if 'kpoints' == self.ctx.variable_to_converge:
                if 'settings_pw' not in self.inputs.keys():
                    self.ctx.settings_pw = default_pw_settings()
                if 'parameters_pw' not in self.inputs.keys():
                    self.ctx.parameters_pw = set_default_pw_param()
                if 'parameters_pw_nscf' not in self.inputs.keys():
                    self.ctx.parameters_pw_nscf = set_default_pw_param(
                        nscf=True)

        else:
            # No parent SCF folder: everything needed must either be given
            # explicitly or generated from defaults.
            if 'kpoints' == self.ctx.variable_to_converge:
                self.report(
                    " initializing in a kpoints convergence calculation")
                if 'settings_pw' not in self.inputs.keys():
                    self.ctx.settings_pw = default_pw_settings()
                if 'parameters_pw' not in self.inputs.keys():
                    self.report(
                        "  parameters_pw were not found setting them to default pw params"
                    )
                    self.ctx.parameters_pw = set_default_pw_param()
                if 'parameters_pw_nscf' not in self.inputs.keys():
                    self.report(
                        "  parameters_pw_nscf were not found setting them to default pw params"
                    )
                    self.ctx.parameters_pw_nscf = set_default_pw_param(
                        nscf=True)
            if 'kpoints' != self.ctx.variable_to_converge:
                if 'settings_pw' not in self.inputs.keys():
                    self.ctx.settings_pw = default_pw_settings()
                if 'parameters' not in self.inputs.keys():
                    self.ctx.parameters = set_default_qp_param()
            # Without a parent folder these inputs are mandatory.
            if 'structure' not in self.inputs.keys():
                raise InputValidationError(
                    "Structure should be provided if parent PW SCF folder is not given when converging kpoints"
                )
            if 'pseudo' not in self.inputs.keys():
                raise InputValidationError(
                    "Pseudo should be provided if parent PW calculation is not given when converging kpoints"
                )
            if 'pwcode' not in self.inputs.keys():
                raise InputValidationError(
                    "PW code  should be provided when converging kpoints")
            if 'kpoints'!=self.ctx.variable_to_converge and\
                                                   'parent_nscf_folder' not in self.inputs.keys():
                raise InputValidationError(
                    "Parent nscf folder should be provided when not converging kpoints"
                )

        if 'parent_nscf_folder' in self.inputs.keys():
            parent_calc = load_node(
                self.inputs.parent_nscf_folder.get_inputs_dict()
                ['remote_folder'].pk)
            if isinstance(parent_calc, PwCalculation):
                # Derive a default QPkrange from the finished NSCF parent when
                # the user did not supply one.
                if parent_calc.get_inputs_dict()['parameters'].get_dict()['CONTROL']['calculation'] == 'nscf'\
                                    and parent_calc.get_state()== 'FINISHED'\
                                    and 'QPkrange' not in self.ctx.parameters.get_dict().keys():
                    self.ctx.parameters = default_qpkrange(
                        parent_calc.pk, self.ctx.parameters)
                    self.report(
                        "QPkrange not in  self.ctx.parameters.get_dict keys ")

        # An explicit settings_pw input always wins over inherited/defaults.
        if 'settings_pw' in self.inputs.keys():
            self.ctx.settings_pw = self.inputs.settings_pw

        params = self.ctx.parameters.get_dict()
        if 'kpoints' != self.ctx.variable_to_converge:
            # Pre-populate the convergence grid for each variable under
            # convergence, skipping a point identical to the seed value.
            for field in self.ctx.conv_elem.keys():
                self.report(
                    "self.ctx.start_value {}  self.ctx.step {}  self.ctx.loop_length {}  self.ctx.iteration {}"
                    .format(self.ctx.start_value, self.ctx.step,
                            self.ctx.loop_length, self.ctx.iteration))
                starting_point = self.ctx.start_value +\
                                 self.ctx.step * self.ctx.loop_length * self.ctx.iteration
                new_ind_var = update_parameter_field(field, starting_point,
                                                     self.ctx.step * paging)
                if len(self.ctx.conv_elem[field]) == 1:
                    if self.ctx.conv_elem[field][0] == new_ind_var:
                        pass
                    else:
                        params[field] = new_ind_var
                        self.ctx.conv_elem[field].append(params[field])
                        self.report("initialized {} ".format(params[field]))
                else:
                    params[field] = new_ind_var
                    self.ctx.conv_elem[field].append(params[field])
                    self.report("initialized {} ".format(params[field]))
            self.ctx.parameters = DataFactory('parameter')(dict=params)
        self.report('Initialization step completed.')
示例#17
0
    def step_2(self, recheck=False):
        """This calls the YamboConvergenceWorkflow as a subworkflow, converging the  Bands.

        We converge the Bands,  using the converged kpoints from  a preceeding kpoints,
        converged FFT from a preceeding FFT convergence.
        This is  a 1-D convergence performed by a subworkflow.

        :param recheck: when True this is a cross-check pass against the
            converged W-cutoff (step 3) rather than the first bands run.
        """
        self.report("Working on Bands Convergence ")
        # NOTE(review): nelec/nbands are only referenced by the trailing
        # comments below (alternative default settings); kept for reference.
        nelec = load_node(self.ctx.nscf_calc).out.output_parameters.get_dict(
        )['number_of_electrons']
        nbands = load_node(self.ctx.nscf_calc).out.output_parameters.get_dict(
        )['number_of_bands']
        self.ctx.MAX_B_VAL = self.ctx.convergence_settings.dict.max_bands  #   int(nelec*8)
        band_cutoff = self.ctx.convergence_settings.dict.start_bands  #  min(nelec,nbands)
        extra = {}
        # Forward optional inputs to the sub-workflow only when present.
        if 'restart_options_pw' in self.inputs.keys():
            extra['restart_options_pw'] = self.inputs.restart_options_pw
        if 'restart_options_gw' in self.inputs.keys():
            extra['restart_options_gw'] = self.inputs.restart_options_gw
        if 'settings_pw_nscf' in self.inputs.keys():
            extra['settings_pw_nscf'] = self.inputs.settings_pw_nscf
        if 'settings_pw' in self.inputs.keys():
            extra['settings_pw'] = self.inputs.settings_pw
        if 'settings_p2y' in self.inputs.keys():
            extra['settings_pw_p2y'] = self.inputs.settings_p2y
        if 'calculation_set_pw_nscf' in self.inputs.keys():
            extra[
                'calculation_set_pw_nscf'] = self.inputs.calculation_set_pw_nscf
        if 'parent_scf_folder' in self.inputs.keys():
            extra['parent_scf_folder'] = self.inputs.parent_scf_folder
        if 'parent_nscf_folder' in self.inputs.keys():
            extra['parent_nscf_folder'] = self.inputs.parent_nscf_folder
        if 'structure' in self.inputs.keys():
            extra['structure'] = self.inputs.structure
        if self.ctx.last_step == 'step_3_1':
            if self.ctx.step3_res.out.convergence.get_dict()['parameters'][
                    'NGsBlkXp'] == self.ctx.convergence_settings.dict.start_w_cutoff:  # 1 == default
                self.report(
                    " converged cutt-off are similar to the default used in a previous band convergence, consistency achieved"
                )
                self.ctx.step_2_done = True
                self.ctx.bands_n_cutoff_consistent = True
                return
            extra['parameters'] = ParameterData(
                dict=self.ctx.step3_res.out.convergence.get_dict()
                ['parameters'])
            self.report(
                "updated the bands convergence parameters with cut-off from cutoff convergence step"
            )
        if self.ctx.last_step == 'step_3_2':
            # CRUNCH TIME:  use  values from step3_2
            if self.ctx.step3_res.out.convergence.get_dict(
            )['parameters']['NGsBlkXp'] <= self.ctx.last_used_cutoff:  #
                # we are done, and consistent:
                self.report(
                    " converged cutt-off are similar to those used in a previous bands convergence, consistency achieved"
                )
                self.ctx.step_2_done = True
                self.ctx.bands_n_cutoff_consistent = True
                return
            self.report(
                "passing parameters from  converged cut-off, this can be repeated untill the two parameters are consistent "
            )
            extra['parameters'] = ParameterData(
                dict=self.ctx.step3_res.out.convergence.get_dict()
                ['parameters'])
            self.report(
                "updated the bands convegence parameters with cut-off from cutoff convergence"
            )
        if self.ctx.last_step != 'step_1_1' and self.ctx.last_step != 'step_1_2':  # last iteration was W_cutoff not FFT
            self.ctx.last_used_cutoff = self.ctx.step3_res.out.convergence.get_dict(
            )['parameters']['NGsBlkXp']
            self.report("Cut-off in last W-Cutoff convergence:  {}".format(
                self.ctx.last_used_cutoff))

        if self.ctx.last_step == 'step_1_1':
            params = self.ctx.step1_res.out.convergence.get_dict(
            )['parameters']
            extra['parameters'] = ParameterData(dict=params)
        convergence_parameters = DataFactory('parameter')(
            dict={
                'variable_to_converge': 'bands',
                'conv_tol': float(self.inputs.threshold),
                'start_value': band_cutoff,
                'step': 10,  #band_cutoff
                'max_value': self.ctx.MAX_B_VAL
            })
        self.report("converging  BndsRnXp, GbndRnge")

        p2y_result = submit(
            YamboConvergenceWorkflow,
            pwcode=self.inputs.pwcode,
            precode=self.inputs.precode,
            yambocode=self.inputs.yambocode,
            calculation_set=self.inputs.calculation_set,
            calculation_set_pw=self.inputs.calculation_set_pw,
            parent_nscf_folder=load_node(self.ctx.nscf_calc).out.remote_folder,
            pseudo=self.inputs.pseudo,
            convergence_parameters=convergence_parameters,
            **extra)
        self.report("submitted 1-D bands convergence workflow")
        # BUG FIX: removed dead code — an unused 'reslt' dict and a local
        # 'step2_1_res' binding that was never read.
        if not recheck:
            self.ctx.last_step = 'step_2_1'
            self.ctx.first_runs2 = False
        else:
            self.step2_2_res = p2y_result
            self.ctx.last_step = 'step_2_2'
            self.ctx.step_2_done = True
        self.ctx.step2_res_ = p2y_result
示例#18
0
    def step_0(self, recheck=False):
        """Run the K-point convergence as a YamboConvergenceWorkflow subworkflow.

        SCF and {NSCF+GW} are repeated at progressively denser k-meshes until the
        GW result is converged; this is a 1-D scan delegated to the subworkflow.

        :param recheck: False on the first pass, True when re-checking convergence.
        """
        self.report("Working on K-point convergence ")
        # Forward every optional workflow input to the subworkflow; note that
        # 'settings_p2y' is renamed to 'settings_pw_p2y' on the way through.
        optional_forward = (
            ('restart_options_pw', 'restart_options_pw'),
            ('restart_options_gw', 'restart_options_gw'),
            ('settings_pw_nscf', 'settings_pw_nscf'),
            ('settings_pw', 'settings_pw'),
            ('settings_p2y', 'settings_pw_p2y'),
            ('calculation_set_pw_nscf', 'calculation_set_pw_nscf'),
            ('parent_scf_folder', 'parent_scf_folder'),
            ('parent_nscf_folder', 'parent_nscf_folder'),
            ('structure', 'structure'),
        )
        extra = {}
        for in_key, out_key in optional_forward:
            if in_key in self.inputs.keys():
                extra[out_key] = getattr(self.inputs, in_key)
        # Seed parameters from the previous FFT convergence step, if that was
        # the last step run; explicit user inputs below override this.
        if self.ctx.last_step == 'step_1_1':
            extra['parameters'] = self.ctx.step1_res.out.convergence.get_dict(
            )['parameters']
        for param_key in ('parameters', 'parameters_pw', 'parameters_pw_nscf'):
            if param_key in self.inputs.keys():
                extra[param_key] = getattr(self.inputs, param_key)
        self.report("converging K-points ")
        conv_settings = self.ctx.convergence_settings.dict
        convergence_parameters = DataFactory('parameter')(
            dict={
                'variable_to_converge': 'kpoints',
                'conv_tol': float(self.inputs.threshold),
                'start_value': conv_settings.kpoint_starting_distance,
                'step': .1,  # IGNORE STEP
                'max_value': conv_settings.kpoint_min_distance
            })  # 0.34 , 0.0250508117676

        p2y_result = submit(YamboConvergenceWorkflow,
                            pwcode=self.inputs.pwcode,
                            precode=self.inputs.precode,
                            yambocode=self.inputs.yambocode,
                            calculation_set=self.inputs.calculation_set,
                            calculation_set_pw=self.inputs.calculation_set_pw,
                            pseudo=self.inputs.pseudo,
                            convergence_parameters=convergence_parameters,
                            **extra)
        self.report("Submitted the K-point convergence step ")
        reslt = {}
        if recheck:
            reslt['step0_2_res'] = p2y_result
            self.ctx.last_step = 'step_0_2'
        else:
            reslt['step0_1_res'] = p2y_result
            self.ctx.last_step = 'step_0_1'
            self.ctx.step_0_done = True
        self.ctx.step0_res_ = p2y_result
        self.ctx.first_run = False
示例#19
0
    def yambo_should_restart(self):
        """This function encodes the logic to restart calculations from failures

        This function supports detecting failed/incomplete yambo runs, taking corrective
        action and resubmitting automatically. These classes of errors are taken care of:
        1. Memory probelms
        2. Parallelism problems
        3. Some input inconsistency problems (too low number of bands)
        4. Queue time exhaustion.
        The calculation is restarted upto a maximum number of retries.

        :return: True if a (possibly modified) restart should be submitted,
                 False otherwise.
        """
        self.report("Checking if yambo restart is needed")
        # Hard cap on the number of retries.
        if self.ctx.restart >= self.ctx.max_restarts:
            self.report(
                "I will not restart: maximum restarts reached: {}".format(
                    self.ctx.max_restarts))
            return False

        self.report(
            "I can restart (# {}), max restarts ({}) not reached yet".format(
                self.ctx.restart, self.ctx.max_restarts))
        calc = load_node(self.ctx.yambo_pks[-1])
        # After a P2Y initialisation we always proceed to the actual run.
        if self.ctx.last == 'INITIALISE':
            return True

        # A submission failure is a user/setup problem: do not retry blindly.
        if calc.get_state() == calc_states.SUBMISSIONFAILED:
            self.report(
                "I will not resubmit calc pk: {}, submission failed: {}, check the log or you settings "
                .format(calc.pk, calc.get_state()))
            return False

        max_input_seconds = self.ctx.calculation_set.get_dict(
        )['max_wallclock_seconds']

        last_time = 30  # seconds default value:
        try:
            last_time = calc.get_outputs_dict()['output_parameters'].get_dict(
            )['last_time']
        except Exception:
            pass  # Likely no logs were produced

        # Queue-time exhaustion: the run died within the last 1% of its
        # walltime; restart with 30% more wallclock.
        if calc.get_state() == calc_states.FAILED and (
                float(max_input_seconds) -
                float(last_time)) / float(max_input_seconds) * 100.0 < 1:
            max_input_seconds = int(max_input_seconds * 1.3)  # 30% increase
            calculation_set = self.ctx.calculation_set.get_dict()
            calculation_set['max_wallclock_seconds'] = max_input_seconds
            self.ctx.calculation_set = DataFactory('parameter')(
                dict=calculation_set)
            self.report(
                "Failed calculation, likely queue time exhaustion, restarting with new max_input_seconds = {}"
                .format(max_input_seconds))
            return True

        # BUG FIX: output_p was previously defined only inside the branch below,
        # so the later 'errors' check could raise a NameError when the branch
        # was skipped. Initialize it unconditionally.
        output_p = {}
        # BUG FIX: the second comparison used the bound method `calc.get_state`
        # (never called), which compares a method object to a string and is
        # always True; it must be called like the first one.
        if calc.get_state() != calc_states.PARSINGFAILED and calc.get_state(
        ) != calc_states.FINISHED:  # special case for parallelization needed
            if 'output_parameters' in calc.get_outputs_dict():
                output_p = calc.get_outputs_dict(
                )['output_parameters'].get_dict()
            if 'para_error' in output_p.keys():
                if output_p[
                        'para_error'] == True:  # Change parallelism or add missing parallelism inputs
                    self.report(" parallelism error detected")
                    params = self.ctx.parameters.get_dict()
                    X_all_q_CPU = params.pop('X_all_q_CPU', '')
                    X_all_q_ROLEs = params.pop('X_all_q_ROLEs', '')
                    SE_CPU = params.pop('SE_CPU', '')
                    SE_ROLEs = params.pop('SE_ROLEs', '')
                    calculation_set = self.ctx.calculation_set.get_dict()
                    # Reduce the CPU counts per role and adapt the scheduler
                    # resources accordingly.
                    params[
                        'X_all_q_CPU'], calculation_set = reduce_parallelism(
                            'X_all_q_CPU', X_all_q_ROLEs, X_all_q_CPU,
                            calculation_set)
                    params['SE_CPU'], calculation_set = reduce_parallelism(
                        'SE_CPU', SE_ROLEs, SE_CPU, calculation_set)
                    params["X_all_q_ROLEs"] = X_all_q_ROLEs
                    params["SE_ROLEs"] = SE_ROLEs
                    self.ctx.calculation_set = DataFactory('parameter')(
                        dict=calculation_set)
                    self.ctx.parameters = DataFactory('parameter')(dict=params)
                    self.report(
                        "Calculation {} failed from a parallelism problem: {}".
                        format(calc.pk, output_p['errors']))
                    self.report("Old parallelism {}= {} , {} = {} ".format(
                        X_all_q_ROLEs, X_all_q_CPU, SE_ROLEs, SE_CPU))
                    self.report("New parallelism {}={} , {} = {}".format(
                        X_all_q_ROLEs, params['X_all_q_CPU'], SE_ROLEs,
                        params['SE_CPU']))
                    return True
            if 'unphysical_input' in output_p.keys():
                if output_p['unphysical_input'] == True:
                    # this handles this type of error: "[ERROR][NetCDF] NetCDF: NC_UNLIMITED in the wrong index"
                    # we should reset the bands to a larger value, it may be too small.
                    # this is a probable cause, and it may not be the real problem, but often is the cause.
                    self.report(
                        "the calculation failed due to a problematic input, defaulting to increasing bands"
                    )
                    params = self.ctx.parameters.get_dict()
                    bandX = params.pop('BndsRnXp', None)
                    bandG = params.pop('GbndRnge', None)
                    if bandX:
                        bandX = (bandX[0], int(bandX[0] * 1.5))  # 50% more bands
                        params['BndsRnXp'] = bandX
                    if bandG:
                        bandG = (bandG[0], int(bandG[0] * 1.5))  # 50% more bands
                        params['GbndRnge'] = bandG
                    self.ctx.parameters = DataFactory('parameter')(dict=params)
                    return True

            if 'errors' in output_p.keys() and calc.get_state(
            ) == calc_states.FAILED:
                if len(output_p['errors']) < 1:
                    # No errors, We  check for memory issues, indirectly
                    if 'last_memory_time' in output_p.keys():
                        # check if the last alloc happened close to the end:
                        last_mem_time = output_p['last_memory_time']
                        if abs(last_time - last_mem_time
                               ) < 3:  # 3 seconds  selected arbitrarily,
                            # this is (based on a simple heuristic guess, a memory related problem)
                            # change the parallelization to account for this before continuing, warn user too.
                            params = self.ctx.parameters.get_dict()
                            X_all_q_CPU = params.pop('X_all_q_CPU', '')
                            X_all_q_ROLEs = params.pop('X_all_q_ROLEs', '')
                            SE_CPU = params.pop('SE_CPU', '')
                            SE_ROLEs = params.pop('SE_ROLEs', '')
                            calculation_set = self.ctx.calculation_set.get_dict(
                            )
                            params[
                                'X_all_q_CPU'], calculation_set = reduce_parallelism(
                                    'X_all_q_CPU', X_all_q_ROLEs, X_all_q_CPU,
                                    calculation_set)
                            params[
                                'SE_CPU'], calculation_set = reduce_parallelism(
                                    'SE_CPU', SE_ROLEs, SE_CPU,
                                    calculation_set)
                            params["X_all_q_ROLEs"] = X_all_q_ROLEs
                            params["SE_ROLEs"] = SE_ROLEs
                            self.ctx.calculation_set = DataFactory(
                                'parameter')(dict=calculation_set)
                            self.ctx.parameters = DataFactory('parameter')(
                                dict=params)
                            # BUG FIX: the placeholder was never filled because
                            # .format() was missing.
                            self.report(
                                "Calculation  {} failed likely from memory issues"
                                .format(calc.pk))
                            self.report(
                                "Old parallelism {}= {} , {} = {} ".format(
                                    X_all_q_ROLEs, X_all_q_CPU, SE_ROLEs,
                                    SE_CPU))
                            self.report(
                                "New parallelism selected {}={}, {} = {} ".
                                format(X_all_q_ROLEs, params['X_all_q_CPU'],
                                       SE_ROLEs, params['SE_CPU']))
                            return True
                        else:
                            pass

        # Fallback: failed, or produced no outputs at all -> plain restart.
        # (SUBMISSIONFAILED already returned False above; kept for safety.)
        if calc.get_state() == calc_states.SUBMISSIONFAILED\
                   or calc.get_state() == calc_states.FAILED\
                   or 'output_parameters' not in calc.get_outputs_dict():
            self.report(
                "Calculation {} failed or did not genrerate outputs for unknow reason, restarting with no changes"
                .format(calc.pk))
            return True
        return False
示例#20
0
 def step_3(self, recheck=False):
     """Converge the W (screening) cut-off via the YamboConvergenceWorkflow.

     Uses the band count from the preceding bands convergence (step_2); when
     entered as a re-check after a bands re-convergence (step_2_2) it may
     short-circuit if bands and cut-off are already mutually consistent.

     :param recheck: False on the first pass, True on a consistency re-check.
     """
     self.report("Working on W-cutoff ")
     w_cutoff = 1
     extra = {}
     if self.inputs.parent_scf_folder:
         extra['parent_scf_folder'] = self.inputs.parent_scf_folder
     if self.inputs.structure:
         extra['structure'] = self.inputs.structure
     if self.ctx.last_step == 'step_2_2':
         # Consistency check: if the re-converged bands did not increase past
         # the bands used for the previous cut-off scan, we are done.
         if self.ctx.last_used_band <= self.ctx.step2_res.out.convergence.get_dict(
         )['parameters']['BndsRnXp'][-1]:
             self.report(
                 "bands input is similar to that used in a previous cut-off convergence, consistency achieved"
             )
             self.ctx.step_3_done = True
             self.ctx.bands_n_cutoff_consistent = True
             return
         self.report("passing parameters from  re-converged bands ")
         extra['parameters'] = ParameterData(
             dict=self.ctx.step2_res.out.convergence.get_dict()
             ['parameters'])
         # Restart the scan slightly below the last converged cut-off.
         w_cutoff = int(self.ctx.last_used_cutoff * 0.7)
     if self.ctx.last_step == 'step_2_1':
         self.report("passing parameters from  converged bands ")
         # use cut-off from 2_1
         extra['parameters'] = ParameterData(
             dict=self.ctx.step2_res.out.convergence.get_dict()
             ['parameters'])
     # Remember the band count used here for the next consistency check.
     self.ctx.last_used_band = self.ctx.step2_res.out.convergence.get_dict(
     )['parameters']['BndsRnXp'][-1]
     self.report("Bands in last  bands convergence:  {}".format(
         self.ctx.last_used_band))
     convergence_parameters = DataFactory('parameter')(
         dict={
             'variable_to_converge': 'W_cutoff',
             'conv_tol': 0.1,
             'start_value': w_cutoff,
             'step': 1,
             'max_value': 20
         })
     self.report("converging 1-D  W-off")
     p2y_result = submit(
         YamboConvergenceWorkflow,
         pwcode=self.inputs.pwcode,
         precode=self.inputs.precode,
         yambocode=self.inputs.yambocode,
         calculation_set=self.inputs.calculation_set,
         calculation_set_pw=self.inputs.calculation_set_pw,
         parent_nscf_folder=load_node(self.ctx.nscf_calc).out.remote_folder,
         pseudo=self.inputs.pseudo,
         convergence_parameters=convergence_parameters,
         **extra)
     self.report("Submitted  W-cut off Workflow  ")
     reslt = {}
     if not recheck:
         reslt['step3_1_res'] = p2y_result
         self.ctx.last_step = 'step_3_1'
         self.ctx.first_runs3 = False
     else:
         reslt['step3_2_res'] = p2y_result
         self.ctx.last_step = 'step_3_2'
         self.ctx.step_3_done = True
         # if this  differs from that of step3_1_res
     # BUG FIX: persist on self.ctx (as every other step and the duplicate of
     # this method do); a plain instance attribute is not saved between
     # workchain steps.
     self.ctx.step3_res_ = p2y_result
示例#21
0
    def step_2(self, recheck=False):
        """Converge the number of bands (BndsRnXp/GbndRnge) via YamboConvergenceWorkflow.

        When entered after a W-cutoff convergence (step_3), the converged cut-off
        is passed along; the two steps alternate until bands and cut-off are
        mutually consistent.

        :param recheck: False on the first bands pass, True on a consistency re-check.
        """
        self.report("Working on Bands Convergence ")
        # Number of electrons from the parent NSCF run: the scan starts at the
        # number of occupied bands (nelec/2) and is capped at nelec - 2.
        nelec = load_node(self.ctx.nscf_calc).out.output_parameters.get_dict(
        )['number_of_electrons']
        band_cutoff = int(nelec / 2)
        extra = {}
        if self.inputs.parent_scf_folder:
            extra['parent_scf_folder'] = self.inputs.parent_scf_folder
        if self.inputs.structure:
            extra['structure'] = self.inputs.structure
        if self.ctx.last_step == 'step_3_1':
            if self.ctx.step3_res.out.convergence.get_dict(
            )['parameters']['NGsBlkXp'] == 1:  # 1 == default,
                self.report(
                    " converged cutt-off are similar to the default used in a previous band convergence, consistency achieved"
                )
                self.ctx.step_2_done = True
                self.ctx.bands_n_cutoff_consistent = True
                return
            # Restart the scan slightly below the last converged band count.
            band_cutoff = int(self.ctx.last_used_band * 0.7)
            extra['parameters'] = ParameterData(
                dict=self.ctx.step3_res.out.convergence.get_dict()
                ['parameters'])
            self.report(
                "updated the bands convegence parameters with cut-off from cutoff convergence step"
            )
        if self.ctx.last_step == 'step_3_2':
            # CRUNCH TIME:  use  values from step3_2
            if self.ctx.step3_res.out.convergence.get_dict(
            )['parameters']['NGsBlkXp'] <= self.ctx.last_used_cutoff:  #
                # we are done, and consistent:
                self.report(
                    " converged cutt-off are similar to those used in a previous bands convergence, consistency achieved"
                )
                self.ctx.step_2_done = True
                self.ctx.bands_n_cutoff_consistent = True
                return
            self.report(
                "passing parameters from  converged cut-off, this can be repeated untill the two parameters are consistent "
            )
            band_cutoff = int(self.ctx.last_used_band * 0.7)
            extra['parameters'] = ParameterData(
                dict=self.ctx.step3_res.out.convergence.get_dict()
                ['parameters'])
            self.report(
                "updated the bands convegence parameters with cut-off from cutoff convergence"
            )
        if self.ctx.last_step != 'step_1_1' and self.ctx.last_step != 'step_1_2':  # last iteration was W_cutoff not FFT
            # Record the cut-off used, for the consistency check in step_3.
            self.ctx.last_used_cutoff = self.ctx.step3_res.out.convergence.get_dict(
            )['parameters']['NGsBlkXp']
            self.report("Cut-off in last W-Cutoff convergence:  {}".format(
                self.ctx.last_used_cutoff))

        if self.ctx.last_step == 'step_1_1':
            # Coming straight from the FFT convergence: use its parameters with
            # small default values for FFTGvecs and NGsBlkXp.
            params = self.ctx.step1_res.out.convergence.get_dict(
            )['parameters']
            params['FFTGvecs'] = 2  #
            params['NGsBlkXp'] = 2  #
            extra['parameters'] = ParameterData(dict=params)
        convergence_parameters = DataFactory('parameter')(
            dict={
                'variable_to_converge': 'bands',
                'conv_tol': 0.1,
                'start_value': band_cutoff,
                'step': 1,
                'max_value': nelec - 2
            })
        self.report("converging  BndsRnXp, GbndRnge")
        p2y_result = submit(
            YamboConvergenceWorkflow,
            pwcode=self.inputs.pwcode,
            precode=self.inputs.precode,
            yambocode=self.inputs.yambocode,
            calculation_set=self.inputs.calculation_set,
            calculation_set_pw=self.inputs.calculation_set_pw,
            parent_nscf_folder=load_node(self.ctx.nscf_calc).out.remote_folder,
            pseudo=self.inputs.pseudo,
            convergence_parameters=convergence_parameters,
            **extra)
        self.report("submitted 1-D bands convergence workflow")
        reslt = {}
        if not recheck:
            # BUG FIX: the handle was previously assigned to a bare local
            # (`step2_1_res = ...`) and silently discarded; record it in the
            # result dict like step_0 and step_3 do.
            reslt['step2_1_res'] = p2y_result
            self.ctx.last_step = 'step_2_1'
            self.ctx.first_runs2 = False
        else:
            reslt['step2_2_res'] = p2y_result
            self.ctx.last_step = 'step_2_2'
            self.ctx.step_2_done = True
        # BUG FIX: persist on self.ctx (matching step_0/step_3); plain instance
        # attributes are not saved between workchain steps.
        self.ctx.step2_res_ = p2y_result
from __future__ import absolute_import
from __future__ import print_function
import os
import click

from aiida.common.example_helpers import test_and_get_code  # noqa
from aiida.orm.utils import DataFactory
from aiida.orm import load_node
from aiida.work.run import submit
from aiida_raspa.workflows import RaspaConvergeWorkChain

# data objects
CifData = DataFactory('cif')
ParameterData = DataFactory('parameter')
SinglefileData = DataFactory('singlefile')


@click.command('cli')
@click.argument('codelabel')
@click.option(
    '--block_pockets', '-b', required=True, type=int, help='Block pockets')
def main(codelabel, block_pockets):
    code = test_and_get_code(codelabel, expected_code_type='raspa')

    options_dict = {
        "resources": {
            "num_machines": 1,
            "num_mpiprocs_per_machine": 1,
        },
        "max_wallclock_seconds": 3 * 60 * 60,
    }
示例#23
0
    load_dbenv()
import json
import os, sys
from aiida_yambo.workflows.yambowf  import YamboWorkflow
 
try:
    from aiida.orm.data.base import Float, Str, NumericType, BaseType, Bool, List  
    from aiida.work.run import run, submit
except ImportError:
    from aiida.workflows2.db_types import Float, Str, NumericType, SimpleData, Bool
    from aiida.workflows2.db_types import  SimpleData as BaseType
    from aiida.orm.data.simple import  SimpleData as SimpleData_
    from aiida.workflows2.run import run

from aiida.orm.utils import DataFactory
ParameterData = DataFactory("parameter")
StructureData = DataFactory('structure')
KpointsData = DataFactory('array.kpoints')

def read_from_pw_inp(filename="../GS/nscf.in"):
    from aiida_quantumespresso.tools import pwinputparser
    pwinputfile = pwinputparser.PwInputFile(os.path.abspath(filename))
    struc =  pwinputfile.get_structuredata()
    pw_params = pwinputfile.namelists  # CONTROL, SYSTEM, ELECTRONS,...
    control = pw_params['CONTROL']
    system = pw_params['SYSTEM']
    del control['pseudo_dir']
    del control['outdir']
    del control['prefix']
    pw_params['CONTROL'] = control
    del system['ibrav']
示例#24
0
if not is_dbenv_loaded():
    load_dbenv()

from aiida.common.exceptions import InputValidationError, ValidationError, WorkflowInputValidationError
from aiida.orm import load_node
from aiida.orm.data.upf import get_pseudos_from_structure
from collections import defaultdict
from aiida.orm.utils import DataFactory, CalculationFactory
from aiida.orm.code import Code
from aiida.orm.data.structure import StructureData
from aiida.common.links import LinkType
from aiida_quantumespresso.calculations.pw import PwCalculation
from aiida_yambo.calculations.gw import YamboCalculation

ParameterData = DataFactory("parameter")


def generate_yambo_input_params(precodename, yambocodename, parent_folder,
                                parameters, calculation_set, settings):
    inputs = YamboCalculation.process().get_inputs_template()
    inputs.preprocessing_code = Code.get_from_string(precodename.value)
    inputs.code = Code.get_from_string(yambocodename.value)
    calculation_set = calculation_set.get_dict()
    resource = calculation_set.pop('resources', {})
    if resource:
        inputs._options.resources = resource
    inputs._options.max_wallclock_seconds = calculation_set.pop(
        'max_wallclock_seconds', 86400)
    max_memory_kb = calculation_set.pop('max_memory_kb', None)
    if max_memory_kb:
示例#25
0
if not is_dbenv_loaded():
    load_dbenv()

from aiida_yambo.workflows.yamboconvergence import YamboConvergenceWorkflow

try:
    from aiida.orm.data.base import Float, Str, NumericType, BaseType, List
    from aiida.work.run import run, submit
except ImportError:
    from aiida.workflows2.db_types import Float, Str, NumericType, SimpleData, Bool
    from aiida.workflows2.db_types import SimpleData as BaseType
    from aiida.orm.data.simple import SimpleData as SimpleData_
    from aiida.workflows2.run import run

from aiida.orm.utils import DataFactory
ParameterData = DataFactory("parameter")

yambo_parameters = {
    'ppa': True,
    'gw0': True,
    'HF_and_locXC': True,
    'em1d': True,
    'DIP_Threads': 0,
    'BndsRnXp': (1, 16),
    'NGsBlkXp': 1,
    'NGsBlkXp_units': 'RL',
    'PPAPntXp': 20,
    'PPAPntXp_units': 'eV',
    'GbndRnge': (1, 16),
    'GDamping': 0.1,
    'GDamping_units': 'eV',
示例#26
0
    def step_3(self, recheck=False):
        """This calls the YamboConvergenceWorkflow as a subworkflow, converging the  G-cutoff.

        We converge the G-cutoff,  using the converged kpoints from  a preceeding kpoints,
        converged FFT from a preceeding FFT convergence and converged Bands from preceeding bands convergence
        This is  a 1-D convergence performed by a subworkflow.

        :param recheck: False on the first cut-off pass; True when re-running
                        the cut-off convergence to check consistency with a
                        later bands re-convergence.
        """
        # Number of bands from the parent NSCF run.
        # NOTE(review): `nbands` is not used below -- looks like dead code, confirm.
        nbands = load_node(self.ctx.nscf_calc).out.output_parameters.get_dict(
        )['number_of_bands']
        self.report("Working on W-cutoff ")
        # Starting value for the W (screening) cut-off scan.
        w_cutoff = self.ctx.convergence_settings.dict.start_w_cutoff  #2
        # Collect optional workflow inputs to forward to the subworkflow.
        # Note 'settings_p2y' is renamed to 'settings_pw_p2y' on the way through.
        extra = {}
        if 'restart_options_pw' in self.inputs.keys():
            extra['restart_options_pw'] = self.inputs.restart_options_pw
        if 'restart_options_gw' in self.inputs.keys():
            extra['restart_options_gw'] = self.inputs.restart_options_gw
        if 'settings_pw_nscf' in self.inputs.keys():
            extra['settings_pw_nscf'] = self.inputs.settings_pw_nscf
        if 'settings_pw' in self.inputs.keys():
            extra['settings_pw'] = self.inputs.settings_pw
        if 'settings_p2y' in self.inputs.keys():
            extra['settings_pw_p2y'] = self.inputs.settings_p2y
        if 'calculation_set_pw_nscf' in self.inputs.keys():
            extra[
                'calculation_set_pw_nscf'] = self.inputs.calculation_set_pw_nscf
        if 'parent_scf_folder' in self.inputs.keys():
            extra['parent_scf_folder'] = self.inputs.parent_scf_folder
        if 'parent_nscf_folder' in self.inputs.keys():
            extra['parent_nscf_folder'] = self.inputs.parent_nscf_folder
        if 'structure' in self.inputs.keys():
            extra['structure'] = self.inputs.structure
        if self.ctx.last_step == 'step_2_2':
            # Consistency check: if the re-converged bands did not grow past the
            # bands used in the previous cut-off scan, the two are consistent.
            if self.ctx.last_used_band <= self.ctx.step2_res.out.convergence.get_dict(
            )['parameters']['BndsRnXp'][-1]:
                self.report(
                    "bands input is similar to that used in a previous cut-off convergence, consistency achieved"
                )
                self.ctx.step_3_done = True
                self.ctx.bands_n_cutoff_consistent = True
                return
            self.report("passing parameters from  re-converged bands ")
            extra['parameters'] = ParameterData(
                dict=self.ctx.step2_res.out.convergence.get_dict()
                ['parameters'])
            #w_cutoff =  self.ctx.last_used_cutoff  # start  from last used value. ## BUG?
            #w_cutoff =  int(self.ctx.last_used_cutoff*0.7)
            # Restart from the previously converged cut-off value.
            # NOTE(review): this reads self.ctx.step3_res -- assumes a prior
            # step_3 pass already stored it; verify this branch cannot be
            # reached first.
            w_cutoff = int(self.ctx.step3_res.out.convergence.get_dict()
                           ['parameters']['NGsBlkXp'])
        if self.ctx.last_step == 'step_2_1':
            self.report("passing parameters from  converged bands ")
            # use cut-off from 2_1
            extra['parameters'] = ParameterData(
                dict=self.ctx.step2_res.out.convergence.get_dict()
                ['parameters'])
            #self.ctx.last_used_band = self.ctx.step2_res.out.convergence.get_dict()['parameters']['BndsRnXp'][-1]
        # Remember the band count used here for the next consistency check.
        self.ctx.last_used_band = self.ctx.step2_res.out.convergence.get_dict(
        )['parameters']['BndsRnXp'][-1]
        self.report("Bands in last  bands convergence:  {}".format(
            self.ctx.last_used_band))
        convergence_parameters = DataFactory('parameter')(
            dict={
                'variable_to_converge': 'W_cutoff',
                'conv_tol': float(self.inputs.threshold),
                'start_value': w_cutoff,
                'step': 1,  # w_cutoff
                'max_value': self.ctx.convergence_settings.dict.max_w_cutoff
            })  #self.ctx.MAX_B_VAL
        self.report("converging 1-D  W-off")
        p2y_result = submit(YamboConvergenceWorkflow,
                            pwcode=self.inputs.pwcode,
                            precode=self.inputs.precode,
                            yambocode=self.inputs.yambocode,
                            calculation_set=self.inputs.calculation_set,
                            calculation_set_pw=self.inputs.calculation_set_pw,
                            parent_nscf_folder=load_node(
                                self.ctx.nscf_calc).out.remote_folder,
                            pseudo=self.inputs.pseudo,
                            convergence_parameters=convergence_parameters,
                            **extra)
        self.report("Submitted  W-cut off Workflow  ")
        reslt = {}
        if not recheck:
            reslt['step3_1_res'] = p2y_result
            self.ctx.last_step = 'step_3_1'
            self.ctx.first_runs3 = False
        else:
            reslt['step3_2_res'] = p2y_result
            self.ctx.last_step = 'step_3_2'
            self.ctx.step_3_done = True
            # if this  differs from that of step3_1_res
        self.ctx.step3_res_ = p2y_result
示例#27
0
from aiida.backends.utils import load_dbenv, is_dbenv_loaded
if not is_dbenv_loaded():
    load_dbenv()

from aiida_yambo.workflows.gwconvergence import YamboFullConvergenceWorkflow
from aiida.orm.data.base import Float, Str, NumericType, BaseType, List
from aiida.work.run import run, submit
from aiida.orm.utils import DataFactory
ParameterData = DataFactory("parameter")
StructureData = DataFactory('structure')

from ase.spacegroup import crystal
# Lattice constant (Angstrom).
a = 5.388
# Build the silicon cell (spacegroup 227) with ASE, using the primitive cell.
cell = crystal('Si', [(0, 0, 0)],
               spacegroup=227,
               cellpar=[a, a, a, 90, 90, 90],
               primitive_cell=True)
# Wrap the ASE atoms object in an AiiDA StructureData node.
struc = StructureData(ase=cell)

# Store the structure in the database so it can be passed as a workflow input.
struc.store()

calculation_set_yambo = {
    'resources': {
        "num_machines": 1,
        "num_mpiprocs_per_machine": 8
    },
    'max_wallclock_seconds': 60 * 60 * 6,
    'max_memory_kb': 1 * 80 *
    1000000,  #REMOVE 'custom_scheduler_commands': u"#PBS -A  Pra14_3622\n",
    "queue_name": "s3par8c",
    'environment_variables': {
示例#28
0
    def start_workflow(self):
        """Initialize the workchain context and decide where to (re)start from.

        Inspects optional inputs: a `parent_folder` (remote folder of a previous
        PW or Yambo calculation) or a `previous_yambo_workchain` (debug only),
        and sets `self.ctx.last_step_kind` accordingly so later steps can skip
        completed work.
        """
        self.ctx.pw_wf_res = DataFactory('parameter')(dict={})
        self.ctx.yambo_res = DataFactory('parameter')(dict={})
        self.ctx.last_step_pw_wf = None
        self.ctx.last_step_kind = None
        self.ctx.can_cont = 0
        self.ctx.yambo_pks = []
        self.ctx.pw_pks = []
        self.ctx.done = False
        if 'parent_folder' in self.inputs.keys():
            # The calculation that created the given remote folder.
            parent_calc = self.inputs.parent_folder.get_inputs_dict(
                link_type=LinkType.CREATE)['remote_folder']
            if isinstance(parent_calc, YamboCalculation):
                self.ctx.last_step_kind = 'yambo'
                # Fabricate a minimal result node so later steps can treat the
                # provided calculation as a successful previous GW step.
                self.ctx.yambo_res = DataFactory('parameter')(dict={
                    "out": {
                        "gw": {
                            "yambo_pk": parent_calc.pk,
                            "success": True
                        }
                    }
                })
                self.report(
                    "Yambo calculation (pk {}) found in input, I will start from there."
                    .format(parent_calc.pk))
            elif isinstance(parent_calc, PwCalculation):
                self.ctx.last_step_kind = 'pw'
                self.ctx.pw_wf_res = None
                self.report(
                    "PW calculation (pk {}) found in input, I will start from there."
                    .format(parent_calc.pk))
            else:
                self.ctx.pw_wf_res = None
                self.report(
                    "No PW or Yambo calculation found in input, I will start from scratch."
                )
        if 'previous_yambo_workchain' in self.inputs.keys():
            self.report(
                'WARNING: previous_yambo_workchain option should be used in DEBUG mode only!'
            )
            wf_outs = load_node(int(str(self.inputs.previous_yambo_workchain)))
            self.ctx.pw_wf_res = wf_outs  # has both gw and pw outputs in one
            self.ctx.yambo_res = wf_outs
            # If the SCF or NSCF parent calculation did not finish, fall back
            # to redoing the PW part.
            if 'scf_remote_folder' in wf_outs.get_outputs_dict().keys():
                scf_calc = wf_outs.out.scf_remote_folder.get_inputs_dict(
                    link_type=LinkType.CREATE)['remote_folder']
                if scf_calc.get_state() != u'FINISHED':
                    self.ctx.last_step_kind = 'pw'
                    del self.ctx['pw_wf_res']
            if 'nscf_remote_folder' in wf_outs.get_outputs_dict().keys():
                # BUG FIX: this branch previously inspected scf_remote_folder
                # again (copy-paste error); it must check the NSCF folder.
                nscf_calc = wf_outs.out.nscf_remote_folder.get_inputs_dict(
                    link_type=LinkType.CREATE)['remote_folder']
                if nscf_calc.get_state() != u'FINISHED':
                    self.ctx.last_step_kind = 'pw'
                    del self.ctx['pw_wf_res']
            if 'yambo_remote_folder' in wf_outs.get_outputs_dict().keys():
                parent_calc = wf_outs.out.yambo_remote_folder.get_inputs_dict(
                    link_type=LinkType.CREATE)['remote_folder']
                print(parent_calc.inp.settings.get_dict())
                # INITIALISE=True marks a P2Y (setup) run rather than a QP run.
                init_calc = parent_calc.inp.settings.get_dict().pop(
                    'INITIALISE', False)
                if init_calc and parent_calc.get_state(
                ) == u'FINISHED':  # Finished P2Y
                    self.ctx.last_step_kind = 'yambo_p2y'
                elif init_calc == False and parent_calc.get_state(
                ) != u'FINISHED':  #  Unfinished QP
                    self.ctx.last_step_kind = 'yambo'
                elif init_calc == False and parent_calc.get_state(
                ) == u'FINISHED':  #  Finished QP?
                    self.ctx.last_step_kind = 'yambo'
                else:  # unifinished P2Y?
                    self.ctx.last_step_kind = 'pw'

            self.report("DEBUG: workchain {} loaded".format(
                self.ctx.yambo_res))

        try:
            self.ctx.bands_groupname = self.inputs.bands_groupname
            self.report("GW bands will be added to the group {}".format(
                self.inputs.bands_groupname))
        except AttributeError:
            # Optional input not supplied: no group bookkeeping.
            self.ctx.bands_groupname = None
        self.ctx.parameters_yambo = self.inputs.parameters_yambo

        self.report(" workflow initilization step completed.")
示例#29
0
from aiida.orm.code import Code
from aiida.orm.utils import CalculationFactory, DataFactory
from aiida.work.workchain import WorkChain, ToContext, Outputs, while_
from aiida.work.run import submit
from .dftutilities import default_options, empty_pd
from copy import deepcopy

# data objects
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
RemoteData = DataFactory('remote')

# workchains
from aiida_cp2k.workflows import Cp2kDftBaseWorkChain
from aiida_cp2k.workflows import Cp2kGeoOptWorkChain
from aiida_cp2k.workflows import Cp2kCellOptWorkChain
from aiida_cp2k.workflows import Cp2kMdWorkChain


class Cp2kRobustGeoOptWorkChain(WorkChain):
    """Robust workflow that tries to optimize geometry combining molecular dynamics, cell optimization, and standard
    geometry optimization. It is under development!"""
    @classmethod
    def define(cls, spec):
        super(Cp2kRobustGeoOptWorkChain, cls).define(spec)

        # specify the inputs of the workchain
        spec.input('code', valid_type=Code)
        spec.input('structure', valid_type=StructureData)
        spec.input('parameters', valid_type=ParameterData, default=empty_pd)
        spec.input('_options',
# ===== Example #30 (snippet separator, votes: 0) =====
# Load the AiiDA database environment if it is not already active.
# This must happen before any ORM objects (nodes, workflows) are used.
if not is_dbenv_loaded():
    load_dbenv()

from aiida_yambo.workflows.yamboconvergence import YamboConvergenceWorkflow

try:
    from aiida.orm.data.base import Float, Str, NumericType, BaseType, List
    from aiida.work.run import run, submit
except ImportError:
    from aiida.workflows2.db_types import Float, Str, NumericType, SimpleData, Bool
    from aiida.workflows2.db_types import SimpleData as BaseType
    from aiida.orm.data.simple import SimpleData as SimpleData_
    from aiida.workflows2.run import run

from aiida.orm.utils import DataFactory
ParameterData = DataFactory("parameter")

StructureData = DataFactory('structure')

# Tetragonal cell for the example structure (lengths in Angstrom).
cell = [
    [4.2262023163, 0.0000000000, 0.0000000000],
    [0.0000000000, 4.2262023163, 0.0000000000],
    [0.0000000000, 0.0000000000, 2.7009939524],
]
struc = StructureData(cell=cell)

# Oxygen sites, appended one by one in the same order as before.
oxygen_positions = (
    (1.2610450495, 1.2610450495, 0.0000000000),
    (0.8520622471, 3.3741400691, 1.3504969762),
    (2.9651572668, 2.9651572668, 0.0000000000),
)
for position in oxygen_positions:
    struc.append_atom(position=position, symbols='O')