Example #1
    def test_tick(self):
        registry = ProcessRegistry()

        rinfo = submit(ProcessEventsTester, _jobs_store=self.storage)
        # Tick the engine a number of times or until there is no more work
        i = 0
        while daemon.tick_workflow_engine(self.storage,
                                          print_exceptions=False):
            self.assertLess(i, 10, "Engine not done after 10 ticks")
            i += 1
        self.assertTrue(registry.has_finished(rinfo.pid))
Example #2
    def test_create_fail(self):
        registry = ProcessRegistry()

        dp_rinfo = submit(DummyProcess, _jobs_store=self.storage)
        fail_rinfo = submit(FailCreateFromSavedStateProcess,
                            _jobs_store=self.storage)

        # Tick the engine a number of times or until there is no more work
        i = 0
        while daemon.tick_workflow_engine(self.storage,
                                          print_exceptions=False):
            self.assertLess(i, 10, "Engine not done after 10 ticks")
            i += 1

        self.assertTrue(registry.has_finished(dp_rinfo.pid))
        self.assertTrue(registry.has_finished(fail_rinfo.pid))
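
The two tests above drive the engine with the same tick-until-idle loop. As a hedged sketch, that loop can be factored into a small helper; the name tick_until_done and the max_ticks argument are hypothetical, while daemon.tick_workflow_engine and ProcessRegistry().has_finished are used exactly as in Examples #1 and #2.

    def tick_until_done(storage, max_ticks=10):
        # Sketch only: tick the engine until it reports no more work,
        # failing after max_ticks iterations, as the tests above do.
        i = 0
        while daemon.tick_workflow_engine(storage, print_exceptions=False):
            assert i < max_ticks, "Engine not done after {} ticks".format(max_ticks)
            i += 1

    # Hypothetical use in a test like the ones above:
    #     rinfo = submit(DummyProcess, _jobs_store=self.storage)
    #     tick_until_done(self.storage)
    #     self.assertTrue(ProcessRegistry().has_finished(rinfo.pid))
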
Example #3
    def start_up(self):
        """
        init context and some parameters
        """

        #print('started delta workflow version {}'.format(self._workflowversion))
        #print("Workchain node identifiers: {}".format(ProcessRegistry().current_calc_node))
        self.report('started delta workflow version {} with identifier: {}'
                    ''.format(self._workflowversion, ProcessRegistry().current_calc_node))

        # init
        self.ctx.calcs_to_run = []
        # input check

        # check if right codes
        wf_dict = self.inputs.wf_parameters.get_dict()
        self.ctx.inputs_eos = {
            'fleur': self.inputs.fleur,
            'inpgen': self.inputs.inpgen,
            'wf_parameters':
                {'points' : wf_dict.get('points', 5),
                 'step' : wf_dict.get('step', 0.02),
                 'guess' : 1.0,
                 'resources' : wf_dict.get('resources', {"num_machines": 1}),
                 'walltime_sec':  wf_dict.get('walltime_sec', 3600),
                 'queue_name' : wf_dict.get('queue_name', ''),
                 'serial' : wf_dict.get('serial', False)
             }}
        self.ctx.wc_eos_para = ParameterData(dict=self.ctx.inputs_eos.get('wf_parameters'))
        self.get_calcs_from_groups()
        self.ctx.successful = True
        self.ctx.warnings = []
        self.ctx.labels = []
Example #4
    def start(self):
        '''
        check parameters, what conditions? complete?
        check input nodes
        '''
        ### input check ### ? or done automatically, how optional?
        # check if fleurinp corresponds to fleur_calc
        print('started bands workflow version {}'.format(self._workflowversion))
        print("Workchain node identifiers: {}"
              "".format(ProcessRegistry().current_calc_node))

        self.ctx.fleurinp1 = ""
        self.ctx.last_calc = None
        self.ctx.successful = False
        self.ctx.warnings = []

        wf_dict = self.inputs.wf_parameters.get_dict()

        # if MPI in code name, execute parallel
        self.ctx.serial = wf_dict.get('serial', False)

        # set values, or defaults
        self.ctx.max_number_runs = wf_dict.get('fleur_runmax', 4)
        self.ctx.resources = wf_dict.get('resources', {"num_machines": 1})
        self.ctx.walltime_sec = wf_dict.get('walltime_sec', 10*30)
        self.ctx.queue = wf_dict.get('queue_name', None)
Example #5
    def start(self):
        """
        init context and some parameters
        """

        print('started convergence workflow version {}'.format(
            self._workflowversion))
        print("Workchain node identifiers: {}".format(
            ProcessRegistry().current_calc_node))

        # init
        self.ctx.last_calc = None
        self.ctx.loop_count = 0
        self.ctx.calcs = []
        self.ctx.successful = False
        self.ctx.distance = []
        self.ctx.total_energy = []
        self.ctx.energydiff = 10000
        self.ctx.warnings = []
        self.ctx.errors = []
        self.ctx.fleurinp = None
        wf_dict = self.inputs.wf_parameters.get_dict()

        if wf_dict == {}:
            wf_dict = self._wf_default

        # if MPI in code name, execute parallel
        self.ctx.serial = wf_dict.get('serial', False)  #True

        # set values, or defaults
        self.ctx.max_number_runs = wf_dict.get('fleur_runmax', 4)
        self.ctx.resources = wf_dict.get('resources', {"num_machines": 1})
        self.ctx.walltime_sec = wf_dict.get('walltime_sec', 60 * 60)
        self.ctx.queue = wf_dict.get('queue_name', '')
Example #6
    def start(self):
        """
        check parameters, what conditions? complete?
        check input nodes
        """
        self.report('started eos workflow version {}'.format(self._workflowversion))
        self.report("Workchain node identifiers: {}".format(ProcessRegistry().current_calc_node))
        #print('started eos workflow version {}'.format(self._workflowversion))
        #print("Workchain node identifiers: {}".format(ProcessRegistry().current_calc_node))
        ### input check ### ? or done automatically, how optional?
        self.ctx.last_calc2 = None
        self.ctx.calcs = []
        self.ctx.calcs_future = []
        self.ctx.structures = []
        self.ctx.temp_calc = None
        self.ctx.structurs_uuids = []
        self.ctx.scalelist = []
        self.ctx.volume = []
        self.ctx.volume_peratom = []
        self.ctx.org_volume = -1  # avoid div 0
        self.ctx.labels = []
        self.ctx.successful = True  # False
        # TODO: set True only if all convergence runs were successful


        wf_dict = self.inputs.wf_parameters.get_dict()
        # set values, or defaults, default: always converge charge density,
        # crit < 0.00002, max 4 fleur runs

        self.ctx.points = wf_dict.get('points', 9)
        self.ctx.step = wf_dict.get('step', 0.002)
        self.ctx.guess = wf_dict.get('guess', 1.00)
        self.ctx.serial = wf_dict.get('serial', False)  # True
        self.ctx.custom_scheduler_commands = wf_dict.get('custom_scheduler_commands', '')
        self.ctx.max_number_runs = wf_dict.get('fleur_runmax', 4)

        inputs = self.inputs

        if 'inpgen' in inputs:
            try:
                test_and_get_codenode(inputs.inpgen, 'fleur.inpgen', use_exceptions=True)
            except ValueError:
                error = ("The code you provided for inpgen of FLEUR does not "
                         "use the plugin fleur.inpgen")
                #self.control_end_wc(error)
                print(error)
                self.abort(error)

        if 'fleur' in inputs:
            try:
                test_and_get_codenode(inputs.fleur, 'fleur.fleur', use_exceptions=True)
            except ValueError:
                error = ("The code you provided for FLEUR does not "
                         "use the plugin fleur.fleur")
                #self.control_end_wc(error)
                #print(error)
                self.abort(error)
Example #7
 def init(self):
     print "Workchain node identifiers: {}".format(
         ProcessRegistry().current_calc_node)
     print "Run Ape with guess radii, just to get nodes and picks"
     apein = generate_ApeCalculation('apelocal', self.inputs.element, 2, 2,
                                     2, 2)
     self.ctx.Vo2, self.ctx.Bo2, self.ctx.Bp2 = get_reference(
         self.inputs.element)
     future = submit(ApeCalculation.process(), **apein)
     return ToContext(result=future)
Example #8
 def start(self):
     '''
     check parameters, what conditions? complete?
     check input nodes
     '''
     ### input check ### ? or done automatically, how optional?
     # check if fleurinp corresponds to fleur_calc
     print('started bands workflow version {}'.format(self._workflowversion))
     print("Workchain node identifiers: {}"
           "".format(ProcessRegistry().current_calc_node))
Example #9
def run_wf():
  print "Workfunction node identifiers: {}".format(ProcessRegistry().current_calc_node)
  #Instantiate a JobCalc process and create basic structure
  JobCalc = SiestaCalculation.process()
  s0 = create_structure()
  calcs = {}
  for label, factor in zip(labels, scale_facs):
    s = rescale(s0,Float(factor))
    inputs = geninputs(s)
    print "Running a scf for Si with scale factor {}".format(factor)
    # result = run(JobCalc,**inputs)
    result = async(JobCalc,**inputs)
Example #10
    def start(self):
        """
        check parameters, what conditions? complete?
        check input nodes
        """
        self.report(
            'started fleur_optimize_parameter workflow version {}'.format(
                self._workflowversion))
        self.report("Workchain node identifiers: {}".format(
            ProcessRegistry().current_calc_node))

        ### input check ###

        # initialize contexts

        self.ctx.successful = True
        # Check on inputnodes

        inputs = self.inputs

        # wf_parameters:
        wf_dict = inputs.wf_parameters.get_dict()

        # set values, or DEFAULTS
        self.ctx.serial = wf_dict.get('serial', False)
        self.ctx.custom_scheduler_commands = wf_dict.get(
            'custom_scheduler_commands', '')
        self.ctx.max_number_runs = wf_dict.get('fleur_runmax', 4)

        # codes
        if 'inpgen' in inputs:
            try:
                test_and_get_codenode(inputs.inpgen,
                                      'fleur.inpgen',
                                      use_exceptions=True)
            except ValueError:
                error = ("The code you provided for inpgen of FLEUR does not "
                         "use the plugin fleur.inpgen")
                self.control_end_wc(error)
                self.abort(error)

        if 'fleur' in inputs:
            try:
                test_and_get_codenode(inputs.fleur,
                                      'fleur.fleur',
                                      use_exceptions=True)
            except ValueError:
                error = ("The code you provided for FLEUR does not "
                         "use the plugin fleur.fleur")
                self.control_end_wc(error)
                self.abort(error)
Example #11
    def start(self):
        """
        init context and some parameters
        """
        self.report('INFO: started convergence workflow version {}\n'
                    'INFO: Workchain node identifiers: {}'
                    ''.format(self._workflowversion,
                              ProcessRegistry().current_calc_node))

        ####### init    #######

        # internal para /control para
        self.ctx.last_calc = None
        self.ctx.loop_count = 0
        self.ctx.calcs = []
        self.ctx.abort = False

        # input para
        wf_dict = self.inputs.wf_parameters.get_dict()

        if wf_dict == {}:
            wf_dict = self._wf_default

        self.ctx.serial = wf_dict.get('serial', False)

        # set values, or defaults
        self.ctx.max_number_runs = wf_dict.get('fleur_runmax', 4)
        self.ctx.resources = wf_dict.get('resources', {"num_machines": 1})
        self.ctx.walltime_sec = wf_dict.get('walltime_sec', 60 * 60)
        self.ctx.queue = wf_dict.get('queue_name', '')
        self.ctx.custom_scheduler_commands = wf_dict.get(
            'custom_scheduler_commands', '')
        self.ctx.description_wf = self.inputs.get('_description',
                                                  '') + '|fleur_scf_wc|'
        self.ctx.label_wf = self.inputs.get('_label', 'fleur_scf_wc')

        # return para/vars
        self.ctx.successful = True
        self.ctx.distance = []
        self.ctx.total_energy = []
        self.ctx.energydiff = 10000
        self.ctx.warnings = []
        self.ctx.errors = []
        self.ctx.fleurinp = None
        self.ctx.formula = ''
Example #12
    def start(self):
        '''
        check parameters, what conditions? complete?
        check input nodes
        '''
        ### input check ### ? or done automatically, how optional?
        # check if fleurinp corresponds to fleur_calc
        print('started dos workflow version {}'.format(self._workflowversion))
        print("Workchain node identifiers: {}"
              "".format(ProcessRegistry().current_calc_node))

        self.ctx.fleurinp1 = ""
        self.ctx.last_calc = None
        self.ctx.successful = False
        self.ctx.warnings = []

        wf_dict = self.inputs.wf_parameters.get_dict()

        # if MPI in code name, execute parallel
        self.ctx.serial = wf_dict.get('serial', False)

        # set values, or defaults
        self.ctx.max_number_runs = wf_dict.get('fleur_runmax', 4)
        self.ctx.resources = wf_dict.get('resources', {"num_machines": 1})
        self.ctx.walltime_sec = wf_dict.get('walltime_sec', 10 * 60)
        self.ctx.queue = wf_dict.get('queue_name', None)

        inputs = self.inputs

        if 'fleur' in inputs:
            try:
                test_and_get_codenode(inputs.fleur,
                                      'fleur.fleur',
                                      use_exceptions=True)
            except ValueError:
                error = ("The code you provided for FLEUR does not "
                         "use the plugin fleur.fleur")
                #self.control_end_wc(error)
                print(error)
                self.abort()
Example #13
    def run_pw(self):
        print "Workchain node identifiers: {}".format(
            ProcessRegistry().current_calc_node)
        #Instantiate a JobCalc process and create basic structure
        JobCalc = PwCalculation.process()
        self.ctx.s0 = structure_init(Str(self.inputs.element))
        self.ctx.eos_names = []

        calcs = {}
        for label, factor in zip(labels, scale_facs):
            s = rescale(self.ctx.s0, Float(factor))
            inputs = generate_scf_input_params(s, str(self.inputs.code),
                                               self.inputs.pseudo,
                                               str(self.inputs.element))
            print "Running a scf for {} with scale factor {}".format(
                self.inputs.element, factor)
            future = submit(JobCalc, **inputs)
            calcs[label] = Outputs(future)

        # Ask the workflow to continue when the results are ready and store them
        # in the context
        return ToContext(**calcs)  #Here it waits
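
run_pw hands its futures back to the engine via ToContext, so execution pauses until they complete. A hedged sketch of the follow-up step the excerpt omits: the step name get_results and the eos_data attribute are assumptions, and the key access on self.ctx assumes the context exposes the labels passed to ToContext(**calcs), the same mechanism Example #7 relies on with ToContext(result=future).

    def get_results(self):
        # Sketch only: the engine resumes here once all futures passed to
        # ToContext(**calcs) in run_pw have completed; each label used as a
        # key in calcs is then expected to be available on the context.
        eos = []
        for label in labels:
            eos.append(self.ctx[label])  # the Outputs(...) captured in run_pw
        self.ctx.eos_data = eos          # hypothetical attribute for later steps
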
Example #14
    def run_wf():
        # print "Workfunction node identifiers: {}".format(ProcessRegistry().current_calc_node)
        wcalc_uuid = ProcessRegistry().current_calc_node.uuid
        print "Workfunction node: {}".format(wcalc_uuid)
        #Instantiate a JobCalc process and create basic structure
        JobCalc = SiestaCalculation.process()
        s0 = create_structure()
        calcs = {}
        for label, factor in zip(labels, scale_facs):
            s = rescale(s0, Float(factor))
            inputs = geninputs(s)
            print "Running a scf for Si with scale factor {}".format(factor)
            result = run(JobCalc, **inputs)
            calcs[label] = get_info(result, s)

        eos = []
        for label in labels:
            eos.append(calcs[label])

        retdict = {'result': ParameterData(dict={'eos_data': eos})}

        return retdict
Example #15
 def start(self):
     """
     check parameters, what conditions? complete?
     check input nodes
     """
     print('started eos workflow version {}'.format(self._workflowversion))
     print("Workchain node identifiers: {}".format(
         ProcessRegistry().current_calc_node))
     ### input check ### ? or done automatically, how optional?
     self.ctx.last_calc2 = None
     self.ctx.calcs = []
     self.ctx.structures = []
     self.ctx.structurs_uuids = []
     self.ctx.scalelist = []
     self.ctx.volume = []
     self.ctx.successful = True  # False # TODO: set True only if all convergence runs were successful
     wf_dict = self.inputs.wf_parameters.get_dict()
     self.ctx.points = wf_dict.get('points', 2)  #9
     self.ctx.step = wf_dict.get('step', 0.002)
     self.ctx.guess = wf_dict.get('guess', 1.00)
     # set values, or defaults, default: always converge charge density, crit < 0.00002, max 4 fleur runs
     self.ctx.max_number_runs = wf_dict.get('fleur_runmax', 4)
Example #16
    def check_input(self):
        """
        Init some context and check whether the given input makes sense
        """
        ### input check ### ? or done automatically, how optional?

        msg = ("INFO: Started inital_state_CLS workflow version {} "
               "Workchain node identifiers: {}"
               "".format(self._workflowversion,
                         ProcessRegistry().current_calc_node))
        self.report(msg)

        # init
        self.ctx.last_calc = None
        self.ctx.eximated_jobs = 0
        self.ctx.run_jobs = 0
        self.ctx.calcs_res = []
        self.ctx.labels = []
        self.ctx.ref_labels = []
        self.ctx.calcs_torun = []
        self.ctx.ref_calcs_torun = []
        self.ctx.ref_calcs_res = []
        self.ctx.struc_to_relax = []
        self.ctx.successful = False
        self.ctx.warnings = []
        self.ctx.errors = []
        self.ctx.ref = {}
        self.ctx.calculate_formation_energy = True

        #Style: {atomtype : listof all corelevel, atomtype_coresetup... }
        #ie: { 'W-1' : [shift_1s, ... shift 7/2 4f],
        #      'W-1_coreconfig' : ['1s','2s',...],
        #      'W-2' : [...], 'Be-1': [], ...} #all in eV!
        self.ctx.CLS = {}
        self.ctx.cl_energies = {}  # same style as CLS only energy <-> shift
        self.ctx.ref_cl_energies = {}
        #Style: {'Compound' : energy, 'ref_x' : energy , ...}
        #i.e {'Be12W' : 0.0, 'Be' : 0.104*htr_eV , 'W' : 0.12*htr_eV} # all in eV!
        self.ctx.fermi_energies = {}
        self.ctx.bandgaps = {}
        self.ctx.atomtypes = {}
        # set values, or defaults for Wf_para
        wf_dict = self.inputs.wf_parameters.get_dict()
        default = self._default_wf_para

        self.ctx.serial = wf_dict.get('serial', default.get('serial'))
        self.ctx.same_para = wf_dict.get('same_para', default.get('same_para'))
        self.ctx.scf_para = wf_dict.get('scf_para', default.get('scf_para'))
        self.ctx.relax = wf_dict.get('relax', default.get('relax'))
        self.ctx.relax_mode = wf_dict.get('relax_mode',
                                          default.get('relax_mode'))
        self.ctx.relax_para = wf_dict.get('relax_para',
                                          default.get('relax_para'))
        self.ctx.resources = wf_dict.get('resources', default.get('resources'))
        self.ctx.walltime_sec = wf_dict.get('walltime_sec',
                                            default.get('walltime_sec'))
        self.ctx.queue = wf_dict.get('queue_name', default.get('queue_name'))
        self.ctx.custom_scheduler_commands = wf_dict.get(
            'custom_scheduler_commands', '')
        # check if inputs given make sense # TODO sort this out in common wc
        inputs = self.inputs
        if 'fleurinp' in inputs:
            #TODO make a check if an extracted structure exists, since get_structuredata is wf
            structure = inputs.fleurinp.get_structuredata(inputs.fleurinp)
            self.ctx.elements = list(structure.get_composition().keys())
            self.ctx.calcs_torun.append(inputs.get('fleurinp'))
            #print('here1')
            if 'structure' in inputs:
                warning = 'WARNING: Ignoring Structure input, because Fleurinp was given'
                self.ctx.warnings.append(warning)
                self.report(warning)
            if 'calc_parameters' in inputs:
                warning = 'WARNING: Ignoring parameter input, because Fleurinp was given'
                self.ctx.warnings.append(warning)
                self.report(warning)
        elif 'structure' in inputs:
            self.ctx.elements = list(inputs.structure.get_composition().keys())
            #self.ctx.elements = list(s.get_symbols_set())
            if 'inpgen' not in inputs:
                error = 'ERROR: StructureData was provided, but no inpgen code was provided'
                self.ctx.errors.append(error)
                self.abort_nowait(error)
            if 'calc_parameters' in inputs:
                self.ctx.calcs_torun.append(
                    [inputs.get('structure'),
                     inputs.get('calc_parameters')])
                #print('here2')
            else:
                self.ctx.calcs_torun.append(inputs.get('structure'))
                #print('here3')
        else:
            error = 'ERROR: No StructureData nor FleurinpData was provided'
            #print(error)
            self.ctx.errors.append(error)
            self.abort_nowait(error)
        self.report('INFO: elements in structure: {}'.format(
            self.ctx.elements))
Example #17
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved.                     #
# This file is part of the AiiDA code.                                    #
#                                                                         #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file        #
# For further information please visit http://www.aiida.net               #
###########################################################################
from plum.engine.serial import SerialEngine
import plum.class_loader
from plum.engine.parallel import MultithreadedEngine

import plum.in_memory_database
import plum.knowledge_provider
import plum.knowledge_base
from aiida.work.class_loader import ClassLoader
from aiida.work.process_registry import ProcessRegistry

_kb = plum.knowledge_base.KnowledgeBase()
#_kb.add_provider(
#    plum.in_memory_database.InMemoryDatabase(
#        retain_inputs=False, retain_outputs=False))
_kb.add_provider(ProcessRegistry())
plum.knowledge_provider.set_global_provider(_kb)

# Have globals that can be used by all of AiiDA
class_loader = plum.class_loader.ClassLoader(ClassLoader())
registry = _kb
parallel_engine = MultithreadedEngine()
serial_engine = SerialEngine()
Example #18
    def start(self):
        """
        init context and some parameters
        """
        self.report('INFO: started VoroStart workflow version {}\n'
                    'INFO: Workchain node identifiers: {}'
                    ''.format(self._workflowversion,
                              ProcessRegistry().current_calc_node))

        ####### init    #######

        # internal para /control para
        self.ctx.abort = False

        # input para
        wf_dict = self.inputs.wf_parameters.get_dict()

        #TODO: check for completeness
        if wf_dict == {}:
            wf_dict = self._wf_default
            self.report('INFO: using default wf parameter')

        # set values, or defaults
        self.ctx.use_mpi = wf_dict.get('use_mpi', self._wf_default['use_mpi'])
        self.ctx.resources = wf_dict.get('resources',
                                         self._wf_default['resources'])
        self.ctx.walltime_sec = wf_dict.get('walltime_sec',
                                            self._wf_default['walltime_sec'])
        self.ctx.queue = wf_dict.get('queue_name',
                                     self._wf_default['queue_name'])
        self.ctx.custom_scheduler_commands = wf_dict.get(
            'custom_scheduler_commands',
            self._wf_default['custom_scheduler_commands'])

        self.ctx.dos_params_dict = wf_dict.get('dos_params',
                                               self._wf_default['dos_params'])

        self.ctx.description_wf = self.inputs.get('_description',
                                                  self._wf_description)
        self.ctx.label_wf = self.inputs.get('_label', self._wf_label)

        # iterative rerunning parameters
        self.ctx.iter = 0
        self.ctx.Nrerun = wf_dict.get('num_rerun',
                                      self._wf_default['num_rerun'])

        # initialize checking booleans
        self.ctx.is_starting_iter = True
        self.ctx.doscheck_ok = False
        self.ctx.voro_ok = False
        self.ctx.check_dos = wf_dict.get('check_dos',
                                         self._wf_default['check_dos'])
        self.ctx.dos_check_fail_reason = None

        # some physical parameters that are reused
        self.ctx.r_cls = wf_dict.get('r_cls', self._wf_default['r_cls'])
        self.ctx.nclsmin = wf_dict.get('natom_in_cls_min',
                                       self._wf_default['natom_in_cls_min'])
        self.ctx.fac_clsincrease = wf_dict.get(
            'fac_cls_increase', self._wf_default['fac_cls_increase'])
        self.ctx.efermi = None

        # minimal distance (in eV) that the DOS contour limits emin/emax must keep from e_fermi
        self.ctx.delta_e = wf_dict.get('delta_e_min',
                                       self._wf_default['delta_e_min'])
        # threshold for dos comparison (comparison of dos at emin)
        self.ctx.threshold_dos_zero = wf_dict.get(
            'threshold_dos_zero', self._wf_default['threshold_dos_zero'])
        self.ctx.min_dist_core_states = wf_dict.get(
            'delta_e_min_core_states',
            self._wf_default['delta_e_min_core_states'])

        #TODO add missing info
        # print the inputs
        self.report(
            'INFO: use the following parameter:\n'
            'use_mpi: {}\n'
            'Resources: {}\n'
            'Walltime (s): {}\n'
            'queue name: {}\n'
            'scheduler command: {}\n'
            'description: {}\n'
            'label: {}\n'
            'dos_params: {}\n'
            'Max. number of voronoi reruns: {}\n'
            'factor cluster increase: {}\n'
            'default cluster radius (in alat): {}\n'
            'min. number of atoms in screening cls: {}\n'
            'min. dist in DOS contour to emin/emax: {} eV\n'
            'threshold where DOS is zero: {} states/eV\n'
            'minimal distance of highest core state from EMIN: {} Ry\n'.format(
                self.ctx.use_mpi, self.ctx.resources, self.ctx.walltime_sec,
                self.ctx.queue, self.ctx.custom_scheduler_commands,
                self.ctx.description_wf, self.ctx.label_wf,
                self.ctx.dos_params_dict, self.ctx.Nrerun,
                self.ctx.fac_clsincrease, self.ctx.r_cls, self.ctx.nclsmin,
                self.ctx.delta_e, self.ctx.threshold_dos_zero,
                self.ctx.min_dist_core_states))

        # return para/vars
        self.ctx.successful = True
        self.ctx.errors = []
        self.ctx.formula = ''

        # get kkr and voronoi codes from input
        try:
            test_and_get_codenode(self.inputs.kkr,
                                  'kkr.kkr',
                                  use_exceptions=True)
        except ValueError:
            error = ("The code you provided for kkr does not "
                     "use the plugin kkr.kkr")
            self.ctx.errors.append(error)
            self.control_end_wc(error)
        try:
            test_and_get_codenode(self.inputs.voronoi,
                                  'kkr.voro',
                                  use_exceptions=True)
        except ValueError:
            error = ("The code you provided for voronoi does not "
                     "use the plugin kkr.voro")
            self.ctx.errors.append(error)
            self.control_end_wc(error)
Example #19
    def start(self):
        """
        init context and some parameters
        """
        self.report('INFO: started KKR dos workflow version {}\n'
                    'INFO: Workchain node identifiers: {}'
                    ''.format(self._workflowversion,
                              ProcessRegistry().current_calc_node))

        ####### init    #######

        # internal para /control para
        self.ctx.abort = False

        # input para
        wf_dict = self.inputs.wf_parameters.get_dict()

        #TODO: check for completeness
        if wf_dict == {}:
            wf_dict = self._wf_default
            self.report('INFO: using default wf parameter')

        # set values, or defaults
        self.ctx.use_mpi = wf_dict.get('use_mpi', self._wf_default['use_mpi'])
        self.ctx.resources = wf_dict.get('resources',
                                         self._wf_default['resources'])
        self.ctx.walltime_sec = wf_dict.get('walltime_sec',
                                            self._wf_default['walltime_sec'])
        self.ctx.queue = wf_dict.get('queue_name',
                                     self._wf_default['queue_name'])
        self.ctx.custom_scheduler_commands = wf_dict.get(
            'custom_scheduler_commands',
            self._wf_default['custom_scheduler_commands'])

        self.ctx.dos_params_dict = wf_dict.get('dos_params',
                                               self._wf_default['dos_params'])
        self.ctx.dos_kkrparams = None  # is set in set_params_dos

        self.ctx.description_wf = self.inputs.get('_description',
                                                  self._wf_description)
        self.ctx.label_wf = self.inputs.get('_label', self._wf_label)

        self.report('INFO: use the following parameter:\n'
                    'use_mpi: {}\n'
                    'Resources: {}\n'
                    'Walltime (s): {}\n'
                    'queue name: {}\n'
                    'scheduler command: {}\n'
                    'description: {}\n'
                    'label: {}\n'
                    'dos_params: {}\n'.format(
                        self.ctx.use_mpi, self.ctx.resources,
                        self.ctx.walltime_sec, self.ctx.queue,
                        self.ctx.custom_scheduler_commands,
                        self.ctx.description_wf, self.ctx.label_wf,
                        self.ctx.dos_params_dict))

        # return para/vars
        self.ctx.successful = True
        self.ctx.errors = []
        self.ctx.formula = ''
Example #20
    def start(self):
        """
        check parameters, what conditions? complete?
        check input nodes
        """
        self.report('started simple eos workflow version {}'.format(
            self._workflowversion))
        self.report("Workchain node identifiers: {}".format(
            ProcessRegistry().current_calc_node))

        ### input check ###

        # initialize contexts

        self.ctx.last_calc2 = None
        self.ctx.calcs = []
        self.ctx.calcs_future = []
        self.ctx.structures = []
        self.ctx.temp_calc = None
        self.ctx.structurs_uuids = []
        self.ctx.scalelist = []
        self.ctx.volume = []
        self.ctx.volume_peratom = []
        self.ctx.org_volume = -1  # avoid div 0
        self.ctx.labels = []
        self.ctx.successful = True

        # Check on inputnodes

        inputs = self.inputs

        # wf_parameters:

        wf_dict = inputs.wf_parameters.get_dict()

        # set values, or DEFAULTS
        self.ctx.points = wf_dict.get('points', 9)
        self.ctx.step = wf_dict.get('step', 0.002)
        self.ctx.guess = wf_dict.get('guess', 1.00)
        self.ctx.serial = wf_dict.get('serial', False)
        self.ctx.custom_scheduler_commands = wf_dict.get(
            'custom_scheduler_commands', '')
        self.ctx.max_number_runs = wf_dict.get('fleur_runmax', 4)

        # codes
        if 'inpgen' in inputs:
            try:
                test_and_get_codenode(inputs.inpgen,
                                      'fleur.inpgen',
                                      use_exceptions=True)
            except ValueError:
                error = ("The code you provided for inpgen of FLEUR does not "
                         "use the plugin fleur.inpgen")
                self.control_end_wc(error)
                self.abort(error)

        if 'fleur' in inputs:
            try:
                test_and_get_codenode(inputs.fleur,
                                      'fleur.fleur',
                                      use_exceptions=True)
            except ValueError:
                error = ("The code you provided for FLEUR does not "
                         "use the plugin fleur.fleur")
                self.control_end_wc(error)
                self.abort(error)
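
Across the FLEUR and KKR workchains above, start() repeats the same two moves: read wf_parameters with per-key defaults, then validate the code nodes with test_and_get_codenode. A condensed, non-authoritative sketch of that shared pattern follows; the individual calls are copied from Examples #10, #11 and #20, while collapsing them into one method is an illustration, not the libraries' prescribed layout (the excerpts also differ on whether control_end_wc, abort or both are called on error).

    def start(self):
        # Sketch only: defaults pattern used throughout the examples above.
        wf_dict = self.inputs.wf_parameters.get_dict()
        if wf_dict == {}:
            wf_dict = self._wf_default

        self.ctx.serial = wf_dict.get('serial', False)
        self.ctx.max_number_runs = wf_dict.get('fleur_runmax', 4)
        self.ctx.resources = wf_dict.get('resources', {"num_machines": 1})
        self.ctx.walltime_sec = wf_dict.get('walltime_sec', 60 * 60)
        self.ctx.queue = wf_dict.get('queue_name', '')

        # Code validation, as in Examples #10 and #20.
        if 'fleur' in self.inputs:
            try:
                test_and_get_codenode(self.inputs.fleur, 'fleur.fleur',
                                      use_exceptions=True)
            except ValueError:
                error = ("The code you provided for FLEUR does not "
                         "use the plugin fleur.fleur")
                self.control_end_wc(error)
                self.abort(error)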