def test_run(self):
    A = Str('A')
    B = Str('B')
    C = Str('C')
    three = Int(3)

    # Try the if(..) part
    work.run(Wf, value=A, n=three)
    # Check the steps that should have been run
    for step, finished in Wf.finished_steps.iteritems():
        if step not in ['s3', 's4', 'isB']:
            self.assertTrue(
                finished, "Step {} was not called by workflow".format(step))

    # Try the elif(..) part
    finished_steps = work.run(Wf, value=B, n=three)
    # Check the steps that should have been run
    for step, finished in finished_steps.iteritems():
        if step not in ['isA', 's2', 's4']:
            self.assertTrue(
                finished, "Step {} was not called by workflow".format(step))

    # Try the else... part
    finished_steps = work.run(Wf, value=C, n=three)
    # Check the steps that should have been run
    for step, finished in finished_steps.iteritems():
        if step not in ['isA', 's2', 'isB', 's3']:
            self.assertTrue(
                finished, "Step {} was not called by workflow".format(step))
def run_async():
    yield run_until_paused(process)
    self.assertTrue(process.paused)
    process.kill()

    with self.assertRaises(plumpy.KilledError):
        work.run(process)
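# A minimal sketch of how a coroutine like run_async is typically driven in
# these tests; the runner and process setup are assumptions based on the
# surrounding snippets, not part of this fragment:
#
#     runner.loop.add_callback(process.step_until_terminated)
#     runner.run_until_complete(run_async())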
def test_calculation_future_broadcasts(self):
    runner = utils.create_test_runner(with_communicator=True)
    proc = work.test_utils.DummyProcess()

    # No polling
    future = work.CalculationFuture(
        pk=proc.pid, poll_interval=None, communicator=runner.communicator)

    work.run(proc)
    calc_node = runner.run_until_complete(future)
    self.assertEqual(proc.calc.pk, calc_node.pk)
def main():
    inputs = {'a': Float(3.14), 'b': Int(4), 'c': Int(6)}

    results = work.run(SumWorkChain, **inputs)
    print 'Result of SumWorkChain: {}'.format(results)

    results = work.run(ProductWorkChain, **inputs)
    print 'Result of ProductWorkChain: {}'.format(results)

    results = work.run(SumProductWorkChain, **inputs)
    print 'Result of SumProductWorkChain: {}'.format(results)
def test_calculation_future_polling(self):
    runner = utils.create_test_runner()
    proc = work.test_utils.DummyProcess()

    # No communicator
    future = work.CalculationFuture(
        pk=proc.pid, loop=runner.loop, poll_interval=0)

    work.run(proc)
    calc_node = runner.run_until_complete(future)
    self.assertEqual(proc.calc.pk, calc_node.pk)
def run_and_check_success(process_class, **kwargs):
    """
    Instantiate the process class and execute it, followed by a check
    that it finished successfully.

    :returns: instance of the process
    """
    process = process_class(inputs=kwargs)
    work.run(process)
    assert process.calc.is_finished_ok is True

    return process
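# A usage sketch for run_and_check_success; ``AddWorkChain`` and its inputs
# are illustrative stand-ins, not part of the helper itself:
#
#     process = run_and_check_success(AddWorkChain, a=Int(4), b=Int(5))
#     print process.calc.is_finished_ok  # True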
def test_simple_run(self):
    """
    Run the workchain, which should hit the exception and therefore end
    up in the EXCEPTED state
    """
    process = TestWorkChainAbortChildren.MainWorkChain()

    with Capturing():
        with self.assertRaises(RuntimeError):
            work.run(process)

    self.assertEquals(process.calc.is_finished_ok, False)
    self.assertEquals(process.calc.is_excepted, True)
    self.assertEquals(process.calc.is_killed, False)
def test_fixed_inputs(self):
    def wf(a, b, c):
        return {'a': a, 'b': b, 'c': c}

    inputs = {'a': Int(4), 'b': Int(5), 'c': Int(6)}

    function_process_class = work.FunctionProcess.build(wf)
    self.assertEqual(work.run(function_process_class, **inputs), inputs)
def run_eos_wf(codename, pseudo_family, element):
    print "Workfunction node identifiers: {}".format(Process.current().calc)
    s0 = create_diamond_fcc(Str(element))

    calcs = {}
    for label, factor in zip(labels, scale_facs):
        s = rescale(s0, Float(factor))
        inputs = generate_scf_input_params(s, str(codename), Str(pseudo_family))

        print "Running a scf for {} with scale factor {}".format(
            element, factor)
        result = run(PwCalculation, **inputs)
        print "RESULT: {}".format(result)
        calcs[label] = get_info(result)

    eos = []
    for label in labels:
        eos.append(calcs[label])

    # Return information to plot the EOS
    ParameterData = DataFactory('parameter')
    retdict = {
        'initial_structure': s0,
        'result': ParameterData(dict={'eos_data': eos})
    }

    return retdict
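# ``labels`` and ``scale_facs`` are assumed to be defined at module level;
# an illustrative sketch of what they might look like (the values are an
# assumption, not taken from this snippet):
#
#     scale_facs = (0.96, 0.98, 1.0, 1.02, 1.04)
#     labels = ['c{}'.format(i) for i in range(1, len(scale_facs) + 1)]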
def test_base(fresh_aiida_env, vasp_params, potentials, vasp_kpoints,
              vasp_structure, mock_vasp):
    """Test submitting only, not correctness, with mocked vasp code."""
    from aiida.orm import WorkflowFactory, Code
    from aiida import work

    rmq_config = None
    runner = work.Runner(poll_interval=0., rmq_config=rmq_config,
                         enable_persistence=True)
    work.set_runner(runner)

    base_wf_proc = WorkflowFactory('vasp.base')

    mock_vasp.store()
    print(mock_vasp.get_remote_exec_path())
    comp = mock_vasp.get_computer()
    create_authinfo(computer=comp).store()

    kpoints, _ = vasp_kpoints

    inputs = AttributeDict()
    inputs.code = Code.get_from_string('mock-vasp@localhost')
    inputs.structure = vasp_structure
    inputs.incar = vasp_params
    inputs.kpoints = kpoints
    inputs.potcar_family = get_data_node('str', POTCAR_FAMILY_NAME)
    inputs.potcar_mapping = get_data_node('parameter', dict=POTCAR_MAP)
    inputs.options = get_data_node(
        'parameter',
        dict={
            'queue_name': 'None',
            'resources': {
                'num_machines': 1,
                'num_mpiprocs_per_machine': 1
            }
        })
    inputs.max_iterations = get_data_node('int', 1)
    inputs.settings = get_data_node(
        'parameter',
        dict={'parser_settings': {'add_structure': False,
                                  'should_parse_CONTCAR': False}})

    results = work.run(base_wf_proc, **inputs)

    assert 'retrieved' in results
    assert 'output_parameters' in results
    assert 'remote_folder' in results
def test_launchers(self):
    """
    Verify that the various launchers are working
    """
    result = run(self.wf_return_true)
    self.assertTrue(result)

    result, node = run_get_node(self.wf_return_true)
    self.assertTrue(result)
    self.assertEqual(result, get_true_node())
    self.assertTrue(isinstance(node, FunctionCalculation))

    with self.assertRaises(AssertionError):
        submit(self.wf_return_true)
def test_nested_expose(self):
    res = work.run(
        GrandParentExposeWorkChain,
        sub=dict(sub=dict(
            a=Int(1),
            sub_1={
                'b': Float(2.3),
                'c': Bool(True)
            },
            sub_2={
                'b': Float(1.2),
                'sub_3': {
                    'c': Bool(False)
                }
            },
        )))
    self.assertEquals(
        res, {
            'sub.sub.a': Float(2.2),
            'sub.sub.sub_1.b': Float(2.3),
            'sub.sub.sub_1.c': Bool(True),
            'sub.sub.sub_2.b': Float(1.2),
            'sub.sub.sub_2.sub_3.c': Bool(False)
        })
def test_expose(self):
    res = work.run(
        ParentExposeWorkChain,
        a=Int(1),
        sub_1={
            'b': Float(2.3),
            'c': Bool(True)
        },
        sub_2={
            'b': Float(1.2),
            'sub_3': {
                'c': Bool(False)
            }
        },
    )
    self.assertEquals(
        res, {
            'a': Float(2.2),
            'sub_1.b': Float(2.3),
            'sub_1.c': Bool(True),
            'sub_2.b': Float(1.2),
            'sub_2.sub_3.c': Bool(False)
        })
def test_persisting(self):
    persister = plumpy.test_utils.TestPersister()
    runner = work.new_runner(persister=persister)
    workchain = Wf(runner=runner)
    work.run(workchain)
def test_run_pointless_workchain(self):
    """Running the pointless workchain should not raise any exceptions"""
    work.run(TestWorkChainReturnDict.PointlessWorkChain)
class F1WaitFor(WorkChain):
    @classmethod
    def define(cls, spec):
        super(F1WaitFor, cls).define(spec)
        spec.dynamic_input()
        spec.dynamic_output()
        spec.outline(cls.s1, cls.s2)

    def s1(self):
        p2 = async(long_running, a=self.inputs.inp)
        self.ctx.a = 1
        self.ctx.r2 = p2.result()

    def s2(self):
        print("a={}".format(self.ctx.a))
        print("r2={}".format(self.ctx.r2))
        self.out("r1", self.ctx.r2['r2'])


if __name__ == '__main__':
    five = Int(5)
    r1 = f1(five)

    run(F1, inp=five)
    R1 = run(F1WaitFor, inp=five)['r1']
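# ``f1``, ``F1`` and ``long_running`` are assumed to be defined earlier in
# this script; a hypothetical sketch of the awaited workfunction (not from
# the original source):
#
#     @workfunction
#     def long_running(a):
#         return {'r2': a}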
def test_run(self):
    self.assertTrue(run(simple_wf)['result'])
    self.assertTrue(run(return_input, get_true_node())['result'])
# (fragment: ``alat`` and ``code`` are defined earlier in the script)
cell = [
    [alat, 0., 0.],
    [0., alat, 0.],
    [0., 0., alat],
]

# BaTiO3 cubic structure
s = StructureData(cell=cell)
s.append_atom(position=(0., 0., 0.), symbols=['Ba'])
s.append_atom(position=(alat / 2., alat / 2., alat / 2.), symbols=['Ti'])
s.append_atom(position=(alat / 2., alat / 2., 0.), symbols=['O'])
s.append_atom(position=(alat / 2., 0., alat / 2.), symbols=['O'])
s.append_atom(position=(0., alat / 2., alat / 2.), symbols=['O'])

g = Group.create(name="input_group")
g.add_nodes(s.store())

w = TestWorkChain
run(w, structure=s.store(), code=code)
def test_workfunction_run(self):
    result = run(add, a=self.a, b=self.b)
    self.assertEquals(result, self.result)
def test_scf_wc_Cu_simple(self):
    """
    simple Cu noSOC, FP, lmax2 full example using scf workflow
    """
    from aiida.orm import Code, load_node, DataFactory
    from aiida.work import run
    from aiida_kkr.tools.kkr_params import kkrparams
    from aiida_kkr.workflows.kkr_scf import kkr_scf_wc
    from pprint import pprint
    from scipy import array

    ParameterData = DataFactory('parameter')
    StructureData = DataFactory('structure')

    alat = 6.83  # in a_Bohr
    abohr = 0.52917721067  # conversion factor to Angstroem units

    # bravais vectors
    bravais = array([[0.5, 0.5, 0.0], [0.5, 0.0, 0.5], [0.0, 0.5, 0.5]])

    a = 0.5 * alat * abohr
    Cu = StructureData(cell=[[a, a, 0.0], [a, 0.0, a], [0.0, a, a]])
    Cu.append_atom(position=[0.0, 0.0, 0.0], symbols='Cu')
    Cu.store()
    print(Cu)

    # Here we create a parameter node for the workflow input (workflow
    # specific parameters) and adjust the convergence criterion.
    wfd = kkr_scf_wc.get_wf_defaults()
    wfd['convergence_criterion'] = 10**-4
    wfd['check_dos'] = False
    wfd['kkr_runmax'] = 5
    wfd['nsteps'] = 50
    wfd['queue_name'] = ''
    wfd['resources']['num_machines'] = 1
    wfd['use_mpi'] = False  # True
    wfd['num_rerun'] = 2
    wfd['natom_in_cls_min'] = 20
    KKRscf_wf_parameters = ParameterData(dict=wfd)

    # The scf-workflow also needs the voronoi and KKR codes to be able to
    # run the calculations
    VoroCode = Code.get_from_string('voronoi@my_mac')
    KKRCode = Code.get_from_string('KKRcode@my_mac')

    # Finally we use the kkrparams class to prepare a valid set of KKR
    # parameters that are stored as a ParameterData object for use in aiida
    ParaNode = ParameterData(dict=kkrparams(
        LMAX=2, RMAX=7, GMAX=65, NSPIN=1, RCLUSTZ=1.9).get_dict())

    label = 'KKR-scf for Cu bulk'
    descr = 'KKR self-consistency workflow for Cu bulk'
    try:
        out = run(kkr_scf_wc, structure=Cu, calc_parameters=ParaNode,
                  voronoi=VoroCode, kkr=KKRCode,
                  wf_parameters=KKRscf_wf_parameters,
                  _label=label, _description=descr)
    except:
        print 'some error occurred in run of kkr_scf_wc'
        raise  # re-raise: the checks below need a successful run

    # load node of workflow
    print out
    n = load_node(out[1])

    print '\noutputs of workflow\n-------------------------------------------------'
    pprint(n.get_outputs_dict())

    # get output dictionary
    n = n.get_outputs()[-1]
    out = n.get_dict()
    print '\n\noutput dictionary:\n-------------------------------------------------'
    pprint(out)

    # finally check some output
    print '\n\ncheck values ...\n-------------------------------------------------'

    print 'voronoi_step_success', out['voronoi_step_success']
    assert out['voronoi_step_success']

    print 'kkr_step_success', out['kkr_step_success']
    assert out['kkr_step_success']

    print 'successful', out['successful']
    assert out['successful']

    print 'error', out['errors']
    assert out['errors'] == []

    print 'warning', out['warnings']
    assert out['warnings'] == []

    print 'convergence_reached', out['convergence_reached']
    assert out['convergence_reached']

    print 'convergence_value', out['convergence_value']
    assert out['convergence_value'] < 10**-4

    print 'charge_neutrality', abs(out['charge_neutrality'])
    assert abs(out['charge_neutrality']) < 5 * 10**-4

    print 'used_higher_accuracy', out['used_higher_accuracy']
    assert out['used_higher_accuracy']

    print '\ndone with checks\n'
def main():
    results = work.run(OutlineWorkChain, a=Int(16))
def test_workchain_run(self):
    result = run(AddWorkChain, a=self.a, b=self.b)
    self.assertEquals(result['result'], self.result)
#!/usr/bin/env runaiida
from __future__ import print_function
from aiida.orm.data.bool import Bool
from aiida.orm.data.float import Float
from aiida.orm.data.int import Int
from aiida.work import run

from simple_parent import SimpleParentWorkChain


if __name__ == '__main__':
    result = run(SimpleParentWorkChain, a=Int(1), b=Float(1.2), c=Bool(True))
    print(result)
    # {u'e': 1.2, u'd': 1, u'f': True}
def test_relax_wf(fresh_aiida_env, vasp_params, potentials, mock_vasp):
    """Test submitting only, not correctness, with mocked vasp code."""
    from aiida.orm import WorkflowFactory, Code
    from aiida import work

    rmq_config = None
    runner = work.Runner(poll_interval=0., rmq_config=rmq_config,
                         enable_persistence=True)
    work.set_runner(runner)

    base_wf_proc = WorkflowFactory('vasp.relax')

    mock_vasp.store()
    print(mock_vasp.get_remote_exec_path())
    comp = mock_vasp.get_computer()
    create_authinfo(computer=comp).store()

    structure = PoscarParser(
        file_path=data_path('test_relax_wf', 'inp', 'POSCAR')).get_quantity(
            'poscar-structure', {})['poscar-structure']
    kpoints = KpParser(
        file_path=data_path('test_relax_wf', 'inp', 'KPOINTS')).get_quantity(
            'kpoints-kpoints', {})['kpoints-kpoints']

    incar_add = IncarParser(
        file_path=data_path('test_relax_wf', 'inp', 'INCAR')).get_quantity(
            'incar', {})['incar'].get_dict()
    incar_add = {
        k: v
        for k, v in incar_add.items() if k not in ['isif', 'ibrion']
    }
    incar_add['system'] = 'test-case:test_relax_wf'

    restart_clean_workdir = get_data_node('bool', False)
    restart_clean_workdir.store()

    inputs = AttributeDict()
    inputs.code = Code.get_from_string('mock-vasp@localhost')
    inputs.structure = structure
    inputs.incar_add = get_data_node('parameter', dict=incar_add)
    inputs.kpoints = AttributeDict()
    inputs.kpoints.mesh = kpoints
    inputs.potcar_family = get_data_node('str', POTCAR_FAMILY_NAME)
    inputs.potcar_mapping = get_data_node('parameter', dict=POTCAR_MAP)
    inputs.options = get_data_node(
        'parameter',
        dict={
            'queue_name': 'None',
            'resources': {
                'num_machines': 1,
                'num_mpiprocs_per_machine': 1
            }
        })
    inputs.max_iterations = get_data_node('int', 1)
    inputs.convergence = AttributeDict()
    inputs.convergence.shape = AttributeDict()
    inputs.convergence.on = get_data_node('bool', True)
    inputs.convergence.positions = get_data_node('float', 0.1)
    inputs.restart = AttributeDict()
    inputs.restart.clean_workdir = restart_clean_workdir
    inputs.relax = AttributeDict()

    results = work.run(base_wf_proc, **inputs)

    assert 'relaxed_structure' in results
def test_workchain_builder_run(self):
    builder = AddWorkChain.get_builder()
    builder.a = self.a
    builder.b = self.b
    result = run(builder)
    self.assertEquals(result['result'], self.result)
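# The builder can also be combined with the other launchers shown earlier;
# a minimal sketch, assuming run_get_node accepts a builder like run does:
#
#     result, node = run_get_node(builder)
#     self.assertEquals(result['result'], self.result)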
#!/usr/bin/env runaiida
from __future__ import print_function
from aiida.orm.data.bool import Bool
from aiida.orm.data.float import Float
from aiida.orm.data.int import Int
from aiida.work import run

from complex_parent import ComplexParentWorkChain


if __name__ == '__main__':
    result = run(ComplexParentWorkChain,
                 a=Int(1),
                 child_1=dict(b=Float(1.2), c=Bool(True)),
                 child_2=dict(b=Float(2.3), c=Bool(False)))
    print(result)
    # {
    #     u'e': 1.2,
    #     u'child_1.d': 1, u'child_1.f': True,
    #     u'child_2.d': 1, u'child_2.f': False
    # }
# now run the input generator:
code = Code.get_from_string(codename)
#computer = Computer.get(computer_name)
JobCalc = FleurinputgenCalculation.process()

attrs = {
    'max_wallclock_seconds': 180,
    'resources': {"num_machines": 1},
    'withmpi': False,
    #'computer': computer
}
inp = {'structure': s, 'parameters': parameters, 'code': code}

print('running inpgen')
f = run(JobCalc, _options=attrs, **inp)

fleurinp = f['fleurinpData']
fleurinpd = load_node(fleurinp.pk)

# now run a Fleur calculation on top of an inputgen calculation
code = Code.get_from_string(codename2)
JobCalc = FleurCalculation.process()

attrs = {
    'max_wallclock_seconds': 180,
    'resources': {"num_machines": 1}
}
inp1 = {'code': code, 'fleurinpdata': fleurinpd}  # 'parent': parent_calc,

print('running Fleur')
f1 = run(JobCalc, _options=attrs, **inp1)

print('copper example run was successful, check the results in your DB')
print('Hint: Fleur ran for just 9 iterations; check if convergence was already reached (No)')