def test_run(self):
    """Run ``Wf`` through its if/elif/else outline branches and verify which steps executed."""
    value_a, value_b, value_c = Str('A'), Str('B'), Str('C')
    three = Int(3)

    def assert_steps_called(steps, expected_skipped):
        # Every step outside ``expected_skipped`` must have been executed by the workflow.
        for step, finished in steps.items():
            if step not in expected_skipped:
                self.assertTrue(finished, 'Step {} was not called by workflow'.format(step))

    # Try the if(..) part; note this branch inspects the steps recorded on the class itself
    launch.run(Wf, value=value_a, n=three)
    assert_steps_called(Wf.finished_steps, ['s3', 's4', 'isB'])

    # Try the elif(..) part
    assert_steps_called(launch.run(Wf, value=value_b, n=three), ['isA', 's2', 's4'])

    # Try the else... part
    assert_steps_called(launch.run(Wf, value=value_c, n=three), ['isA', 's2', 'isB', 's3'])
def test_issue_1741_expose_inputs(self):
    """Test that expose inputs works correctly when copying a stored default value.

    .. note:: a node instance is used for a port default, which is normally not advisable,
        but given that this regression test relies on the default being a stored node, we
        cannot change it. Given that the default is only used within this test, it should
        not pose any problems.
    """

    class ParentWorkChain(WorkChain):

        @classmethod
        def define(cls, spec):
            super().define(spec)
            # The default is deliberately a *stored* node, see the note above.
            spec.input('a', default=Int(5).store())
            spec.outline(cls.step1)

        def step1(self):
            pass

    class ChildWorkChain(WorkChain):

        @classmethod
        def define(cls, spec):
            super().define(spec)
            # Exposing the parent's inputs copies the stored default port.
            spec.expose_inputs(ParentWorkChain)
            spec.outline(cls.step1)

        def step1(self):
            pass

    launch.run(ChildWorkChain)
def test_issue_1741_expose_inputs(self):
    """Test that expose inputs works correctly when copying a stored default value"""
    stored_a = Int(5).store()

    class Parent(WorkChain):

        @classmethod
        def define(cls, spec):
            # Zero-argument `super()` (Python 3) for consistency with the other
            # `define` implementations in this file.
            super().define(spec)
            spec.input('a', default=stored_a)
            spec.outline(cls.step1)

        def step1(self):
            pass

    class Child(WorkChain):

        @classmethod
        def define(cls, spec):
            super().define(spec)
            # Exposing the parent's inputs copies the stored-default port.
            spec.expose_inputs(Parent)
            spec.outline(cls.step1)

        def step1(self):
            pass

    launch.run(Child)
def run_async():
    # Generator-style coroutine (pre async/await). NOTE(review): `process` and
    # `self` are closure variables from the enclosing test method — confirm
    # against the surrounding code, which is not visible here.
    # Wait until the process has actually reached the paused state.
    yield run_until_paused(process)
    self.assertTrue(process.paused)
    # Kill the paused process; this closes it...
    process.kill()
    # ...so any subsequent attempt to run it must raise `ClosedError`.
    with self.assertRaises(plumpy.ClosedError):
        launch.run(process)
def test_serialize_builder(self):
    """Test serialization when the workchain inputs are set through a builder."""
    loader = ObjectLoader()
    builder = SerializeWorkChain.get_builder()
    builder.reference = Str(loader.identify_object(Int))
    builder.test = Int
    launch.run(builder)
def test_define_not_calling_super(self):
    """A `WorkChain` that does not call super in `define` classmethod should raise."""

    class BrokenDefineWorkChain(WorkChain):

        @classmethod
        def define(cls, spec):
            # Deliberately omits the required call to super().define(spec).
            pass

    with self.assertRaises(AssertionError):
        launch.run(BrokenDefineWorkChain)
def run_and_check_success(process_class, **kwargs):
    """Instantiate the process class, execute it and assert that it finished successfully.

    :param process_class: the process class to instantiate
    :param kwargs: keyword arguments that are used as the inputs of the process
    :returns: instance of process
    """
    instance = process_class(inputs=kwargs)
    launch.run(instance)
    assert instance.node.is_finished_ok is True
    return instance
def test_define_not_calling_super(self):
    """A `CalcJob` that does not call super in `define` classmethod should raise."""

    class IncompleteDefineCalcJob(CalcJob):
        """Test class whose `define` deliberately skips the super call."""

        @classmethod
        def define(cls, spec):
            pass

        def prepare_for_submission(self, folder):
            pass

    # Running the class must fail because the spec was never initialized by super.
    with self.assertRaises(AssertionError):
        launch.run(IncompleteDefineCalcJob)
def test_simple_run(self):
    """Run the workchain which should hit the exception and therefore end up in the EXCEPTED state."""
    process = TestWorkChainAbortChildren.MainWorkChain()

    with Capturing():
        with self.assertRaises(RuntimeError):
            launch.run(process)

    # Use assertTrue/assertFalse instead of assertEqual(x, True/False): clearer
    # intent and better failure messages for boolean node properties.
    self.assertFalse(process.node.is_finished_ok)
    self.assertTrue(process.node.is_excepted)
    self.assertFalse(process.node.is_killed)
def test_workchain_builder_run(self):
    """Running the workchain through a populated builder should yield the expected sum."""
    builder = AddWorkChain.get_builder()
    builder.term_a, builder.term_b = self.term_a, self.term_b
    output = launch.run(builder)
    self.assertEqual(output['result'], self.result)
def test_expose(self):
    """Run the parent workchain with exposed sub-inputs and check the collected outputs."""
    res = launch.run(
        ParentExposeWorkChain,
        a=Int(1),
        sub_1={
            'b': Float(2.3),
            'c': Bool(True)
        },
        sub_2={
            'b': Float(1.2),
            'sub_3': {
                'c': Bool(False)
            }
        },
    )
    # `assertEquals` is a deprecated alias (removed in Python 3.12); use `assertEqual`.
    self.assertEqual(
        res, {
            'a': Float(2.2),
            'sub_1': {
                'b': Float(2.3),
                'c': Bool(True)
            },
            'sub_2': {
                'b': Float(1.2),
                'sub_3': {
                    'c': Bool(False)
                }
            }
        })
def test_exception_presubmit(self):
    """Test that an exception in the presubmit circumvents the exponential backoff and just excepts the process.

    The `presubmit` call of the `CalcJob` is now called in `aiida.engine.processes.calcjobs.tasks.task_upload_job`
    which is wrapped in the exponential backoff mechanism. The latter was introduced to recover from transient
    problems such as connection problems during the actual upload to a remote machine. However, it should not catch
    exceptions from the `presubmit` call which are not actually transient and thus not automatically recoverable. In
    this case the process should simply except. Here we test this by mocking the presubmit to raise an exception and
    check that it is bubbled up and the process does not end up in a paused state.
    """
    from aiida.engine.processes.calcjobs.tasks import PreSubmitException

    # The mocked presubmit raises; the exception must bubble up instead of
    # being swallowed by the exponential backoff mechanism.
    with self.assertRaises(PreSubmitException) as exception_context:
        launch.run(ArithmeticAddCalculation, code=self.remote_code, **self.inputs)

    self.assertIn('exception occurred in presubmit call', str(exception_context.exception))
def test_invalid_options_type(self):
    """Verify that passing an invalid type to `metadata.options` raises a `TypeError`."""

    class MinimalCalcJob(CalcJob):
        """Minimal `CalcJob` implementation used only to probe input validation."""

        @classmethod
        def define(cls, spec):
            super().define(spec)

        def prepare_for_submission(self, folder):
            pass

    # The `metadata.options` input expects a plain dict; passing a `Dict` node must fail.
    invalid_options = orm.Dict(dict={'a': 1})

    with self.assertRaises(TypeError):
        launch.run(MinimalCalcJob, code=self.remote_code, metadata={'options': invalid_options})
def test_run_base_class(self):
    """Verify that it is impossible to run, submit or instantiate a base `WorkChain` class."""
    with self.assertRaises(exceptions.InvalidOperation):
        WorkChain()

    with self.assertRaises(exceptions.InvalidOperation):
        launch.run(WorkChain)

    # Use the `run_get_node`/`run_get_pk` functions directly, consistent with the
    # equivalent `CalcJob` base-class test in this file.
    with self.assertRaises(exceptions.InvalidOperation):
        launch.run_get_node(WorkChain)

    with self.assertRaises(exceptions.InvalidOperation):
        launch.run_get_pk(WorkChain)

    with self.assertRaises(exceptions.InvalidOperation):
        launch.submit(WorkChain)
def test_nested_expose(self):
    """Check that inputs exposed through two levels of nesting are propagated to the outputs."""
    inputs = {
        'sub': {
            'sub': {
                'a': Int(1),
                'sub_1': {
                    'b': Float(2.3),
                    'c': Bool(True)
                },
                'sub_2': {
                    'b': Float(1.2),
                    'sub_3': {
                        'c': Bool(False)
                    }
                },
            }
        }
    }
    res = launch.run(GrandParentExposeWorkChain, **inputs)
    self.assertEqual(
        res, {
            'sub': {
                'sub': {
                    'a': Float(2.2),
                    'sub_1': {
                        'b': Float(2.3),
                        'c': Bool(True)
                    },
                    'sub_2': {
                        'b': Float(1.2),
                        'sub_3': {
                            'c': Bool(False)
                        }
                    }
                }
            }
        })
def test_run_base_class(self):
    """Verify that it is impossible to run, submit or instantiate a base `CalcJob` class."""
    with self.assertRaises(exceptions.InvalidOperation):
        CalcJob()

    # Every launcher must refuse the abstract base class.
    for launcher in (launch.run, launch.run_get_node, launch.run_get_pk, launch.submit):
        with self.assertRaises(exceptions.InvalidOperation):
            launcher(CalcJob)
def test_bandevaluation(configure_with_daemon, band_difference_builder):  # pylint: disable=unused-argument,redefined-outer-name
    """Run the band evaluation workflow and check that the cost value is zero."""
    from aiida.engine.launch import run

    output = run(band_difference_builder)
    assert np.isclose(output['cost_value'].value, 0.)
def test_window_search(configure_with_daemon, window_search_builder):  # pylint: disable=unused-argument,redefined-outer-name
    """Run a window_search on the sample wannier input folder."""
    from aiida.engine.launch import run

    result = run(window_search_builder)
    # All four expected outputs must be present in the result.
    expected_keys = ('cost_value', 'tb_model', 'window', 'plot')
    assert all(key in result for key in expected_keys)
def test_out_unstored(self):
    """Calling `self.out` on an unstored `Node` should raise.

    It indicates that users created new data whose provenance will be lost.
    """

    class UnstoredOutputWorkChain(WorkChain):

        @classmethod
        def define(cls, spec):
            super().define(spec)
            spec.outline(cls.emit_unstored)
            spec.outputs.dynamic = True

        def emit_unstored(self):
            # The freshly created node is never stored: this must be rejected.
            self.out('not_allowed', orm.Int(2))

    with self.assertRaises(ValueError):
        launch.run(UnstoredOutputWorkChain)
def test_simple_log(self):
    """Run `YamboRestartWf` and verify that the retrieved folder is among the results."""
    from aiida.engine.launch import run
    # Removed unused imports: only `Str` is actually used in this test.
    from aiida.orm.nodes import Str
    from aiida_yambo.workflows.yamborestart import YamboRestartWf

    p2y_result = run(
        YamboRestartWf,
        precode=Str('p2y'),
        yambocode=Str('yambo'),
        parameters=self.parameters,
        calculation_set=self.yambo_calc_set,
        parent_folder=self.remote_folder,
        settings=self.yambo_settings)
    assert 'retrieved' in p2y_result
def process_run(self, **kwargs): """Finally launch the code. Routine associated to the running of the ``bigdft`` executable. """ # check if the debug file will be updated (case of erroneous run) timedbg = self._get_debugfile_date() # Run the command out = launch.run(self.job, code=self.code, metadata=self.metadata) name = self.run_options.get('name', '') self.outputs[name] = out logname = out['retrieved']._repository._get_base_folder().get_abs_path( self._get_logname()) return {'timedbg': timedbg, 'logname': logname}
def test_launchers_dry_run_no_provenance(self):
    """Test the launchers in `dry_run` mode with `store_provenance=False`."""
    from aiida.plugins import CalculationFactory

    calculation_class = CalculationFactory('arithmetic.add')

    code = orm.Code(
        input_plugin_name='arithmetic.add', remote_computer_exec=[self.computer, '/bin/true']).store()

    inputs = {
        'code': code,
        'x': orm.Int(1),
        'y': orm.Int(1),
        'metadata': {
            'dry_run': True,
            'store_provenance': False,
            'options': {
                'resources': {
                    'num_machines': 1,
                    'num_mpiprocs_per_machine': 1
                }
            }
        }
    }

    # A dry run computes nothing, so the results are always empty.
    results = launch.run(calculation_class, **inputs)
    self.assertEqual(results, {})

    # Without provenance no node is stored, hence there is no pk either.
    results, pk = launch.run_get_pk(calculation_class, **inputs)
    self.assertEqual(results, {})
    self.assertIsNone(pk)

    # The node exists in memory but is never stored; the dry-run info is attached to it.
    results, node = launch.run_get_node(calculation_class, **inputs)
    self.assertEqual(results, {})
    self.assertIsInstance(node, orm.CalcJobNode)
    self.assertFalse(node.is_stored)
    self.assertIsInstance(node.dry_run_info, dict)
    for key in ('folder', 'script_filename'):
        self.assertIn(key, node.dry_run_info)

    # `submit` forwards to `run` in dry-run mode and returns the unstored node.
    node = launch.submit(calculation_class, **inputs)
    self.assertIsInstance(node, orm.CalcJobNode)
    self.assertFalse(node.is_stored)
def test_launchers_dry_run(self):
    """All launchers should work with `dry_run=True`, even `submit` which forwards to `run`."""
    from aiida.plugins import CalculationFactory

    calculation_class = CalculationFactory('arithmetic.add')

    code = orm.Code(
        input_plugin_name='arithmetic.add', remote_computer_exec=[self.computer, '/bin/true']).store()

    inputs = {
        'code': code,
        'x': orm.Int(1),
        'y': orm.Int(1),
        'metadata': {
            'dry_run': True,
            'options': {
                'resources': {
                    'num_machines': 1,
                    'num_mpiprocs_per_machine': 1
                }
            }
        }
    }

    # A dry run computes nothing, so the results are always empty.
    results = launch.run(calculation_class, **inputs)
    self.assertEqual(results, {})

    # With provenance enabled the node is stored, so a valid pk is returned.
    results, pk = launch.run_get_pk(calculation_class, **inputs)
    self.assertEqual(results, {})
    self.assertIsInstance(pk, int)

    # The stored node carries the dry-run information.
    results, node = launch.run_get_node(calculation_class, **inputs)
    self.assertEqual(results, {})
    self.assertIsInstance(node, orm.CalcJobNode)
    self.assertIsInstance(node.dry_run_info, dict)
    for key in ('folder', 'script_filename'):
        self.assertIn(key, node.dry_run_info)

    # `submit` forwards to `run` in dry-run mode and returns the node.
    node = launch.submit(calculation_class, **inputs)
    self.assertIsInstance(node, orm.CalcJobNode)
def test_run_pointless_workchain(self):
    """Running the pointless workchain should not incur any exceptions"""
    # Smoke test: simply running the workchain to completion is the assertion.
    launch.run(TestWorkChainMisc.PointlessWorkChain)
def test_calcfunction_run(self):
    """Running the `add` calcfunction should produce the expected sum."""
    output = launch.run(add, term_a=self.term_a, term_b=self.term_b)
    self.assertEqual(output, self.result)
def test_global_submit_raises(self):
    """Using top-level submit should raise."""
    # Presumably `IllegalSubmitWorkChain` calls the module-level `submit` from
    # within a step (its definition is not visible here — verify); doing so is
    # forbidden and must raise `InvalidOperation`.
    with self.assertRaises(exceptions.InvalidOperation):
        launch.run(TestWorkChainMisc.IllegalSubmitWorkChain)
def test_workchain_run(self):
    """Running the `AddWorkChain` directly should produce the expected sum."""
    output = launch.run(AddWorkChain, term_a=self.term_a, term_b=self.term_b)
    self.assertEqual(output['result'], self.result)
def execute(args):
    """
    The main execution of the script, which will run some preliminary checks on the command
    line arguments before passing them to the workchain and running it
    """
    code = load_code(args.codename)
    stm_code = load_code(args.stm_codename)
    height = Float(args.height)
    e1 = Float(args.e1)
    e2 = Float(args.e2)
    protocol = Str(args.protocol)

    alat = 15.  # angstrom
    cell = [
        [alat, 0., 0.],
        [0., alat, 0.],
        [0., 0., alat],
    ]

    # Benzene molecule
    #
    s = StructureData(cell=cell)

    def perm(x, y, z):
        # Permute the coordinates and center the molecule in the cell.
        return (z, y + 0.5 * alat, x + 0.5 * alat)

    benzene_atoms = [
        ((0.000, 0.000, 0.468), 'H'),
        ((0.000, 0.000, 1.620), 'C'),
        ((0.000, -2.233, 1.754), 'H'),
        ((0.000, 2.233, 1.754), 'H'),
        ((0.000, -1.225, 2.327), 'C'),
        ((0.000, 1.225, 2.327), 'C'),
        ((0.000, -1.225, 3.737), 'C'),
        ((0.000, 1.225, 3.737), 'C'),
        ((0.000, -2.233, 4.311), 'H'),
        ((0.000, 2.233, 4.311), 'H'),
        ((0.000, 0.000, 4.442), 'C'),
        ((0.000, 0.000, 5.604), 'H'),
    ]
    for position, symbol in benzene_atoms:
        s.append_atom(position=perm(*position), symbols=[symbol])

    # A positive pk on the command line selects a stored structure, otherwise use benzene.
    structure = load_node(args.structure) if args.structure > 0 else s

    run(SiestaSTMWorkChain,
        code=code,
        stm_code=stm_code,
        structure=structure,
        protocol=protocol,
        height=height,
        e1=e1,
        e2=e2)
def test_workchain_builder_run(self):
    """Run the `AddWorkChain` through its builder and verify the result output."""
    builder = AddWorkChain.get_builder()
    builder.a = self.a
    builder.b = self.b
    result = launch.run(builder)
    # `assertEquals` is a deprecated alias (removed in Python 3.12); use `assertEqual`.
    self.assertEqual(result['result'], self.result)
def test_persisting(self):
    """Run the `Wf` workchain with an explicitly obtained runner."""
    # NOTE(review): `persister` is created but never wired into the runner or
    # workchain — confirm whether it should be passed along or can be removed.
    persister = plumpy.test_utils.TestPersister()
    runner = get_manager().get_runner()
    workchain = Wf(runner=runner)
    launch.run(workchain)