def test_process_state(self):
    """Check the process-state properties of a newly created bare Calculation.

    A fresh, unexecuted calculation must not report any terminal state.
    """
    calculation = Calculation()

    # assertEquals is deprecated; assertFalse states the intent directly
    self.assertFalse(calculation.is_terminated)
    self.assertFalse(calculation.is_excepted)
    self.assertFalse(calculation.is_killed)
    self.assertFalse(calculation.is_finished)
    self.assertFalse(calculation.is_finished_ok)
    self.assertFalse(calculation.is_failed)
def test_addremovenodes(self):
    """Test the ``group addnodes`` and ``group removenodes`` commands."""
    from aiida.orm.calculation import Calculation

    calc = Calculation()
    calc._set_attr("attr1", "OK")  # pylint: disable=protected-access
    calc._set_attr("attr2", "OK")  # pylint: disable=protected-access
    calc.store()

    result = self.runner.invoke(group_addnodes, ["--force", "--group=dummygroup1", calc.uuid])
    self.assertIsNone(result.exception)

    # Check that the node now shows up in the group via the group show command
    result = self.runner.invoke(group_show, ["dummygroup1"])
    self.assertIsNone(result.exception)
    self.assertIn("Calculation", result.output)
    self.assertIn(str(calc.pk), result.output)

    # Remove the same node again
    result = self.runner.invoke(group_removenodes, ["--force", "--group=dummygroup1", calc.uuid])
    self.assertIsNone(result.exception)

    # Check that the node no longer appears in the group show output
    result = self.runner.invoke(group_show, ["dummygroup1"])
    self.assertIsNone(result.exception)
    self.assertNotIn("Calculation", result.output)
    self.assertNotIn(str(calc.pk), result.output)
def test_calculation_updatable_not_copied(self):
    """Check that updatable attributes of Calculation are not copied."""
    original = Calculation()
    original._set_attr('state', self.stateval)
    original.store()

    clone = original.copy()

    # The updatable 'state' attribute must not survive the copy
    with self.assertRaises(AttributeError):
        clone.get_attr('state')
def test_db_log_handler(self):
    """Verify that the DB log handler is attached correctly.

    Fire a log message through the regular ``logging`` module via the logger
    attached to a calculation node, and check what ends up in the database.
    """
    message = 'Testing logging of critical failure'
    calc = Calculation()

    # Firing a log for an unstored node should not end up in the database
    calc.logger.critical(message)

    logs = self._backend.log.find()
    # assertEquals is deprecated in favour of assertEqual
    self.assertEqual(len(logs), 0)

    # After storing the node, logs above the log level should be stored
    calc.store()
    calc.logger.critical(message)

    logs = self._backend.log.find()
    self.assertEqual(len(logs), 1)
    self.assertEqual(logs[0].message, message)
def test_db_log_handler(self):
    """Verify that the DB log handler is attached correctly.

    Fire a log message through the regular ``logging`` module via the logger
    attached to a calculation node, and check what ends up in the database.
    """
    message = 'Testing logging of critical failure'
    calc = Calculation()

    # Make sure that global logging is not accidentally disabled
    logging.disable(logging.NOTSET)

    # Firing a log for an unstored node should not end up in the database
    calc.logger.critical(message)

    logs = self._backend.log.find()
    # assertEquals is deprecated in favour of assertEqual
    self.assertEqual(len(logs), 0)

    # After storing the node, logs above the log level should be stored
    calc.store()
    calc.logger.critical(message)

    logs = self._backend.log.find()
    self.assertEqual(len(logs), 1)
    self.assertEqual(logs[0].message, message)
def setUpClass(cls):
    """Populate the database with test fixtures for the REST API tests.

    Besides the standard setup we need to add a few more objects to the
    database to be able to explore different requests/filters/orderings etc.
    """
    # Call parent setUpClass method
    super(RESTApiTestCase, cls).setUpClass()

    # Connect the app and the api. N.B. respect the order: the api is
    # initialized by connecting it to the app (api.__init__).
    kwargs = dict(
        PREFIX=cls._url_prefix,
        PERPAGE_DEFAULT=cls._PERPAGE_DEFAULT,
        LIMIT_DEFAULT=cls._LIMIT_DEFAULT)

    cls.app = App(__name__)
    cls.app.config['TESTING'] = True
    # Instantiating the api registers its routes on the app as a side effect
    api = AiidaApi(cls.app, **kwargs)

    # Create test inputs
    cell = ((2., 0., 0.), (0., 2., 0.), (0., 0., 2.))
    structure = StructureData(cell=cell)
    structure.append_atom(position=(0., 0., 0.), symbols=['Ba'])
    structure.store()

    cif = CifData(ase=structure.get_ase())
    cif.store()

    parameter1 = ParameterData(dict={"a": 1, "b": 2})
    parameter1.store()

    parameter2 = ParameterData(dict={"c": 3, "d": 4})
    parameter2.store()

    kpoint = KpointsData()
    kpoint.set_kpoints_mesh([4, 4, 4])
    kpoint.store()

    calc = Calculation()
    calc._set_attr("attr1", "OK")
    calc._set_attr("attr2", "OK")
    calc.store()

    calc.add_link_from(structure)
    calc.add_link_from(parameter1)
    kpoint.add_link_from(calc, link_type=LinkType.CREATE)

    calc1 = Calculation()
    calc1.store()

    from aiida.orm.computer import Computer

    dummy_computers = [{
        "name": "test1",
        "hostname": "test1.epfl.ch",
        "transport_type": "ssh",
        "scheduler_type": "pbspro",
    }, {
        "name": "test2",
        "hostname": "test2.epfl.ch",
        "transport_type": "ssh",
        "scheduler_type": "torque",
    }, {
        "name": "test3",
        "hostname": "test3.epfl.ch",
        "transport_type": "local",
        "scheduler_type": "slurm",
    }, {
        "name": "test4",
        "hostname": "test4.epfl.ch",
        "transport_type": "ssh",
        "scheduler_type": "slurm",
    }]

    for dummy_computer in dummy_computers:
        computer = Computer(**dummy_computer)
        computer.store()

    # Prepare typical REST responses
    cls.process_dummy_data()
def setUpClass(cls):
    """Populate the database with test fixtures for the REST API tests.

    Besides the standard setup we need to add a few more objects to the
    database to be able to explore different requests/filters/orderings etc.
    """
    # Call parent setUpClass method
    super(RESTApiTestCase, cls).setUpClass()

    # Create test inputs
    cell = ((2., 0., 0.), (0., 2., 0.), (0., 0., 2.))
    structure = StructureData(cell=cell)
    structure.append_atom(position=(0., 0., 0.), symbols=['Ba'])
    structure.store()

    parameter1 = ParameterData(dict={"a": 1, "b": 2})
    parameter1.store()

    parameter2 = ParameterData(dict={"c": 3, "d": 4})
    parameter2.store()

    kpoint = KpointsData()
    kpoint.set_kpoints_mesh([4, 4, 4])
    kpoint.store()

    calc = Calculation()
    calc._set_attr("attr1", "OK")
    calc._set_attr("attr2", "OK")
    calc.store()

    calc.add_link_from(structure)
    calc.add_link_from(parameter1)
    kpoint.add_link_from(calc, link_type=LinkType.CREATE)

    calc1 = Calculation()
    calc1.store()

    from aiida.orm.computer import Computer

    dummy_computers = [{
        "name": "test1",
        "hostname": "test1.epfl.ch",
        "transport_type": "ssh",
        "scheduler_type": "pbspro",
    }, {
        "name": "test2",
        "hostname": "test2.epfl.ch",
        "transport_type": "ssh",
        "scheduler_type": "torque",
    }, {
        "name": "test3",
        "hostname": "test3.epfl.ch",
        "transport_type": "local",
        "scheduler_type": "slurm",
    }, {
        "name": "test4",
        "hostname": "test4.epfl.ch",
        "transport_type": "ssh",
        "scheduler_type": "slurm",
    }]

    for dummy_computer in dummy_computers:
        computer = Computer(**dummy_computer)
        computer.store()

    # Prepare typical REST responses
    cls.process_dummy_data()
def test_calculation_updatable_attribute(self):
    """Check that updatable attributes, and only those, can be mutated for a
    stored but unsealed Calculation."""
    a = Calculation()
    attrs_to_set = {
        'bool': self.boolval,
        'integer': self.intval,
        'float': self.floatval,
        'string': self.stringval,
        'dict': self.dictval,
        'list': self.listval,
        'state': self.stateval,
    }

    # dict.items() works on both Python 2 and 3, unlike iteritems()
    for k, v in attrs_to_set.items():
        a._set_attr(k, v)

    # Check before storing (assertEquals is deprecated; use assertEqual)
    a._set_attr(Calculation.PROCESS_STATE_KEY, self.stateval)
    self.assertEqual(a.get_attr(Calculation.PROCESS_STATE_KEY), self.stateval)

    a.store()

    # Check after storing
    self.assertEqual(a.get_attr(Calculation.PROCESS_STATE_KEY), self.stateval)

    # I should be able to mutate the updatable attribute but not the others
    a._set_attr(Calculation.PROCESS_STATE_KEY, 'FINISHED')
    a._del_attr(Calculation.PROCESS_STATE_KEY)

    # Deleting a non-existing attribute should raise AttributeError
    with self.assertRaises(AttributeError):
        a._del_attr(Calculation.PROCESS_STATE_KEY)

    with self.assertRaises(ModificationNotAllowed):
        a._set_attr('bool', False)

    with self.assertRaises(ModificationNotAllowed):
        a._del_attr('bool')

    a.seal()

    # After sealing, even updatable attributes should be immutable
    with self.assertRaises(ModificationNotAllowed):
        a._set_attr(Calculation.PROCESS_STATE_KEY, 'FINISHED')

    with self.assertRaises(ModificationNotAllowed):
        a._del_attr(Calculation.PROCESS_STATE_KEY)
def test_stash(self):
    """Test the 'stash' functionality of the rule system."""
    # Create a first calculation with 3 input data nodes:
    c = Calculation().store()
    dins = set()  # To compare later, dins is the set of the input data pks.
    for i in range(3):
        data_in = Data().store()
        dins.add(data_in.id)
        c.add_link_from(data_in, link_type=LinkType.INPUT, label='lala-{}'.format(i))

    # Create output data for that calculation:
    douts = set()  # Similar to dins, this is the set of data output pks
    for i in range(4):
        data_out = Data().store()
        douts.add(data_out.id)
        data_out.add_link_from(c, link_type=LinkType.CREATE, label='lala-{}'.format(i))

    # Add another calculation with one input from c's outputs and one input
    # from c's inputs (the last nodes created in the loops above):
    c2 = Calculation().store()
    c2.add_link_from(data_in, link_type=LinkType.INPUT, label='b')
    c2.add_link_from(data_out, link_type=LinkType.INPUT, label='c')

    # Start from a basket that only contains the first calculation:
    es = get_basket(node_ids=(c.id,))

    # Rule for getting input nodes:
    rule_in = UpdateRule(QueryBuilder().append(
        Node, tag='n').append(
        Node, input_of='n'))

    # Rule for getting output nodes:
    rule_out = UpdateRule(QueryBuilder().append(
        Node, tag='n').append(
        Node, output_of='n'))

    # Testing the input rule. Since I'm updating, I should
    # have the inputs and the calculation itself:
    is_set = rule_in.run(es.copy())['nodes']._set
    self.assertEqual(is_set, dins.union({c.id}))

    # Testing the output rule; outputs + calculation c are expected:
    is_set = rule_out.run(es.copy())['nodes']._set
    self.assertEqual(is_set, douts.union({c.id}))

    # Now test a rule sequence:
    # first apply the rule to get outputs, then the rule to get inputs.
    rs1 = RuleSequence((rule_out, rule_in))
    is_set = rs1.run(es.copy())['nodes']._set
    # I expect the union of inputs, outputs, and the calculation:
    self.assertEqual(is_set, douts.union(dins).union({c.id}))

    # If the order of the rules is exchanged, c2 is also attached to the
    # results, because c and c2 share one data input:
    rs2 = RuleSequence((rule_in, rule_out))
    is_set = rs2.run(es.copy())['nodes']._set
    self.assertEqual(is_set, douts.union(dins).union({c.id, c2.id}))

    # Test a similar rule, but with the possibility to stash the results:
    stash = es.copy(with_data=False)
    rsave = RuleSaveWalkers(stash)

    # Check that the rule does the right thing, i.e. if I stash the result,
    # the active walkers should be an empty set:
    self.assertEqual(rsave.run(es.copy()), es.copy(with_data=False))
    # Whereas the stash contains the same data as the starting point:
    self.assertEqual(stash, es)

    rs2 = RuleSequence((
        RuleSaveWalkers(stash), rule_in, RuleSetWalkers(stash), rule_out))
    is_set = rs2.run(es.copy())['nodes']._set
    # Now test that the stash does the right thing,
    # namely not including c2 in the results:
    self.assertEqual(is_set, douts.union(dins).union({c.id}))