def populate_restapi_database(clear_database_before_test):
    """Populates the database with a considerable set of nodes to test the restAPI"""
    # pylint: disable=unused-argument
    from aiida import orm

    struct_forcif = orm.StructureData().store()
    orm.StructureData().store()
    orm.StructureData().store()

    orm.Dict().store()
    orm.Dict().store()

    orm.CifData(ase=struct_forcif.get_ase()).store()

    orm.KpointsData().store()

    orm.FolderData().store()

    orm.CalcFunctionNode().store()
    orm.CalcJobNode().store()
    orm.CalcJobNode().store()

    orm.WorkFunctionNode().store()
    orm.WorkFunctionNode().store()
    orm.WorkChainNode().store()
def test_parser_get_outputs_for_parsing(self):
    """Make sure that the `get_outputs_for_parsing` method returns the correct output nodes."""
    ArithmeticAddCalculation.define = CustomCalcJob.define

    node = orm.CalcJobNode(computer=self.computer, process_type=CustomCalcJob.build_process_type())
    node.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    node.set_option('max_wallclock_seconds', 1800)
    node.store()

    retrieved = orm.FolderData().store()
    retrieved.add_incoming(node, link_type=LinkType.CREATE, link_label='retrieved')

    output = orm.Data().store()
    output.add_incoming(node, link_type=LinkType.CREATE, link_label='output')

    parser = ArithmeticAddParser(node)
    outputs_for_parsing = parser.get_outputs_for_parsing()
    self.assertIn('retrieved', outputs_for_parsing)
    self.assertEqual(outputs_for_parsing['retrieved'].uuid, retrieved.uuid)
    self.assertIn('output', outputs_for_parsing)
    self.assertEqual(outputs_for_parsing['output'].uuid, output.uuid)
def test_calc_job_node_get_builder_restart(self):
    """Test the `CalcJobNode.get_builder_restart` method."""
    original = orm.CalcJobNode(
        computer=self.computer, process_type='aiida.calculations:arithmetic.add', label='original')
    original.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    original.set_option('max_wallclock_seconds', 1800)
    original.add_incoming(orm.Int(1).store(), link_type=LinkType.INPUT_CALC, link_label='x')
    original.add_incoming(orm.Int(2).store(), link_type=LinkType.INPUT_CALC, link_label='y')
    original.store()

    builder = original.get_builder_restart()

    self.assertIn('x', builder)
    self.assertIn('y', builder)
    self.assertIn('metadata', builder)
    self.assertIn('options', builder.metadata)
    self.assertEqual(builder.x, orm.Int(1))
    self.assertEqual(builder.y, orm.Int(2))
    self.assertDictEqual(builder.metadata.options, original.get_options())
def test_parser_exit_codes(self):
    """Ensure that exit codes from the `CalcJob` can be retrieved through the parser instance."""
    node = orm.CalcJobNode(computer=self.computer, process_type=ArithmeticAddCalculation.build_process_type())
    parser = ArithmeticAddParser(node)
    self.assertEqual(parser.exit_codes, ArithmeticAddCalculation.spec().exit_codes)
def test_nodes_in_group(self, temp_dir):
    """
    This test checks that nodes that belong to a specific group are
    correctly imported and exported.
    """
    from aiida.common.links import LinkType

    # Create another user
    new_email = '[email protected]'
    user = orm.User(email=new_email)
    user.store()

    # Create a structure data node that has a calculation as output
    sd1 = orm.StructureData()
    sd1.user = user
    sd1.label = 'sd1'
    sd1.store()

    jc1 = orm.CalcJobNode()
    jc1.computer = self.computer
    jc1.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    jc1.user = user
    jc1.label = 'jc1'
    jc1.add_incoming(sd1, link_type=LinkType.INPUT_CALC, link_label='link')
    jc1.store()
    jc1.seal()

    # Create a group and add the data inside
    gr1 = orm.Group(label='node_group')
    gr1.store()
    gr1.add_nodes([sd1, jc1])
    gr1_uuid = gr1.uuid

    # At this point we export the generated data
    filename1 = os.path.join(temp_dir, 'export1.tar.gz')
    export([sd1, jc1, gr1], outfile=filename1, silent=True)
    n_uuids = [sd1.uuid, jc1.uuid]
    self.clean_db()
    self.insert_data()
    import_data(filename1, silent=True)

    # Check that the imported nodes are correctly imported and that
    # the user assigned to the nodes is the right one
    for uuid in n_uuids:
        self.assertEqual(orm.load_node(uuid).user.email, new_email)

    # Check that the exported group is imported correctly
    builder = orm.QueryBuilder()
    builder.append(orm.Group, filters={'uuid': {'==': gr1_uuid}})
    self.assertEqual(builder.count(), 1, 'The group was not found.')
def test_input_code(self, temp_dir):
    """
    This test checks that when a calculation is exported, the
    corresponding code is also exported. It also checks that the links
    are still in place after the import.
    """
    code_label = 'test_code1'

    code = orm.Code()
    code.set_remote_computer_exec((self.computer, '/bin/true'))
    code.label = code_label
    code.store()

    code_uuid = code.uuid

    calc = orm.CalcJobNode()
    calc.computer = self.computer
    calc.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    calc.add_incoming(code, LinkType.INPUT_CALC, 'code')
    calc.store()
    calc.seal()
    links_count = 1

    export_links = get_all_node_links()
    export_file = os.path.join(temp_dir, 'export.aiida')
    export([calc], filename=export_file)

    self.clean_db()

    import_data(export_file)

    # Check that the code node is there
    self.assertEqual(orm.load_node(code_uuid).label, code_label)

    # Check that the link is in place
    import_links = get_all_node_links()
    self.assertListEqual(sorted(export_links), sorted(import_links))
    self.assertEqual(
        len(export_links), links_count,
        'Expected to find only one link from code to the calculation node before export. {} found.'.format(
            len(export_links)))
    self.assertEqual(
        len(import_links), links_count,
        'Expected to find only one link from code to the calculation node after import. {} found.'.format(
            len(import_links)))
def test_calc_of_structuredata(aiida_profile, tmp_path, file_format):
    """Simple ex-/import of CalcJobNode with input StructureData"""
    aiida_profile.reset_db()

    struct = orm.StructureData()
    struct.store()

    computer = orm.Computer(
        label='localhost-test',
        description='localhost computer set up by test manager',
        hostname='localhost-test',
        workdir=str(tmp_path / 'workdir'),
        transport_type='local',
        scheduler_type='direct')
    computer.store()
    computer.configure()

    calc = orm.CalcJobNode()
    calc.computer = computer
    calc.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})

    calc.add_incoming(struct, link_type=LinkType.INPUT_CALC, link_label='link')
    calc.store()
    calc.seal()

    pks = [struct.pk, calc.pk]

    attrs = {}
    for pk in pks:
        node = orm.load_node(pk)
        attrs[node.uuid] = dict()
        for k in node.attributes.keys():
            attrs[node.uuid][k] = node.get_attribute(k)

    filename = str(tmp_path / 'export.aiida')

    export([calc], filename=filename, file_format=file_format)

    aiida_profile.reset_db()

    import_data(filename)
    for uuid in attrs:
        node = orm.load_node(uuid)
        for k in attrs[uuid].keys():
            assert attrs[uuid][k] == node.get_attribute(k)
def _generate_calc_job_node(entry_point_name, computer, test_name=None, inputs=None, attributes=None):
    """Fixture to generate a mock `CalcJobNode` for testing parsers.

    :param entry_point_name: entry point name of the calculation class
    :param computer: a `Computer` instance
    :param test_name: relative path of directory with test output files in the `fixtures/{entry_point_name}` folder.
    :param inputs: any optional nodes to add as input links to the current CalcJobNode
    :param attributes: any optional attributes to set on the node
    :return: `CalcJobNode` instance with an attached `FolderData` as the `retrieved` node
    """
    import os

    from aiida import orm
    from aiida.common import LinkType
    from aiida.plugins.entry_point import format_entry_point_string

    entry_point = format_entry_point_string('aiida.calculations', entry_point_name)

    node = orm.CalcJobNode(computer=computer, process_type=entry_point)
    node.set_attribute('input_filename', 'aiida.in')
    node.set_attribute('output_filename', 'aiida.out')
    node.set_attribute('error_filename', 'aiida.err')
    node.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    node.set_option('max_wallclock_seconds', 1800)

    if attributes:
        node.set_attributes(attributes)

    if inputs:
        for link_label, input_node in flatten_inputs(inputs):
            input_node.store()
            node.add_incoming(input_node, link_type=LinkType.INPUT_CALC, link_label=link_label)

    node.store()

    if test_name is not None:
        basepath = os.path.dirname(os.path.abspath(__file__))
        filepath = os.path.join(basepath, 'parsers', 'fixtures', entry_point_name[len('quantumespresso.'):], test_name)

        retrieved = orm.FolderData()
        retrieved.put_object_from_tree(filepath)
        retrieved.add_incoming(node, link_type=LinkType.CREATE, link_label='retrieved')
        retrieved.store()

        remote_folder = orm.RemoteData(computer=computer, remote_path='/tmp')
        remote_folder.add_incoming(node, link_type=LinkType.CREATE, link_label='remote_folder')
        remote_folder.store()

    return node
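# A minimal usage sketch for the factory above (not part of the original suite).
# The pytest fixture names `generate_calc_job_node` and `fixture_localhost`, the
# entry point 'quantumespresso.pw' and the test directory name 'default' are
# assumptions for illustration only.
def test_example_parser_fixture(generate_calc_job_node, fixture_localhost):
    """Hypothetical test consuming the mock `CalcJobNode` factory."""
    node = generate_calc_job_node('quantumespresso.pw', fixture_localhost, test_name='default')
    # The factory wires up a `retrieved` FolderData and a `remote_folder` RemoteData
    link_labels = node.get_outgoing().all_link_labels()
    assert 'retrieved' in link_labels
    assert 'remote_folder' in link_labels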
def test_calc_of_structuredata(self, temp_dir):
    """Simple ex-/import of CalcJobNode with input StructureData"""
    from aiida.common.links import LinkType

    struct = orm.StructureData()
    struct.store()

    calc = orm.CalcJobNode()
    calc.computer = self.computer
    calc.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})

    calc.add_incoming(struct, link_type=LinkType.INPUT_CALC, link_label='link')
    calc.store()
    calc.seal()

    pks = [struct.pk, calc.pk]

    attrs = {}
    for pk in pks:
        node = orm.load_node(pk)
        attrs[node.uuid] = dict()
        for k in node.attributes.keys():
            attrs[node.uuid][k] = node.get_attribute(k)

    filename = os.path.join(temp_dir, 'export.aiida')

    export([calc], filename=filename, silent=True)

    self.clean_db()
    self.create_user()

    # NOTE: it is better to load new nodes by uuid, rather than assuming
    # that they will have the first 3 pks. In fact, a recommended policy in
    # databases is that pk always increment, even if you've deleted elements
    import_data(filename, silent=True)
    for uuid in attrs:
        node = orm.load_node(uuid)
        for k in attrs[uuid].keys():
            self.assertEqual(attrs[uuid][k], node.get_attribute(k))
def test_parser_retrieved(self):
    """Verify that the `retrieved` property returns the retrieved `FolderData` node."""
    node = orm.CalcJobNode(computer=self.computer, process_type=ArithmeticAddCalculation.build_process_type())
    node.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    node.set_option('max_wallclock_seconds', 1800)
    node.store()

    retrieved = orm.FolderData().store()
    retrieved.add_incoming(node, link_type=LinkType.CREATE, link_label='retrieved')

    parser = ArithmeticAddParser(node)
    self.assertEqual(parser.node.uuid, node.uuid)
    self.assertEqual(parser.retrieved.uuid, retrieved.uuid)
def test_parse_from_node(self):
    """Test that `parse_from_node` returns a tuple of the parsed output nodes and a calculation node.

    The calculation node represents the parsing process.
    """
    summed = 3
    output_filename = 'aiida.out'

    # Mock the `CalcJobNode` which should have the `retrieved` folder containing the sum in the
    # output file. This is the value that should be parsed into the `sum` output node.
    node = orm.CalcJobNode(computer=self.computer, process_type=ArithmeticAddCalculation.build_process_type())
    node.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    node.set_option('max_wallclock_seconds', 1800)
    node.set_option('output_filename', output_filename)
    node.store()

    retrieved = orm.FolderData()
    retrieved.put_object_from_filelike(io.StringIO('{}'.format(summed)), output_filename)
    retrieved.store()
    retrieved.add_incoming(node, link_type=LinkType.CREATE, link_label='retrieved')

    for cls in [ArithmeticAddParser, SimpleArithmeticAddParser]:
        result, calcfunction = cls.parse_from_node(node)

        self.assertIsInstance(result['sum'], orm.Int)
        self.assertEqual(result['sum'].value, summed)
        self.assertIsInstance(calcfunction, orm.CalcFunctionNode)
        self.assertEqual(calcfunction.exit_status, 0)

    # Verify that the `retrieved_temporary_folder` keyword can be passed; there is no validation though
    result, calcfunction = ArithmeticAddParser.parse_from_node(node, retrieved_temporary_folder='/some/path')
def test_import_of_computer_json_params(self, temp_dir):
    """
    This test checks that the metadata and transport params are
    exported and imported correctly in both backends.
    """
    # Set the computer name
    comp1_name = 'localhost_1'
    comp1_metadata = {'workdir': '/tmp/aiida'}
    self.computer.label = comp1_name
    self.computer.metadata = comp1_metadata

    # Store a calculation
    calc1_label = 'calc1'
    calc1 = orm.CalcJobNode()
    calc1.computer = self.computer
    calc1.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    calc1.label = calc1_label
    calc1.store()
    calc1.seal()

    # Export the first job calculation
    filename1 = os.path.join(temp_dir, 'export1.aiida')
    export([calc1], filename=filename1, silent=True)

    # Clean the local database
    self.clean_db()
    self.create_user()

    # Import the data
    import_data(filename1, silent=True)

    builder = orm.QueryBuilder()
    builder.append(orm.Computer, project=['metadata'], tag='comp')
    self.assertEqual(builder.count(), 1, 'Expected only one computer')

    res = builder.dict()[0]
    self.assertEqual(res['comp']['metadata'], comp1_metadata, 'Not the expected metadata were found')
def test_same_computer_import(self, temp_dir):
    """
    Test that you can import nodes in steps without any problems. In this
    test we will import a first calculation and then a second one. The
    import should work as expected and in the end there should be two job
    calculations.

    Each calculation is related to the same computer. In the end we should
    have only one computer.
    """
    # Use local computer
    comp = self.computer

    # Store two job calculations related to the same computer
    calc1_label = 'calc1'
    calc1 = orm.CalcJobNode()
    calc1.computer = comp
    calc1.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    calc1.label = calc1_label
    calc1.store()
    calc1.seal()

    calc2_label = 'calc2'
    calc2 = orm.CalcJobNode()
    calc2.computer = comp
    calc2.set_option('resources', {'num_machines': 2, 'num_mpiprocs_per_machine': 2})
    calc2.label = calc2_label
    calc2.store()
    calc2.seal()

    # Store locally the computer name
    comp_name = str(comp.label)
    comp_uuid = str(comp.uuid)

    # Export the first job calculation
    filename1 = os.path.join(temp_dir, 'export1.aiida')
    export([calc1], filename=filename1, silent=True)

    # Export the second job calculation
    filename2 = os.path.join(temp_dir, 'export2.aiida')
    export([calc2], filename=filename2, silent=True)

    # Clean the local database
    self.clean_db()
    self.create_user()

    # Check that there are no computers
    builder = orm.QueryBuilder()
    builder.append(orm.Computer, project=['*'])
    self.assertEqual(builder.count(), 0, 'There should not be any computers in the database at this point.')

    # Check that there are no calculations
    builder = orm.QueryBuilder()
    builder.append(orm.CalcJobNode, project=['*'])
    self.assertEqual(builder.count(), 0, 'There should not be any calculations in the database at this point.')

    # Import the first calculation
    import_data(filename1, silent=True)

    # Check that the calculation computer is imported correctly.
    builder = orm.QueryBuilder()
    builder.append(orm.CalcJobNode, project=['label'])
    self.assertEqual(builder.count(), 1, 'Only one calculation should be found.')
    self.assertEqual(str(builder.first()[0]), calc1_label, 'The calculation label is not correct.')

    # Check that the referenced computer is imported correctly.
    builder = orm.QueryBuilder()
    builder.append(orm.Computer, project=['name', 'uuid', 'id'])
    self.assertEqual(builder.count(), 1, 'Only one computer should be found.')
    self.assertEqual(str(builder.first()[0]), comp_name, 'The computer name is not correct.')
    self.assertEqual(str(builder.first()[1]), comp_uuid, 'The computer uuid is not correct.')

    # Store the id of the computer
    comp_id = builder.first()[2]

    # Import the second calculation
    import_data(filename2, silent=True)

    # Check that the number of computers remains the same and its data
    # did not change.
    builder = orm.QueryBuilder()
    builder.append(orm.Computer, project=['name', 'uuid', 'id'])
    self.assertEqual(
        builder.count(), 1,
        'Found {} computers but only one computer should be found.'.format(builder.count()))
    self.assertEqual(str(builder.first()[0]), comp_name, 'The computer name is not correct.')
    self.assertEqual(str(builder.first()[1]), comp_uuid, 'The computer uuid is not correct.')
    self.assertEqual(builder.first()[2], comp_id, 'The computer id is not correct.')

    # Check that now you have two calculations attached to the same computer.
    builder = orm.QueryBuilder()
    builder.append(orm.Computer, tag='comp')
    builder.append(orm.CalcJobNode, with_computer='comp', project=['label'])
    self.assertEqual(builder.count(), 2, 'Two calculations should be found.')
    ret_labels = set(_ for [_] in builder.all())
    self.assertEqual(ret_labels, set([calc1_label, calc2_label]), 'The labels of the calculations are not correct.')
def test_cif_structure_roundtrip(self):
    from aiida.tools.dbexporters.tcod import export_cif, export_values
    from aiida.common.folders import SandboxFolder
    import tempfile

    with tempfile.NamedTemporaryFile(mode='w+') as tmpf:
        tmpf.write('''
            data_test
            _cell_length_a    10
            _cell_length_b    10
            _cell_length_c    10
            _cell_angle_alpha 90
            _cell_angle_beta  90
            _cell_angle_gamma 90
            loop_
            _atom_site_label
            _atom_site_fract_x
            _atom_site_fract_y
            _atom_site_fract_z
            C 0 0 0
            O 0.5 0.5 0.5
        ''')
        tmpf.flush()
        a = orm.CifData(filepath=tmpf.name)

    c = a.get_structure()
    c.store()

    pd = orm.Dict()

    code = orm.Code(local_executable='test.sh')
    with tempfile.NamedTemporaryFile(mode='w+') as tmpf:
        tmpf.write('#!/bin/bash\n\necho test run\n')
        tmpf.flush()
        code.put_object_from_filelike(tmpf, 'test.sh')
        code.store()

    calc = orm.CalcJobNode(computer=self.computer)
    calc.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    calc.add_incoming(code, LinkType.INPUT_CALC, 'code')
    calc.set_option('environment_variables', {'PATH': '/dev/null', 'USER': '******'})

    with tempfile.NamedTemporaryFile(mode='w+', prefix='Fe') as tmpf:
        tmpf.write('<UPF version="2.0.1">\nelement="Fe"\n')
        tmpf.flush()
        upf = orm.UpfData(filepath=tmpf.name)
        upf.store()
        calc.add_incoming(upf, LinkType.INPUT_CALC, 'upf')

    with tempfile.NamedTemporaryFile(mode='w+') as tmpf:
        tmpf.write('data_test')
        tmpf.flush()
        cif = orm.CifData(filepath=tmpf.name)
        cif.store()
        calc.add_incoming(cif, LinkType.INPUT_CALC, 'cif')

    with SandboxFolder() as fhandle:
        calc.put_object_from_tree(fhandle.abspath)
        calc.store()

    fd = orm.FolderData()
    with fd.open('_scheduler-stdout.txt', 'w') as fhandle:
        fhandle.write(u'standard output')
    with fd.open('_scheduler-stderr.txt', 'w') as fhandle:
        fhandle.write(u'standard error')
    fd.store()
    fd.add_incoming(calc, LinkType.CREATE, calc.link_label_retrieved)

    pd.add_incoming(calc, LinkType.CREATE, 'create1')
    pd.store()

    with self.assertRaises(ValueError):
        export_cif(c, parameters=pd)

    c.add_incoming(calc, LinkType.CREATE, 'create2')
    export_cif(c, parameters=pd)

    values = export_values(c, parameters=pd)
    values = values['0']

    self.assertEqual(values['_tcod_computation_environment'], ['PATH=/dev/null\nUSER=unknown'])
    self.assertEqual(values['_tcod_computation_command'], ['cd 1; ./_aiidasubmit.sh'])
def setUpClass(cls, *args, **kwargs):
    super().setUpClass(*args, **kwargs)
    from aiida.common.links import LinkType
    from aiida.engine import ProcessState

    cls.computer = orm.Computer(
        name='comp',
        hostname='localhost',
        transport_type='local',
        scheduler_type='direct',
        workdir='/tmp/aiida').store()

    cls.code = orm.Code(remote_computer_exec=(cls.computer, '/bin/true')).store()
    cls.group = orm.Group(label='test_group').store()
    cls.node = orm.Data().store()
    cls.calcs = []

    user = orm.User.objects.get_default()
    authinfo = orm.AuthInfo(computer=cls.computer, user=user)
    authinfo.store()

    process_class = CalculationFactory('templatereplacer')
    process_type = get_entry_point_string_from_class(process_class.__module__, process_class.__name__)

    # Create 5 CalcJobNodes (one for each CalculationState)
    for calculation_state in CalcJobState:

        calc = orm.CalcJobNode(computer=cls.computer, process_type=process_type)
        calc.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
        calc.store()

        calc.set_process_state(ProcessState.RUNNING)
        cls.calcs.append(calc)

        if calculation_state == CalcJobState.PARSING:
            cls.KEY_ONE = 'key_one'
            cls.KEY_TWO = 'key_two'
            cls.VAL_ONE = 'val_one'
            cls.VAL_TWO = 'val_two'

            output_parameters = orm.Dict(dict={
                cls.KEY_ONE: cls.VAL_ONE,
                cls.KEY_TWO: cls.VAL_TWO,
            }).store()

            output_parameters.add_incoming(calc, LinkType.CREATE, 'output_parameters')

            # Create shortcut for easy dereferencing
            cls.result_job = calc

            # Add a single calc to a group
            cls.group.add_nodes([calc])

    # Create a single failed CalcJobNode
    cls.EXIT_STATUS = 100
    calc = orm.CalcJobNode(computer=cls.computer)
    calc.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    calc.store()
    calc.set_exit_status(cls.EXIT_STATUS)
    calc.set_process_state(ProcessState.FINISHED)
    cls.calcs.append(calc)

    # Load the fixture containing a single ArithmeticAddCalculation node
    import_archive('calcjob/arithmetic.add.aiida')

    # Get the imported ArithmeticAddCalculation node
    ArithmeticAddCalculation = CalculationFactory('arithmetic.add')
    calculations = orm.QueryBuilder().append(ArithmeticAddCalculation).all()[0]
    cls.arithmetic_job = calculations[0]
def construct_complex_graph(self, export_combination=0, work_nodes=None, calc_nodes=None):
    # pylint: disable=too-many-statements
    """
    This method creates a "complex" graph with all available link types:
    INPUT_WORK, INPUT_CALC, CALL_WORK, CALL_CALC, CREATE, and RETURN
    and returns the nodes of the graph. It also returns various combinations
    of nodes that need to be extracted, as well as the final expected set of
    nodes (after adding the expected predecessors and successors).

    Graph::

        data1 ---------------INPUT_WORK----------------+
          |                                            |
          |     data2 -INPUT_WORK-+                    |
          |                       V                    V
          +-------INPUT_WORK--> work1 --CALL_WORK--> work2 ----+
          |                                            |       |
          |              CALL_CALC---------------------+       |
          |                  |            +-> data3 <-+        |
          |                  V            |           |        |
          +--INPUT_CALC--> calc1 --CREATE-+-> data4 <-+-----RETURN
                                                |
                                                |          +-> data5
                                                |          |
                                           INPUT_CALC--> calc2 --CREATE
                                                           |
                                                           +-> data6
    """
    if export_combination < 0 or export_combination > 9:
        return None

    if work_nodes is None:
        work_nodes = ['WorkflowNode', 'WorkflowNode']

    if calc_nodes is None:
        calc_nodes = ['CalculationNode', 'CalculationNode']

    # Class mapping
    # "CalcJobNode" is left out, since it is special.
    string_to_class = {
        'WorkflowNode': orm.WorkflowNode,
        'WorkChainNode': orm.WorkChainNode,
        'WorkFunctionNode': orm.WorkFunctionNode,
        'CalculationNode': orm.CalculationNode,
        'CalcFunctionNode': orm.CalcFunctionNode
    }

    # Node creation
    data1 = orm.Int(1).store()
    data2 = orm.Int(1).store()
    work1 = string_to_class[work_nodes[0]]()
    work2 = string_to_class[work_nodes[1]]()

    if calc_nodes[0] == 'CalcJobNode':
        calc1 = orm.CalcJobNode()
        calc1.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    else:
        calc1 = string_to_class[calc_nodes[0]]()
    calc1.computer = self.computer

    # Waiting to store Data nodes until they have been "created" with the links below,
    # because @calcfunctions cannot return data, i.e. return stored Data nodes
    data3 = orm.Int(1)
    data4 = orm.Int(1)

    if calc_nodes[1] == 'CalcJobNode':
        calc2 = orm.CalcJobNode()
        calc2.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    else:
        calc2 = string_to_class[calc_nodes[1]]()
    calc2.computer = self.computer

    # Waiting to store Data nodes until they have been "created" with the links below,
    # because @calcfunctions cannot return data, i.e. return stored Data nodes
    data5 = orm.Int(1)
    data6 = orm.Int(1)

    # Link creation
    work1.add_incoming(data1, LinkType.INPUT_WORK, 'input1')
    work1.add_incoming(data2, LinkType.INPUT_WORK, 'input2')

    work2.add_incoming(data1, LinkType.INPUT_WORK, 'input1')
    work2.add_incoming(work1, LinkType.CALL_WORK, 'call2')

    work1.store()
    work2.store()

    calc1.add_incoming(data1, LinkType.INPUT_CALC, 'input1')
    calc1.add_incoming(work2, LinkType.CALL_CALC, 'call1')
    calc1.store()

    data3.add_incoming(calc1, LinkType.CREATE, 'create3')
    # data3 is stored now, because a @workfunction cannot return unstored Data,
    # i.e. create data.
    data3.store()
    data3.add_incoming(work2, LinkType.RETURN, 'return3')

    data4.add_incoming(calc1, LinkType.CREATE, 'create4')
    # data4 is stored now, because a @workfunction cannot return unstored Data,
    # i.e. create data.
    data4.store()
    data4.add_incoming(work2, LinkType.RETURN, 'return4')

    calc2.add_incoming(data4, LinkType.INPUT_CALC, 'input4')
    calc2.store()

    data5.add_incoming(calc2, LinkType.CREATE, 'create5')
    data6.add_incoming(calc2, LinkType.CREATE, 'create6')
    data5.store()
    data6.store()

    work1.seal()
    work2.seal()
    calc1.seal()
    calc2.seal()

    graph_nodes = [data1, data2, data3, data4, data5, data6, calc1, calc2, work1, work2]

    # Create various combinations of nodes that should be exported
    # and the final set of nodes that are exported in each case, following
    # predecessor(INPUT, CREATE)/successor(CALL, RETURN, CREATE) links.
    export_list = [
        (work1, [data1, data2, data3, data4, calc1, work1, work2]),
        (work2, [data1, data3, data4, calc1, work2, work1, data2]),
        (data3, [data1, data3, data4, calc1, work2, work1, data2]),
        (data4, [data1, data3, data4, calc1, work2, work1, data2]),
        (data5, [data1, data3, data4, data5, data6, calc1, calc2, work2, work1, data2]),
        (data6, [data1, data3, data4, data5, data6, calc1, calc2, work2, work1, data2]),
        (calc1, [data1, data3, data4, calc1, work2, work1, data2]),
        (calc2, [data1, data3, data4, data5, data6, calc1, calc2, work2, work1, data2]),
        (data1, [data1]),
        (data2, [data2]),
    ]

    return graph_nodes, export_list[export_combination]
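# A hedged sketch (not in the original file) of how `construct_complex_graph` is
# typically consumed: export one of the listed nodes, wipe the database,
# re-import, and verify that the expected traversal closure of nodes comes back.
# The helper names `clean_db`/`create_user` mirror the other tests in this
# collection and are assumed to exist on the test case.
def test_example_complex_graph_export(self, temp_dir):
    """Hypothetical consumer of `construct_complex_graph`."""
    _, (export_node, expected_nodes) = self.construct_complex_graph(export_combination=0)
    expected_uuids = {node.uuid for node in expected_nodes}

    filename = os.path.join(temp_dir, 'export.aiida')
    export([export_node], filename=filename, silent=True)
    self.clean_db()
    self.create_user()
    import_data(filename, silent=True)

    # Every node of the expected closure should have been re-imported
    imported_uuids = {uuid for [uuid] in orm.QueryBuilder().append(orm.Node, project=['uuid']).all()}
    self.assertTrue(expected_uuids.issubset(imported_uuids))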
def create_provenance(self):
    """Create an example provenance graph."""
    pd0 = orm.Dict()
    pd0.label = 'pd0'
    pd0.store()

    pd1 = orm.Dict()
    pd1.label = 'pd1'
    pd1.store()

    wc1 = orm.WorkChainNode()
    wc1.set_process_state(ProcessState.RUNNING)
    wc1.add_incoming(pd0, link_type=LinkType.INPUT_WORK, link_label='input1')
    wc1.add_incoming(pd1, link_type=LinkType.INPUT_WORK, link_label='input2')
    wc1.store()

    calc1 = orm.CalcJobNode()
    calc1.computer = self.computer
    calc1.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    calc1.label = 'calc1'
    calc1.set_process_state(ProcessState.FINISHED)
    calc1.set_exit_status(0)
    calc1.add_incoming(pd0, link_type=LinkType.INPUT_CALC, link_label='input1')
    calc1.add_incoming(pd1, link_type=LinkType.INPUT_CALC, link_label='input2')
    calc1.add_incoming(wc1, link_type=LinkType.CALL_CALC, link_label='call1')
    calc1.store()

    rd1 = orm.RemoteData()
    rd1.label = 'rd1'
    rd1.set_remote_path('/x/y.py')
    rd1.computer = self.computer
    rd1.store()
    rd1.add_incoming(calc1, link_type=LinkType.CREATE, link_label='output')

    pd2 = orm.Dict()
    pd2.label = 'pd2'
    pd2.store()

    calcf1 = orm.CalcFunctionNode()
    calcf1.label = 'calcf1'
    calcf1.set_process_state(ProcessState.FINISHED)
    calcf1.set_exit_status(200)
    calcf1.add_incoming(rd1, link_type=LinkType.INPUT_CALC, link_label='input1')
    calcf1.add_incoming(pd2, link_type=LinkType.INPUT_CALC, link_label='input2')
    calcf1.add_incoming(wc1, link_type=LinkType.CALL_CALC, link_label='call2')
    calcf1.store()

    pd3 = orm.Dict()
    pd3.label = 'pd3'

    fd1 = orm.FolderData()
    fd1.label = 'fd1'

    pd3.add_incoming(calcf1, link_type=LinkType.CREATE, link_label='output1')
    pd3.store()
    fd1.add_incoming(calcf1, link_type=LinkType.CREATE, link_label='output2')
    fd1.store()

    pd3.add_incoming(wc1, link_type=LinkType.RETURN, link_label='output1')
    fd1.add_incoming(wc1, link_type=LinkType.RETURN, link_label='output2')

    return AttributeDict({
        'pd0': pd0,
        'pd1': pd1,
        'calc1': calc1,
        'rd1': rd1,
        'pd2': pd2,
        'calcf1': calcf1,
        'pd3': pd3,
        'fd1': fd1,
        'wc1': wc1
    })
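# An illustrative sketch (an assumption, not original code) showing how the
# provenance fixture above can be consumed: the returned `AttributeDict` gives
# named access to every node of the example graph.
def test_example_provenance_graph(self):
    """Hypothetical test consuming `create_provenance`."""
    nodes = self.create_provenance()
    # calc1 was called by the work chain wc1 through a CALL_CALC link
    self.assertEqual(nodes.calc1.caller.uuid, nodes.wc1.uuid)
    # rd1 was created by calc1
    created = [triple.node.uuid for triple in nodes.calc1.get_outgoing(link_type=LinkType.CREATE).all()]
    self.assertIn(nodes.rd1.uuid, created)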
def _generate_calc_job_node(
    entry_point_name='base',
    computer=None,
    test_name=None,
    inputs=None,
    attributes=None,
    retrieve_temporary=None
):
    """Fixture to generate a mock `CalcJobNode` for testing parsers.

    :param entry_point_name: entry point name of the calculation class
    :param computer: a `Computer` instance
    :param test_name: relative path of directory with test output files in the `fixtures/{entry_point_name}` folder.
    :param inputs: any optional nodes to add as input links to the current CalcJobNode
    :param attributes: any optional attributes to set on the node
    :param retrieve_temporary: optional tuple of an absolute filepath of a temporary directory and a list of
        filenames that should be written to this directory, which will serve as the `retrieved_temporary_folder`.
        For now this only works with top-level files and does not support files nested in directories.
    :return: `CalcJobNode` instance with an attached `FolderData` as the `retrieved` node.
    """
    from aiida import orm
    from aiida.common import LinkType
    from aiida.plugins.entry_point import format_entry_point_string

    if computer is None:
        computer = fixture_localhost

    filepath_folder = None

    if test_name is not None:
        basepath = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(entry_point_name[len('quantumespresso.'):], test_name)
        filepath_folder = os.path.join(basepath, 'parsers', 'fixtures', filename)
        filepath_input = os.path.join(filepath_folder, 'aiida.in')

    entry_point = format_entry_point_string('aiida.calculations', entry_point_name)

    node = orm.CalcJobNode(computer=computer, process_type=entry_point)
    node.set_attribute('input_filename', 'aiida.in')
    node.set_attribute('output_filename', 'aiida.out')
    node.set_attribute('error_filename', 'aiida.err')
    node.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    node.set_option('max_wallclock_seconds', 1800)

    if attributes:
        node.set_attribute_many(attributes)

    if filepath_folder:
        from qe_tools.utils.exceptions import ParsingError
        from aiida_quantumespresso.tools.pwinputparser import PwInputFile
        try:
            parsed_input = PwInputFile(filepath_input)
        except ParsingError:
            pass
        else:
            inputs['structure'] = parsed_input.get_structuredata()
            inputs['parameters'] = orm.Dict(dict=parsed_input.namelists)

    if inputs:
        metadata = inputs.pop('metadata', {})
        options = metadata.get('options', {})

        for name, option in options.items():
            node.set_option(name, option)

        for link_label, input_node in flatten_inputs(inputs):
            input_node.store()
            node.add_incoming(input_node, link_type=LinkType.INPUT_CALC, link_label=link_label)

    node.store()

    if retrieve_temporary:
        dirpath, filenames = retrieve_temporary
        for filename in filenames:
            shutil.copy(os.path.join(filepath_folder, filename), os.path.join(dirpath, filename))

    if filepath_folder:
        retrieved = orm.FolderData()
        retrieved.put_object_from_tree(filepath_folder)

        # Remove files that are supposed to be only present in the retrieved temporary folder
        if retrieve_temporary:
            for filename in filenames:
                retrieved.delete_object(filename)

        retrieved.add_incoming(node, link_type=LinkType.CREATE, link_label='retrieved')
        retrieved.store()

        remote_folder = orm.RemoteData(computer=computer, remote_path='/tmp')
        remote_folder.add_incoming(node, link_type=LinkType.CREATE, link_label='remote_folder')
        remote_folder.store()

    return node
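# A hedged sketch (not original code) of the `retrieve_temporary` mechanism above:
# files listed in the tuple are copied into a scratch directory and removed from
# the permanent `retrieved` folder, mimicking the `retrieved_temporary_folder`
# available during parsing. The fixture name `generate_calc_job_node`, the entry
# point and the file name are assumptions for illustration only.
def test_example_retrieve_temporary(generate_calc_job_node, tmp_path):
    """Hypothetical test exercising the `retrieve_temporary` argument."""
    node = generate_calc_job_node(
        entry_point_name='quantumespresso.pw',
        test_name='default',
        retrieve_temporary=(str(tmp_path), ['data-file.xml']),
    )
    retrieved = node.get_outgoing(link_label_filter='retrieved').one().node
    # The file now lives only in the scratch directory, not in the stored FolderData
    assert 'data-file.xml' not in retrieved.list_object_names()
    assert (tmp_path / 'data-file.xml').exists()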
def test_complex_graph_import_export(self, temp_dir):
    """
    This test checks that a small but somewhat complex graph can be
    correctly exported and imported. It will create the graph, store it to
    the database, export it to a file and import it. In the end it will
    check if the initial nodes are present in the imported graph.
    """
    from aiida.common.exceptions import NotExistent

    calc1 = orm.CalcJobNode()
    calc1.computer = self.computer
    calc1.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    calc1.label = 'calc1'
    calc1.store()

    pd1 = orm.Dict()
    pd1.label = 'pd1'
    pd1.store()

    pd2 = orm.Dict()
    pd2.label = 'pd2'
    pd2.store()

    rd1 = orm.RemoteData()
    rd1.label = 'rd1'
    rd1.set_remote_path('/x/y.py')
    rd1.computer = self.computer
    rd1.store()
    rd1.add_incoming(calc1, link_type=LinkType.CREATE, link_label='link')

    calc2 = orm.CalcJobNode()
    calc2.computer = self.computer
    calc2.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    calc2.label = 'calc2'
    calc2.add_incoming(pd1, link_type=LinkType.INPUT_CALC, link_label='link1')
    calc2.add_incoming(pd2, link_type=LinkType.INPUT_CALC, link_label='link2')
    calc2.add_incoming(rd1, link_type=LinkType.INPUT_CALC, link_label='link3')
    calc2.store()

    fd1 = orm.FolderData()
    fd1.label = 'fd1'
    fd1.store()
    fd1.add_incoming(calc2, link_type=LinkType.CREATE, link_label='link')

    calc1.seal()
    calc2.seal()

    node_uuids_labels = {
        calc1.uuid: calc1.label,
        pd1.uuid: pd1.label,
        pd2.uuid: pd2.label,
        rd1.uuid: rd1.label,
        calc2.uuid: calc2.label,
        fd1.uuid: fd1.label
    }

    filename = os.path.join(temp_dir, 'export.aiida')
    export([fd1], filename=filename, silent=True)

    self.clean_db()
    self.create_user()

    import_data(filename, silent=True, ignore_unknown_nodes=True)

    for uuid, label in node_uuids_labels.items():
        try:
            orm.load_node(uuid)
        except NotExistent:
            self.fail('Node with UUID {} and label {} was not found.'.format(uuid, label))
def setUpClass(cls, *args, **kwargs):  # pylint: disable=too-many-locals, too-many-statements
    """
    Besides the standard setup we need to add a few more objects in the
    database to be able to explore different requests/filters/orderings etc.
    """
    # call parent setUpClass method
    super(RESTApiTestCase, cls).setUpClass()

    # Connect the app and the api
    # Init the api by connecting it to the app (N.B. respect the following
    # order: api.__init__)
    kwargs = dict(PREFIX=cls._url_prefix, PERPAGE_DEFAULT=cls._PERPAGE_DEFAULT, LIMIT_DEFAULT=cls._LIMIT_DEFAULT)

    cls.app = App(__name__)
    cls.app.config['TESTING'] = True
    AiidaApi(cls.app, **kwargs)

    # create test inputs
    cell = ((2., 0., 0.), (0., 2., 0.), (0., 0., 2.))
    structure = orm.StructureData(cell=cell)
    structure.append_atom(position=(0., 0., 0.), symbols=['Ba'])
    structure.store()
    structure.add_comment('This is test comment.')
    structure.add_comment('Add another comment.')

    cif = orm.CifData(ase=structure.get_ase())
    cif.store()

    parameter1 = orm.Dict(dict={'a': 1, 'b': 2})
    parameter1.store()

    parameter2 = orm.Dict(dict={'c': 3, 'd': 4})
    parameter2.store()

    kpoint = orm.KpointsData()
    kpoint.set_kpoints_mesh([4, 4, 4])
    kpoint.store()

    resources = {'num_machines': 1, 'num_mpiprocs_per_machine': 1}

    calcfunc = orm.CalcFunctionNode(computer=cls.computer)
    calcfunc.store()

    calc = orm.CalcJobNode(computer=cls.computer)
    calc.set_option('resources', resources)
    calc.set_attribute('attr1', 'OK')
    calc.set_attribute('attr2', 'OK')
    calc.set_extra('extra1', False)
    calc.set_extra('extra2', 'extra_info')

    calc.add_incoming(structure, link_type=LinkType.INPUT_CALC, link_label='link_structure')
    calc.add_incoming(parameter1, link_type=LinkType.INPUT_CALC, link_label='link_parameter')

    aiida_in = 'The input file\nof the CalcJob node'
    # Add the calcjob_inputs folder with the aiida.in file to the CalcJobNode repository
    with tempfile.NamedTemporaryFile(mode='w+') as handle:
        handle.write(aiida_in)
        handle.flush()
        handle.seek(0)
        calc.put_object_from_filelike(handle, key='calcjob_inputs/aiida.in', force=True)
    calc.store()

    # create log message for calcjob
    import logging
    from aiida.common.log import LOG_LEVEL_REPORT
    from aiida.common.timezone import now
    from aiida.orm import Log

    log_record = {
        'time': now(),
        'loggername': 'loggername',
        'levelname': logging.getLevelName(LOG_LEVEL_REPORT),
        'dbnode_id': calc.id,
        'message': 'This is a template record message',
        'metadata': {'content': 'test'},
    }
    Log(**log_record)

    aiida_out = 'The output file\nof the CalcJob node'
    retrieved_outputs = orm.FolderData()
    # Add the calcjob_outputs folder with the aiida.out file to the FolderData node
    with tempfile.NamedTemporaryFile(mode='w+') as handle:
        handle.write(aiida_out)
        handle.flush()
        handle.seek(0)
        retrieved_outputs.put_object_from_filelike(handle, key='calcjob_outputs/aiida.out', force=True)
    retrieved_outputs.store()
    retrieved_outputs.add_incoming(calc, link_type=LinkType.CREATE, link_label='retrieved')

    kpoint.add_incoming(calc, link_type=LinkType.CREATE, link_label='create')

    calc1 = orm.CalcJobNode(computer=cls.computer)
    calc1.set_option('resources', resources)
    calc1.store()

    dummy_computers = [{
        'name': 'test1',
        'hostname': 'test1.epfl.ch',
        'transport_type': 'ssh',
        'scheduler_type': 'pbspro',
    }, {
        'name': 'test2',
        'hostname': 'test2.epfl.ch',
        'transport_type': 'ssh',
        'scheduler_type': 'torque',
    }, {
        'name': 'test3',
        'hostname': 'test3.epfl.ch',
        'transport_type': 'local',
        'scheduler_type': 'slurm',
    }, {
        'name': 'test4',
        'hostname': 'test4.epfl.ch',
        'transport_type': 'ssh',
        'scheduler_type': 'slurm',
    }]

    for dummy_computer in dummy_computers:
        computer = orm.Computer(**dummy_computer)
        computer.store()

    # Prepare typical REST responses
    cls.process_dummy_data()
def test_different_computer_same_name_import(self, temp_dir):
    """
    This test checks that if there is a name collision, the imported
    computers are renamed accordingly.
    """
    from aiida.tools.importexport.common.config import DUPL_SUFFIX

    # Set the computer name
    comp1_name = 'localhost_1'
    self.computer.label = comp1_name

    # Store a calculation
    calc1_label = 'calc1'
    calc1 = orm.CalcJobNode()
    calc1.computer = self.computer
    calc1.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    calc1.label = calc1_label
    calc1.store()
    calc1.seal()

    # Export the first job calculation
    filename1 = os.path.join(temp_dir, 'export1.aiida')
    export([calc1], filename=filename1, silent=True)

    # Reset the database
    self.clean_db()
    self.insert_data()

    # Set the computer name to the same name as before
    self.computer.label = comp1_name

    # Store a second calculation
    calc2_label = 'calc2'
    calc2 = orm.CalcJobNode()
    calc2.computer = self.computer
    calc2.set_option('resources', {'num_machines': 2, 'num_mpiprocs_per_machine': 2})
    calc2.label = calc2_label
    calc2.store()
    calc2.seal()

    # Export the second job calculation
    filename2 = os.path.join(temp_dir, 'export2.aiida')
    export([calc2], filename=filename2, silent=True)

    # Reset the database
    self.clean_db()
    self.insert_data()

    # Set the computer name to the same name as before
    self.computer.label = comp1_name

    # Store a third calculation
    calc3_label = 'calc3'
    calc3 = orm.CalcJobNode()
    calc3.computer = self.computer
    calc3.set_option('resources', {'num_machines': 2, 'num_mpiprocs_per_machine': 2})
    calc3.label = calc3_label
    calc3.store()
    calc3.seal()

    # Export the third job calculation
    filename3 = os.path.join(temp_dir, 'export3.aiida')
    export([calc3], filename=filename3, silent=True)

    # Clean the local database
    self.clean_db()
    self.create_user()

    # Check that there are no computers
    builder = orm.QueryBuilder()
    builder.append(orm.Computer, project=['*'])
    self.assertEqual(builder.count(), 0, 'There should not be any computers in the database at this point.')

    # Check that there are no calculations
    builder = orm.QueryBuilder()
    builder.append(orm.CalcJobNode, project=['*'])
    self.assertEqual(builder.count(), 0, 'There should not be any calculations in the database at this point.')

    # Import all the calculations
    import_data(filename1, silent=True)
    import_data(filename2, silent=True)
    import_data(filename3, silent=True)

    # Retrieve the calculation-computer pairs
    builder = orm.QueryBuilder()
    builder.append(orm.CalcJobNode, project=['label'], tag='jcalc')
    builder.append(orm.Computer, project=['name'], with_node='jcalc')
    self.assertEqual(builder.count(), 3, 'Three combinations expected.')
    res = builder.all()
    self.assertIn([calc1_label, comp1_name], res, 'Calc-Computer combination not found.')
    self.assertIn([calc2_label, comp1_name + DUPL_SUFFIX.format(0)], res, 'Calc-Computer combination not found.')
    self.assertIn([calc3_label, comp1_name + DUPL_SUFFIX.format(1)], res, 'Calc-Computer combination not found.')
def _generate_calc_job_node(  # pylint: disable=too-many-arguments,too-many-locals
    entry_point_name,
    computer,
    seedname=None,
    test_name=None,
    inputs=None,
    attributes=None,
):
    """Fixture to generate a mock `CalcJobNode` for testing parsers.

    :param entry_point_name: entry point name of the calculation class
    :param computer: a `Computer` instance
    :param seedname: seedname used for the input/output/error filenames; defaults to 'aiida'
    :param test_name: relative path of directory with test output files in the `fixtures/{entry_point_name}` folder.
    :param inputs: any optional nodes to add as input links to the current CalcJobNode
    :param attributes: any optional attributes to set on the node
    :return: `CalcJobNode` instance with an attached `FolderData` as the `retrieved` node
    """
    from aiida import orm
    from aiida.common import LinkType
    from aiida.plugins.entry_point import format_entry_point_string

    entry_point = format_entry_point_string('aiida.calculations', entry_point_name)

    # If no seedname is specified, use the default 'aiida'
    evaluated_seedname = seedname or 'aiida'

    node = orm.CalcJobNode(computer=computer, process_type=entry_point)
    node.set_attribute('input_filename', '{}.win'.format(evaluated_seedname))
    node.set_attribute('output_filename', '{}.wout'.format(evaluated_seedname))
    node.set_attribute('error_filename', '{}.werr'.format(evaluated_seedname))
    node.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    node.set_option('max_wallclock_seconds', 1800)
    node.set_option('seedname', evaluated_seedname)

    if attributes:
        node.set_attribute_many(attributes)

    if inputs:
        for link_label, input_node in flatten_inputs(inputs):
            input_node.store()
            node.add_incoming(input_node, link_type=LinkType.INPUT_CALC, link_label=link_label)

    node.store()

    if test_name is not None:
        # TODO: remove cast to 'str' when Python2 support is dropped
        filepath = str(shared_datadir / test_name)

        retrieved = orm.FolderData()
        retrieved.put_object_from_tree(filepath)
        retrieved.add_incoming(node, link_type=LinkType.CREATE, link_label='retrieved')
        retrieved.store()

        remote_folder = orm.RemoteData(computer=computer, remote_path='/tmp')
        remote_folder.add_incoming(node, link_type=LinkType.CREATE, link_label='remote_folder')
        remote_folder.store()

    return node
def test_non_default_user_nodes(self, temp_dir):  # pylint: disable=too-many-statements
    """
    This test checks that nodes belonging to user A (which is not the
    default user) can be correctly exported, imported, enriched with nodes
    from the default user, re-exported & re-imported, and that in the end
    all the nodes that have finally been imported belong to the right users.
    """
    from aiida.common.links import LinkType
    from aiida.manage.manager import get_manager

    manager = get_manager()

    # Create another user
    new_email = '[email protected]'
    user = orm.User(email=new_email).store()

    # Create a structure data node that has a calculation as output
    sd1 = orm.StructureData()
    sd1.user = user
    sd1.label = 'sd1'
    sd1.store()

    jc1 = orm.CalcJobNode()
    jc1.computer = self.computer
    jc1.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    jc1.user = user
    jc1.label = 'jc1'
    jc1.add_incoming(sd1, link_type=LinkType.INPUT_CALC, link_label='link')
    jc1.store()

    # Create some nodes from a different user
    sd2 = orm.StructureData()
    sd2.user = user
    sd2.label = 'sd2'
    sd2.add_incoming(jc1, link_type=LinkType.CREATE, link_label='l1')
    sd2.store()
    jc1.seal()
    sd2_uuid = sd2.uuid

    # At this point we export the generated data
    filename1 = os.path.join(temp_dir, 'export1.aiida')
    export([sd2], filename=filename1, silent=True)
    uuids1 = [sd1.uuid, jc1.uuid, sd2.uuid]
    self.clean_db()
    self.insert_data()
    import_data(filename1, silent=True)

    # Check that the imported nodes are correctly imported and that
    # the user assigned to the nodes is the right one
    for uuid in uuids1:
        self.assertEqual(orm.load_node(uuid).user.email, new_email)

    # Now we continue to generate more data based on the imported data
    sd2_imp = orm.load_node(sd2_uuid)

    jc2 = orm.CalcJobNode()
    jc2.computer = self.computer
    jc2.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    jc2.label = 'jc2'
    jc2.add_incoming(sd2_imp, link_type=LinkType.INPUT_CALC, link_label='l2')
    jc2.store()

    sd3 = orm.StructureData()
    sd3.label = 'sd3'
    sd3.add_incoming(jc2, link_type=LinkType.CREATE, link_label='l3')
    sd3.store()
    jc2.seal()

    # Store the UUIDs of the nodes that should be checked
    # if they can be imported correctly.
    uuids2 = [jc2.uuid, sd3.uuid]

    filename2 = os.path.join(temp_dir, 'export2.aiida')
    export([sd3], filename=filename2, silent=True)
    self.clean_db()
    self.insert_data()
    import_data(filename2, silent=True)

    # Check that the imported nodes are correctly imported and that
    # the user assigned to the nodes is the right one
    for uuid in uuids1:
        self.assertEqual(orm.load_node(uuid).user.email, new_email)
    for uuid in uuids2:
        self.assertEqual(orm.load_node(uuid).user.email, manager.get_profile().default_user)
def _generate_calc_job_node(entry_point_name, computer, test_name=None, inputs=None, attributes=None):
    """Fixture to generate a mock `CalcJobNode` for testing parsers.

    :param entry_point_name: entry point name of the calculation class
    :param computer: a `Computer` instance
    :param test_name: relative path of directory
    :param inputs: any optional nodes to add as input links to the current CalcJobNode
    :param attributes: any optional attributes to set on the node
    :return: `CalcJobNode` instance with an attached `FolderData` as the `retrieved` node
    """
    # pylint: disable=too-many-locals
    import os

    from aiida.common import LinkType
    from aiida.plugins.entry_point import format_entry_point_string

    entry_point = format_entry_point_string('aiida.calculations', entry_point_name)

    node = orm.CalcJobNode(computer=computer, process_type=entry_point)
    node.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    node.set_option('max_wallclock_seconds', 1800)

    if attributes:
        node.set_attribute_many(attributes)

    if inputs:
        metadata = inputs.pop('metadata', {})
        options = metadata.get('options', {})

        for name, option in options.items():
            node.set_option(name, option)

        for link_label, input_node in flatten_inputs(inputs):
            input_node.store()
            node.add_incoming(input_node, link_type=LinkType.INPUT_CALC, link_label=link_label)

    node.store()

    if test_name is not None:
        basepath = os.path.dirname(os.path.abspath(__file__))
        filepath = os.path.join(basepath, 'parsers', 'fixtures', 'catmap', test_name)

        retrieved = orm.FolderData()
        retrieved.put_object_from_tree(filepath)
        retrieved.add_incoming(node, link_type=LinkType.CREATE, link_label='retrieved')
        retrieved.store()

        remote_folder = orm.RemoteData(computer=computer, remote_path='/tmp')
        remote_folder.add_incoming(node, link_type=LinkType.CREATE, link_label='remote_folder')
        remote_folder.store()

    return node
def test_nodes_belonging_to_different_users(self, temp_dir):
    """
    This test checks that nodes belonging to different users are correctly
    exported & imported.
    """
    from aiida.common.links import LinkType
    from aiida.manage.manager import get_manager

    manager = get_manager()

    # Create another user
    new_email = '[email protected]'
    user = orm.User(email=new_email).store()

    # Create a structure data node that has a calculation as output
    sd1 = orm.StructureData()
    sd1.user = user
    sd1.label = 'sd1'
    sd1.store()

    jc1 = orm.CalcJobNode()
    jc1.computer = self.computer
    jc1.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    jc1.user = user
    jc1.label = 'jc1'
    jc1.add_incoming(sd1, link_type=LinkType.INPUT_CALC, link_label='link')
    jc1.store()

    # Create some nodes from a different user
    sd2 = orm.StructureData()
    sd2.user = user
    sd2.label = 'sd2'
    sd2.store()
    sd2.add_incoming(jc1, link_type=LinkType.CREATE, link_label='l1')  # I assume jc1 CREATED sd2
    jc1.seal()

    jc2 = orm.CalcJobNode()
    jc2.computer = self.computer
    jc2.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    jc2.label = 'jc2'
    jc2.add_incoming(sd2, link_type=LinkType.INPUT_CALC, link_label='l2')
    jc2.store()

    sd3 = orm.StructureData()
    sd3.label = 'sd3'
    sd3.store()
    sd3.add_incoming(jc2, link_type=LinkType.CREATE, link_label='l3')
    jc2.seal()

    uuids_u1 = [sd1.uuid, jc1.uuid, sd2.uuid]
    uuids_u2 = [jc2.uuid, sd3.uuid]

    filename = os.path.join(temp_dir, 'export.aiida')

    export([sd3], filename=filename, silent=True)
    self.clean_db()
    self.create_user()
    import_data(filename, silent=True)

    # Check that the imported nodes are correctly imported and that
    # the user assigned to the nodes is the right one
    for uuid in uuids_u1:
        node = orm.load_node(uuid=uuid)
        self.assertEqual(node.user.email, new_email)
    for uuid in uuids_u2:
        self.assertEqual(orm.load_node(uuid).user.email, manager.get_profile().default_user)
def test_same_computer_different_name_import(self, temp_dir):
    """
    This test checks that if the computer is re-imported under a different
    name into the same database, then the original computer will not be
    renamed. It also checks that the names were correctly imported (without
    any change, since there is no computer name collision).
    """
    # Get computer
    comp1 = self.computer

    # Store a calculation
    calc1_label = 'calc1'
    calc1 = orm.CalcJobNode()
    calc1.computer = self.computer
    calc1.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    calc1.label = calc1_label
    calc1.store()
    calc1.seal()

    # Store locally the computer name
    comp1_name = str(comp1.label)

    # Export the first job calculation
    filename1 = os.path.join(temp_dir, 'export1.aiida')
    export([calc1], filename=filename1, silent=True)

    # Rename the computer
    comp1.label = comp1_name + '_updated'

    # Store a second calculation
    calc2_label = 'calc2'
    calc2 = orm.CalcJobNode()
    calc2.computer = self.computer
    calc2.set_option('resources', {'num_machines': 2, 'num_mpiprocs_per_machine': 2})
    calc2.label = calc2_label
    calc2.store()
    calc2.seal()

    # Export the second job calculation
    filename2 = os.path.join(temp_dir, 'export2.aiida')
    export([calc2], filename=filename2, silent=True)

    # Clean the local database
    self.clean_db()
    self.create_user()

    # Check that there are no computers
    builder = orm.QueryBuilder()
    builder.append(orm.Computer, project=['*'])
    self.assertEqual(builder.count(), 0, 'There should not be any computers in the database at this point.')

    # Check that there are no calculations
    builder = orm.QueryBuilder()
    builder.append(orm.CalcJobNode, project=['*'])
    self.assertEqual(builder.count(), 0, 'There should not be any calculations in the database at this point.')

    # Import the first calculation
    import_data(filename1, silent=True)

    # Check that the calculation computer is imported correctly.
    builder = orm.QueryBuilder()
    builder.append(orm.CalcJobNode, project=['label'])
    self.assertEqual(builder.count(), 1, 'Only one calculation should be found.')
    self.assertEqual(str(builder.first()[0]), calc1_label, 'The calculation label is not correct.')

    # Check that the referenced computer is imported correctly.
    builder = orm.QueryBuilder()
    builder.append(orm.Computer, project=['name', 'uuid', 'id'])
    self.assertEqual(builder.count(), 1, 'Only one computer should be found.')
    self.assertEqual(str(builder.first()[0]), comp1_name, 'The computer name is not correct.')

    # Import the second calculation
    import_data(filename2, silent=True)

    # Check that the number of computers remains the same and its data
    # did not change.
    builder = orm.QueryBuilder()
    builder.append(orm.Computer, project=['name'])
    self.assertEqual(
        builder.count(), 1,
        'Found {} computers but only one computer should be found.'.format(builder.count()))
    self.assertEqual(str(builder.first()[0]), comp1_name, 'The computer name is not correct.')