Example #1
from aiida import orm
from aiida.common import LinkType


def test_parse_insufficient_data(generate_process):
    """Test the scheduler output parsing logic in `CalcJob.parse`.

    Here we check explicitly that the parsing does not except even if the required information is not available.
    """
    process = generate_process()

    retrieved = orm.FolderData().store()
    retrieved.add_incoming(process.node,
                           link_label='retrieved',
                           link_type=LinkType.CREATE)
    process.parse()

    filename_stderr = process.node.get_option('scheduler_stderr')
    filename_stdout = process.node.get_option('scheduler_stdout')

    # The scheduler parsing requires three sources of information: the `detailed_job_info` dictionary, which is
    # stored as an attribute on the calculation job node, and the stdout and stderr of the scheduler, which are both
    # stored in the repository. In this test, we have deliberately not created any of these. This should not cause
    # the process to except, but it should log warnings, so here we check that the expected warnings are attached
    # to the node.
    logs = [log.message for log in orm.Log.objects.get_logs_for(process.node)]
    expected_logs = [
        'could not parse scheduler output: the `detailed_job_info` attribute is missing',
        f'could not parse scheduler output: the `{filename_stderr}` file is missing',
        f'could not parse scheduler output: the `{filename_stdout}` file is missing'
    ]

    for log in expected_logs:
        assert log in logs
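
The `generate_process` fixture is not shown in this snippet. A minimal sketch of what it could look like, assuming the `generate_calc_job`, `fixture_sandbox` and `aiida_local_code_factory` fixtures used in Example #10 (this is a sketch, not the actual fixture from the test suite):

import pytest

from aiida import orm


@pytest.fixture
def generate_process(generate_calc_job, fixture_sandbox, aiida_local_code_factory):
    """Sketch of a fixture returning a `CalcJob` process instance that is ready to be parsed."""

    def _generate_process():
        inputs = {
            'code': aiida_local_code_factory('arithmetic.add', '/bin/bash'),
            'x': orm.Int(1),
            'y': orm.Int(2),
        }
        return generate_calc_job(fixture_sandbox, 'arithmetic.add', inputs, return_process=True)

    return _generate_process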
Example #2
    def test_parser_get_outputs_for_parsing(self):
        """Make sure that the `get_output_for_parsing` method returns the correct output nodes."""
        ArithmeticAddCalculation.define = CustomCalcJob.define
        node = orm.CalcJobNode(computer=self.computer,
                               process_type=CustomCalcJob.build_process_type())
        node.set_option('resources', {
            'num_machines': 1,
            'num_mpiprocs_per_machine': 1
        })
        node.set_option('max_wallclock_seconds', 1800)
        node.store()

        retrieved = orm.FolderData().store()
        retrieved.add_incoming(node,
                               link_type=LinkType.CREATE,
                               link_label='retrieved')

        output = orm.Data().store()
        output.add_incoming(node,
                            link_type=LinkType.CREATE,
                            link_label='output')

        parser = ArithmeticAddParser(node)
        outputs_for_parsing = parser.get_outputs_for_parsing()
        self.assertIn('retrieved', outputs_for_parsing)
        self.assertEqual(outputs_for_parsing['retrieved'].uuid, retrieved.uuid)
        self.assertIn('output', outputs_for_parsing)
        self.assertEqual(outputs_for_parsing['output'].uuid, output.uuid)
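
`CustomCalcJob` is defined elsewhere in the test module. The essential ingredient, assuming the standard `CalcJobProcessSpec` API, is an extra output port flagged with `pass_to_parser=True`, since those ports are what `get_outputs_for_parsing` collects. A hypothetical minimal version:

from aiida import orm
from aiida.engine import CalcJob


class CustomCalcJob(CalcJob):
    """Hypothetical sketch of a `CalcJob` that declares one extra output for the parser."""

    @classmethod
    def define(cls, spec):
        super().define(spec)
        # Output ports with `pass_to_parser=True` end up in `Parser.get_outputs_for_parsing`,
        # alongside the default `retrieved` output.
        spec.output('output', valid_type=orm.Data, pass_to_parser=True)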
Example #3
import io

from aiida import orm
from aiida.common import LinkType


def test_parse_not_implemented(generate_process):
    """Test the scheduler output parsing logic in `CalcJob.parse`.

    Here we check explicitly that the parsing does not except even if the scheduler does not implement the method.
    """
    process = generate_process()
    filename_stderr = process.node.get_option('scheduler_stderr')
    filename_stdout = process.node.get_option('scheduler_stdout')

    retrieved = orm.FolderData()
    retrieved.put_object_from_filelike(io.StringIO('\n'),
                                       filename_stderr,
                                       mode='w')
    retrieved.put_object_from_filelike(io.StringIO('\n'),
                                       filename_stdout,
                                       mode='w')
    retrieved.store()
    retrieved.add_incoming(process.node,
                           link_label='retrieved',
                           link_type=LinkType.CREATE)

    process.node.set_attribute('detailed_job_info', {})

    process.parse()

    # The `DirectScheduler`, at this point in time, does not implement the `parse_output` method. Instead of raising,
    # a warning message should be logged. We verify here that said message is present.
    logs = [log.message for log in orm.Log.objects.get_logs_for(process.node)]
    expected_logs = [
        '`DirectScheduler` does not implement scheduler output parsing'
    ]

    for log in expected_logs:
        assert log in logs
Example #4
from aiida import orm
from aiida.common import LinkType


def test_parse_scheduler_excepted(process, monkeypatch):
    """Test the scheduler output parsing logic in `CalcJob.parse`.

    Here we check explicitly the case where the `Scheduler.parse_output` method excepts.
    """
    from aiida.schedulers.plugins.direct import DirectScheduler

    process.node.set_attribute('detailed_job_info', {})

    filename_stderr = process.node.get_option('scheduler_stderr')
    filename_stdout = process.node.get_option('scheduler_stdout')

    # Write the scheduler output files while the node is still unstored, since the repository of a
    # stored node is immutable, and only then store the node and add the link.
    retrieved = orm.FolderData()

    with retrieved.open(filename_stderr, 'w') as handle:
        handle.write('\n')

    with retrieved.open(filename_stdout, 'w') as handle:
        handle.write('\n')

    retrieved.store()
    retrieved.add_incoming(process.node, link_label='retrieved', link_type=LinkType.CREATE)

    msg = 'crash'

    def raise_exception(*args, **kwargs):
        raise RuntimeError(msg)

    # Monkeypatch the `DirectScheduler.parse_output` to raise an exception
    monkeypatch.setattr(DirectScheduler, 'parse_output', raise_exception)
    process.parse()
    logs = [log.message for log in orm.Log.objects.get_logs_for(process.node)]
    expected_logs = [f'the `parse_output` method of the scheduler excepted: {msg}']

    for log in expected_logs:
        assert log in logs
Example #5
    @classmethod
    def setUpClass(cls, *args, **kwargs):
        super().setUpClass(*args, **kwargs)

        node = orm.Data()

        cls.ATTR_KEY_ONE = 'a'
        cls.ATTR_VAL_ONE = '1'
        cls.ATTR_KEY_TWO = 'b'
        cls.ATTR_VAL_TWO = 'test'

        node.set_attribute_many({cls.ATTR_KEY_ONE: cls.ATTR_VAL_ONE, cls.ATTR_KEY_TWO: cls.ATTR_VAL_TWO})

        cls.EXTRA_KEY_ONE = 'x'
        cls.EXTRA_VAL_ONE = '2'
        cls.EXTRA_KEY_TWO = 'y'
        cls.EXTRA_VAL_TWO = 'other'

        node.set_extra_many({cls.EXTRA_KEY_ONE: cls.EXTRA_VAL_ONE, cls.EXTRA_KEY_TWO: cls.EXTRA_VAL_TWO})

        node.store()

        cls.node = node

        # Set up a FolderData for the node repo cp tests.
        folder_node = orm.FolderData()
        cls.content_file1 = 'nobody expects'
        cls.content_file2 = 'the minister of silly walks'
        cls.key_file1 = 'some/nested/folder/filename.txt'
        cls.key_file2 = 'some_other_file.txt'
        folder_node.put_object_from_filelike(io.StringIO(cls.content_file1), cls.key_file1)
        folder_node.put_object_from_filelike(io.StringIO(cls.content_file2), cls.key_file2)
        folder_node.store()
        cls.folder_node = folder_node
Example #6
import os

from aiida import orm
from aiida.common import AttributeDict


def generate_inputs():
    """Minimal input for pw2wannier90 calculations."""
    basepath = os.path.dirname(os.path.abspath(__file__))
    nnkp_filepath = os.path.join(basepath, 'fixtures', 'pw2wannier90', 'inputs', 'aiida.nnkp')

    parameters = {
        'inputpp': {
            'write_amn': False,
            'write_mmn': False,
            'write_unk': False,
            'scdm_proj': True,
            'scdm_entanglement': 'isolated',
        }
    }

    settings = {'ADDITIONAL_RETRIEVE_LIST': ['*.amn', '*.mmn', '*.eig']}

    # Since we don't actually run pw2wannier90.x, we only pretend to have the output folder
    # of a parent pw.x calculation. The nnkp file, instead, is real.
    inputs = {
        'parent_folder': orm.FolderData().store(),
        'nnkp_file': orm.SinglefileData(file=nnkp_filepath).store(),
        'parameters': orm.Dict(dict=parameters),
        'settings': orm.Dict(dict=settings),
    }

    return AttributeDict(inputs)
Example #7
def populate_restapi_database(clear_database_before_test):
    """Populates the database with a considerable set of nodes to test the restAPI"""
    # pylint: disable=unused-argument
    from aiida import orm

    struct_forcif = orm.StructureData().store()
    orm.StructureData().store()
    orm.StructureData().store()

    orm.Dict().store()
    orm.Dict().store()

    orm.CifData(ase=struct_forcif.get_ase()).store()

    orm.KpointsData().store()

    orm.FolderData().store()

    orm.CalcFunctionNode().store()
    orm.CalcJobNode().store()
    orm.CalcJobNode().store()

    orm.WorkFunctionNode().store()
    orm.WorkFunctionNode().store()
    orm.WorkChainNode().store()
Example #8
from aiida import orm
from aiida.common import AttributeDict


def dos_inputs():
    # The DosParser doesn't really need to access the parent folder, but we'd like to make the inputs as realistic
    # as possible, so we create an empty FolderData and attach it as an input to the current CalcJobNode.
    inputs = {
        'parent_folder': orm.FolderData().store(),
    }

    return AttributeDict(inputs)
Example #9
def test_validate_transfer_inputs(aiida_localhost, tmp_path, temp_dir):
    """Test the `TransferCalculation` validators."""
    from aiida import orm
    from aiida.orm import Computer
    from aiida.calculations.transfer import check_node_type, validate_transfer_inputs

    fake_localhost = Computer(
        label='localhost-fake',
        description='extra localhost computer set up by test',
        hostname='localhost-fake',
        workdir=temp_dir,
        transport_type='local',
        scheduler_type='direct'
    )
    fake_localhost.store()
    fake_localhost.set_minimum_job_poll_interval(0.)
    fake_localhost.configure()

    inputs = {
        'source_nodes': {
            'unused_node': orm.RemoteData(computer=aiida_localhost, remote_path=str(tmp_path)),
        },
        'instructions':
        orm.Dict(
            dict={
                'local_files': [('inexistent_node', None, None)],
                'remote_files': [('inexistent_node', None, None)],
                'symlink_files': [('inexistent_node', None, None)],
            }
        ),
        'metadata': {
            'computer': fake_localhost
        },
    }
    expected_list = []
    expected_list.append((
        f' > remote node `unused_node` points to computer `{aiida_localhost}`, '
        f'not the one being used (`{fake_localhost}`)'
    ))
    expected_list.append(check_node_type('local_files', 'inexistent_node', None, orm.FolderData))
    expected_list.append(check_node_type('remote_files', 'inexistent_node', None, orm.RemoteData))
    expected_list.append(check_node_type('symlink_files', 'inexistent_node', None, orm.RemoteData))
    expected_list.append(' > node `unused_node` provided as inputs is not being used')

    expected = '\n\n'
    for addition in expected_list:
        expected = expected + addition + '\n'

    result = validate_transfer_inputs(inputs, None)
    assert result == expected

    result = check_node_type('list_name', 'node_label', None, orm.RemoteData)
    expected = ' > node `node_label` requested on list `list_name` not found among inputs'
    assert result == expected

    result = check_node_type('list_name', 'node_label', orm.FolderData(), orm.RemoteData)
    expected_type = orm.RemoteData.class_node_type
    expected = f' > node `node_label`, requested on list `list_name` should be of type `{expected_type}`'
    assert result == expected
Example #10
from aiida import orm
from aiida.common import LinkType
from aiida.engine import CalcJob, ExitCode


def test_parse_exit_code_priority(
    exit_status_scheduler,
    exit_status_retrieved,
    final,
    generate_calc_job,
    fixture_sandbox,
    aiida_local_code_factory,
    monkeypatch,
):  # pylint: disable=too-many-arguments
    """Test the logic around exit codes in the `CalcJob.parse` method.

    The `parse` method will first call the `Scheduler.parse_output` method which, if implemented by the relevant
    scheduler plugin, will parse the scheduler output and potentially return an exit code. Next, the output parser
    plugin, if defined in the inputs, is called and can also optionally return an exit code. This test is designed
    to make sure the right logic is implemented in terms of which exit code should be dominant.

    Scheduler result | Retrieved result | Final result    | Scenario
    -----------------|------------------|-----------------|-----------------------------------------
    `None`           | `None`           | `ExitCode(0)`   | Neither parser found any problem
    `ExitCode(100)`  | `None`           | `ExitCode(100)` | Scheduler found issue, output parser does not override
    `None`           | `ExitCode(400)`  | `ExitCode(400)` | Only output parser found a problem
    `ExitCode(100)`  | `ExitCode(400)`  | `ExitCode(400)` | Scheduler found issue, but output parser overrides
                     |                  |                 | with a more specific error code
    `ExitCode(100)`  | `ExitCode(0)`    | `ExitCode(0)`   | Scheduler found issue but output parser overrides saying
                     |                  |                 | that despite that the calculation should be considered
                     |                  |                 | finished successfully.

    To test this, we only need to test the `CalcJob.parse` method, and the easiest way is to mock the scheduler
    parser and output parser calls, named `parse_scheduler_output` and `parse_retrieved_output` respectively. We
    replace each with a simple method that returns `None` or an `ExitCode`, and then check that the final exit code
    returned by `CalcJob.parse` is the one we expect according to the table above.
    """
    from aiida.orm import Int

    def parse_scheduler_output(_, __):
        if exit_status_scheduler is not None:
            return ExitCode(exit_status_scheduler)

    def parse_retrieved_output(_, __):
        if exit_status_retrieved is not None:
            return ExitCode(exit_status_retrieved)

    monkeypatch.setattr(CalcJob, 'parse_scheduler_output', parse_scheduler_output)
    monkeypatch.setattr(CalcJob, 'parse_retrieved_output', parse_retrieved_output)

    inputs = {
        'code': aiida_local_code_factory('arithmetic.add', '/bin/bash'),
        'x': Int(1),
        'y': Int(2),
    }
    process = generate_calc_job(fixture_sandbox, 'arithmetic.add', inputs, return_process=True)
    retrieved = orm.FolderData().store()
    retrieved.add_incoming(process.node, link_label='retrieved', link_type=LinkType.CREATE)

    result = process.parse()
    assert isinstance(result, ExitCode)
    assert result.status == final
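
The parameters `exit_status_scheduler`, `exit_status_retrieved` and `final` come from a `pytest.mark.parametrize` decorator that this snippet omits. A sketch mirroring the table in the docstring:

import pytest


@pytest.mark.parametrize(
    'exit_status_scheduler, exit_status_retrieved, final',
    (
        (None, None, 0),  # neither parser found any problem
        (100, None, 100),  # scheduler found an issue, output parser does not override
        (None, 400, 400),  # only the output parser found a problem
        (100, 400, 400),  # output parser overrides with a more specific error code
        (100, 0, 0),  # output parser overrides: calculation finished successfully
    ),
)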
Example #11
    def test_parse_retrieved_folder(self):
        """Test the `CalcJob.parse` method when there is a retrieved folder."""
        process = self.instantiate_process()
        retrieved = orm.FolderData().store()
        retrieved.add_incoming(process.node, link_label='retrieved', link_type=LinkType.CREATE)
        exit_code = process.parse()

        # The following exit code is specific to the `ArithmeticAddCalculation` we are testing here and is returned
        # because the retrieved folder does not contain the output file it expects
        assert exit_code == process.exit_codes.ERROR_READING_OUTPUT_FILE
Example #12
def test_get_fleurinp_from_folder_data(folderpath, expected_files):
    from aiida import orm
    from aiida_fleur.data.fleurinp import get_fleurinp_from_folder_data

    folder = orm.FolderData()
    folder.put_object_from_tree(folderpath)

    fleurinp = get_fleurinp_from_folder_data(folder)

    assert set(fleurinp.files) == expected_files
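
Here too, `folderpath` and `expected_files` are supplied by a `pytest.mark.parametrize` decorator that the snippet omits. A purely hypothetical example (the fixture path and file set are placeholders; `inp.xml` is simply the canonical Fleur input file name):

import pytest


@pytest.mark.parametrize('folderpath, expected_files', (
    ('files/folders/default', {'inp.xml'}),  # placeholder path and contents
))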
Example #13
    def _generate_work_chain_node(entry_point_name,
                                  computer,
                                  test_name=None,
                                  inputs=None,
                                  attributes=None):
        """Fixture to generate a mock `WorkChainNode` for testing parsers.

        :param entry_point_name: entry point name of the calculation class
        :param computer: a `Computer` instance
        :param test_name: relative path of directory with test output files in the `fixtures/{entry_point_name}` folder.
        :param inputs: any optional nodes to add as input links to the current CalcJobNode
        :param attributes: any optional attributes to set on the node
        :return: `CalcJobNode` instance with an attached `FolderData` as the `retrieved` node
        """
        import os

        from aiida import orm
        from aiida.common import LinkType
        from aiida.plugins.entry_point import format_entry_point_string

        entry_point = format_entry_point_string('aiida.workchains',
                                                entry_point_name)

        node = orm.WorkChainNode(computer=computer, process_type=entry_point)

        if attributes:
            node.set_attribute_many(attributes)

        if inputs:
            for link_label, input_node in flatten_inputs(inputs):
                input_node.store()
                node.add_incoming(input_node,
                                  link_type=LinkType.INPUT_WORK,
                                  link_label=link_label)

        if test_name is not None:
            basepath = os.path.dirname(os.path.abspath(__file__))
            filepath = os.path.join(basepath, 'parsers', 'fixtures',
                                    entry_point_name[len('quantumespresso.'):],
                                    test_name)

            retrieved = orm.FolderData()
            retrieved.put_object_from_tree(filepath)
            retrieved.add_incoming(node,
                                   link_type=LinkType.CREATE,
                                   link_label='retrieved')
            retrieved.store()

            remote_folder = orm.RemoteData(computer=computer,
                                           remote_path='/tmp')
            remote_folder.add_incoming(node,
                                       link_type=LinkType.CREATE,
                                       link_label='remote_folder')
            remote_folder.store()

        return node
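
The `flatten_inputs` helper used above (and in Examples #17 and #26) is defined elsewhere in the conftest. A sketch of a compatible implementation, assuming AiiDA's double-underscore convention for namespaced link labels:

import collections.abc


def flatten_inputs(inputs, prefix=''):
    """Flatten a nested dictionary of input nodes into (link_label, node) pairs."""
    flat_inputs = []
    for key, value in inputs.items():
        if isinstance(value, collections.abc.Mapping):
            # Nested namespaces are joined with '__', AiiDA's link-label separator.
            flat_inputs.extend(flatten_inputs(value, prefix=prefix + key + '__'))
        else:
            flat_inputs.append((prefix + key, value))
    return flat_inputs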
Example #14
import os

from aiida import orm

import tests


def test_process(deepmd_code):
    """Test running a calculation.

    Note that this does not test that the expected outputs are created or that output parsing works.
    """
    from aiida.plugins import DataFactory, CalculationFactory
    from aiida.engine import run

    # Prepare input parameters
    DiffParameters = DataFactory('deepmd')
    parameters = DiffParameters({'ignore-case': True})

    from aiida.orm import SinglefileData
    file1 = SinglefileData(
        file=os.path.join(tests.TEST_DIR, "input_files", 'file1.txt'))
    file2 = SinglefileData(
        file=os.path.join(tests.TEST_DIR, "input_files", 'file2.txt'))

    data_folder = orm.FolderData(
        tree=os.path.join(tests.TEST_DIR, "input_files"))

    # set up calculation
    inputs = {
        'code': deepmd_code,
        'model': orm.Dict(dict={}),
        'learning_rate': orm.Dict(dict={}),
        'loss': orm.Dict(dict={}),
        'training': orm.Dict(dict={}),
        'file': {
            'box_raw':
            SinglefileData(
                file=os.path.join(tests.TEST_DIR, "input_files", 'file1.txt')),
            'coord_raw':
            SinglefileData(
                file=os.path.join(tests.TEST_DIR, "input_files", 'file2.txt'))
        },
        'metadata': {
            'dry_run': True,
            'options': {
                'max_wallclock_seconds': 30,
                'resources': {
                    'num_machines': 1,
                    'num_mpiprocs_per_machine': 1
                }
            },
        },
    }

    result = run(CalculationFactory('dptrain'), **inputs)
    computed_diff = result['deepmd'].get_content()

    assert 'content1' in computed_diff
    assert 'content2' in computed_diff
Example #15
from aiida import orm
from aiida.common import LinkType


def test_parse_non_zero_retval(process):
    """Test the scheduler output parsing logic in `CalcJob.parse`.

    This tests the case where the `detailed_job_info` is incomplete because the scheduler call failed. This is
    detected through the non-zero return value stored within the attribute dictionary.
    """
    retrieved = orm.FolderData().store()
    retrieved.add_incoming(process.node, link_label='retrieved', link_type=LinkType.CREATE)

    process.node.set_attribute('detailed_job_info', {'retval': 1, 'stderr': 'accounting disabled', 'stdout': ''})
    process.parse()

    logs = [log.message for log in orm.Log.objects.get_logs_for(process.node)]
    assert 'could not parse scheduler output: return value of `detailed_job_info` is non-zero' in logs
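
For contrast, a well-formed `detailed_job_info` would carry a zero return value. The exact `stdout` payload is scheduler specific, so the content below is only a placeholder:

process.node.set_attribute('detailed_job_info', {
    'retval': 0,  # the accounting command succeeded
    'stderr': '',
    'stdout': '...',  # raw, scheduler-specific accounting output (placeholder)
})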
Example #16
    @classmethod
    def get_unstored_folder_node(cls):
        """Get a "default" folder node with some data.

        The node is unstored so one can add more content to it before storing it.
        """
        folder_node = orm.FolderData()
        cls.content_file1 = 'nobody expects'
        cls.content_file2 = 'the minister of silly walks'
        cls.key_file1 = 'some/nested/folder/filename.txt'
        cls.key_file2 = 'some_other_file.txt'
        folder_node.put_object_from_filelike(io.StringIO(cls.content_file1),
                                             cls.key_file1)
        folder_node.put_object_from_filelike(io.StringIO(cls.content_file2),
                                             cls.key_file2)
        return folder_node
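
Because the returned node is unstored, callers can still extend its repository before storing, for instance:

folder_node = cls.get_unstored_folder_node()
folder_node.put_object_from_filelike(io.StringIO('more content'), 'extra_file.txt')
folder_node.store()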
Example #17
    def _generate_calc_job_node(entry_point_name, computer, test_name=None, inputs=None, attributes=None):
        """Fixture to generate a mock `CalcJobNode` for testing parsers.

        :param entry_point_name: entry point name of the calculation class
        :param computer: a `Computer` instance
        :param test_name: relative path of directory with test output files in the `fixtures/{entry_point_name}` folder.
        :param inputs: any optional nodes to add as input links to the current CalcJobNode
        :param attributes: any optional attributes to set on the node
        :return: `CalcJobNode` instance with an attached `FolderData` as the `retrieved` node
        """
        import os
        from aiida import orm
        from aiida.common import LinkType
        from aiida.plugins.entry_point import format_entry_point_string

        entry_point = format_entry_point_string('aiida.calculations', entry_point_name)

        node = orm.CalcJobNode(computer=computer, process_type=entry_point)
        node.set_attribute('input_filename', 'aiida.in')
        node.set_attribute('output_filename', 'aiida.out')
        node.set_attribute('error_filename', 'aiida.err')
        node.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
        node.set_option('max_wallclock_seconds', 1800)

        if attributes:
            node.set_attribute_many(attributes)

        if inputs:
            for link_label, input_node in flatten_inputs(inputs):
                input_node.store()
                node.add_incoming(input_node, link_type=LinkType.INPUT_CALC, link_label=link_label)

        node.store()

        if test_name is not None:
            basepath = os.path.dirname(os.path.abspath(__file__))
            filepath = os.path.join(basepath, 'parsers', 'fixtures', entry_point_name[len('quantumespresso.'):], test_name)

            retrieved = orm.FolderData()
            retrieved.put_object_from_tree(filepath)
            retrieved.add_incoming(node, link_type=LinkType.CREATE, link_label='retrieved')
            retrieved.store()

            remote_folder = orm.RemoteData(computer=computer, remote_path='/tmp')
            remote_folder.add_incoming(node, link_type=LinkType.CREATE, link_label='remote_folder')
            remote_folder.store()

        return node
Example #18
from aiida import orm


def test_integration_transfer(aiida_localhost, tmp_path):
    """Test a default `TransferCalculation`."""
    from aiida.calculations.transfer import TransferCalculation
    from aiida.engine import run

    content_local = 'Content of local file'
    srcfile_local = tmp_path / 'file_local.txt'
    srcfile_local.write_text(content_local)
    srcnode_local = orm.FolderData(tree=str(tmp_path))

    content_remote = 'Content of remote file'
    srcfile_remote = tmp_path / 'file_remote.txt'
    srcfile_remote.write_text(content_remote)
    srcnode_remote = orm.RemoteData(computer=aiida_localhost, remote_path=str(tmp_path))

    list_of_nodes = {}
    list_of_nodes['source_local'] = srcnode_local
    list_for_local = [('source_local', 'file_local.txt', 'file_local.txt')]
    list_of_nodes['source_remote'] = srcnode_remote
    list_for_remote = [('source_remote', 'file_remote.txt', 'file_remote.txt')]

    instructions = orm.Dict(
        dict={
            'retrieve_files': True,
            'local_files': list_for_local,
            'remote_files': list_for_remote,
        }
    )
    inputs = {'instructions': instructions, 'source_nodes': list_of_nodes, 'metadata': {'computer': aiida_localhost}}

    output_nodes = run(TransferCalculation, **inputs)

    output_remotedir = output_nodes['remote_folder']
    output_retrieved = output_nodes['retrieved']

    # Check the retrieved folder
    assert sorted(output_retrieved.list_object_names()) == sorted(['file_local.txt', 'file_remote.txt'])
    assert output_retrieved.get_object_content('file_local.txt') == content_local
    assert output_retrieved.get_object_content('file_remote.txt') == content_remote

    # Check the remote folder
    assert 'file_local.txt' in output_remotedir.listdir()
    assert 'file_remote.txt' in output_remotedir.listdir()
    output_remotedir.getfile(relpath='file_local.txt', destpath=str(tmp_path / 'retrieved_local.txt'))
    output_remotedir.getfile(relpath='file_remote.txt', destpath=str(tmp_path / 'retrieved_remote.txt'))
    assert (tmp_path / 'retrieved_local.txt').read_text() == content_local
    assert (tmp_path / 'retrieved_remote.txt').read_text() == content_remote
Example #19
from aiida import orm
from aiida.common import AttributeDict


def generate_inputs_3d():
    """Return inputs that parser will expect for the 3D case."""

    inputs = {
        'parent_folder': orm.FolderData().store(),
        'parameters': orm.Dict(dict={
            'INPUTPP': {
                'plot_num': 11
            },
            'PLOT': {
                'iflag': 3,
            }
        })
    }
    return AttributeDict(inputs)
Example #20
    def test_parser_retrieved(self):
        """Verify that the `retrieved` property returns the retrieved `FolderData` node."""
        node = orm.CalcJobNode(
            computer=self.computer,
            process_type=ArithmeticAddCalculation.build_process_type())
        node.set_option('resources', {
            'num_machines': 1,
            'num_mpiprocs_per_machine': 1
        })
        node.set_option('max_wallclock_seconds', 1800)
        node.store()

        retrieved = orm.FolderData().store()
        retrieved.add_incoming(node,
                               link_type=LinkType.CREATE,
                               link_label='retrieved')

        parser = ArithmeticAddParser(node)
        self.assertEqual(parser.node.uuid, node.uuid)
        self.assertEqual(parser.retrieved.uuid, retrieved.uuid)
Example #21
from aiida import orm
from aiida.common import AttributeDict


def generate_inputs_polar():
    """Return inputs that parser will expect for the polar case."""

    inputs = {
        'parent_folder': orm.FolderData().store(),
        'parameters': orm.Dict(dict={
            'INPUTPP': {
                'plot_num': 11
            },
            'PLOT': {
                'iflag': 4,
                'e1': [[1, 1.0], [2, 0.0], [3, 0.0]],
                'x0': [[1, 0.], [2, 0.], [3, 0.]],
                'nx': 100
            }
        })
    }
    return AttributeDict(inputs)
Example #22
    def test_parse_from_node(self):
        """Test that the `parse_from_node` returns a tuple of the parsed output nodes and a calculation node.

        The calculation node represents the parsing process.
        """
        summed = 3
        output_filename = 'aiida.out'

        # Mock the `CalcJobNode` which should have the `retrieved` folder containing the sum in the output file.
        # This is the value that should be parsed into the `sum` output node.
        node = orm.CalcJobNode(
            computer=self.computer,
            process_type=ArithmeticAddCalculation.build_process_type())
        node.set_option('resources', {
            'num_machines': 1,
            'num_mpiprocs_per_machine': 1
        })
        node.set_option('max_wallclock_seconds', 1800)
        node.set_option('output_filename', output_filename)
        node.store()

        retrieved = orm.FolderData()
        retrieved.put_object_from_filelike(io.StringIO('{}'.format(summed)),
                                           output_filename)
        retrieved.store()
        retrieved.add_incoming(node,
                               link_type=LinkType.CREATE,
                               link_label='retrieved')

        for cls in [ArithmeticAddParser, SimpleArithmeticAddParser]:
            result, calcfunction = cls.parse_from_node(node)

            self.assertIsInstance(result['sum'], orm.Int)
            self.assertEqual(result['sum'].value, summed)
            self.assertIsInstance(calcfunction, orm.CalcFunctionNode)
            self.assertEqual(calcfunction.exit_status, 0)

        # Verify that the `retrieved_temporary_folder` keyword can be passed; there is no validation of it though
        result, calcfunction = ArithmeticAddParser.parse_from_node(
            node, retrieved_temporary_folder='/some/path')
Example #23
from aiida import orm
from aiida.common import datastructures


def test_put_transfer(fixture_sandbox, aiida_localhost, generate_calc_job, tmp_path):
    """Test a default `TransferCalculation`."""

    file1 = tmp_path / 'file1.txt'
    file1.write_text('file 1 content')
    folder = tmp_path / 'folder'
    folder.mkdir()
    file2 = folder / 'file2.txt'
    file2.write_text('file 2 content')
    data_source = orm.FolderData(tree=str(tmp_path))

    entry_point_name = 'core.transfer'
    list_of_files = [
        ('data_source', 'file1.txt', 'folder/file1.txt'),
        ('data_source', 'folder/file2.txt', 'file2.txt'),
    ]
    list_of_nodes = {'data_source': data_source}
    instructions = orm.Dict(dict={'retrieve_files': False, 'local_files': list_of_files})
    inputs = {'instructions': instructions, 'source_nodes': list_of_nodes, 'metadata': {'computer': aiida_localhost}}

    # Generate calc_info and verify basics
    calc_info = generate_calc_job(fixture_sandbox, entry_point_name, inputs)
    assert isinstance(calc_info, datastructures.CalcInfo)
    assert isinstance(calc_info.codes_info, list)
    assert len(calc_info.codes_info) == 0
    assert calc_info.skip_submit

    # Check that the lists were set correctly
    copy_list = [
        (data_source.uuid, 'file1.txt', 'folder/file1.txt'),
        (data_source.uuid, 'folder/file2.txt', 'file2.txt'),
    ]
    assert sorted(calc_info.remote_symlink_list) == sorted(list())
    assert sorted(calc_info.remote_copy_list) == sorted(list())
    assert sorted(calc_info.local_copy_list) == sorted(copy_list)
    assert sorted(calc_info.retrieve_list) == sorted(list())
Example #24
    def test_cif_structure_roundtrip(self):
        from aiida.tools.dbexporters.tcod import export_cif, export_values
        from aiida.common.folders import SandboxFolder
        import tempfile

        with tempfile.NamedTemporaryFile(mode='w+') as tmpf:
            tmpf.write('''
                data_test
                _cell_length_a    10
                _cell_length_b    10
                _cell_length_c    10
                _cell_angle_alpha 90
                _cell_angle_beta  90
                _cell_angle_gamma 90
                loop_
                _atom_site_label
                _atom_site_fract_x
                _atom_site_fract_y
                _atom_site_fract_z
                C 0 0 0
                O 0.5 0.5 0.5
            ''')
            tmpf.flush()
            a = orm.CifData(filepath=tmpf.name)

        c = a.get_structure()
        c.store()
        pd = orm.Dict()

        code = orm.Code(local_executable='test.sh')
        with tempfile.NamedTemporaryFile(mode='w+') as tmpf:
            tmpf.write("#/bin/bash\n\necho test run\n")
            tmpf.flush()
            code.put_object_from_filelike(tmpf, 'test.sh')

        code.store()

        calc = orm.CalcJobNode(computer=self.computer)
        calc.set_option('resources', {
            'num_machines': 1,
            'num_mpiprocs_per_machine': 1
        })
        calc.add_incoming(code, LinkType.INPUT_CALC, "code")
        calc.set_option('environment_variables', {
            'PATH': '/dev/null',
            'USER': 'unknown'
        })

        with tempfile.NamedTemporaryFile(mode='w+', prefix="Fe") as tmpf:
            tmpf.write("<UPF version=\"2.0.1\">\nelement=\"Fe\"\n")
            tmpf.flush()
            upf = orm.UpfData(filepath=tmpf.name)
            upf.store()
            calc.add_incoming(upf, LinkType.INPUT_CALC, "upf")

        with tempfile.NamedTemporaryFile(mode='w+') as tmpf:
            tmpf.write("data_test")
            tmpf.flush()
            cif = orm.CifData(filepath=tmpf.name)
            cif.store()
            calc.add_incoming(cif, LinkType.INPUT_CALC, "cif")

        with SandboxFolder() as fhandle:
            calc.put_object_from_tree(fhandle.abspath)
        calc.store()

        fd = orm.FolderData()
        with fd.open('_scheduler-stdout.txt', 'w') as fhandle:
            fhandle.write(u"standard output")

        with fd.open('_scheduler-stderr.txt', 'w') as fhandle:
            fhandle.write(u"standard error")

        fd.store()
        fd.add_incoming(calc, LinkType.CREATE, calc.link_label_retrieved)

        pd.add_incoming(calc, LinkType.CREATE, "create1")
        pd.store()

        with self.assertRaises(ValueError):
            export_cif(c, parameters=pd)

        c.add_incoming(calc, LinkType.CREATE, "create2")
        export_cif(c, parameters=pd)

        values = export_values(c, parameters=pd)
        values = values['0']

        self.assertEqual(values['_tcod_computation_environment'],
                         ['PATH=/dev/null\nUSER=unknown'])
        self.assertEqual(values['_tcod_computation_command'],
                         ['cd 1; ./_aiidasubmit.sh'])
Example #25
    def create_provenance(self):
        """create an example provenance graph
        """
        pd0 = orm.Dict()
        pd0.label = 'pd0'
        pd0.store()

        pd1 = orm.Dict()
        pd1.label = 'pd1'
        pd1.store()

        wc1 = orm.WorkChainNode()
        wc1.set_process_state(ProcessState.RUNNING)
        wc1.add_incoming(pd0,
                         link_type=LinkType.INPUT_WORK,
                         link_label='input1')
        wc1.add_incoming(pd1,
                         link_type=LinkType.INPUT_WORK,
                         link_label='input2')
        wc1.store()

        calc1 = orm.CalcJobNode()
        calc1.computer = self.computer
        calc1.set_option('resources', {
            'num_machines': 1,
            'num_mpiprocs_per_machine': 1
        })
        calc1.label = 'calc1'
        calc1.set_process_state(ProcessState.FINISHED)
        calc1.set_exit_status(0)
        calc1.add_incoming(pd0,
                           link_type=LinkType.INPUT_CALC,
                           link_label='input1')
        calc1.add_incoming(pd1,
                           link_type=LinkType.INPUT_CALC,
                           link_label='input2')
        calc1.add_incoming(wc1,
                           link_type=LinkType.CALL_CALC,
                           link_label='call1')
        calc1.store()

        rd1 = orm.RemoteData()
        rd1.label = 'rd1'
        rd1.set_remote_path('/x/y.py')
        rd1.computer = self.computer
        rd1.store()
        rd1.add_incoming(calc1, link_type=LinkType.CREATE, link_label='output')

        pd2 = orm.Dict()
        pd2.label = 'pd2'
        pd2.store()

        calcf1 = orm.CalcFunctionNode()
        calcf1.label = 'calcf1'
        calcf1.set_process_state(ProcessState.FINISHED)
        calcf1.set_exit_status(200)
        calcf1.add_incoming(rd1,
                            link_type=LinkType.INPUT_CALC,
                            link_label='input1')
        calcf1.add_incoming(pd2,
                            link_type=LinkType.INPUT_CALC,
                            link_label='input2')
        calcf1.add_incoming(wc1,
                            link_type=LinkType.CALL_CALC,
                            link_label='call2')
        calcf1.store()

        pd3 = orm.Dict()
        pd3.label = 'pd3'

        fd1 = orm.FolderData()
        fd1.label = 'fd1'

        pd3.add_incoming(calcf1,
                         link_type=LinkType.CREATE,
                         link_label='output1')
        pd3.store()
        fd1.add_incoming(calcf1,
                         link_type=LinkType.CREATE,
                         link_label='output2')
        fd1.store()

        pd3.add_incoming(wc1, link_type=LinkType.RETURN, link_label='output1')
        fd1.add_incoming(wc1, link_type=LinkType.RETURN, link_label='output2')

        return AttributeDict({
            'pd0': pd0,
            'pd1': pd1,
            'calc1': calc1,
            'rd1': rd1,
            'pd2': pd2,
            'calcf1': calcf1,
            'pd3': pd3,
            'fd1': fd1,
            'wc1': wc1
        })
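
A brief usage sketch: the returned `AttributeDict` exposes each node by its label, so tests can traverse the graph directly, e.g.:

nodes = self.create_provenance()

# `fd1` was created by the calcfunction `calcf1` ...
assert nodes.fd1 in nodes.calcf1.get_outgoing(link_type=LinkType.CREATE).all_nodes()

# ... and is also returned by the workchain `wc1`.
assert nodes.fd1 in nodes.wc1.get_outgoing(link_type=LinkType.RETURN).all_nodes()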
Example #26
    def _generate_calc_job_node(
        entry_point_name='base', computer=None, test_name=None, inputs=None, attributes=None, retrieve_temporary=None
    ):
        """Fixture to generate a mock `CalcJobNode` for testing parsers.

        :param entry_point_name: entry point name of the calculation class
        :param computer: a `Computer` instance
        :param test_name: relative path of directory with test output files in the `fixtures/{entry_point_name}` folder.
        :param inputs: any optional nodes to add as input links to the current CalcJobNode
        :param attributes: any optional attributes to set on the node
        :param retrieve_temporary: optional tuple of an absolute filepath of a temporary directory and a list of
            filenames that should be written to this directory, which will serve as the `retrieved_temporary_folder`.
            For now this only works with top-level files and does not support files nested in directories.
        :return: `CalcJobNode` instance with an attached `FolderData` as the `retrieved` node.
        """
        import os
        import shutil

        from aiida import orm
        from aiida.common import LinkType
        from aiida.plugins.entry_point import format_entry_point_string

        if computer is None:
            computer = fixture_localhost

        filepath_folder = None

        if test_name is not None:
            basepath = os.path.dirname(os.path.abspath(__file__))
            filename = os.path.join(entry_point_name[len('quantumespresso.'):], test_name)
            filepath_folder = os.path.join(basepath, 'parsers', 'fixtures', filename)
            filepath_input = os.path.join(filepath_folder, 'aiida.in')

        entry_point = format_entry_point_string('aiida.calculations', entry_point_name)

        node = orm.CalcJobNode(computer=computer, process_type=entry_point)
        node.set_attribute('input_filename', 'aiida.in')
        node.set_attribute('output_filename', 'aiida.out')
        node.set_attribute('error_filename', 'aiida.err')
        node.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
        node.set_option('max_wallclock_seconds', 1800)

        if attributes:
            node.set_attribute_many(attributes)

        if filepath_folder:
            from qe_tools.utils.exceptions import ParsingError
            from aiida_quantumespresso.tools.pwinputparser import PwInputFile
            try:
                parsed_input = PwInputFile(filepath_input)
            except ParsingError:
                pass
            else:
                # Guard against `inputs` being `None` before adding the parsed nodes to it
                inputs = inputs or {}
                inputs['structure'] = parsed_input.get_structuredata()
                inputs['parameters'] = orm.Dict(dict=parsed_input.namelists)

        if inputs:
            metadata = inputs.pop('metadata', {})
            options = metadata.get('options', {})

            for name, option in options.items():
                node.set_option(name, option)

            for link_label, input_node in flatten_inputs(inputs):
                input_node.store()
                node.add_incoming(input_node, link_type=LinkType.INPUT_CALC, link_label=link_label)

        node.store()

        if retrieve_temporary:
            dirpath, filenames = retrieve_temporary
            for filename in filenames:
                shutil.copy(os.path.join(filepath_folder, filename), os.path.join(dirpath, filename))

        if filepath_folder:
            retrieved = orm.FolderData()
            retrieved.put_object_from_tree(filepath_folder)

            # Remove files that are supposed to be only present in the retrieved temporary folder
            if retrieve_temporary:
                for filename in filenames:
                    retrieved.delete_object(filename)

            retrieved.add_incoming(node, link_type=LinkType.CREATE, link_label='retrieved')
            retrieved.store()

            remote_folder = orm.RemoteData(computer=computer, remote_path='/tmp')
            remote_folder.add_incoming(node, link_type=LinkType.CREATE, link_label='remote_folder')
            remote_folder.store()

        return node
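
A hypothetical invocation of this fixture using the `retrieve_temporary` argument (the entry point, test name and filename are placeholders, not actual fixture contents):

import tempfile

with tempfile.TemporaryDirectory() as dirpath:
    node = _generate_calc_job_node(
        entry_point_name='quantumespresso.pw',  # placeholder entry point
        test_name='default',  # placeholder fixture directory
        retrieve_temporary=(dirpath, ['data.out']),  # placeholder filename
    )
    # `dirpath` now mimics the `retrieved_temporary_folder` passed to a parser, while the
    # same files were removed from the permanent `retrieved` node.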
Example #27
    def test_complex_graph_import_export(self, temp_dir):
        """
        This test checks that a small and bit complex graph can be correctly
        exported and imported.

        It will create the graph, store it to the database, export it to a file
        and import it. In the end it will check if the initial nodes are present
        at the imported graph.
        """
        from aiida.common.exceptions import NotExistent

        calc1 = orm.CalcJobNode()
        calc1.computer = self.computer
        calc1.set_option('resources', {
            'num_machines': 1,
            'num_mpiprocs_per_machine': 1
        })
        calc1.label = 'calc1'
        calc1.store()

        pd1 = orm.Dict()
        pd1.label = 'pd1'
        pd1.store()

        pd2 = orm.Dict()
        pd2.label = 'pd2'
        pd2.store()

        rd1 = orm.RemoteData()
        rd1.label = 'rd1'
        rd1.set_remote_path('/x/y.py')
        rd1.computer = self.computer
        rd1.store()
        rd1.add_incoming(calc1, link_type=LinkType.CREATE, link_label='link')

        calc2 = orm.CalcJobNode()
        calc2.computer = self.computer
        calc2.set_option('resources', {
            'num_machines': 1,
            'num_mpiprocs_per_machine': 1
        })
        calc2.label = 'calc2'
        calc2.add_incoming(pd1,
                           link_type=LinkType.INPUT_CALC,
                           link_label='link1')
        calc2.add_incoming(pd2,
                           link_type=LinkType.INPUT_CALC,
                           link_label='link2')
        calc2.add_incoming(rd1,
                           link_type=LinkType.INPUT_CALC,
                           link_label='link3')
        calc2.store()

        fd1 = orm.FolderData()
        fd1.label = 'fd1'
        fd1.store()
        fd1.add_incoming(calc2, link_type=LinkType.CREATE, link_label='link')

        calc1.seal()
        calc2.seal()

        node_uuids_labels = {
            calc1.uuid: calc1.label,
            pd1.uuid: pd1.label,
            pd2.uuid: pd2.label,
            rd1.uuid: rd1.label,
            calc2.uuid: calc2.label,
            fd1.uuid: fd1.label
        }

        filename = os.path.join(temp_dir, 'export.aiida')
        export([fd1], filename=filename, silent=True)

        self.clean_db()
        self.create_user()

        import_data(filename, silent=True, ignore_unknown_nodes=True)

        for uuid, label in node_uuids_labels.items():
            try:
                orm.load_node(uuid)
            except NotExistent:
                self.fail(
                    'Node with UUID {} and label {} was not found.'.format(
                        uuid, label))
Example #28
def window_search_builder(test_data_dir, code_wannier90):  # pylint: disable=too-many-locals,useless-suppression
    """
    Sets up the process builder for window_search tests, and adds the inputs.
    """

    builder = WindowSearch.get_builder()

    input_folder = orm.FolderData()
    input_folder_path = test_data_dir / 'wannier_input_folder'
    for filename in os.listdir(input_folder_path):
        input_folder.put_object_from_file(
            path=str((input_folder_path / filename).resolve()), key=filename
        )
    builder.wannier.local_input_folder = input_folder

    builder.wannier.code = code_wannier90
    builder.code_tbmodels = orm.Code.get_from_string('tbmodels')

    builder.model_evaluation_workflow = BandDifferenceModelEvaluation
    builder.model_evaluation = {
        'code_bands_inspect': orm.Code.get_from_string('bands_inspect'),
    }
    builder.reference_bands = read(test_data_dir / 'bands.hdf5')

    initial_window = orm.List()
    initial_window.extend([-4.5, -4, 6.5, 16])
    builder.initial_window = initial_window
    builder.window_tol = orm.Float(1.5)

    a = 3.2395  # pylint: disable=invalid-name
    structure = orm.StructureData()
    structure.set_pymatgen_structure(
        pymatgen.Structure(
            lattice=[[0, a, a], [a, 0, a], [a, a, 0]],
            species=['In', 'Sb'],
            coords=[[0] * 3, [0.25] * 3]
        )
    )
    builder.structure = structure
    wannier_parameters = orm.Dict(
        dict=dict(
            num_wann=14,
            num_bands=36,
            dis_num_iter=1000,
            num_iter=0,
            spinors=True,
            mp_grid=[6, 6, 6],
        )
    )
    builder.wannier.parameters = wannier_parameters
    builder.wannier.metadata.options = {
        'resources': {
            'num_machines': 1,
            'tot_num_mpiprocs': 1
        },
        'withmpi': False
    }

    builder.symmetries = orm.SinglefileData(
        file=str((test_data_dir / 'symmetries.hdf5').resolve())
    )
    slice_idx = orm.List()
    slice_idx.extend([0, 2, 3, 1, 5, 6, 4, 7, 9, 10, 8, 12, 13, 11])
    builder.slice_idx = slice_idx

    k_values = [
        x if x <= 0.5 else -1 + x
        for x in np.linspace(0, 1, 6, endpoint=False)
    ]
    k_points = [
        list(reversed(k)) for k in itertools.product(k_values, repeat=3)
    ]
    wannier_bands = orm.BandsData()
    wannier_bands.set_kpoints(k_points)
    # Just let every energy window be valid.
    wannier_bands.set_bands(np.array([[0] * 14] * len(k_points)))
    builder.wannier_bands = wannier_bands
    return builder
Example #29
    def inner(window_values, slice_, symmetries):
        builder = RunWindow.get_builder()

        input_folder = orm.FolderData()
        input_folder_path = test_data_dir / 'wannier_input_folder'
        for filename in os.listdir(input_folder_path):
            input_folder.put_object_from_file(
                path=str((input_folder_path / filename).resolve()), key=filename
            )
        builder.wannier.local_input_folder = input_folder

        builder.wannier.code = code_wannier90
        builder.code_tbmodels = orm.Code.get_from_string('tbmodels')
        builder.model_evaluation_workflow = BandDifferenceModelEvaluation
        builder.reference_bands = read(test_data_dir / 'bands.hdf5')
        builder.model_evaluation = {
            'code_bands_inspect': orm.Code.get_from_string('bands_inspect'),
        }

        window = orm.List(list=window_values)
        builder.window = window

        k_values = [
            x if x <= 0.5 else -1 + x
            for x in np.linspace(0, 1, 6, endpoint=False)
        ]
        k_points = [
            list(reversed(k)) for k in itertools.product(k_values, repeat=3)
        ]
        wannier_kpoints = orm.KpointsData()
        wannier_kpoints.set_kpoints(k_points)
        builder.wannier.kpoints = wannier_kpoints

        wannier_bands = orm.BandsData()
        wannier_bands.set_kpoints(k_points)
        # Just let every energy window be valid.
        wannier_bands.set_bands(
            np.array([[-20] * 10 + [-0.5] * 7 + [0.5] * 7 + [20] * 12] *
                     len(k_points)))
        builder.wannier_bands = wannier_bands

        a = 3.2395  # pylint: disable=invalid-name
        structure = orm.StructureData()
        structure.set_pymatgen_structure(
            pymatgen.Structure(lattice=[[0, a, a], [a, 0, a], [a, a, 0]],
                               species=['In', 'Sb'],
                               coords=[[0] * 3, [0.25] * 3]))
        builder.structure = structure
        wannier_parameters = orm.Dict(dict=dict(
            num_wann=14,
            num_bands=36,
            dis_num_iter=1000,
            num_iter=0,
            spinors=True,
            mp_grid=[6, 6, 6],
        ))
        builder.wannier.parameters = wannier_parameters
        builder.wannier.metadata.options = {
            'resources': {
                'num_machines': 1,
                'tot_num_mpiprocs': 1
            },
            'withmpi': False
        }
        if symmetries:
            builder.symmetries = orm.SinglefileData(
                file=str(test_data_dir / 'symmetries.hdf5'))
        if slice_:
            slice_idx = orm.List()
            slice_idx.extend([0, 2, 3, 1, 5, 6, 4, 7, 9, 10, 8, 12, 13, 11])
            builder.slice_idx = slice_idx
        return builder
Example #30
    @classmethod
    def setUpClass(cls, *args, **kwargs):  # pylint: disable=too-many-locals, too-many-statements
        """
        Basides the standard setup we need to add few more objects in the
        database to be able to explore different requests/filters/orderings etc.
        """
        # call parent setUpClass method
        super(RESTApiTestCase, cls).setUpClass()

        # Connect the app and the api: initialize the api by connecting it to the app
        # (N.B. respect this order when calling `api.__init__`)
        kwargs = dict(PREFIX=cls._url_prefix,
                      PERPAGE_DEFAULT=cls._PERPAGE_DEFAULT,
                      LIMIT_DEFAULT=cls._LIMIT_DEFAULT)

        cls.app = App(__name__)
        cls.app.config['TESTING'] = True
        AiidaApi(cls.app, **kwargs)

        # create test inputs
        cell = ((2., 0., 0.), (0., 2., 0.), (0., 0., 2.))
        structure = orm.StructureData(cell=cell)
        structure.append_atom(position=(0., 0., 0.), symbols=['Ba'])
        structure.store()
        structure.add_comment('This is test comment.')
        structure.add_comment('Add another comment.')

        cif = orm.CifData(ase=structure.get_ase())
        cif.store()

        parameter1 = orm.Dict(dict={'a': 1, 'b': 2})
        parameter1.store()

        parameter2 = orm.Dict(dict={'c': 3, 'd': 4})
        parameter2.store()

        kpoint = orm.KpointsData()
        kpoint.set_kpoints_mesh([4, 4, 4])
        kpoint.store()

        resources = {'num_machines': 1, 'num_mpiprocs_per_machine': 1}

        calcfunc = orm.CalcFunctionNode(computer=cls.computer)
        calcfunc.store()

        calc = orm.CalcJobNode(computer=cls.computer)
        calc.set_option('resources', resources)
        calc.set_attribute('attr1', 'OK')
        calc.set_attribute('attr2', 'OK')
        calc.set_extra('extra1', False)
        calc.set_extra('extra2', 'extra_info')

        calc.add_incoming(structure,
                          link_type=LinkType.INPUT_CALC,
                          link_label='link_structure')
        calc.add_incoming(parameter1,
                          link_type=LinkType.INPUT_CALC,
                          link_label='link_parameter')

        aiida_in = 'The input file\nof the CalcJob node'
        # Add the calcjob_inputs folder with the aiida.in file to the CalcJobNode repository
        with tempfile.NamedTemporaryFile(mode='w+') as handle:
            handle.write(aiida_in)
            handle.flush()
            handle.seek(0)
            calc.put_object_from_filelike(handle,
                                          key='calcjob_inputs/aiida.in',
                                          force=True)
        calc.store()

        # create log message for calcjob
        import logging
        from aiida.common.log import LOG_LEVEL_REPORT
        from aiida.common.timezone import now
        from aiida.orm import Log

        log_record = {
            'time': now(),
            'loggername': 'loggername',
            'levelname': logging.getLevelName(LOG_LEVEL_REPORT),
            'dbnode_id': calc.id,
            'message': 'This is a template record message',
            'metadata': {
                'content': 'test'
            },
        }
        Log(**log_record)

        aiida_out = 'The output file\nof the CalcJob node'
        retrieved_outputs = orm.FolderData()
        # Add the calcjob_outputs folder with the aiida.out file to the FolderData node
        with tempfile.NamedTemporaryFile(mode='w+') as handle:
            handle.write(aiida_out)
            handle.flush()
            handle.seek(0)
            retrieved_outputs.put_object_from_filelike(
                handle, key='calcjob_outputs/aiida.out', force=True)
        retrieved_outputs.store()
        retrieved_outputs.add_incoming(calc,
                                       link_type=LinkType.CREATE,
                                       link_label='retrieved')

        kpoint.add_incoming(calc,
                            link_type=LinkType.CREATE,
                            link_label='create')

        calc1 = orm.CalcJobNode(computer=cls.computer)
        calc1.set_option('resources', resources)
        calc1.store()

        dummy_computers = [{
            'name': 'test1',
            'hostname': 'test1.epfl.ch',
            'transport_type': 'ssh',
            'scheduler_type': 'pbspro',
        }, {
            'name': 'test2',
            'hostname': 'test2.epfl.ch',
            'transport_type': 'ssh',
            'scheduler_type': 'torque',
        }, {
            'name': 'test3',
            'hostname': 'test3.epfl.ch',
            'transport_type': 'local',
            'scheduler_type': 'slurm',
        }, {
            'name': 'test4',
            'hostname': 'test4.epfl.ch',
            'transport_type': 'ssh',
            'scheduler_type': 'slurm',
        }]

        for dummy_computer in dummy_computers:
            computer = orm.Computer(**dummy_computer)
            computer.store()

        # Prepare typical REST responses
        cls.process_dummy_data()