示例#1
0
    def check_calculation(
            calc_node,
            expected_outgoing_labels,
            error_include=(("results", "errors"), ("results",
                                                   "parser_errors")),
    ):
        """Assert that a calculation node completed successfully.

        :param calc_node: calculation node to inspect
        :param expected_outgoing_labels: outgoing link labels that must exist
        :param error_include: (link label, attribute name) pairs whose values,
            when present, are appended to the failure message
        :raises AssertionError: if the process failed or a link is missing
        """
        from aiida.cmdline.utils.common import get_calcjob_report

        failed = (calc_node.get_attribute("exit_status") != 0
                  or calc_node.get_attribute("process_state") != "finished")
        if failed:
            manager = calc_node.get_outgoing()
            labels = manager.all_link_labels()
            # assemble a diagnostic report, then raise it as one message
            parts = ["Process Failed:\n{}".format(yaml.dump(calc_node.attributes))]
            parts.append("outgoing_nodes: {}".format(labels))
            for label, attr_name in error_include:
                if label not in labels:
                    continue
                attr_value = manager.get_node_by_label(label).get_attribute(
                    attr_name, None)
                if attr_value is not None:
                    parts.append("{}.{}: {}".format(label, attr_name, attr_value))
            parts.append("\nReport:\n{}".format(get_calcjob_report(calc_node)))
            raise AssertionError("\n".join(parts))

        link_labels = calc_node.get_outgoing().all_link_labels()
        for outgoing in expected_outgoing_labels:
            if outgoing not in link_labels:
                raise AssertionError(
                    "missing outgoing node link '{}': {}".format(
                        outgoing, link_labels))
示例#2
0
def test_optimize_process(
    db_test_app,
    get_potential_data,
    potential_type,
    data_regression,
):
    """Test the functionality of the optimization calculation type"""
    calc_plugin = 'lammps.optimize'
    code = db_test_app.get_or_create_code(calc_plugin)
    pot_data = get_potential_data(potential_type)
    potential = DataFactory('lammps.potential')(
        potential_type=pot_data.type, data=pot_data.data)
    parameters = get_calc_parameters(
        get_lammps_version(code), calc_plugin, potential.default_units,
        potential_type)
    builder = code.get_builder()
    inputs = {
        'metadata': tests.get_default_metadata(),
        'code': code,
        'structure': pot_data.structure,
        'potential': potential,
        'parameters': parameters,
    }
    builder._update(inputs)  # pylint: disable=protected-access

    calc_node = run_get_node(builder).node

    if not calc_node.is_finished_ok:
        print(calc_node.attributes)
        print(get_calcjob_report(calc_node))
        raise Exception(
            f'finished with exit message: {calc_node.exit_message}')

    assert set(calc_node.get_outgoing().all_link_labels()).issuperset(
        ['results', 'trajectory_data', 'structure'])

    # optimization steps may differ between lammps versions, so drop them
    # from the regression comparison
    trajectory_data = {
        key: val
        for key, val in calc_node.outputs.trajectory_data.attributes.items()
        if key != 'number_steps'
    }
    data_regression.check({
        'results': sanitize_results(calc_node.outputs.results.get_dict(), 1),
        'trajectory_data': trajectory_data,
        'structure': {
            'kind_names': calc_node.outputs.structure.get_kind_names()
        },
    })
示例#3
0
def test_md_process(
    db_test_app,
    get_potential_data,
    potential_type,
    data_regression,
):
    """Test the functionality of the md calculation type"""
    calc_plugin = 'lammps.md'
    code = db_test_app.get_or_create_code(calc_plugin)
    pot_data = get_potential_data(potential_type)
    potential = DataFactory('lammps.potential')(
        potential_type=pot_data.type, data=pot_data.data)
    version = get_lammps_version(code)
    # regression baselines are keyed by the lammps release year (last four
    # characters of the version string)
    version_year = version[-4:]
    parameters = get_calc_parameters(
        version, calc_plugin, potential.default_units, potential_type)
    builder = code.get_builder()
    builder._update({  # pylint: disable=protected-access
        'metadata': tests.get_default_metadata(),
        'code': code,
        'structure': pot_data.structure,
        'potential': potential,
        'parameters': parameters,
    })

    calc_node = run_get_node(builder).node

    if not calc_node.is_finished_ok:
        print(calc_node.attributes)
        print(get_calcjob_report(calc_node))
        raise Exception(
            f'finished with exit message: {calc_node.exit_message}')

    assert set(calc_node.get_outgoing().all_link_labels()).issuperset(
        ['results', 'trajectory_data', 'system_data'])

    regression_data = {
        'results': sanitize_results(
            calc_node.outputs.results.get_dict(), round_energy=1),
        'system_data': calc_node.outputs.system_data.attributes,
        'trajectory_data': calc_node.outputs.trajectory_data.attributes,
    }
    data_regression.check(
        regression_data,
        basename=f'test_md_process-{potential_type}-{version_year}',
    )
def test_md_multi_process(
    db_test_app, get_potential_data, potential_type, data_regression
):
    """Run the ``lammps.md.multi`` calculation and regress its outputs."""
    calc_plugin = "lammps.md.multi"
    code = db_test_app.get_or_create_code(calc_plugin)
    pot_data = get_potential_data(potential_type)
    potential = DataFactory("lammps.potential")(
        type=pot_data.type, data=pot_data.data)
    parameters = get_calc_parameters(
        get_lammps_version(code), calc_plugin, potential.default_units,
        potential_type)
    builder = code.get_builder()
    inputs = {
        "metadata": tests.get_default_metadata(),
        "code": code,
        "structure": pot_data.structure,
        "potential": potential,
        "parameters": parameters,
    }
    builder._update(inputs)  # pylint: disable=protected-access

    calc_node = run_get_node(builder).node

    if not calc_node.is_finished_ok:
        print(calc_node.attributes)
        print(get_calcjob_report(calc_node))
        raise Exception(
            "finished with exit message: {}".format(calc_node.exit_message))

    # one trajectory/system output pair per named md stage
    expected_links = [
        "results",
        "retrieved",
        "trajectory__thermalise",
        "trajectory__equilibrate",
        "system__thermalise",
        "system__equilibrate",
    ]
    assert set(calc_node.get_outgoing().all_link_labels()).issuperset(
        expected_links)

    outputs = calc_node.outputs
    data_regression.check({
        "retrieved": outputs.retrieved.list_object_names(),
        "results": sanitize_results(outputs.results.get_dict(),
                                    round_energy=1),
        "system__thermalise": outputs.system__thermalise.attributes,
        "system__equilibrate": outputs.system__equilibrate.attributes,
        "trajectory__thermalise": outputs.trajectory__thermalise.attributes,
        "trajectory__equilibrate": outputs.trajectory__equilibrate.attributes,
    })
def test_fleur_scf_fleurinp_Si(
        #run_with_cache,
        with_export_cache,
        fleur_local_code,
        create_fleurinp,
        clear_database,
        clear_spec):
    """Full example using the scf workchain with just fleurinp data as input.

    Several fleur runs are needed until convergence. The workchain is run
    inside an export-cache context, so results are presumably reused from the
    archive rather than recomputed — confirm against the fixture definition.
    """
    # scheduler options for the fleur calculations launched by the workchain
    options = {
        'resources': {
            'num_machines': 1,
            'num_mpiprocs_per_machine': 1
        },
        'max_wallclock_seconds': 5 * 60,
        'withmpi': False,
        'custom_scheduler_commands': ''
    }

    FleurCode = fleur_local_code

    # create process builder to set parameters
    builder = FleurScfWorkChain.get_builder()
    builder.metadata.description = 'Simple Fleur SCF test for Si bulk with fleurinp data given'
    builder.metadata.label = 'FleurSCF_test_Si_bulk'
    builder.fleurinp = create_fleurinp(TEST_INP_XML_PATH).store()
    builder.options = Dict(dict=options).store()
    builder.fleur = FleurCode
    #print(builder)

    # now run calculation, inside the export cache archive context
    #run_with_cache(builder)
    data_dir_path = os.path.join(
        aiida_path, '../tests/workflows/caches/fleur_scf_fleurinp_Si.tar.gz')
    with with_export_cache(data_dir_abspath=data_dir_path):
        out, node = run_get_node(builder)
    #print(out)
    #print(node)

    print(get_workchain_report(node, 'REPORT'))

    #assert node.is_finished_ok
    # check output: the scf output parameter dict
    n = out['output_scf_wc_para']
    n = n.get_dict()

    print(get_calcjob_report(load_node(n['last_calc_uuid'])))

    #print(n)
    # charge distance must be converged close to the reference value
    assert abs(n.get('distance_charge') - 9.8993e-06) < 2.0e-6
    assert n.get('errors') == []
示例#6
0
def test_force_process(
    db_test_app,
    get_potential_data,
    potential_type,
    data_regression,
):
    """Test the functionality of the force calculation type"""
    calc_plugin = 'lammps.force'
    code = db_test_app.get_or_create_code(calc_plugin)
    pot_data = get_potential_data(potential_type)
    potential = DataFactory('lammps.potential')(
        potential_type=pot_data.type, data=pot_data.data)
    parameters = get_calc_parameters(
        get_lammps_version(code), calc_plugin, potential.default_units,
        potential_type)
    builder = code.get_builder()
    inputs = {
        'metadata': tests.get_default_metadata(),
        'code': code,
        'structure': pot_data.structure,
        'potential': potential,
        'parameters': parameters,
    }
    builder._update(inputs)  # pylint: disable=protected-access

    calc_node = run_get_node(builder).node

    if not calc_node.is_finished_ok:
        print(calc_node.attributes)
        print(get_calcjob_report(calc_node))
        raise Exception('finished with exit message: {}'.format(
            calc_node.exit_message))

    assert set(calc_node.get_outgoing().all_link_labels()).issuperset(
        ['results', 'arrays'])

    data_regression.check({
        'results': sanitize_results(calc_node.outputs.results.get_dict(), 1),
        'arrays': calc_node.outputs.arrays.attributes,
    })
示例#7
0
def test_force_process(db_test_app, get_potential_data, potential_type):
    """Run the ``lammps.force`` calculation and check its parsed outputs."""
    calc_plugin = 'lammps.force'
    code = db_test_app.get_or_create_code(calc_plugin)
    pot_data = get_potential_data(potential_type)
    potential = DataFactory("lammps.potential")(
        structure=pot_data.structure, type=pot_data.type, data=pot_data.data)
    parameters = get_calc_parameters(
        calc_plugin, potential.default_units, potential_type)
    builder = code.get_builder()
    builder._update({
        "metadata": tests.get_default_metadata(),
        "code": code,
        "structure": pot_data.structure,
        "potential": potential,
        "parameters": parameters
    })

    calc_node = run_get_node(builder).node

    if not calc_node.is_finished_ok:
        print(calc_node.attributes)
        print(get_calcjob_report(calc_node))
        raise Exception("finished with exit message: {}".format(
            calc_node.exit_message))

    assert set(calc_node.get_outgoing().all_link_labels()).issuperset(
        ['results', 'arrays'])

    results = calc_node.outputs.results.get_dict()
    assert set(results.keys()).issuperset([
        'energy', 'warnings', 'final_variables', 'units_style',
        'energy_units', 'force_units', 'parser_class', 'parser_version'
    ])
    assert results['warnings'].strip() == pot_data.output["warnings"]
    assert results['energy'] == pytest.approx(
        pot_data.output['initial_energy'])

    # reaxff potentials additionally yield a per-atom charges array
    if potential_type == "reaxff":
        expected_arrays = {'forces', 'charges'}
    else:
        expected_arrays = {'forces'}
    assert set(calc_node.outputs.arrays.get_arraynames()) == expected_arrays
    assert calc_node.outputs.arrays.get_shape('forces') == (
        1, len(pot_data.structure.sites), 3)
    def update(self):
        """Update report that is shown."""
        process = self.process
        if process is None:
            return

        # pick the report builder matching the process node type
        if isinstance(process, CalcJobNode):
            report = get_calcjob_report(process)
        elif isinstance(process, WorkChainNode):
            report = get_workchain_report(
                process, self.levelname, self.indent_size, self.max_depth)
        elif isinstance(process, (CalcFunctionNode, WorkFunctionNode)):
            report = get_process_function_report(process)
        else:
            report = 'Nothing to show for node type {}'.format(
                process.__class__)
        # translate newlines to HTML line breaks for display
        self.value = report.replace('\n', '<br/>')
def test_optimize_process(db_test_app, get_potential_data, potential_type,
                          data_regression):
    """Run the ``lammps.optimize`` calculation and regress its outputs."""
    calc_plugin = "lammps.optimize"
    code = db_test_app.get_or_create_code(calc_plugin)
    pot_data = get_potential_data(potential_type)
    potential = DataFactory("lammps.potential")(
        type=pot_data.type, data=pot_data.data)
    parameters = get_calc_parameters(
        get_lammps_version(code), calc_plugin, potential.default_units,
        potential_type)
    builder = code.get_builder()
    inputs = {
        "metadata": tests.get_default_metadata(),
        "code": code,
        "structure": pot_data.structure,
        "potential": potential,
        "parameters": parameters,
    }
    builder._update(inputs)

    calc_node = run_get_node(builder).node

    if not calc_node.is_finished_ok:
        print(calc_node.attributes)
        print(get_calcjob_report(calc_node))
        raise Exception("finished with exit message: {}".format(
            calc_node.exit_message))

    assert set(calc_node.get_outgoing().all_link_labels()).issuperset(
        ["results", "trajectory_data", "structure"])

    # optimization steps may differ between lammps versions, so drop them
    # from the regression comparison
    trajectory_data = {
        key: val
        for key, val in calc_node.outputs.trajectory_data.attributes.items()
        if key != "number_steps"
    }
    data_regression.check({
        "results": sanitize_results(calc_node.outputs.results.get_dict(), 1),
        "trajectory_data": trajectory_data,
        "structure": {
            "kind_names": calc_node.outputs.structure.get_kind_names()
        },
    })
示例#10
0
def test_md_process(db_test_app, get_potential_data, potential_type):
    """Run the ``lammps.md`` calculation and check its parsed outputs."""
    calc_plugin = 'lammps.md'
    code = db_test_app.get_or_create_code(calc_plugin)
    pot_data = get_potential_data(potential_type)
    potential = DataFactory("lammps.potential")(
        structure=pot_data.structure, type=pot_data.type, data=pot_data.data)
    parameters = get_calc_parameters(
        calc_plugin, potential.default_units, potential_type)
    builder = code.get_builder()
    builder._update({
        "metadata": tests.get_default_metadata(),
        "code": code,
        "structure": pot_data.structure,
        "potential": potential,
        "parameters": parameters,
    })

    calc_node = run_get_node(builder).node

    if not calc_node.is_finished_ok:
        print(calc_node.attributes)
        print(get_calcjob_report(calc_node))
        raise Exception("finished with exit message: {}".format(
            calc_node.exit_message))

    assert set(calc_node.get_outgoing().all_link_labels()).issuperset(
        ['results', 'trajectory_data', 'system_data'])

    results = calc_node.outputs.results.get_dict()
    assert set(results.keys()).issuperset(
        ['warnings', 'parser_class', 'parser_version'])
    assert results['warnings'].strip() == pot_data.output["warnings"]

    # reaxff potentials additionally record charges and a reax compute column
    if potential_type == "reaxff":
        expected_traj = {'cells', 'positions', 'steps', 'times', 'charges'}
        expected_sys = {'step', 'temp', 'etotal', 'c_reax_1_'}
    else:
        expected_traj = {'cells', 'positions', 'steps', 'times'}
        expected_sys = {'step', 'temp', 'etotal'}
    assert set(
        calc_node.outputs.trajectory_data.get_arraynames()) == expected_traj
    assert set(
        calc_node.outputs.system_data.get_arraynames()) == expected_sys
    assert calc_node.outputs.trajectory_data.numsteps == 101
    assert calc_node.outputs.system_data.get_shape('temp') == (100, )
示例#11
0
def process_report(processes, levelname, indent_size, max_depth):
    """Show the log report for one or multiple processes."""
    from aiida.cmdline.utils.common import get_calcjob_report, get_workchain_report, get_process_function_report
    from aiida.orm import CalcJobNode, WorkChainNode, CalcFunctionNode, WorkFunctionNode

    for process in processes:
        # pick the report builder matching the process node type
        if isinstance(process, CalcJobNode):
            report = get_calcjob_report(process)
        elif isinstance(process, WorkChainNode):
            report = get_workchain_report(process, levelname, indent_size,
                                          max_depth)
        elif isinstance(process, (CalcFunctionNode, WorkFunctionNode)):
            report = get_process_function_report(process)
        else:
            report = f'Nothing to show for node type {process.__class__}'
        echo.echo(report)
示例#12
0
    def do_report(self, arg):
        """Show the report, if the node is a ProcessNode"""
        from aiida.cmdline.utils.common import get_calcjob_report, get_workchain_report, get_process_function_report
        from aiida.orm import CalcJobNode, WorkChainNode, CalcFunctionNode, WorkFunctionNode

        node = self._current_node
        # pick the report builder matching the process node type
        if isinstance(node, CalcJobNode):
            report = get_calcjob_report(node)
        elif isinstance(node, WorkChainNode):
            report = get_workchain_report(
                node, arg.levelname, arg.indent_size, arg.max_depth)
        elif isinstance(node, (CalcFunctionNode, WorkFunctionNode)):
            report = get_process_function_report(node)
        else:
            report = 'Nothing to show for node type {}'.format(node.__class__)
        print(report, file=self.stdout)
示例#13
0
def test_force_process(
    db_test_app, get_potential_data, potential_type, data_regression
):
    """Run the ``lammps.force`` calculation and regress its outputs."""
    calc_plugin = "lammps.force"
    code = db_test_app.get_or_create_code(calc_plugin)
    pot_data = get_potential_data(potential_type)
    potential = DataFactory("lammps.potential")(
        type=pot_data.type, data=pot_data.data)
    parameters = get_calc_parameters(
        get_lammps_version(code), calc_plugin, potential.default_units,
        potential_type)
    builder = code.get_builder()
    inputs = {
        "metadata": tests.get_default_metadata(),
        "code": code,
        "structure": pot_data.structure,
        "potential": potential,
        "parameters": parameters,
    }
    builder._update(inputs)

    calc_node = run_get_node(builder).node

    if not calc_node.is_finished_ok:
        print(calc_node.attributes)
        print(get_calcjob_report(calc_node))
        raise Exception(
            "finished with exit message: {}".format(calc_node.exit_message))

    assert set(calc_node.get_outgoing().all_link_labels()).issuperset(
        ["results", "arrays"])

    data_regression.check({
        "results": sanitize_results(calc_node.outputs.results.get_dict(), 1),
        "arrays": calc_node.outputs.arrays.attributes,
    })
示例#14
0
def test_run_error(db_test_app, plugin_name):
    """Check the parser maps a lammps run error to ERROR_LAMMPS_RUN."""
    retrieved = FolderData()
    # populate the retrieved folder with the fixture output files
    for filename, content in (
            ('log.lammps', get_log()),
            ('trajectory.lammpstrj', get_traj_force()),
            ('_scheduler-stdout.txt', six.ensure_text('ERROR description')),
            ('_scheduler-stderr.txt', ''),
    ):
        with retrieved.open(filename, 'w') as handle:
            handle.write(content)

    calc_node = db_test_app.generate_calcjob_node(plugin_name, retrieved)
    parser = db_test_app.get_parser_cls(plugin_name)
    results, calcfunction = parser.parse_from_node(calc_node)

    print(get_calcjob_report(calc_node))

    assert calcfunction.is_finished, calcfunction.exception
    assert calcfunction.is_failed, calcfunction.exit_status
    assert calcfunction.exit_status == calc_node.process_class.exit_codes.ERROR_LAMMPS_RUN.status
示例#15
0
def test_run_error(db_test_app, plugin_name):
    """Check if the parser runs without producing errors."""
    retrieved = FolderData()
    # populate the retrieved folder with the fixture output files
    for content, filename in (
            (get_log(), 'log.lammps'),
            (get_traj_force(), 'x-trajectory.lammpstrj'),
            ('ERROR description', '_scheduler-stdout.txt'),
            ('', '_scheduler-stderr.txt'),
    ):
        retrieved.put_object_from_filelike(io.StringIO(content), filename)

    calc_node = db_test_app.generate_calcjob_node(plugin_name, retrieved)
    parser = ParserFactory(plugin_name)

    # the trajectory file is also supplied via the temporary retrieve folder
    with db_test_app.sandbox_folder() as temp_path:
        with temp_path.open('x-trajectory.lammpstrj', 'w') as handle:
            handle.write(get_traj_force())
        results, calcfunction = parser.parse_from_node(  # pylint: disable=unused-variable
            calc_node,
            retrieved_temporary_folder=temp_path.abspath,
        )

    print(get_calcjob_report(calc_node))

    assert calcfunction.is_finished, calcfunction.exception
    assert calcfunction.is_failed, calcfunction.exit_status
    assert (calcfunction.exit_status ==
            calc_node.process_class.exit_codes.ERROR_LAMMPS_RUN.status)
示例#16
0
def test_FleurJobCalc_full_mock(aiida_profile, mock_code_factory,
                                create_fleurinp, clear_database,
                                hash_code_by_entrypoint):  # pylint: disable=redefined-outer-name
    """Test the fleur calculation with a mock executable.

    If the datafiles are there the mock replays them; otherwise fleur itself
    is run if an executable was specified.
    """

    # NOTE(review): the listed files are presumably excluded because they
    # vary between runs and would break the mock's data-dir matching — confirm
    mock_code = mock_code_factory(
        label='fleur',
        data_dir_abspath=os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'data_dir/'),
        entry_point=CALC_ENTRY_POINT,
        ignore_files=[
            '_aiidasubmit.sh',
            'cdnc',
            'out',
            'FleurInputSchema.xsd',
            'cdn.hdf',
            'usage.json',  # 'cdn??']
            'cdn00',
            'cdn01',
            'cdn02',
            'cdn03',
            'cdn04',
            'cdn05',
            'cdn06',
            'cdn07',
            'cdn08',
            'cdn09',
            'cdn10',
            'cdn11'
        ])
    #mock_code.append_text = 'rm cdn?? broyd* wkf2 inf cdnc stars pot* FleurInputSchema* cdn.hdf'

    # minimal serial run: single machine, single mpi process, no mpi
    inputs = {
        'fleurinpdata': create_fleurinp(TEST_INP_XML_PATH),
        #'parameters': orm.Dict(dict=parameters),
        'metadata': {
            'options': {
                'resources': {
                    'num_machines': 1,
                    'tot_num_mpiprocs': 1
                },
                'max_wallclock_seconds': int(600),
                'withmpi': False
            }
        }
    }
    #calc = CalculationFactory(CALC_ENTRY_POINT, code=mock_code, **inputs)

    res, node = run_get_node(CalculationFactory(CALC_ENTRY_POINT),
                             code=mock_code,
                             **inputs)

    # dump diagnostics before asserting so failures are easy to debug
    print(get_calcjob_report(node))
    print((res['remote_folder'].list_object_names()))
    print((res['retrieved'].list_object_names()))

    if 'out.error' in res['retrieved'].list_object_names():
        with res['retrieved'].open('out.error', 'r') as efile:
            print(f'Error Output: \n {efile.read()}')

    assert node.is_finished_ok
示例#17
0
def test_FleurinpgenJobCalc_full_mock(aiida_profile, mock_code_factory,
                                      generate_structure_W):  # pylint: disable=redefined-outer-name
    """Test the fleur input generator with a mock executable.

    If the datafiles are there the mock replays them; otherwise inpgen itself
    is run if an executable was specified.
    """
    CALC_ENTRY_POINT = 'fleur.inpgen'

    # inpgen input parameters for bulk tungsten
    parameters = {
        'atom': {
            'element': 'W',
            'rmt': 2.1,
            'jri': 981,
            'lmax': 12,
            'lnonsph': 6,
            'econfig': '[Kr] 4d10 4f14 | 5s2 5p6 6s2 5d4',
            'lo': '5s 5p'
        },
        'comp': {
            'kmax': 5.0,
            'gmaxxc': 12.5,
            'gmax': 15.0
        },
        'kpt': {
            'div1': 3,
            'div2': 3,
            'div3': 3,
            'tkb': 0.0005
        }
    }

    mock_code = mock_code_factory(
        label='inpgen',
        data_dir_abspath=os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'data_dir/'),
        entry_point=CALC_ENTRY_POINT,
        ignore_files=['_aiidasubmit.sh'])
    print(mock_code)
    # minimal serial run: single machine, single mpi process, no mpi
    inputs = {
        'structure': generate_structure_W(),
        'parameters': orm.Dict(dict=parameters),
        'metadata': {
            'options': {
                'resources': {
                    'num_machines': 1,
                    'tot_num_mpiprocs': 1
                },
                'max_wallclock_seconds': 100,
                'withmpi': False
            }
        }
    }
    # build the process class once and reuse it for the run (the original
    # called CalculationFactory twice)
    calc = CalculationFactory(CALC_ENTRY_POINT)
    print(calc)
    res, node = run_get_node(calc, code=mock_code, **inputs)
    print(node)
    # dump diagnostics before asserting so failures are easy to debug
    print(get_calcjob_report(node))
    print(res['remote_folder'].list_object_names())
    print(res['retrieved'].list_object_names())
    assert node.is_finished_ok