Example #1
def run_base_restart_workchain():
    """Run the `AddArithmeticBaseWorkChain` a few times for various inputs."""
    code = load_code(CODENAME_ADD)
    inputs = {
        'add': {
            'x': Int(1),
            'y': Int(2),
            'code': code,
            'settings': Dict(dict={'allow_negative': False}),
            'metadata': {
                'options': {
                    'resources': {
                        'num_machines': 1,
                        'num_mpiprocs_per_machine': 1
                    }
                }
            }
        }
    }

    # Normal inputs should run just fine
    results, node = run.get_node(ArithmeticAddBaseWorkChain, **inputs)
    assert node.is_finished_ok, node.exit_status
    assert len(node.called) == 1
    assert 'sum' in results
    assert results['sum'].value == 3

    # With one negative input the sum will be negative, which fails the calculation. The error handler
    # should fix it, so the second calculation should finish successfully.
    inputs['add']['y'] = Int(-4)
    results, node = run.get_node(ArithmeticAddBaseWorkChain, **inputs)
    assert node.is_finished_ok, node.exit_status
    assert len(node.called) == 2
    assert 'sum' in results
    assert results['sum'].value == 5

    # The silly sanity check aborts the workchain if the sum is bigger than 10
    inputs['add']['y'] = Int(10)
    results, node = run.get_node(ArithmeticAddBaseWorkChain, **inputs)
    assert not node.is_finished_ok, node.process_state
    assert node.exit_status == ArithmeticAddBaseWorkChain.exit_codes.ERROR_TOO_BIG.status, node.exit_status  # pylint: disable=no-member
    assert len(node.called) == 1

    # Check that overriding a handler's default enabled state works
    inputs['add']['y'] = Int(1)
    inputs['handler_overrides'] = Dict(dict={'disabled_handler': True})
    results, node = run.get_node(ArithmeticAddBaseWorkChain, **inputs)
    assert not node.is_finished_ok, node.process_state
    assert node.exit_status == ArithmeticAddBaseWorkChain.exit_codes.ERROR_ENABLED_DOOM.status, node.exit_status  # pylint: disable=no-member
    assert len(node.called) == 1
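The error handling exercised above is provided by AiiDA's `BaseRestartWorkChain` machinery. Below is a minimal sketch of how such a handler is declared; the trigger condition and handler body are assumptions for illustration, not the actual `ArithmeticAddBaseWorkChain` implementation.

from aiida.engine import BaseRestartWorkChain, ProcessHandlerReport, process_handler
from aiida.orm import Int

class ExampleBaseWorkChain(BaseRestartWorkChain):
    # In a real subclass, `_process_class` points at the wrapped CalcJob.

    @process_handler(priority=500)
    def handle_negative_sum(self, node):
        """Hypothetical handler: flip the sign of `y` and restart if the sum came out negative."""
        if node.outputs.sum.value < 0:
            self.ctx.inputs.y = Int(-node.inputs.y.value)
            return ProcessHandlerReport(do_break=True)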
Example #2
def test_immigrant_additional(fresh_aiida_env, potcar_family, phonondb_run,
                              localhost, mock_vasp):
    """Provide process class and inputs for importing a AiiDA-external VASP run."""
    from aiida_vasp.calcs.vasp import VaspCalculation
    create_authinfo(localhost, store=True)
    proc, builder = VaspCalculation.immigrant(
        code=mock_vasp,
        remote_path=phonondb_run,
        potential_family=POTCAR_FAMILY_NAME,
        potential_mapping=POTCAR_MAP,
        use_chgcar=True,
        use_wavecar=True)
    expected_inputs = {
        'parameters', 'structure', 'kpoints', 'potential', 'charge_density',
        'wavefunctions'
    }
    for input_link in expected_inputs:
        assert builder.get(input_link, None) is not None, f'input link "{input_link}" was not set!'

    result, node = run.get_node(proc, **builder)
    assert node.exit_status == 0

    # We should not have any POTCAR here
    expected_files = ['CONTCAR', 'DOSCAR', 'EIGENVAL', 'OUTCAR', 'vasprun.xml']
    retrieved_files = result['retrieved'].list_object_names()
    assert set(expected_files) == set(retrieved_files)
Example #3
File: calcs.py Project: wangvei/aiida-vasp
def inner(inputs=None, settings=None):
    """Run a mock VASP calculation, optionally updating the default inputs."""
    from aiida.plugins import CalculationFactory
    from aiida.engine import run
    calculation = CalculationFactory('vasp.vasp')
    mock_vasp.store()
    create_authinfo(computer=mock_vasp.computer, store=True)
    kpoints, _ = vasp_kpoints
    inpts = AttributeDict()
    inpts.code = Code.get_from_string('mock-vasp@localhost')
    inpts.structure = vasp_structure
    inpts.parameters = vasp_params
    inpts.kpoints = kpoints
    inpts.potential = get_data_class(
        'vasp.potcar').get_potcars_from_structure(
            structure=inpts.structure,
            family_name=POTCAR_FAMILY_NAME,
            mapping=POTCAR_MAP)
    options = {
        'withmpi': False,
        'queue_name': 'None',
        'resources': {
            'num_machines': 1,
            'num_mpiprocs_per_machine': 1
        },
        'max_wallclock_seconds': 3600
    }
    inpts.metadata = {}
    inpts.metadata['options'] = options
    if inputs is not None:
        inpts.update(inputs)
    results_and_node = run.get_node(calculation, **inpts)
    return results_and_node
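This `inner` closure is returned by a pytest fixture so that tests can launch a mock VASP calculation on demand. A hypothetical usage, assuming the enclosing fixture is named `run_vasp_calc`:

def test_vasp_runs(run_vasp_calc):
    results, node = run_vasp_calc()
    assert node.exit_status == 0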
Example #4
def run_calculation(code, counter, inputval):
    """
    Run a calculation through the Process layer.
    """
    process, inputs, expected_result = create_calculation_process(
        code=code, inputval=inputval)
    _, calc = run.get_node(process, **inputs)
    print(f'[{counter}] ran calculation {calc.uuid}, pk={calc.pk}')
    return calc, expected_result
Example #5
def test_vasp_immigrant(immigrant_with_builder):
    """Test importing a calculation from the folder of a completed VASP run."""
    immigrant, inputs = immigrant_with_builder

    # We need to set the parser explicitly
    inputs.metadata['options']['parser_name'] = 'vasp.vasp'
    result, node = run.get_node(immigrant, **inputs)
    assert node.exit_status == 0

    expected_output_nodes = {'misc', 'remote_folder', 'retrieved'}
    assert expected_output_nodes.issubset(set(result))
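The `parser_name` option selects the parser plugin by its entry point name. The same entry point can be resolved by hand, which is a quick way to check the plugin is installed (a sketch; it assumes `aiida-vasp` registered the 'vasp.vasp' parser entry point):

from aiida.plugins import ParserFactory

parser_cls = ParserFactory('vasp.vasp')  # resolves the entry point from the 'aiida.parsers' group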
Example #6
def test_relax_wc(fresh_aiida_env, vasp_params, potentials, mock_vasp):
    # def test_relax_wc(fresh_aiida_env, vasp_params, potentials, mock_vasp, mock_relax_wc):
    """Test submitting only, not correctness, with mocked vasp code."""
    from aiida.orm import Code
    from aiida.plugins import WorkflowFactory
    from aiida.engine import run

    workchain = WorkflowFactory('vasp.relax')

    mock_vasp.store()
    create_authinfo(computer=mock_vasp.computer, store=True)

    structure = PoscarParser(file_path=data_path('test_relax_wc', 'inp', 'POSCAR')).structure
    kpoints = KpointsParser(file_path=data_path('test_relax_wc', 'inp', 'KPOINTS')).kpoints
    parameters = IncarParser(file_path=data_path('test_relax_wc', 'inp', 'INCAR')).incar
    parameters['system'] = 'test-case:test_relax_wc'
    parameters = {'incar': {k: v for k, v in parameters.items() if k not in ['isif', 'ibrion', 'nsw', 'ediffg']}}
    parameters['relax'] = {}
    parameters['relax']['perform'] = True
    parameters['relax']['algo'] = 'cg'
    parameters['relax']['force_cutoff'] = 0.01

    inputs = AttributeDict()
    inputs.code = Code.get_from_string('mock-vasp@localhost')
    inputs.structure = structure
    inputs.kpoints = kpoints
    inputs.parameters = get_data_node('dict', dict=parameters)
    inputs.potential_family = get_data_node('str', POTCAR_FAMILY_NAME)
    inputs.potential_mapping = get_data_node('dict', dict=POTCAR_MAP)
    inputs.options = get_data_node('dict',
                                   dict={
                                       'withmpi': False,
                                       'queue_name': 'None',
                                       'max_wallclock_seconds': 1,
                                       'import_sys_environment': True,
                                       'resources': {
                                           'num_machines': 1,
                                           'num_mpiprocs_per_machine': 1
                                       },
                                   })
    inputs.max_iterations = get_data_node('int', 1)
    inputs.clean_workdir = get_data_node('bool', False)
    inputs.verbose = get_data_node('bool', True)
    results, node = run.get_node(workchain, **inputs)
    assert node.exit_status == 0
    assert 'relax' in results
    relax = results['relax']
    assert 'structure' in relax
    sites = relax['structure'].sites
    assert sites[0].kind_name == 'Si'
    assert sites[1].kind_name == 'Si'
    np.testing.assert_allclose(sites[0].position, [4.8125, 4.8125, 4.8125])
    np.testing.assert_allclose(sites[1].position, [0.6875, 0.6875, 0.715])
Example #7
def run_arithmetic_add():
    """Run the `ArithmeticAddCalculation`."""
    ArithmeticAddCalculation = CalculationFactory('arithmetic.add')

    code = load_code(CODENAME_ADD)
    inputs = {
        'x': Int(1),
        'y': Int(2),
        'code': code,
    }

    # Normal inputs should run just fine
    results, node = run.get_node(ArithmeticAddCalculation, **inputs)
    assert node.is_finished_ok, node.exit_status
    assert results['sum'] == 3
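When such an assertion fails, the returned node carries the diagnostics. A short sketch of the standard `ProcessNode` properties worth printing:

print(node.process_state)  # the process state, e.g. ProcessState.FINISHED
print(node.exit_status)    # integer exit code; 0 means success
print(node.exit_message)   # human-readable message attached to the exit code
print(node.outputs.sum)    # outputs are also reachable directly on the node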
Example #8
def test_vasp_wc_chgcar(fresh_aiida_env, vasp_params, potentials, vasp_kpoints,
                        vasp_structure, mock_vasp):
    """Test submitting only, not correctness, with mocked vasp code, test fetching of the CHGCAR."""
    from aiida.orm import Code
    from aiida.plugins import WorkflowFactory
    from aiida.engine import run

    workchain = WorkflowFactory('vasp.vasp')

    mock_vasp.store()
    create_authinfo(computer=mock_vasp.computer, store=True)

    kpoints, _ = vasp_kpoints
    inputs = AttributeDict()
    inputs.code = Code.get_from_string('mock-vasp@localhost')
    inputs.structure = vasp_structure
    inputs.parameters = get_data_node('dict',
                                      dict={'incar': vasp_params.get_dict()})
    inputs.kpoints = kpoints
    inputs.potential_family = get_data_node('str', POTCAR_FAMILY_NAME)
    inputs.potential_mapping = get_data_node('dict', dict=POTCAR_MAP)
    inputs.options = get_data_node('dict',
                                   dict={
                                       'withmpi': False,
                                       'queue_name': 'None',
                                       'resources': {
                                           'num_machines': 1,
                                           'num_mpiprocs_per_machine': 1
                                       },
                                       'max_wallclock_seconds': 3600
                                   })
    inputs.settings = get_data_node('dict',
                                    dict={
                                        'ADDITIONAL_RETRIEVE_LIST': ['CHGCAR'],
                                        'parser_settings': {
                                            'add_chgcar': True
                                        }
                                    })
    inputs.max_iterations = get_data_node('int', 1)
    inputs.clean_workdir = get_data_node('bool', False)
    inputs.verbose = get_data_node('bool', True)
    results, node = run.get_node(workchain, **inputs)
    assert node.exit_status == 0
    assert 'chgcar' in results
    assert results['chgcar'].get_content() == 'This is a test CHGCAR file.\n'
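`ADDITIONAL_RETRIEVE_LIST` adds files to the retrieved folder on top of the parser defaults. A sketch of verifying the retrieval, assuming the workchain also exposes the `retrieved` output as in the examples below:

retrieved = results['retrieved']
assert 'CHGCAR' in retrieved.list_object_names()  # the retrieved FolderData lists its files by name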
Example #9
def run_multiply_add_workchain():
    """Run the `MultiplyAddWorkChain`."""
    MultiplyAddWorkChain = WorkflowFactory('arithmetic.multiply_add')

    code = load_code(CODENAME_ADD)
    inputs = {
        'x': Int(1),
        'y': Int(2),
        'z': Int(3),
        'code': code,
    }

    # Normal inputs should run just fine
    results, node = run.get_node(MultiplyAddWorkChain, **inputs)
    assert node.is_finished_ok, node.exit_status
    assert len(node.called) == 2
    assert 'result' in results
    assert results['result'].value == 5
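For context, `len(node.called) == 2` because the workchain calls one calcfunction (the multiplication) and one `ArithmeticAddCalculation` (the addition). An abridged sketch of the workchain's spec, reconstructed from memory of the aiida-core documentation rather than copied from it:

from aiida import orm
from aiida.engine import WorkChain

class MultiplyAddWorkChain(WorkChain):
    """Abridged sketch of the tutorial workchain."""

    @classmethod
    def define(cls, spec):
        super().define(spec)
        spec.input('x', valid_type=orm.Int)
        spec.input('y', valid_type=orm.Int)
        spec.input('z', valid_type=orm.Int)
        spec.input('code', valid_type=orm.Code)
        spec.outline(cls.multiply, cls.add, cls.validate_result, cls.result)
        spec.output('result', valid_type=orm.Int)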
Example #10
def test_vasp_wc(fresh_aiida_env, vasp_params, potentials, vasp_kpoints,
                 vasp_structure, mock_vasp):
    """Test submitting only, not correctness, with mocked vasp code."""
    from aiida.orm import Code
    from aiida.plugins import WorkflowFactory
    from aiida.engine import run

    workchain = WorkflowFactory('vasp.vasp')

    mock_vasp.store()
    create_authinfo(computer=mock_vasp.computer, store=True)

    kpoints, _ = vasp_kpoints
    inputs = AttributeDict()
    inputs.code = Code.get_from_string('mock-vasp@localhost')
    inputs.structure = vasp_structure
    inputs.parameters = get_data_node('dict',
                                      dict={'incar': vasp_params.get_dict()})
    inputs.kpoints = kpoints
    inputs.potential_family = get_data_node('str', POTCAR_FAMILY_NAME)
    inputs.potential_mapping = get_data_node('dict', dict=POTCAR_MAP)
    inputs.options = get_data_node('dict',
                                   dict={
                                       'withmpi': False,
                                       'queue_name': 'None',
                                       'resources': {
                                           'num_machines': 1,
                                           'num_mpiprocs_per_machine': 1
                                       },
                                       'max_wallclock_seconds': 3600
                                   })
    inputs.max_iterations = get_data_node('int', 1)
    inputs.clean_workdir = get_data_node('bool', False)
    inputs.verbose = get_data_node('bool', True)
    results, node = run.get_node(workchain, **inputs)

    assert node.exit_status == 0
    assert 'retrieved' in results
    assert 'misc' in results
    assert 'remote_folder' in results
    misc = results['misc'].get_dict()
    assert misc['maximum_stress'] == 22.8499295
    assert misc['total_energies']['energy_extrapolated'] == -14.16209692
Example #11
def main():
    # Log to the console
    console = logging.StreamHandler()
    console.setFormatter(logging.Formatter("[%(levelname)s] %(name)s : %(message)s"))
    logging.getLogger("add").addHandler(console)
    logging.getLogger("add").setLevel(logging.DEBUG)

    code = load_code(label="add@localhost")
    calculation = CalculationFactory("add.calculation")

    builder = calculation.get_builder()
    builder.code = code
    builder.x = Float(3.0)
    builder.y = Float(3.5)

    builder.metadata.options = {"resources": {"num_machines": 1}}

    results, node = run.get_node(builder)
    print(results, node, sep="\n")
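Note that `run.get_node` accepts a `ProcessBuilder` directly; since the builder carries its process class, the call above is equivalent to unpacking the inputs by hand:

# Equivalent launches (sketch): pass the builder itself, or unpack it over the class.
results, node = run.get_node(builder)
results, node = run.get_node(calculation, **builder)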
Example #12
from aiida import orm
from aiida.engine import run
from aiida.plugins import CalculationFactory

ArithmeticAddCalculation = CalculationFactory('arithmetic.add')
result = run(ArithmeticAddCalculation, x=orm.Int(1), y=orm.Int(2))
result, node = run.get_node(ArithmeticAddCalculation,
                            x=orm.Int(1),
                            y=orm.Int(2))
result, pk = run.get_pk(ArithmeticAddCalculation, x=orm.Int(1), y=orm.Int(2))
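The same launchers are also exposed as standalone functions in `aiida.engine`, equivalent to the attribute form used above:

from aiida.engine import run_get_node, run_get_pk

result, node = run_get_node(ArithmeticAddCalculation, x=orm.Int(1), y=orm.Int(2))
result, pk = run_get_pk(ArithmeticAddCalculation, x=orm.Int(1), y=orm.Int(2))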
Example #13
def main():
    """Launch a bunch of calculation jobs and workchains."""
    # pylint: disable=too-many-locals,too-many-statements,too-many-branches
    expected_results_process_functions = {}
    expected_results_calculations = {}
    expected_results_workchains = {}
    code_doubler = load_code(CODENAME_DOUBLER)

    # Run the `ArithmeticAddCalculation`
    print('Running the `ArithmeticAddCalculation`')
    run_arithmetic_add()

    # Run the `ArithmeticAddBaseWorkChain`
    print('Running the `ArithmeticAddBaseWorkChain`')
    run_base_restart_workchain()

    # Run the `MultiplyAddWorkChain`
    print('Running the `MultiplyAddWorkChain`')
    run_multiply_add_workchain()

    # Submitting the calcfunction through the launchers
    print('Submitting calcfunction to the daemon')
    proc, expected_result = launch_calcfunction(inputval=1)
    expected_results_process_functions[proc.pk] = expected_result

    # Submitting the workfunction through the launchers
    print('Submitting workfunction to the daemon')
    proc, expected_result = launch_workfunction(inputval=1)
    expected_results_process_functions[proc.pk] = expected_result

    # Submitting the Calculations the new way directly through the launchers
    print(f'Submitting {NUMBER_CALCULATIONS} calculations to the daemon')
    for counter in range(1, NUMBER_CALCULATIONS + 1):
        inputval = counter
        calc, expected_result = launch_calculation(code=code_doubler,
                                                   counter=counter,
                                                   inputval=inputval)
        expected_results_calculations[calc.pk] = expected_result

    # Submitting the Workchains
    print(f'Submitting {NUMBER_WORKCHAINS} workchains to the daemon')
    for index in range(NUMBER_WORKCHAINS):
        inp = Int(index)
        _, node = run.get_node(NestedWorkChain, inp=inp)
        expected_results_workchains[node.pk] = index

    print("Submitting a workchain with 'submit'.")
    builder = NestedWorkChain.get_builder()
    input_val = 4
    builder.inp = Int(input_val)
    proc = submit(builder)
    expected_results_workchains[proc.pk] = input_val

    print('Submitting a workchain with a nested input namespace.')
    value = Int(-12)
    pk = submit(NestedInputNamespace, foo={'bar': {'baz': value}}).pk
    expected_results_workchains[pk] = value

    print('Submitting a workchain with a dynamic non-db input.')
    value = [4, 2, 3]
    pk = submit(DynamicNonDbInput, namespace={'input': value}).pk
    expected_results_workchains[pk] = value

    print('Submitting a workchain with a dynamic db input.')
    value = 9
    pk = submit(DynamicDbInput, namespace={'input': Int(value)}).pk
    expected_results_workchains[pk] = value

    print('Submitting a workchain with a mixed (db / non-db) dynamic input.')
    value_non_db = 3
    value_db = Int(2)
    pk = submit(DynamicMixedInput,
                namespace={
                    'inputs': {
                        'input_non_db': value_non_db,
                        'input_db': value_db
                    }
                }).pk
    expected_results_workchains[pk] = value_non_db + value_db

    print('Submitting the serializing workchain')
    pk = submit(SerializeWorkChain, test=Int).pk
    expected_results_workchains[pk] = ObjectLoader().identify_object(Int)

    print('Submitting the ListEcho workchain.')
    list_value = List()
    list_value.extend([1, 2, 3])
    pk = submit(ListEcho, list=list_value).pk
    expected_results_workchains[pk] = list_value

    print('Submitting a WorkChain which contains a workfunction.')
    value = Str('workfunction test string')
    pk = submit(WorkFunctionRunnerWorkChain, input=value).pk
    expected_results_workchains[pk] = value

    print('Submitting a WorkChain which contains a calcfunction.')
    value = Int(1)
    pk = submit(CalcFunctionRunnerWorkChain, input=value).pk
    expected_results_workchains[pk] = Int(2)

    calculation_pks = sorted(expected_results_calculations.keys())
    workchains_pks = sorted(expected_results_workchains.keys())
    process_functions_pks = sorted(expected_results_process_functions.keys())
    pks = calculation_pks + workchains_pks + process_functions_pks

    print('Waiting for end of execution...')
    start_time = time.time()
    exited_with_timeout = True
    while time.time() - start_time < TIMEOUTSECS:
        time.sleep(15)  # Wait a few seconds

        # Print some debug info, both to aid debugging and to avoid the test
        # machine being shut down because there is no output

        print('#' * 78)
        print(f'####### TIME ELAPSED: {time.time() - start_time} s')
        print('#' * 78)
        print("Output of 'verdi process list -a':")
        try:
            print(
                subprocess.check_output(
                    ['verdi', 'process', 'list', '-a'],
                    stderr=subprocess.STDOUT,
                ))
        except subprocess.CalledProcessError as exception:
            print(f'Note: the command failed, message: {exception}')

        print("Output of 'verdi daemon status':")
        try:
            print(
                subprocess.check_output(
                    ['verdi', 'daemon', 'status'],
                    stderr=subprocess.STDOUT,
                ))
        except subprocess.CalledProcessError as exception:
            print(f'Note: the command failed, message: {exception}')

        if jobs_have_finished(pks):
            print('Calculation terminated its execution')
            exited_with_timeout = False
            break

    if exited_with_timeout:
        print_daemon_log()
        print('')
        print(
            f'Timeout!! Calculation did not complete after {TIMEOUTSECS} seconds'
        )
        sys.exit(2)
    else:
        # Launch the same calculations but with caching enabled -- these should be FINISHED immediately
        cached_calcs = []
        with enable_caching(identifier='aiida.calculations:templatereplacer'):
            for counter in range(1, NUMBER_CALCULATIONS + 1):
                inputval = counter
                calc, expected_result = run_calculation(code=code_doubler,
                                                        counter=counter,
                                                        inputval=inputval)
                cached_calcs.append(calc)
                expected_results_calculations[calc.pk] = expected_result

        if (validate_calculations(expected_results_calculations)
                and validate_workchains(expected_results_workchains)
                and validate_cached(cached_calcs)
                and validate_process_functions(
                    expected_results_process_functions)):
            print_daemon_log()
            print('')
            print('OK, all calculations have the expected parsed result')
            sys.exit(0)
        else:
            print_daemon_log()
            print('')
            print(
                'ERROR! Some return values are different from the expected value'
            )
            sys.exit(3)
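A note on the caching branch above: `enable_caching` turns caching on only for the given entry point and restores the previous configuration on exit. A sketch of checking that a run was indeed served from the cache, assuming the aiida-core 1.x caching API:

from aiida.manage.caching import enable_caching

with enable_caching(identifier='aiida.calculations:templatereplacer'):
    _, node = run.get_node(process, **inputs)
assert node.is_created_from_cache  # True when the node was cloned from a cached one
print(node.get_cache_source())     # UUID of the node it was cached from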
Example #14
def launch_all():
    """Launch a bunch of calculation jobs and workchains.

    :returns: dictionary with expected results and pks of all launched calculations and workchains
    """
    # pylint: disable=too-many-locals,too-many-statements
    expected_results_process_functions = {}
    expected_results_calculations = {}
    expected_results_workchains = {}
    code_doubler = load_code(CODENAME_DOUBLER)

    # Run the `ArithmeticAddCalculation`
    print('Running the `ArithmeticAddCalculation`')
    run_arithmetic_add()

    # Run the `ArithmeticAddBaseWorkChain`
    print('Running the `ArithmeticAddBaseWorkChain`')
    run_base_restart_workchain()

    # Run the `MultiplyAddWorkChain`
    print('Running the `MultiplyAddWorkChain`')
    run_multiply_add_workchain()

    # Testing the stashing functionality
    process, inputs, expected_result = create_calculation_process(
        code=code_doubler, inputval=1)
    with tempfile.TemporaryDirectory() as tmpdir:

        # Delete the temporary directory to test that the stashing functionality will create it if necessary
        shutil.rmtree(tmpdir, ignore_errors=True)

        source_list = ['output.txt', 'triple_value.tmp']
        inputs['metadata']['options']['stash'] = {
            'target_base': tmpdir,
            'source_list': source_list
        }
        _, node = run.get_node(process, **inputs)
        assert node.is_finished_ok
        assert 'remote_stash' in node.outputs
        remote_stash = node.outputs.remote_stash
        assert remote_stash.stash_mode == StashMode.COPY
        assert remote_stash.target_basepath.startswith(tmpdir)
        assert sorted(remote_stash.source_list) == sorted(source_list)
        assert sorted(os.listdir(remote_stash.target_basepath)) == sorted(source_list)

    # Submitting the calcfunction through the launchers
    print('Submitting calcfunction to the daemon')
    proc, expected_result = launch_calcfunction(inputval=1)
    expected_results_process_functions[proc.pk] = expected_result

    # Submitting the workfunction through the launchers
    print('Submitting workfunction to the daemon')
    proc, expected_result = launch_workfunction(inputval=1)
    expected_results_process_functions[proc.pk] = expected_result

    # Submitting the Calculations the new way directly through the launchers
    print(f'Submitting {NUMBER_CALCULATIONS} calculations to the daemon')
    for counter in range(1, NUMBER_CALCULATIONS + 1):
        inputval = counter
        calc, expected_result = launch_calculation(code=code_doubler,
                                                   counter=counter,
                                                   inputval=inputval)
        expected_results_calculations[calc.pk] = expected_result

    # Submitting the Workchains
    print(f'Submitting {NUMBER_WORKCHAINS} workchains to the daemon')
    for index in range(NUMBER_WORKCHAINS):
        inp = Int(index)
        _, node = run.get_node(NestedWorkChain, inp=inp)
        expected_results_workchains[node.pk] = index

    print("Submitting a workchain with 'submit'.")
    builder = NestedWorkChain.get_builder()
    input_val = 4
    builder.inp = Int(input_val)
    pk = submit(builder).pk
    expected_results_workchains[pk] = input_val

    print('Submitting a workchain with a nested input namespace.')
    value = Int(-12)
    pk = submit(NestedInputNamespace, foo={'bar': {'baz': value}}).pk
    expected_results_workchains[pk] = value

    print('Submitting a workchain with a dynamic non-db input.')
    value = [4, 2, 3]
    pk = submit(DynamicNonDbInput, namespace={'input': value}).pk
    expected_results_workchains[pk] = value

    print('Submitting a workchain with a dynamic db input.')
    value = 9
    pk = submit(DynamicDbInput, namespace={'input': Int(value)}).pk
    expected_results_workchains[pk] = value

    print('Submitting a workchain with a mixed (db / non-db) dynamic input.')
    value_non_db = 3
    value_db = Int(2)
    pk = submit(DynamicMixedInput,
                namespace={
                    'inputs': {
                        'input_non_db': value_non_db,
                        'input_db': value_db
                    }
                }).pk
    expected_results_workchains[pk] = value_non_db + value_db

    print('Submitting the serializing workchain')
    pk = submit(SerializeWorkChain, test=Int).pk
    expected_results_workchains[pk] = ObjectLoader().identify_object(Int)

    print('Submitting the ListEcho workchain.')
    list_value = List()
    list_value.extend([1, 2, 3])
    pk = submit(ListEcho, list=list_value).pk
    expected_results_workchains[pk] = list_value

    print('Submitting a WorkChain which contains a workfunction.')
    value = Str('workfunction test string')
    pk = submit(WorkFunctionRunnerWorkChain, input=value).pk
    expected_results_workchains[pk] = value

    print('Submitting a WorkChain which contains a calcfunction.')
    value = Int(1)
    pk = submit(CalcFunctionRunnerWorkChain, input=value).pk
    expected_results_workchains[pk] = Int(2)

    calculation_pks = sorted(expected_results_calculations.keys())
    workchains_pks = sorted(expected_results_workchains.keys())
    process_functions_pks = sorted(expected_results_process_functions.keys())

    return {
        'pks': calculation_pks + workchains_pks + process_functions_pks,
        'calculations': expected_results_calculations,
        'process_functions': expected_results_process_functions,
        'workchains': expected_results_workchains,
    }
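The stash block above uses the `stash` metadata option, whose mode defaults to `StashMode.COPY`. A minimal sketch of the option's structure, mirroring the usage in the test (the target path here is an assumption):

from aiida.common.datastructures import StashMode

# Sketch: the 'stash' option namespace; the mode defaults to StashMode.COPY.
options['stash'] = {
    'source_list': ['output.txt', 'triple_value.tmp'],  # files to take from the work directory
    'target_base': '/path/to/stash',                    # created if it does not exist
}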
Example #15
def test_converge_wc(fresh_aiida_env, potentials, mock_vasp):
    """Test submitting only, not correctness, with mocked vasp code."""
    from aiida.orm import Code
    from aiida.plugins import WorkflowFactory
    from aiida.engine import run

    workchain = WorkflowFactory('vasp.converge')

    mock_vasp.store()
    create_authinfo(computer=mock_vasp.computer, store=True)

    structure = PoscarParser(
        file_path=data_path('test_converge_wc', 'inp', 'POSCAR')).structure
    parameters = IncarParser(
        file_path=data_path('test_converge_wc', 'inp', 'INCAR')).incar
    parameters['system'] = 'test-case:test_converge_wc'
    parameters = {
        k: v
        for k, v in parameters.items()
        if k not in ['isif', 'ibrion', 'encut', 'nsw']
    }

    restart_clean_workdir = get_data_node('bool', False)
    restart_clean_workdir.store()

    inputs = AttributeDict()
    inputs.code = Code.get_from_string('mock-vasp@localhost')
    inputs.structure = structure
    inputs.parameters = get_data_node('dict', dict={'incar': parameters})
    inputs.potential_family = get_data_node('str', POTCAR_FAMILY_NAME)
    inputs.potential_mapping = get_data_node('dict', dict=POTCAR_MAP)
    inputs.options = get_data_node('dict',
                                   dict={
                                       'withmpi': False,
                                       'queue_name': 'None',
                                       'resources': {
                                           'num_machines': 1,
                                           'num_mpiprocs_per_machine': 1
                                       },
                                       'max_wallclock_seconds': 3600
                                   })
    inputs.max_iterations = get_data_node('int', 1)
    inputs.clean_workdir = get_data_node('bool', False)
    relax = AttributeDict()
    converge = AttributeDict()
    converge.relax = get_data_node('bool', False)
    converge.compress = get_data_node('bool', False)
    converge.displace = get_data_node('bool', False)
    converge.pwcutoff_samples = get_data_node('int', 3)
    converge.k_samples = get_data_node('int', 3)
    relax.perform = get_data_node('bool', True)
    inputs.relax = relax
    inputs.converge = converge
    inputs.verbose = get_data_node('bool', True)
    results, node = run.get_node(workchain, **inputs)
    assert node.exit_status == 0
    converge = results['converge']
    assert 'data' in converge

    conv_data = converge['data']
    try:
        conv_data.get_array('pw_regular')
    except KeyError:
        pytest.fail('Did not find pw_regular in converge.data')
    try:
        conv_data.get_array('kpoints_regular')
    except KeyError:
        pytest.fail('Did not find kpoints_regular in converge.data')

    assert 'pwcutoff_recommended' in converge
    try:
        _encut = converge['pwcutoff_recommended'].value
    except AttributeError:
        pytest.fail('pwcutoff_recommended does not have the expected format')
    assert 'kpoints_recommended' in converge
    try:
        _kpoints = converge['kpoints_recommended'].get_kpoints_mesh()
    except AttributeError:
        pytest.fail('kpoints_recommended does not have the expected format')
Example #16
def test_converge_wc_pw(fresh_aiida_env, vasp_params, potentials, mock_vasp):
    """Test convergence workflow using mock code."""
    from aiida.orm import Code
    from aiida.plugins import WorkflowFactory
    from aiida.engine import run

    workchain = WorkflowFactory('vasp.converge')

    mock_vasp.store()
    create_authinfo(computer=mock_vasp.computer).store()

    structure = PoscarParser(file_path=data_path('test_converge_wc/pw/200',
                                                 'inp', 'POSCAR')).structure
    parameters = IncarParser(
        file_path=data_path('test_converge_wc/pw/200', 'inp', 'INCAR')).incar
    parameters['system'] = 'test-case:test_converge_wc'
    parameters = {
        k: v
        for k, v in parameters.items()
        if k not in ['isif', 'ibrion', 'encut', 'nsw']
    }
    kpoints = KpointsParser(file_path=data_path('test_converge_wc/pw/200',
                                                'inp', 'KPOINTS')).kpoints

    restart_clean_workdir = get_data_node('bool', False)
    restart_clean_workdir.store()

    inputs = AttributeDict()
    inputs.code = Code.get_from_string('mock-vasp@localhost')
    inputs.structure = structure
    inputs.kpoints = kpoints
    inputs.parameters = get_data_node('dict', dict={'incar': parameters})
    inputs.potential_family = get_data_node('str', POTCAR_FAMILY_NAME)
    inputs.potential_mapping = get_data_node('dict', dict=POTCAR_MAP)
    inputs.options = get_data_node('dict',
                                   dict={
                                       'withmpi': False,
                                       'queue_name': 'None',
                                       'resources': {
                                           'num_machines': 1,
                                           'num_mpiprocs_per_machine': 1
                                       },
                                       'max_wallclock_seconds': 3600
                                   })
    inputs.max_iterations = get_data_node('int', 1)
    inputs.clean_workdir = get_data_node('bool', False)
    relax = AttributeDict()
    converge = AttributeDict()
    relax.perform = get_data_node('bool', False)
    converge.relax = get_data_node('bool', False)
    converge.testing = get_data_node('bool', True)
    converge.compress = get_data_node('bool', False)
    converge.displace = get_data_node('bool', False)
    converge.pwcutoff_samples = get_data_node('int', 3)
    converge.k_samples = get_data_node('int', 3)
    inputs.relax = relax
    inputs.converge = converge
    inputs.verbose = get_data_node('bool', True)
    results, node = run.get_node(workchain, **inputs)
    assert node.exit_status == 0
    assert 'converge' in results
    converge = results['converge']
    assert 'data' in converge
    conv_data = converge['data']
    try:
        conv_data = conv_data.get_array('pw_regular')
    except KeyError:
        pytest.fail('Did not find pw_regular in converge.data')
    conv_data_test = np.array([[200.0, -10.77974998, 0.0, 0.0, 0.5984],
                               [250.0, -10.80762044, 0.0, 0.0, 0.5912],
                               [300.0, -10.82261992, 0.0, 0.0, 0.5876]])
    np.testing.assert_allclose(conv_data, conv_data_test)

    assert 'pwcutoff_recommended' in converge
    try:
        _encut = converge['pwcutoff_recommended'].value
        np.testing.assert_equal(_encut, 300)
    except AttributeError:
        pytest.fail('pwcutoff_recommended does not have the expected format')
Example #17
def test_bands_wc(fresh_aiida_env, potentials, mock_vasp):
    """Test with mocked vasp code."""
    from aiida.orm import Code, Log, RemoteData
    from aiida.plugins import WorkflowFactory
    from aiida.engine import run

    workchain = WorkflowFactory('vasp.bands')

    mock_vasp.store()
    create_authinfo(computer=mock_vasp.computer, store=True)

    structure = PoscarParser(file_path=data_path('test_bands_wc', 'inp', 'POSCAR')).structure
    parameters = IncarParser(file_path=data_path('test_bands_wc', 'inp', 'INCAR')).incar
    parameters['system'] = 'test-case:test_bands_wc'
    # Make sure we replace encut with pwcutoff
    del parameters['encut']
    parameters = {'vasp': parameters}
    parameters['electronic'] = {'pwcutoff': 200}

    inputs = AttributeDict()
    inputs.code = Code.get_from_string('mock-vasp@localhost')
    inputs.structure = structure
    inputs.parameters = get_data_node('dict', dict=parameters)
    inputs.potential_family = get_data_node('str', POTCAR_FAMILY_NAME)
    inputs.potential_mapping = get_data_node('dict', dict=POTCAR_MAP)
    inputs.options = get_data_node('dict',
                                   dict={
                                       'withmpi': False,
                                       'queue_name': 'None',
                                       'resources': {
                                           'num_machines': 1,
                                           'num_mpiprocs_per_machine': 1
                                       },
                                       'max_wallclock_seconds': 3600
                                   })
    inputs.max_iterations = get_data_node('int', 1)
    inputs.clean_workdir = get_data_node('bool', False)
    inputs.verbose = get_data_node('bool', True)
    # Also set the restart folder, since a bands calculation is assumed to start
    # from a previous calculation sitting in the restart folder
    inputs.restart_folder = RemoteData(computer=inputs.code.computer, remote_path=data_path('test_bands_wc', 'inp'))
    results, node = run.get_node(workchain, **inputs)
    assert node.exit_status == 0
    assert 'bands' in results
    kpoints = results['bands'].get_kpoints()
    test_array = np.array([[0., 0., 0.], [0.02272727, 0., 0.02272727], [0.04545454, 0., 0.04545454], [0.06818182, 0., 0.06818182],
                           [0.09090909, 0., 0.09090909], [0.11363636, 0., 0.11363636], [0.13636364, 0., 0.13636364],
                           [0.15909091, 0., 0.15909091], [0.18181818, 0., 0.18181818], [0.20454545, 0., 0.20454545],
                           [0.22727273, 0., 0.22727273], [0.25, 0., 0.25], [0.27272727, 0., 0.27272727], [0.29545455, 0., 0.29545455],
                           [0.31818182, 0., 0.31818182], [0.34090909, 0., 0.34090909], [0.36363636, 0., 0.36363636],
                           [0.38636364, 0., 0.38636364], [0.40909091, 0., 0.40909091], [0.43181818, 0., 0.43181818],
                           [0.45454545, 0., 0.45454545], [0.47727273, 0., 0.47727273], [0.5, 0., 0.5], [0.51785714, 0.03571429, 0.51785714],
                           [0.53571429, 0.07142857, 0.53571429], [0.55357143, 0.10714286, 0.55357143], [0.57142857, 0.14285714, 0.57142857],
                           [0.58928571, 0.17857143, 0.58928571], [0.60714286, 0.21428571, 0.60714286], [0.625, 0.25, 0.625],
                           [0.375, 0.375, 0.75], [0.35869565, 0.35869565, 0.7173913], [0.3423913, 0.3423913, 0.68478261],
                           [0.32608696, 0.32608696, 0.65217391], [0.30978261, 0.30978261, 0.61956522], [0.29347826, 0.29347826, 0.58695652],
                           [0.27717391, 0.27717391, 0.55434783], [0.26086957, 0.26086957, 0.52173913], [0.24456522, 0.24456522, 0.48913043],
                           [0.22826087, 0.22826087, 0.45652174], [0.21195652, 0.21195652, 0.42391304], [0.19565217, 0.19565217, 0.39130435],
                           [0.17934783, 0.17934783, 0.35869565], [0.16304348, 0.16304348, 0.32608696], [0.14673913, 0.14673913, 0.29347826],
                           [0.13043478, 0.13043478, 0.26086957], [0.11413044, 0.11413044, 0.22826087], [0.09782609, 0.09782609, 0.19565217],
                           [0.08152174, 0.08152174, 0.16304348], [0.06521739, 0.06521739, 0.13043478], [0.04891304, 0.04891304, 0.09782609],
                           [0.0326087, 0.0326087, 0.06521739], [0.01630435, 0.01630435, 0.0326087], [0., 0., 0.],
                           [0.02631579, 0.02631579, 0.02631579], [0.05263158, 0.05263158, 0.05263158], [0.07894737, 0.07894737, 0.07894737],
                           [0.10526316, 0.10526316, 0.10526316], [0.13157895, 0.13157895, 0.13157895], [0.15789474, 0.15789474, 0.15789474],
                           [0.18421053, 0.18421053, 0.18421053], [0.21052632, 0.21052632, 0.21052632], [0.2368421, 0.2368421, 0.2368421],
                           [0.26315789, 0.26315789, 0.26315789], [0.28947368, 0.28947368, 0.28947368], [0.31578947, 0.31578947, 0.31578947],
                           [0.34210526, 0.34210526, 0.34210526], [0.36842105, 0.36842105, 0.36842105], [0.39473684, 0.39473684, 0.39473684],
                           [0.42105263, 0.42105263, 0.42105263], [0.44736842, 0.44736842, 0.44736842], [0.47368421, 0.47368421, 0.47368421],
                           [0.5, 0.5, 0.5], [0.5, 0.48333333, 0.51666667], [0.5, 0.46666667, 0.53333333], [0.5, 0.45, 0.55],
                           [0.5, 0.43333333, 0.56666667], [0.5, 0.41666667, 0.58333333], [0.5, 0.4, 0.6], [0.5, 0.38333333, 0.61666667],
                           [0.5, 0.36666667, 0.63333333], [0.5, 0.35, 0.65], [0.5, 0.33333333, 0.66666667], [0.5, 0.31666667, 0.68333333],
                           [0.5, 0.3, 0.7], [0.5, 0.28333333, 0.71666667], [0.5, 0.26666667, 0.73333333], [0.5, 0.25, 0.75],
                           [0.5, 0.225, 0.725], [0.5, 0.2, 0.7], [0.5, 0.175, 0.675], [0.5, 0.15, 0.65], [0.5, 0.125, 0.625],
                           [0.5, 0.1, 0.6], [0.5, 0.075, 0.575], [0.5, 0.05, 0.55], [0.5, 0.025, 0.525], [0.5, 0., 0.5]])
    np.testing.assert_allclose(kpoints, test_array)
    bands = results['bands'].get_bands()
    assert bands.shape == (1, 98, 20)
    np.testing.assert_allclose(bands[0, 0, 0:3], np.array([-6.0753, 6.0254, 6.0254]))
    np.testing.assert_allclose(bands[0, 2, 0:3], np.array([-6.0386, 5.7955, 5.8737]))
    np.testing.assert_allclose(bands[0, 97, 0:3], np.array([-1.867, -1.867, 3.1102]))