Example #1
    def test_workfunction_caching(self):
        """Verify that a workfunction cannot be cached."""
        _ = self.test_workfunction(self.default_int)

        # Caching should always be disabled for a WorkFunctionNode
        with enable_caching(identifier=WorkFunctionNode):
            _, cached = self.test_workfunction.run_get_node(self.default_int)
            self.assertFalse(cached.is_created_from_cache)
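
The `self.test_workfunction` used above is created in a `setUp` method that this snippet does not show. A minimal sketch of what it plausibly looks like (the body is an assumption; note that a workfunction may only return nodes that already exist, which is why it simply passes the input through):

from aiida.engine import workfunction


@workfunction
def test_workfunction(data):
    """Pass the input node through unchanged; a workfunction may not create new data."""
    return data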
Example #2
    def test_valid_cache_hook(self):
        """
        Test that the `is_valid_cache` behavior can be customized by overriding
        the method on the Process subclass.
        """
        # Sanity check that caching works when the hook returns True.
        with enable_caching():
            _, node1 = run_get_node(test_processes.IsValidCacheHook)
            _, node2 = run_get_node(test_processes.IsValidCacheHook)
            self.assertEqual(node1.get_extra('_aiida_hash'), node2.get_extra('_aiida_hash'))
            self.assertIn('_aiida_cached_from', node2.extras)

        with enable_caching():
            _, node3 = run_get_node(test_processes.IsValidCacheHook, not_valid_cache=orm.Bool(True))
            _, node4 = run_get_node(test_processes.IsValidCacheHook, not_valid_cache=orm.Bool(True))
            self.assertEqual(node3.get_extra('_aiida_hash'), node4.get_extra('_aiida_hash'))
            self.assertNotIn('_aiida_cached_from', node4.extras)
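
The `test_processes.IsValidCacheHook` class itself is not shown here. A sketch of the shape it plausibly has, assuming AiiDA's `Process.is_valid_cache` classmethod hook (the base class and the input default are assumptions):

from aiida import orm
from aiida.engine import Process


class IsValidCacheHook(Process):
    """Toy process whose nodes are excluded as cache sources when `not_valid_cache` is true."""

    @classmethod
    def define(cls, spec):
        super().define(spec)
        spec.input('not_valid_cache', valid_type=orm.Bool, default=lambda: orm.Bool(False))

    def run(self):
        pass

    @classmethod
    def is_valid_cache(cls, node):
        # Refuse to use a node as a cache source when its `not_valid_cache` input was set.
        return super().is_valid_cache(node) and not node.inputs.not_valid_cache.value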
Example #3
    def test_exit_codes_invalidate_cache(self):
        """
        Test that returning an exit code with 'invalidates_cache' set to ``True``
        indeed means that the ProcessNode will not be cached from.
        """
        # Sanity check that caching works when the exit code is not returned.
        with enable_caching():
            _, node1 = run_get_node(test_processes.InvalidateCaching, return_exit_code=orm.Bool(False))
            _, node2 = run_get_node(test_processes.InvalidateCaching, return_exit_code=orm.Bool(False))
            self.assertEqual(node1.get_extra('_aiida_hash'), node2.get_extra('_aiida_hash'))
            self.assertIn('_aiida_cached_from', node2.extras)

        with enable_caching():
            _, node3 = run_get_node(test_processes.InvalidateCaching, return_exit_code=orm.Bool(True))
            _, node4 = run_get_node(test_processes.InvalidateCaching, return_exit_code=orm.Bool(True))
            self.assertEqual(node3.get_extra('_aiida_hash'), node4.get_extra('_aiida_hash'))
            self.assertNotIn('_aiida_cached_from', node4.extras)
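
The `test_processes.InvalidateCaching` class is likewise not shown. A sketch under the same assumptions, using the `invalidates_cache` keyword of `spec.exit_code` (the status code and label here are made up):

from aiida import orm
from aiida.engine import Process


class InvalidateCaching(Process):
    """Toy process that can return an exit code marked with `invalidates_cache=True`."""

    @classmethod
    def define(cls, spec):
        super().define(spec)
        spec.input('return_exit_code', valid_type=orm.Bool)
        spec.exit_code(123, 'GENERIC_EXIT_CODE', message='Some error.', invalidates_cache=True)

    def run(self):
        if self.inputs.return_exit_code.value:
            return self.exit_codes.GENERIC_EXIT_CODE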
Example #4
def test_enable_caching_global(configure_caching):
    """
    Check that using enable_caching for a specific identifier works.
    """
    specific_identifier = 'some_ident'
    with configure_caching(config_dict={'default': False, 'disabled': [specific_identifier]}):
        with enable_caching():
            assert get_use_cache(identifier='some_other_ident')
            assert get_use_cache(identifier=specific_identifier)
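
For contrast, a hypothetical companion test (same `configure_caching` fixture and imports assumed): without the `enable_caching()` override, the configured defaults apply and both identifiers report caching as disabled.

def test_caching_disabled_without_override(configure_caching):
    """Without enable_caching(), the configuration alone decides (hypothetical sketch)."""
    specific_identifier = 'some_ident'
    with configure_caching(config_dict={'default': False, 'disabled': [specific_identifier]}):
        assert not get_use_cache(identifier='some_other_ident')
        assert not get_use_cache(identifier=specific_identifier)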
Example #5
def test_enable_disable_invalid(identifier):
    """
    Test that the enable and disable context managers raise when given
    an invalid identifier.
    """
    with pytest.raises(ValueError):
        with enable_caching(identifier=identifier):
            pass
    with pytest.raises(ValueError):
        with disable_caching(identifier=identifier):
            pass
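
The `identifier` argument comes from a `pytest.mark.parametrize` decorator that the snippet omits. A plausible version (the specific invalid values are assumptions; identifier parts must be dot-separated names or '*' wildcards, so these should fail validation):

import pytest


@pytest.mark.parametrize('identifier', ['identifier with spaces', 'trailing.dot.', '**'])
def test_enable_disable_invalid(identifier):
    ...  # body as in the example above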
Example #6
    def test_calcfunction_caching_change_code(self):
        """Verify that changing the source codde of a calcfunction invalidates any existing cached nodes."""
        result_original = self.test_calcfunction(self.default_int)

        with enable_caching(CalcFunctionNode):

            @calcfunction
            def test_calcfunction(data):
                """This calcfunction has a different source code from the one setup in the setUp method."""
                return Int(data.value + 2)

            result_cached, cached = test_calcfunction.run_get_node(self.default_int)
            self.assertNotEqual(result_original, result_cached)
            self.assertFalse(cached.is_created_from_cache)
Example #7
def test_difference_cache(
    configure_with_daemon,  # pylint: disable=unused-argument
    get_bands_builder,
):
    """Check that a cached DifferenceCalculation run reproduces the original outputs."""
    from aiida.engine.launch import run_get_node
    from aiida.manage.caching import enable_caching

    builder = get_bands_builder

    # Fast-forwarding is enabled in the configuration for DifferenceCalculation
    with enable_caching():  # pylint: disable=not-context-manager
        output1, c1 = run_get_node(builder)
        output2, c2 = run_get_node(builder)
    c1.get_hash(ignore_errors=False)
    assert '_aiida_cached_from' in c2.extras
    assert output1['difference'] == output2['difference']
Example #8
    def test_calcfunction_caching(self):
        """Verify that a calcfunction can be cached."""

        self.assertEqual(EXECUTION_COUNTER, 0)
        _, original = execution_counter_calcfunction.run_get_node(Int(5))
        self.assertEqual(EXECUTION_COUNTER, 1)

        # Caching a CalcFunctionNode should be possible
        with enable_caching(identifier='*.execution_counter_calcfunction'):
            input_node = Int(5)
            result, cached = execution_counter_calcfunction.run_get_node(input_node)

            self.assertEqual(EXECUTION_COUNTER, 1)  # Calculation function body should not have been executed
            self.assertTrue(result.is_stored)
            self.assertTrue(cached.is_created_from_cache)
            self.assertIn(cached.get_cache_source(), original.uuid)
            self.assertEqual(cached.get_incoming().one().node.uuid, input_node.uuid)
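
The module-level `EXECUTION_COUNTER` and `execution_counter_calcfunction` are defined outside the snippet. A sketch of what they plausibly look like (the exact body is an assumption):

from aiida.engine import calcfunction
from aiida.orm import Int

EXECUTION_COUNTER = 0


@calcfunction
def execution_counter_calcfunction(data):
    """Increment the module-level counter each time the function body actually executes."""
    global EXECUTION_COUNTER  # pylint: disable=global-statement
    EXECUTION_COUNTER += 1
    return Int(data.value + 1)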
Example #9
    def test_calcfunction_caching_change_code(self):
        """Verify that changing the source codde of a calcfunction invalidates any existing cached nodes."""
        result_original = self.test_calcfunction(self.default_int)

        # Intentionally using the same name, to check that caching anyway
        # distinguishes between the calcfunctions.
        @calcfunction
        def add_calcfunction(data):  # pylint: disable=redefined-outer-name
            """This calcfunction has a different source code from the one created at the module level."""
            return Int(data.value + 2)

        with enable_caching(identifier='*.add_calcfunction'):
            result_cached, cached = add_calcfunction.run_get_node(self.default_int)
            self.assertNotEqual(result_original, result_cached)
            self.assertFalse(cached.is_created_from_cache)
            # Test that the locally-created calcfunction can be cached in principle
            result2_cached, cached2 = add_calcfunction.run_get_node(self.default_int)
            self.assertNotEqual(result_original, result2_cached)
            self.assertTrue(cached2.is_created_from_cache)
Example #10
    def test_contextmanager_enable_explicit(self):
        """Test the `enable_caching` context manager."""
        with enable_caching(identifier='aiida.calculations:templatereplacer'):
            self.assertTrue(get_use_cache(identifier='aiida.calculations:templatereplacer'))
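
The matching test for `disable_caching` follows the same pattern; a sketch assuming the same imports from `aiida.manage.caching`:

    def test_contextmanager_disable_explicit(self):
        """Test the `disable_caching` context manager."""
        with disable_caching(identifier='aiida.calculations:templatereplacer'):
            self.assertFalse(get_use_cache(identifier='aiida.calculations:templatereplacer'))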
Example #11
def main():
    """Launch a bunch of calculation jobs and workchains."""
    # pylint: disable=too-many-locals,too-many-statements,too-many-branches
    expected_results_process_functions = {}
    expected_results_calculations = {}
    expected_results_workchains = {}
    code_doubler = load_code(CODENAME_DOUBLER)

    # Run the `ArithmeticAddCalculation`
    print('Running the `ArithmeticAddCalculation`')
    run_arithmetic_add()

    # Run the `AddArithmeticBaseWorkChain`
    print('Running the `AddArithmeticBaseWorkChain`')
    run_base_restart_workchain()

    # Run the `MultiplyAddWorkChain`
    print('Running the `MultiplyAddWorkChain`')
    run_multiply_add_workchain()

    # Submitting the calcfunction through the launchers
    print('Submitting calcfunction to the daemon')
    proc, expected_result = launch_calcfunction(inputval=1)
    expected_results_process_functions[proc.pk] = expected_result

    # Submitting the workfunction through the launchers
    print('Submitting workfunction to the daemon')
    proc, expected_result = launch_workfunction(inputval=1)
    expected_results_process_functions[proc.pk] = expected_result

    # Submitting the Calculations the new way directly through the launchers
    print(f'Submitting {NUMBER_CALCULATIONS} calculations to the daemon')
    for counter in range(1, NUMBER_CALCULATIONS + 1):
        inputval = counter
        calc, expected_result = launch_calculation(code=code_doubler,
                                                   counter=counter,
                                                   inputval=inputval)
        expected_results_calculations[calc.pk] = expected_result

    # Submitting the Workchains
    print(f'Submitting {NUMBER_WORKCHAINS} workchains to the daemon')
    for index in range(NUMBER_WORKCHAINS):
        inp = Int(index)
        _, node = run.get_node(NestedWorkChain, inp=inp)
        expected_results_workchains[node.pk] = index

    print("Submitting a workchain with 'submit'.")
    builder = NestedWorkChain.get_builder()
    input_val = 4
    builder.inp = Int(input_val)
    proc = submit(builder)
    expected_results_workchains[proc.pk] = input_val

    print('Submitting a workchain with a nested input namespace.')
    value = Int(-12)
    pk = submit(NestedInputNamespace, foo={'bar': {'baz': value}}).pk
    expected_results_workchains[pk] = value

    print('Submitting a workchain with a dynamic non-db input.')
    value = [4, 2, 3]
    pk = submit(DynamicNonDbInput, namespace={'input': value}).pk
    expected_results_workchains[pk] = value

    print('Submitting a workchain with a dynamic db input.')
    value = 9
    pk = submit(DynamicDbInput, namespace={'input': Int(value)}).pk
    expected_results_workchains[pk] = value

    print('Submitting a workchain with a mixed (db / non-db) dynamic input.')
    value_non_db = 3
    value_db = Int(2)
    pk = submit(DynamicMixedInput,
                namespace={
                    'inputs': {
                        'input_non_db': value_non_db,
                        'input_db': value_db
                    }
                }).pk
    expected_results_workchains[pk] = value_non_db + value_db

    print('Submitting the serializing workchain')
    pk = submit(SerializeWorkChain, test=Int).pk
    expected_results_workchains[pk] = ObjectLoader().identify_object(Int)

    print('Submitting the ListEcho workchain.')
    list_value = List()
    list_value.extend([1, 2, 3])
    pk = submit(ListEcho, list=list_value).pk
    expected_results_workchains[pk] = list_value

    print('Submitting a WorkChain which contains a workfunction.')
    value = Str('workfunction test string')
    pk = submit(WorkFunctionRunnerWorkChain, input=value).pk
    expected_results_workchains[pk] = value

    print('Submitting a WorkChain which contains a calcfunction.')
    value = Int(1)
    pk = submit(CalcFunctionRunnerWorkChain, input=value).pk
    expected_results_workchains[pk] = Int(2)

    calculation_pks = sorted(expected_results_calculations.keys())
    workchains_pks = sorted(expected_results_workchains.keys())
    process_functions_pks = sorted(expected_results_process_functions.keys())
    pks = calculation_pks + workchains_pks + process_functions_pks

    print('Waiting for end of execution...')
    start_time = time.time()
    exited_with_timeout = True
    while time.time() - start_time < TIMEOUTSECS:
        time.sleep(15)  # Wait a few seconds

        # Print some debug info, both for debugging reasons and to avoid
        # that the test machine is shut down because there is no output

        print('#' * 78)
        print(f'####### TIME ELAPSED: {time.time() - start_time} s')
        print('#' * 78)
        print("Output of 'verdi process list -a':")
        try:
            print(
                subprocess.check_output(
                    ['verdi', 'process', 'list', '-a'],
                    stderr=subprocess.STDOUT,
                ))
        except subprocess.CalledProcessError as exception:
            print(f'Note: the command failed, message: {exception}')

        print("Output of 'verdi daemon status':")
        try:
            print(
                subprocess.check_output(
                    ['verdi', 'daemon', 'status'],
                    stderr=subprocess.STDOUT,
                ))
        except subprocess.CalledProcessError as exception:
            print(f'Note: the command failed, message: {exception}')

        if jobs_have_finished(pks):
            print('Calculation terminated its execution')
            exited_with_timeout = False
            break

    if exited_with_timeout:
        print_daemon_log()
        print('')
        print(
            f'Timeout!! Calculation did not complete after {TIMEOUTSECS} seconds'
        )
        sys.exit(2)
    else:
        # Launch the same calculations but with caching enabled -- these should be FINISHED immediately
        cached_calcs = []
        with enable_caching(identifier='aiida.calculations:templatereplacer'):
            for counter in range(1, NUMBER_CALCULATIONS + 1):
                inputval = counter
                calc, expected_result = run_calculation(code=code_doubler,
                                                        counter=counter,
                                                        inputval=inputval)
                cached_calcs.append(calc)
                expected_results_calculations[calc.pk] = expected_result

        if (validate_calculations(expected_results_calculations)
                and validate_workchains(expected_results_workchains)
                and validate_cached(cached_calcs)
                and validate_process_functions(
                    expected_results_process_functions)):
            print_daemon_log()
            print('')
            print('OK, all calculations have the expected parsed result')
            sys.exit(0)
        else:
            print_daemon_log()
            print('')
            print(
                'ERROR! Some return values are different from the expected value'
            )
            sys.exit(3)
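
The `jobs_have_finished` helper polled in the loop above is not shown; a minimal sketch of what it might do, inferred from its usage:

from aiida.orm import load_node


def jobs_have_finished(pks):
    """Return True once every process node in `pks` has reached a terminal state."""
    return all(load_node(pk).is_terminated for pk in pks)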
Example #12
if args.inpgen is not None:
    inpgen_code = is_code(args.inpgen)
    inputs['inpgen'] = test_and_get_codenode(inpgen_code,
                                             expected_code_type='fleur.inpgen')

submit_wc = False
if args.submit is not None:
    submit_wc = True
pprint(inputs)

print('##################### TEST FleurScfWorkChain #####################')

if submit_wc:
    res = submit(FleurScfWorkChain, **inputs)
    print('##################### Submitted FleurScfWorkChain #####################')
    print(f'Runtime info: {res}')
    print('##################### Finished submitting FleurScfWorkChain #####################')
else:
    print('##################### Running FleurScfWorkChain #####################')
    with enable_caching():
        res = run(FleurScfWorkChain, **inputs)
    print('##################### Finished running FleurScfWorkChain #####################')