Example #1
    def test_calcfunction_caching(self):
        """Verify that a calcfunction can be cached."""

        self.assertEqual(EXECUTION_COUNTER, 0)
        _, original = execution_counter_calcfunction.run_get_node(Int(5))
        self.assertEqual(EXECUTION_COUNTER, 1)

        # Caching a CalcFunctionNode should be possible
        with enable_caching(identifier='*.execution_counter_calcfunction'):
            input_node = Int(5)
            result, cached = execution_counter_calcfunction.run_get_node(input_node)

            self.assertEqual(EXECUTION_COUNTER, 1)  # Calculation function body should not have been executed
            self.assertTrue(result.is_stored)
            self.assertTrue(cached.is_created_from_cache)
            self.assertEqual(cached.get_cache_source(), original.uuid)
            self.assertEqual(cached.get_incoming().one().node.uuid, input_node.uuid)
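The test relies on a module-level counter and a counting calcfunction defined elsewhere in the test module; a minimal sketch of what they could look like (the names come from the test above, the bodies are assumed):

from aiida.engine import calcfunction
from aiida.orm import Int

EXECUTION_COUNTER = 0


@calcfunction
def execution_counter_calcfunction(data):
    """Increment the module-level counter each time the function body actually executes."""
    global EXECUTION_COUNTER  # pylint: disable=global-statement
    EXECUTION_COUNTER += 1
    return Int(data.value + 1)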
Example #2
 def test_calcfunction_do_not_store_provenance(self):
     """Run the function without storing the provenance."""
     data = Int(1)
     result, node = self.test_calcfunction.run_get_node(data, metadata={'store_provenance': False})  # pylint: disable=unexpected-keyword-arg
     self.assertFalse(result.is_stored)
     self.assertFalse(data.is_stored)
     self.assertFalse(node.is_stored)
     self.assertEqual(result, data + 1)
Example #3
 def do_test(self):
     """Run the test."""
     input_non_db = self.inputs.namespace.inputs['input_non_db']
     input_db = self.inputs.namespace.inputs['input_db']
     assert isinstance(input_non_db, int)
     assert not isinstance(input_non_db, Int)
     assert isinstance(input_db, Int)
     self.out('output', Int(input_db + input_non_db).store())
Example #4
def check_gemc_convergence(workchain,
                           calc,
                           conv_threshold=0.1,
                           additional_init_cycle=0,
                           additional_prod_cycle=0):
    """
    Checks whether a GCMC calc is converged.
    Checking is based on the error bar on average loading which is
    average number of molecules in each simulation box.
    """
    output_gemc = calc.outputs.output_parameters.get_dict()
    conv_stat = []

    for comp in calc.inputs.parameters['Component']:
        molec_per_box1_comp_average = output_gemc['box_one']["components"][
            comp]["loading_absolute_average"]
        molec_per_box2_comp_average = output_gemc['box_two']["components"][
            comp]["loading_absolute_average"]
        molec_per_box1_comp_dev = output_gemc['box_one']["components"][comp][
            "loading_absolute_dev"]
        molec_per_box2_comp_dev = output_gemc['box_two']["components"][comp][
            "loading_absolute_dev"]

        error_box1 = round(
            (molec_per_box1_comp_dev / molec_per_box1_comp_average), 2)
        error_box2 = round(
            (molec_per_box2_comp_dev / molec_per_box2_comp_average), 2)

        if (error_box1 <= conv_threshold) and (error_box2 <= conv_threshold):
            conv_stat.append(True)
        else:
            conv_stat.append(False)

    if not all(conv_stat):
        workchain.report(
            "GEMC calculation is NOT converged: continuing from restart...")
        workchain.ctx.inputs.retrieved_parent_folder = calc.outputs[
            'retrieved']
        workchain.ctx.inputs.parameters = modify_number_of_cycles(
            workchain.ctx.inputs.parameters,
            additional_init_cycle=Int(additional_init_cycle),
            additional_prod_cycle=Int(additional_prod_cycle))
        return ErrorHandlerReport(True, False)

    return None
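`modify_number_of_cycles` and `ErrorHandlerReport` are imported from elsewhere in the plugin and are not shown here; a rough sketch of the cycle-modifying helper, with the RASPA parameter keys assumed:

from aiida.engine import calcfunction
from aiida.orm import Dict


@calcfunction
def modify_number_of_cycles(parameters, additional_init_cycle, additional_prod_cycle):
    """Return a new Dict with the initialization and production cycle counts increased."""
    param_dict = parameters.get_dict()
    # 'NumberOfInitializationCycles' and 'NumberOfCycles' are assumed key names
    param_dict['GeneralSettings']['NumberOfInitializationCycles'] += additional_init_cycle.value
    param_dict['GeneralSettings']['NumberOfCycles'] += additional_prod_cycle.value
    return Dict(dict=param_dict)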
Example #5
    def setUpClass(cls, *args, **kwargs):
        super().setUpClass(*args, **kwargs)
        from aiida.orm import Data, Bool, Float, Int

        cls.node_base = Data().store()
        cls.node_bool_true = Bool(True).store()
        cls.node_bool_false = Bool(False).store()
        cls.node_float = Float(1.0).store()
        cls.node_int = Int(1).store()
Example #6
def run_multiply_add_workchain():
    """Run the `MultiplyAddWorkChain`."""
    MultiplyAddWorkChain = WorkflowFactory('arithmetic.multiply_add')

    code = load_code(CODENAME_ADD)
    inputs = {
        'x': Int(1),
        'y': Int(2),
        'z': Int(3),
        'code': code,
    }

    # Normal inputs should run just fine
    results, node = run.get_node(MultiplyAddWorkChain, **inputs)
    assert node.is_finished_ok, node.exit_status
    assert len(node.called) == 2
    assert 'result' in results
    assert results['result'].value == 5
Example #7
def check_gcmc_convergence(workchain,
                           calc,
                           conv_threshold=0.1,
                           additional_init_cycle=0,
                           additional_prod_cycle=0):
    """
    Checks whether a GCMC calc is converged.
    Checking is based on the error bar on average loading.
    """
    output_gcmc = calc.outputs.output_parameters.get_dict()
    structure_label = list(calc.get_incoming().nested()['framework'].keys())[0]
    conv_stat = []

    for comp in calc.inputs.parameters['Component']:

        loading_average_comp = output_gcmc[structure_label]["components"][
            comp]["loading_absolute_average"]
        loading_dev_comp = output_gcmc[structure_label]["components"][comp][
            "loading_absolute_dev"]

        # A zero average loading can happen for weakly adsorbed species;
        # we need to think about a better way to handle it. Currently, if this
        # happens for five iterations, the workchain will not continue.
        if loading_average_comp == 0:
            conv_stat.append(False)
        else:
            error = round((loading_dev_comp / loading_average_comp), 2)
            if error <= conv_threshold:
                conv_stat.append(True)
            else:
                conv_stat.append(False)

    if not all(conv_stat):
        workchain.report(
            "GCMC calculation is NOT converged: continuing from restart...")
        workchain.ctx.inputs.retrieved_parent_folder = calc.outputs[
            'retrieved']
        workchain.ctx.inputs.parameters = modify_number_of_cycles(
            workchain.ctx.inputs.parameters,
            additional_init_cycle=Int(additional_init_cycle),
            additional_prod_cycle=Int(additional_prod_cycle))
        return ErrorHandlerReport(True, False)

    return None
Example #8
    def test_operator(self):
        """Test all binary operators."""
        a = Float(2.2)
        b = Int(3)

        for op in [operator.add, operator.mul, operator.pow, operator.lt, operator.le, operator.gt, operator.ge, operator.iadd, operator.imul]:
            for x, y in [(a, b), (b, a)]:
                c = op(x, y)
                c_val = op(x.value, y.value)
                self.assertEqual(c._type, type(c_val))
                self.assertEqual(c, op(x.value, y.value))
Example #9
    def test_validate_incoming_sealed(self):
        """Verify that trying to add a link to a sealed node will raise."""
        data = Int(1).store()
        node = CalculationNode().store()
        node.seal()

        with self.assertRaises(exceptions.ModificationNotAllowed):
            node.validate_incoming(data,
                                   link_type=LinkType.INPUT_CALC,
                                   link_label='input')
Example #10
    def define(cls, spec):
        super(ThermalPhono3py, cls).define(spec)
        spec.input("structure", valid_type=StructureData)
        spec.input("ph_settings", valid_type=Dict)
        spec.input("es_settings", valid_type=Dict)
        # Optional arguments
        spec.input("optimize",
                   valid_type=Bool,
                   required=False,
                   default=Bool(True))
        spec.input("pressure",
                   valid_type=Float,
                   required=False,
                   default=Float(0.0))
        spec.input("use_nac",
                   valid_type=Bool,
                   required=False,
                   default=Bool(False))
        spec.input("chunks", valid_type=Int, required=False, default=Int(100))
        spec.input("step",
                   valid_type=Float,
                   required=False,
                   default=Float(1.0))
        spec.input("initial_cutoff",
                   valid_type=Float,
                   required=False,
                   default=Float(2.0))
        spec.input("gp_chunks", valid_type=Int, required=False, default=Int(1))
        # Convergence criteria
        spec.input("rtol",
                   valid_type=Float,
                   required=False,
                   default=Float(0.3))
        spec.input("atol",
                   valid_type=Float,
                   required=False,
                   default=Float(0.1))

        spec.outline(
            cls.harmonic_calculation,
            _While(cls.not_converged)(cls.get_data_sets,
                                      cls.get_thermal_conductivity),
            cls.collect_data)
Example #11
    def setUp(self):
        super(TestCalcFunction, self).setUp()
        self.assertIsNone(Process.current())
        self.default_int = Int(256)

        @calcfunction
        def test_calcfunction(data):
            return Int(data.value + 1)

        self.test_calcfunction = test_calcfunction
Example #12
    def parse(self, **kwargs):
        """Parse the contents of the output files stored in the `retrieved` output node."""
        from aiida.orm import Int

        output_folder = self.retrieved

        with output_folder.open(self.node.get_option('output_filename'), 'r') as handle:
            result = int(handle.read())

        self.out('sum', Int(result))
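The parser above raises if the output file is missing or malformed; a defensive variant, assuming a hypothetical `ERROR_READING_OUTPUT_FILE` exit code has been defined on the calculation spec:

    def parse(self, **kwargs):
        """Parse the retrieved output file, returning an exit code on failure."""
        from aiida.orm import Int

        filename = self.node.get_option('output_filename')

        try:
            with self.retrieved.open(filename, 'r') as handle:
                result = int(handle.read())
        except (OSError, ValueError):
            # assumes spec.exit_code(..., 'ERROR_READING_OUTPUT_FILE', ...) on the calculation class
            return self.exit_codes.ERROR_READING_OUTPUT_FILE

        self.out('sum', Int(result))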
Example #13
    def setUp(self):
        super().setUp()
        self.assertIsNone(Process.current())
        self.default_int = Int(256)

        @workfunction
        def test_workfunction(data):
            return data

        self.test_workfunction = test_workfunction
Example #14
    def test_validate_outgoing_sealed(self):
        """Verify that trying to add a link from a sealed node will raise."""
        data = Int(1).store()
        node = CalculationNode().store()
        node.seal()

        with self.assertRaises(exceptions.ModificationNotAllowed):
            node.validate_outgoing(data,
                                   link_type=LinkType.CREATE,
                                   link_label='create')
Example #15
def run_base_restart_workchain():
    """Run the `AddArithmeticBaseWorkChain` a few times for various inputs."""
    code = load_code(CODENAME_ADD)
    inputs = {
        'add': {
            'x': Int(1),
            'y': Int(2),
            'code': code,
            'settings': Dict(dict={'allow_negative': False}),
            'metadata': {
                'options': {
                    'resources': {
                        'num_machines': 1,
                        'num_mpiprocs_per_machine': 1
                    }
                }
            }
        }
    }

    # Normal inputs should run just fine
    results, node = run.get_node(ArithmeticAddBaseWorkChain, **inputs)
    assert node.is_finished_ok, node.exit_status
    assert len(node.called) == 1
    assert 'sum' in results
    assert results['sum'].value == 3

    # With one input negative, the sum will be negative which will fail the calculation, but the error handler should
    # fix it, so the second calculation should finish successfully
    inputs['add']['y'] = Int(-4)
    results, node = run.get_node(ArithmeticAddBaseWorkChain, **inputs)
    assert node.is_finished_ok, node.exit_status
    assert len(node.called) == 2
    assert 'sum' in results
    assert results['sum'].value == 5

    # The silly sanity check aborts the workchain if the sum is bigger than 10
    inputs['add']['y'] = Int(10)
    results, node = run.get_node(ArithmeticAddBaseWorkChain, **inputs)
    assert not node.is_finished_ok, node.process_state
    assert node.exit_status == ArithmeticAddBaseWorkChain.exit_codes.ERROR_TOO_BIG.status, node.exit_status  # pylint: disable=no-member
    assert len(node.called) == 1
Example #16
 def define(cls, spec):
     super().define(spec)
     spec.input('value', default=lambda: Str('A'))
     spec.input('n', default=lambda: Int(3))
     spec.outputs.dynamic = True
     spec.outline(
         cls.step1,
         if_(cls.is_a)(cls.step2).elif_(cls.is_b)(cls.step3).else_(cls.step4), # pylint: disable=no-member
         cls.step5,
         while_(cls.larger_then_n)(cls.step6,),
     )
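The outline above references predicate methods that are not part of the snippet; a minimal sketch of how they could look (only the names are fixed by the outline; the bodies, `ctx.counter`, and the omitted `is_b` are assumptions):

    def is_a(self):
        """Condition for the `if_` branch."""
        return self.inputs.value.value == 'A'

    def larger_then_n(self):
        """Condition for the `while_` loop; the misspelled `then` is the original method name."""
        return self.ctx.counter > self.inputs.n.value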
Example #17
    def define(cls, spec):
        super(CubegenCalculation, cls).define(spec)

        spec.input("parameters", valid_type=Dict, required=True, help='dictionary containing entries for cubes to be printed.')
        spec.input('parent_calc_folder', valid_type=RemoteData, required=True, help='the folder of a completed calculation containing the .fchk file')

        spec.input('retrieve_cubes', valid_type=Bool, required=False, default=Bool(False), help='should the cube be retrieved?')
        spec.input("gauss_memdef", valid_type=Int, required=False, default=Int(1024), help="Set the GAUSS_MEMDEF env variable to set the max memory in MB.")

        # Turn mpi off by default
        spec.input('metadata.options.withmpi', valid_type=bool, default=False)
Example #18
 def define(cls, spec):
     super().define(spec)
     spec.input('value', default=lambda: Str('A'))
     spec.input('n', default=lambda: Int(3))
     spec.outputs.dynamic = True
     spec.outline(
         cls.s1,
         if_(cls.isA)(cls.s2).elif_(cls.isB)(cls.s3).else_(cls.s4),
         cls.s5,
         while_(cls.ltN)(cls.s6),
     )
Example #19
    def report_wf(self):
        """Collect the final outputs of the workflow."""
        self.report(
            'Final step: the workflow will now collect some information about the calculations '
            'in the "path" output node, together with the relaxed SCF calculation')

        self.report('Relaxation scheme performed: {}'.format(self.ctx.conv_options['relaxation_scheme']))

        path = List(list=self.ctx.path).store()
        rel_scf = Int(self.ctx.scf.pk).store()
        self.out('path', path)

        self.out('relaxed_scf', rel_scf)
Example #20
def example_multistage_al(cp2k_code):
    """Example usage: verdi run thistest.py cp2k@localhost"""

    print("Testing CP2K multistage workchain on Al (RKS, needs smearing)...")
    print(
        "EXPECTED: the OT (settings_0) will converge to a negative bandgap, then we switch to SMEARING (settings_1)"
    )

    thisdir = os.path.dirname(os.path.abspath(__file__))
    structure = StructureData(
        ase=ase.io.read(os.path.join(thisdir, '../data/Al.cif')))

    # testing user change of parameters and protocol
    parameters = Dict(
        dict={'FORCE_EVAL': {
            'DFT': {
                'MGRID': {
                    'CUTOFF': 250,
                }
            }
        }})
    protocol_mod = Dict(
        dict={
            'initial_magnetization': {
                'Al': 0
            },
            'settings_0': {
                'FORCE_EVAL': {
                    'DFT': {
                        'SCF': {
                            'OUTER_SCF': {
                                'MAX_SCF': 5,
                            }
                        }
                    }
                }
            }
        })

    # Construct process builder
    builder = Cp2kMultistageWorkChain.get_builder()
    builder.structure = structure
    builder.protocol_tag = Str('test')
    builder.starting_settings_idx = Int(0)
    builder.protocol_modify = protocol_mod
    builder.cp2k_base.cp2k.parameters = parameters
    builder.cp2k_base.cp2k.code = cp2k_code
    builder.cp2k_base.cp2k.metadata.options.resources = {
        "num_machines": 1,
        "num_mpiprocs_per_machine": 1,
    }
    builder.cp2k_base.cp2k.metadata.options.max_wallclock_seconds = 1 * 3 * 60

    run(builder)
Example #21
def run_base_restart_workchain():
    """Run the `AddArithmeticBaseWorkChain` a few times for various inputs."""
    code = load_code(CODENAME_ADD)
    inputs = {
        'add': {
            'x': Int(1),
            'y': Int(2),
            'code': code,
        }
    }

    # Normal inputs should run just fine
    results, node = run.get_node(ArithmeticAddBaseWorkChain, **inputs)
    assert node.is_finished_ok, node.exit_status
    assert len(node.called) == 1
    assert 'sum' in results
    assert results['sum'].value == 3

    # With one input negative, the sum will be negative which will fail the calculation, but the error handler should
    # fix it, so the second calculation should finish successfully
    inputs['add']['y'] = Int(-4)
    results, node = run.get_node(ArithmeticAddBaseWorkChain, **inputs)
    assert node.is_finished_ok, node.exit_status
    assert len(node.called) == 2
    assert 'sum' in results
    assert results['sum'].value == 5

    # The silly sanity check aborts the workchain if the sum is bigger than 10
    inputs['add']['y'] = Int(10)
    results, node = run.get_node(ArithmeticAddBaseWorkChain, **inputs)
    assert not node.is_finished_ok, node.process_state
    assert node.exit_status == ArithmeticAddBaseWorkChain.exit_codes.ERROR_TOO_BIG.status, node.exit_status  # pylint: disable=no-member
    assert len(node.called) == 1

    # Check that overriding a handler's default enabled status works
    inputs['add']['y'] = Int(1)
    inputs['handler_overrides'] = Dict(dict={'disabled_handler': True})
    results, node = run.get_node(ArithmeticAddBaseWorkChain, **inputs)
    assert not node.is_finished_ok, node.process_state
    assert node.exit_status == ArithmeticAddBaseWorkChain.exit_codes.ERROR_ENABLED_DOOM.status, node.exit_status  # pylint: disable=no-member
    assert len(node.called) == 1
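The `handler_overrides` input toggles handlers registered on the workchain with the `process_handler` decorator; a sketch of how a default-disabled handler could be declared, shown without the rest of the class (the priority and exit-code wiring are assumptions):

from aiida.engine import BaseRestartWorkChain, ProcessHandlerReport, process_handler


class ArithmeticAddBaseWorkChain(BaseRestartWorkChain):

    @process_handler(priority=100, enabled=False)
    def disabled_handler(self, node):
        """Handler disabled by default; it only runs when switched on through `handler_overrides`."""
        return ProcessHandlerReport(do_break=True, exit_code=self.exit_codes.ERROR_ENABLED_DOOM)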
Example #22
    def define(cls, spec):
        #yapf: disable
        super(SqsCalculation, cls).define(spec)
        spec.input('metadata.options.resources', valid_type=dict, default={'num_machines':1, 'num_mpiprocs_per_machine':1}, non_db=True)
        spec.input('metadata.options.parser_name', valid_type=str, default='ce.gensqs', non_db=True)
        spec.input('metadata.options.input_filename', valid_type=str, default='aiida.json', non_db=True)
        spec.input('metadata.options.output_filename', valid_type=str, default='aiida.out', non_db=True)
        spec.input('structure', valid_type=StructureData, help='prototype structure to expand')
        spec.input('pbc', valid_type=List, default=List(list=[True, True, True]))
        spec.input('chemical_symbols', valid_type=List, help='A list with one entry per site, where each entry lists the chemical symbols allowed on that site.')
        spec.input('target_concentrations', valid_type=Dict, help='target concentrations of the elements in the SQS')
        spec.input('include_smaller_cells', valid_type=Bool, default=Bool(False), help='if False, only cells with more than 32 atoms will be calculated')
        spec.input('cutoffs', valid_type=List, help='cutoffs for each NN distance')
        spec.input('max_size', valid_type=Int, default=Int(16), help='maximum size of the supercell, in multiples of the input structure')
        spec.input('n_steps', valid_type=Int, default=Int(10000), help='max annealing steps to run')

        spec.output('sqs', valid_type=StructureData, help='sqs structure')
        spec.output('cluster_vector', valid_type=List, help='cluster vector of sqs')
        spec.output('cluster_space', valid_type=ClusterSpaceData, help='cluster space used to generate sqs')

        spec.exit_code(100, 'ERROR_MISSING_OUTPUT_FILES', message='Calculation did not produce all expected output files.')
Example #23
    def setup(self):
        """Call the `setup` of the `BaseRestartWorkChain` and then create the inputs dictionary in `self.ctx.inputs`.
        This `self.ctx.inputs` dictionary will be used by the `BaseRestartWorkChain` to submit the calculations in the
        internal loop."""

        super().setup()
        self.ctx.inputs = AttributeDict(
            self.exposed_inputs(RaspaCalculation, 'raspa'))
        if "WriteBinaryRestartFileEvery" not in self.ctx.inputs.parameters[
                "GeneralSettings"]:
            self.ctx.inputs.parameters = add_write_binary_restart(
                self.ctx.inputs.parameters, Int(1000))
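`add_write_binary_restart` is defined elsewhere in the plugin; a rough sketch, assuming it simply injects the keyword checked for above:

from aiida.engine import calcfunction
from aiida.orm import Dict


@calcfunction
def add_write_binary_restart(parameters, write_every):
    """Return a new Dict with `WriteBinaryRestartFileEvery` set in `GeneralSettings`."""
    param_dict = parameters.get_dict()
    param_dict['GeneralSettings']['WriteBinaryRestartFileEvery'] = write_every.value
    return Dict(dict=param_dict)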
Example #24
 def define(cls, spec):
     super().define(spec)
     spec.input(
         'traj',
         required=False,
         valid_type=(TrajectoryData),
         help=
         'If the training is started from scratch, this will create the input file starting from these raw trajectories. The following array names must be present: forces, cells, positions and scf_total_energy (the potential energy that the network has to learn). If the training is restarted from a previous calculation, this input is not needed.'
     )
     spec.input(
         'param',
         required=True,
         valid_type=(Dict),
         help=
         'Input dictionary, as documented in the deepmd package. The parameters that set the names of the various input files are set by this plugin.'
     )
     spec.input('nline_per_set',
                default=lambda: Int(2000),
                valid_type=(Int))
     spec.input('metadata.options.parser_name',
                valid_type=six.string_types,
                default='DeepMdTrainParser')
     spec.input('metadata.options.input_filename',
                valid_type=str,
                default='aiida.in')
     spec.input('metadata.options.output_filename',
                valid_type=str,
                default='aiida.out')
     spec.input(
         'restart_calculation_folder',
         valid_type=aiida.orm.RemoteData,
         required=False,
         help=
         'If provided the training procedure will restart using the data that is present in this folder (you can find the folder in the outputs of the completed calculation, as well as the calculated network weights)'
     )
     spec.output('lcurve', valid_type=ArrayData)
     spec.output('param', valid_type=Dict)
     spec.exit_code(
         400, 'ERROR_NO_TRAINING_DATA',
         'You must provide the training set either with a restart folder or with trajectory data.'
     )
     spec.exit_code(
         300,
         'ERROR_NO_RETRIEVED_FOLDER',
         message='The retrieved folder data node could not be accessed.')
     spec.exit_code(
         310,
         'ERROR_READING_OUTPUT_FILE',
         message=
         'The output file could not be read from the retrieved folder.')
     spec.exit_code(320,
                    'ERROR_INVALID_OUTPUT',
                    message='The output file contains invalid output.')
Example #25
def main(codelabel):
    """Example usage: verdi run thistest.py cp2k@localhost"""

    print("Testing CP2K multistage workchain on Al (RKS, needs smearing)...")
    print("EXPECTED: the OT (settings_0) will converge to a negative bandgap, then we switch to SMEARING (settings_1)")

    code = Code.get_from_string(codelabel)

    thisdir = os.path.dirname(os.path.abspath(__file__))
    structure = StructureData(ase=ase.io.read(os.path.join(thisdir, '../data/Al.cif')))

    # testing user change of parameters and protocol
    parameters = Dict(dict={'FORCE_EVAL': {'DFT': {'MGRID': {'CUTOFF': 250,}}}})
    protocol_mod = Dict(dict={
        'initial_magnetization': {
            'Al': 0
        },
        'settings_0': {
            'FORCE_EVAL': {
                'DFT': {
                    'SCF': {
                        'OUTER_SCF': {
                            'MAX_SCF': 5,
                        }
                    }
                }
            }
        }
    })
    options = {
        "resources": {
            "num_machines": 1,
            "num_mpiprocs_per_machine": 1,
        },
        "max_wallclock_seconds": 1 * 3 * 60,
    }
    inputs = {
        'structure': structure,
        'protocol_tag': Str('test'),
        'starting_settings_idx': Int(0),
        'protocol_modify': protocol_mod,
        'cp2k_base': {
            'cp2k': {
                'parameters': parameters,
                'code': code,
                'metadata': {
                    'options': options,
                }
            }
        }
    }

    run(Cp2kMultistageWorkChain, **inputs)
Example #26
    def extract_dos_data_from_folder(self, folder, last_calc):
        """
        Get DOS data and parse files.
        """

        # initialize in case dos data is not extracted
        dosXyDatas = None

        # get list of files in directory (needed since SandboxFolder does not have `list_object_names` method)
        # also extract absolute path of folder (needed by parse_impdosfiles since calcfunction does not work with SandboxFolder as input)
        if isinstance(folder, SandboxFolder):
            folder_abspath = folder.abspath
            filelist = os.listdir(folder_abspath)
        else:
            filelist = folder.list_object_names()
            with folder.open(filelist[0]) as tmpfile:
                folder_abspath = tmpfile.name.replace(filelist[0], '')

        # check if out_ldos* files are there and parse dos files
        if 'out_ldos.interpol.atom=01_spin1.dat' in filelist:
            # extract EF and number of atoms from kkrflex_writeout calculation
            kkrflex_writeout = load_node(self.ctx.pk_flexcalc)
            parent_calc_kkr_converged = kkrflex_writeout.inputs.parent_folder.get_incoming(
                node_class=CalcJobNode).first().node
            ef = parent_calc_kkr_converged.outputs.output_parameters.get_dict(
            ).get('fermi_energy')
            last_calc_output_params = last_calc.outputs.output_parameters
            natom = last_calc_output_params.get_dict().get(
                'number_of_atoms_in_unit_cell')
            # parse dosfiles using nspin, EF and Natom inputs
            dosXyDatas = parse_impdosfiles(Str(folder_abspath), Int(natom),
                                           Int(self.ctx.nspin), Float(ef))
            dos_extracted = True
        else:
            # dosXyDatas stays None, as initialized above
            dos_extracted = False

        return dos_extracted, dosXyDatas
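`parse_impdosfiles` is a calcfunction from the same plugin; only its call signature is fixed by the snippet above, so the body below is a placeholder sketch:

import numpy as np

from aiida.engine import calcfunction
from aiida.orm import XyData


@calcfunction
def parse_impdosfiles(folder_abspath, natom, nspin, ef):
    """Read the out_ldos* files in `folder_abspath` and return the DOS (parsing logic omitted)."""
    dos = XyData()
    # placeholder arrays; the real implementation fills these from the dos files,
    # shifting the energies by the Fermi energy `ef.value`
    dos.set_x(np.zeros(1), 'energy', 'eV')
    dos.set_y(np.zeros(1), 'dos', 'states/eV')
    return {'dos_data': dos}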
Example #27
    def define(cls, spec):

        super(YamboRestart, cls).define(spec)
        spec.expose_inputs(YamboCalculation, namespace='yambo',
                           namespace_options={'required': True}, exclude=['parent_folder'])
        spec.input("parent_folder", valid_type=RemoteData, required=True)
        spec.input("max_walltime", valid_type=Int, default=lambda: Int(86400))
        spec.input(
            "max_number_of_nodes",
            valid_type=Int,
            default=lambda: Int(0),
            help=
            'max number of nodes for restarts; if 0, it does not increase the number of nodes'
        )
        spec.input("code_version", valid_type=Str, default=lambda: Str('4.5'))

        ##################################### OUTLINE ####################################

        spec.outline(
            cls.setup,
            cls.validate_parameters,
            cls.validate_resources,
            cls.validate_parent,
            while_(cls.should_run_process)(
                cls.run_process,
                cls.inspect_process,
            ),
            cls.results,
        )

        ###################################################################################

        spec.expose_outputs(YamboCalculation)

        spec.exit_code(
            300,
            'ERROR_UNRECOVERABLE_FAILURE',
            message='The calculation failed with an unrecoverable error.')
Example #28
    def test_operator(self):
        """Test all binary operators."""
        term_a = Float(2.2)
        term_b = Int(3)

        for oper in [
            operator.add, operator.mul, operator.pow, operator.lt, operator.le, operator.gt, operator.ge, operator.iadd,
            operator.imul
        ]:
            for term_x, term_y in [(term_a, term_b), (term_b, term_a)]:
                res = oper(term_x, term_y)
                c_val = oper(term_x.value, term_y.value)
                self.assertEqual(res._type, type(c_val))  # pylint: disable=protected-access
                self.assertEqual(res, oper(term_x.value, term_y.value))
Example #29
def check_widom_convergence(workchain,
                            calc,
                            conv_threshold=0.1,
                            additional_cycle=0):
    """
    Checks whether a Widom particle insertion is converged.
    Checking is based on the error bar on Henry coefficient.
    """
    output_widom = calc.outputs.output_parameters.get_dict()
    structure_label = list(calc.get_incoming().nested()['framework'].keys())[0]
    conv_stat = []

    for comp in calc.inputs.parameters['Component']:
        kh_average_comp = output_widom[structure_label]["components"][comp][
            "henry_coefficient_average"]
        kh_dev_comp = output_widom[structure_label]["components"][comp][
            "henry_coefficient_dev"]

        error = round((kh_dev_comp / kh_average_comp), 2)
        if error <= conv_threshold:
            conv_stat.append(True)
        else:
            conv_stat.append(False)

    if not all(conv_stat):
        workchain.report(
            "Widom particle insertion calculation is NOT converged: repeating with more trials..."
        )
        workchain.ctx.inputs.retrieved_parent_folder = calc.outputs[
            'retrieved']
        workchain.ctx.inputs.parameters = modify_number_of_cycles(
            workchain.ctx.inputs.parameters,
            additional_init_cycle=Int(0),
            additional_prod_cycle=Int(additional_cycle))
        return ErrorHandlerReport(True, False)

    return None
Example #30
 def define(cls, spec):
     super().define(spec)
     spec.expose_inputs(PhonopyWorkChain,
                        exclude=['immigrant_calculation_folders',
                                 'calculation_nodes', 'dry_run'])
     spec.input('max_iteration', valid_type=Int, default=lambda: Int(10))
     spec.input('number_of_snapshots', valid_type=Int,
                default=lambda: Int(100))
     spec.input('number_of_steps_for_fitting', valid_type=Int,
                default=lambda: Int(4))
     spec.input('temperature', valid_type=Float,
                default=lambda: Float(300.0))
     spec.input('include_ratio', valid_type=Float, default=lambda: Float(1))
     spec.input('linear_decay', valid_type=Bool,
                default=lambda: Bool(False))
     spec.input('random_seed', valid_type=Int, required=False)
     spec.input('initial_nodes', valid_type=Dict, required=False)
     spec.outline(
         cls.initialize,
         if_(cls.import_initial_nodes)(
             cls.set_initial_nodes,
         ).else_(
             cls.run_initial_phonon,
         ),
         while_(cls.is_loop_finished)(
             cls.collect_displacements_and_forces,
             if_(cls.remote_phonopy)(
                 cls.run_force_constants_calculation_remote,
                 cls.generate_displacements,
             ).else_(
                 cls.generate_displacements_local,
             ),
             cls.run_phonon,
         ),
         cls.finalize,
     )