Code example #1
File: test_launch.py  Project: CasperWA/aiida_core
 def test_submit_store_provenance_false(self):
     """Verify that submitting with `store_provenance=False` raises."""
     with self.assertRaises(exceptions.InvalidOperation):
         launch.submit(AddWorkChain,
                       term_a=self.term_a,
                       term_b=self.term_b,
                       metadata={'store_provenance': False})
Code example #2
def launch_process(process, daemon, **inputs):
    """Launch a process with the given inputs.

    If not sent to the daemon, the results will be displayed after the calculation finishes.

    :param process: the process class
    :param daemon: boolean, if True will submit to the daemon instead of running in current interpreter
    :param inputs: inputs for the process
    """
    import click

    from aiida.engine import launch, Process, ProcessBuilder

    if isinstance(process, ProcessBuilder):
        process_name = process.process_class.__name__
    elif issubclass(process, Process):
        process_name = process.__name__
    else:
        raise TypeError('invalid type for process: {}'.format(process))

    if daemon:
        node = launch.submit(process, **inputs)
        click.echo('Submitted {}<{}> to the daemon'.format(
            process_name, node.pk))
    else:
        if inputs.get('metadata', {}).get('dry_run', False):
            click.echo('Running a dry run for {}...'.format(process_name))
        else:
            click.echo('Running a {}...'.format(process_name))
        _, node = launch.run_get_node(process, **inputs)
        echo_process_results(node)
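A minimal usage sketch for launch_process (hypothetical: AddWorkChain and the inputs term_a/term_b are illustrative stand-ins, not part of the helper itself):

from aiida import orm

# Illustrative inputs; any Process subclass or ProcessBuilder with matching ports works.
inputs = {'term_a': orm.Int(1), 'term_b': orm.Int(2)}

# Run in the current interpreter (blocking) and echo the results afterwards
launch_process(AddWorkChain, daemon=False, **inputs)

# Or hand the process over to the daemon (non-blocking)
launch_process(AddWorkChain, daemon=True, **inputs)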
Code example #3
    def test_run_base_class(self):
        """Verify that it is impossible to run, submit or instantiate a base `WorkChain` class."""
        with self.assertRaises(exceptions.InvalidOperation):
            WorkChain()

        with self.assertRaises(exceptions.InvalidOperation):
            launch.run(WorkChain)

        with self.assertRaises(exceptions.InvalidOperation):
            launch.run.get_node(WorkChain)

        with self.assertRaises(exceptions.InvalidOperation):
            launch.run.get_pk(WorkChain)

        with self.assertRaises(exceptions.InvalidOperation):
            launch.submit(WorkChain)
Code example #4
 def run(self, **kwargs):
     self._run_options(**kwargs)
     name = self.run_options.get('name', '')
     asynchronous = self.run_options.get('asynchronous', False)
     if self.run_options['skip'] and name in self.logfiles:
         return self.logfiles[name]
     else:
         run_args = self.pre_processing()
         if asynchronous:
             node = launch.submit(self.job,
                                  code=self.code,
                                  metadata=self.metadata)
             run_results = {'node': node}
             dict_merge(dest=run_args, src=run_results)
             return run_args
         else:
             run_results = self.process_run(**run_args)
             dict_merge(dest=run_args, src=run_results)
             self.logfiles[name] = SystemCalculator.post_processing(
                 self, **run_args)
             print("setting data dir to " + self.outputs[name]
                   ['retrieved']._repository._get_base_folder().abspath)
             setattr(
                 self.logfiles[name], "data_directory", self.outputs[name]
                 ['retrieved']._repository._get_base_folder().abspath)
             return self.logfiles[name]
Code example #5
File: test_calc_job.py  Project: CasperWA/aiida_core
    def test_run_base_class(self):
        """Verify that it is impossible to run, submit or instantiate a base `CalcJob` class."""
        with self.assertRaises(exceptions.InvalidOperation):
            CalcJob()

        with self.assertRaises(exceptions.InvalidOperation):
            launch.run(CalcJob)

        with self.assertRaises(exceptions.InvalidOperation):
            launch.run_get_node(CalcJob)

        with self.assertRaises(exceptions.InvalidOperation):
            launch.run_get_pk(CalcJob)

        with self.assertRaises(exceptions.InvalidOperation):
            launch.submit(CalcJob)
Code example #6
 def submit(self, **kwargs):
     self._run_options(**kwargs)
     name = self.run_options.get('name', '')
     if self.run_options['skip'] and name in self.logfiles:
         return self.logfiles[name]
     else:
         run_args = self.pre_processing()
         node = launch.submit(self.job,
                              code=self.code,
                              metadata=self.metadata)
         run_results = {'node': node}
         dict_merge(dest=run_args, src=run_results)
         return run_args
Code example #7
def test_window_search_submit(
    configure_with_daemon, window_search_builder, wait_for, assert_finished
):  # pylint: disable=unused-argument,redefined-outer-name
    """
    Submit a window_search workflow.
    """
    from aiida.orm import load_node
    from aiida.engine.launch import submit

    pk = submit(window_search_builder).pk
    wait_for(pk)
    assert_finished(pk)
    node = load_node(pk)
    assert all(
        key in node.outputs
        for key in ['cost_value', 'tb_model', 'window', 'plot']
    )
Code example #8
File: test_launch.py  Project: CasperWA/aiida_core
    def test_launchers_dry_run_no_provenance(self):
        """Test the launchers in `dry_run` mode with `store_provenance=False`."""
        from aiida.plugins import CalculationFactory

        ArithmeticAddCalculation = CalculationFactory('arithmetic.add')  # pylint: disable=invalid-name

        code = orm.Code(input_plugin_name='arithmetic.add',
                        remote_computer_exec=[self.computer,
                                              '/bin/true']).store()

        inputs = {
            'code': code,
            'x': orm.Int(1),
            'y': orm.Int(1),
            'metadata': {
                'dry_run': True,
                'store_provenance': False,
                'options': {
                    'resources': {
                        'num_machines': 1,
                        'num_mpiprocs_per_machine': 1
                    }
                }
            }
        }

        result = launch.run(ArithmeticAddCalculation, **inputs)
        self.assertEqual(result, {})

        result, pk = launch.run_get_pk(ArithmeticAddCalculation, **inputs)
        self.assertEqual(result, {})
        self.assertIsNone(pk)

        result, node = launch.run_get_node(ArithmeticAddCalculation, **inputs)
        self.assertEqual(result, {})
        self.assertIsInstance(node, orm.CalcJobNode)
        self.assertFalse(node.is_stored)
        self.assertIsInstance(node.dry_run_info, dict)
        self.assertIn('folder', node.dry_run_info)
        self.assertIn('script_filename', node.dry_run_info)

        node = launch.submit(ArithmeticAddCalculation, **inputs)
        self.assertIsInstance(node, orm.CalcJobNode)
        self.assertFalse(node.is_stored)
Code example #9
def test_parse_submit(
        configure_with_daemon,  # pylint: disable=unused-argument
        assert_finished,
        wait_for,
        get_tbmodels_parse_builder,  # pylint: disable=redefined-outer-name
        check_calc_ok):
    """
    Test the parse calculation when submitted to the daemon.
    """
    from aiida.orm import SinglefileData
    from aiida.engine.launch import submit

    builder = get_tbmodels_parse_builder
    calc = submit(builder)
    wait_for(calc.pk)
    assert_finished(calc.pk)
    check_calc_ok(calc)

    assert isinstance(calc.outputs.tb_model, SinglefileData)
    assert calc.get_hash() == calc.get_extra('_aiida_hash')
Code example #10
File: test_launch.py  Project: CasperWA/aiida_core
    def test_launchers_dry_run(self):
        """All launchers should work with `dry_run=True`, even `submit` which forwards to `run`."""
        from aiida.plugins import CalculationFactory

        ArithmeticAddCalculation = CalculationFactory('arithmetic.add')  # pylint: disable=invalid-name

        code = orm.Code(input_plugin_name='arithmetic.add',
                        remote_computer_exec=[self.computer,
                                              '/bin/true']).store()

        inputs = {
            'code': code,
            'x': orm.Int(1),
            'y': orm.Int(1),
            'metadata': {
                'dry_run': True,
                'options': {
                    'resources': {
                        'num_machines': 1,
                        'num_mpiprocs_per_machine': 1
                    }
                }
            }
        }

        result = launch.run(ArithmeticAddCalculation, **inputs)
        self.assertEqual(result, {})

        result, pk = launch.run_get_pk(ArithmeticAddCalculation, **inputs)
        self.assertEqual(result, {})
        self.assertIsInstance(pk, int)

        result, node = launch.run_get_node(ArithmeticAddCalculation, **inputs)
        self.assertEqual(result, {})
        self.assertIsInstance(node, orm.CalcJobNode)
        self.assertIsInstance(node.dry_run_info, dict)
        self.assertIn('folder', node.dry_run_info)
        self.assertIn('script_filename', node.dry_run_info)

        node = launch.submit(ArithmeticAddCalculation, **inputs)
        self.assertIsInstance(node, orm.CalcJobNode)
Code example #11
File: conftest.py  Project: zooks97/aiida-optimize
    def inner(engine, func_workchain, engine_kwargs, evaluate=None):  # pylint: disable=missing-docstring,useless-suppression
        from aiida_optimize import OptimizationWorkChain
        from aiida.orm import load_node
        from aiida.orm import Dict
        from aiida.engine.launch import run_get_node, submit

        inputs = dict(
            engine=engine,
            engine_kwargs=Dict(dict=dict(engine_kwargs)),
            evaluate_process=func_workchain,
            evaluate=evaluate if evaluate is not None else {},
        )

        if request.param == 'run':
            _, result_node = run_get_node(OptimizationWorkChain, **inputs)
        else:
            assert request.param == 'submit'
            pk = submit(OptimizationWorkChain, **inputs).pk
            wait_for(pk)
            result_node = load_node(pk)
        return result_node
Code example #12
def test_fp_tb_submit(
    configure_with_daemon,  # pylint: disable=unused-argument
    get_fp_tb_input,  # pylint: disable=redefined-outer-name
    wait_for,
):
    """
    Submits the DFT tight-binding workflow on an InSb sample.
    """
    from aiida.orm import load_node
    from aiida.engine.launch import submit
    from aiida.orm.querybuilder import QueryBuilder
    from aiida_bands_inspect.calculations.difference import DifferenceCalculation
    from aiida_tbextraction.fp_tb import FirstPrinciplesTightBinding

    query = QueryBuilder()
    query.append(DifferenceCalculation)

    pk = submit(FirstPrinciplesTightBinding, **get_fp_tb_input).pk
    wait_for(pk)
    result = load_node(pk).get_outputs_dict()
    print(result)
    assert all(key in result for key in ['cost_value', 'tb_model'])
Code example #13
File: launch_add.py  Project: borellim/aiida_add
                'num_machines': Int(1)
            },
            'max_wallclock_seconds': Float(30*60),  # 30 min
            'withmpi': False,
        }
    }
}

daemon = False  # set to True to use the daemon (non-blocking),
                # False to use a local runner (blocking)

if submit_test:
    raise NotImplementedError("Is there an equivalent for calc.submit_test?")
    # subfolder, script_filename = calc.submit_test()
    # print "Test submit file in {}".format(os.path.join(
    #     os.path.relpath(subfolder.abspath),
    #     script_filename
    #     ))
else:
    if daemon:
        new_calc = launch.submit(AddCalculation, **inputs)
        click.echo('Submitted {}<{}> to the daemon'.format(AddCalculation.__name__, new_calc.pk))
    else:
        click.echo('Running an add calculation... ')
        _, new_calc = launch.run_get_node(AddCalculation, **inputs)
        click.echo('AddCalculation<{}> terminated with state: {}'.format(new_calc.pk, new_calc.process_state))
        click.echo('\n{link:25s} {node}'.format(link='Output link', node='Node pk and type'))
        click.echo('{s}'.format(s='-' * 60))
        for triple in sorted(new_calc.get_outgoing().all(), key=lambda triple: triple.link_label):
            click.echo('{:25s} {}<{}> '.format(triple.link_label, triple.node.__class__.__name__, triple.node.pk))
Code example #14
def launch_cif_clean(cif_filter, cif_select, group_cif_raw, group_cif_clean, group_structure, group_workchain, node,
    max_entries, skip_check, parse_engine, daemon):
    """Run the `CifCleanWorkChain` on the entries in a group with raw imported CifData nodes.

    It will use the `cif_filter` and `cif_select` scripts of `cod-tools` to clean the input cif file. Additionally, if
    the `group-structure` option is passed, the workchain will also attempt to use the given parse engine to parse the
    cleaned `CifData` to obtain the structure and then use SeeKpath to find the primitive structure, which, if
    successful, will be added to the `group-structure` group.
    """
    # pylint: disable=too-many-arguments,too-many-locals,too-many-statements,too-many-branches
    import inspect
    from datetime import datetime

    from aiida import orm
    from aiida.engine import launch
    from aiida.plugins import DataFactory, WorkflowFactory
    from aiida_codtools.cli.utils.display import echo_utc
    from aiida_codtools.common.resources import get_default_options
    from aiida_codtools.common.utils import get_input_node

    CifData = DataFactory('cif')  # pylint: disable=invalid-name
    CifCleanWorkChain = WorkflowFactory('codtools.cif_clean')  # pylint: disable=invalid-name

    # Collect the dictionary of not None parameters passed to the launch script and print to screen
    local_vars = locals()
    launch_parameters = {}
    for arg in inspect.getargspec(launch_cif_clean.callback).args:  # pylint: disable=deprecated-method
        if arg in local_vars and local_vars[arg]:
            launch_parameters[arg] = local_vars[arg]

    click.echo('=' * 80)
    click.echo('Starting on {}'.format(datetime.utcnow().isoformat()))
    click.echo('Launch parameters: {}'.format(launch_parameters))
    click.echo('-' * 80)

    if group_cif_raw is not None:

        # Get CifData nodes that should actually be submitted according to the input filters
        builder = orm.QueryBuilder()
        builder.append(orm.Group, filters={'id': {'==': group_cif_raw.pk}}, tag='group')

        if skip_check:
            builder.append(CifData, with_group='group', project=['*'])
        else:
            # Get CifData nodes that already have an associated workchain node in the `group_workchain` group.
            submitted = orm.QueryBuilder()
            submitted.append(orm.WorkChainNode, tag='workchain')
            submitted.append(orm.Group, filters={'id': {'==': group_workchain.pk}}, with_node='workchain')
            submitted.append(orm.CifData, with_outgoing='workchain', tag='data', project=['id'])
            submitted_nodes = set(pk for entry in submitted.all() for pk in entry)

            if submitted_nodes:
                filters = {'id': {'!in': submitted_nodes}}
            else:
                filters = {}

            # Get all CifData nodes that are not included in the submitted node list
            builder.append(CifData, with_group='group', filters=filters, project=['*'])

        if max_entries is not None:
            builder.limit(int(max_entries))

        nodes = [entry[0] for entry in builder.all()]

    elif node is not None:

        nodes = [node]

    else:
        raise click.BadParameter('you have to specify either --group-cif-raw or --node')

    counter = 0

    node_cif_filter_parameters = get_input_node(orm.Dict, {
        'fix-syntax-errors': True,
        'use-c-parser': True,
        'use-datablocks-without-coordinates': True,
    })

    node_cif_select_parameters = get_input_node(orm.Dict, {
        'canonicalize-tag-names': True,
        'dont-treat-dots-as-underscores': True,
        'invert': True,
        'tags': '_publ_author_name,_citation_journal_abbrev',
        'use-c-parser': True,
    })

    node_parse_engine = get_input_node(orm.Str, parse_engine)
    node_site_tolerance = get_input_node(orm.Float, 5E-4)
    node_symprec = get_input_node(orm.Float, 5E-3)

    for cif in nodes:

        inputs = {
            'cif': cif,
            'cif_filter': {
                'code': cif_filter,
                'parameters': node_cif_filter_parameters,
                'metadata': {
                    'options': get_default_options()
                }
            },
            'cif_select': {
                'code': cif_select,
                'parameters': node_cif_select_parameters,
                'metadata': {
                    'options': get_default_options()
                }
            },
            'parse_engine': node_parse_engine,
            'site_tolerance': node_site_tolerance,
            'symprec': node_symprec,
        }

        if group_cif_clean is not None:
            inputs['group_cif'] = group_cif_clean

        if group_structure is not None:
            inputs['group_structure'] = group_structure

        if daemon:
            workchain = launch.submit(CifCleanWorkChain, **inputs)
            echo_utc('CifData<{}> submitting: {}<{}>'.format(cif.pk, CifCleanWorkChain.__name__, workchain.pk))
        else:
            echo_utc('CifData<{}> running: {}'.format(cif.pk, CifCleanWorkChain.__name__))
            _, workchain = launch.run_get_node(CifCleanWorkChain, **inputs)

        if group_workchain is not None:
            group_workchain.add_nodes([workchain])

        counter += 1

        if max_entries is not None and counter >= max_entries:
            break

    click.echo('-' * 80)
    click.echo('Submitted {} new workchains'.format(counter))
    click.echo('Stopping on {}'.format(datetime.utcnow().isoformat()))
    click.echo('=' * 80)
Code example #15
        },
        'comp': {
            'kmax': 3.0,
        },
        'kpt': {
            'nkpt': 100,
        }
    })

################################
# 3. submit the workchain with its inputs.

inputs = {}
inputs['wf_parameters'] = wf_para
inputs['structure'] = structure
inputs['calc_parameters'] = parameters
inputs['fleur'] = fleur_code
inputs['inpgen'] = inpgen_code
inputs['description'] = 'test fleur_eos_wc run on W'
inputs['label'] = 'eos test on W'
inputs['options'] = options

# submit workchain to the daemon
# Notice that the nodes we created before are not yet stored in the database,
# but AiiDA will store them automatically when we launch the workchain.
# To reuse nodes it might be a good idea to store them by hand first and then load them.
res = submit(FleurEosWorkChain, **inputs)

# You can also run the workflow in the python interpreter as blocking
#res = run(fleur_eos_wc, **inputs)
Code example #16
File: submit.py  Project: hezhengda/hzdplugins
def qePwOriginalSubmit(codename,
                       structure,
                       kpoints,
                       pseudo_family,
                       metadata,
                       pseudo_dict={},
                       add_parameters={},
                       del_parameters={},
                       cluster_options={},
                       settings_dict={}):
    """

    :code:`qePwOriginalSubmit` submits an original computational task to the desired computer using a given code.

    :param codename: (mandatory) A string represents the code for pw.x that you want to use.
    :type codename: python string object

    :param structure: (mandatory) The structure of your system.
    :type structure: aiida.orm.StructureData object

    :param add_parameters: (optional, default = {}) The parameters that you want to set. The dictionary can be
                           incomplete, because the function provides a default set of parameters that covers most
                           cases; anything you pass here overrides those defaults. The format mirrors the pw.x
                           input file.

                           If you want to assign DFT+U and spin-polarization, you need to specify it on your own.

                           In AiiDA, there is a very efficient way to specify :code:`hubbard_u`,
                           :code:`starting_magnetization` and :code:`starting_ns_eigenvalue`. Some examples
                           are given below:

                           .. code-block:: python

                                # hubbard_u
                                'SYSTEM': {
                                    'hubbard_u': {
                                        'Fe': 5.0,
                                         'Fe3': 5.0   # for different spins of the same atom, use the
                                                      # newStructure function to create the structure
                                    },
                                    'starting_magnetization': {
                                        'Fe': 0.1,
                                        'Fe3': 0.1,
                                    },
                                    'starting_ns_eigenvalue': [
                                         [1, 1, 'Fe', 1.0]  # represents starting_ns_eigenvalue(1, 1, 1) = 1.0
                                        # others are the same, if you want to assign to Fe3, just replace Fe with Fe3.
                                    ]
                                }

    :type add_parameters: python dictionary

    :param del_parameters: (optional, default = {}) The tags to delete; for example, if you do not want a
                           spin-polarized simulation, then 'nspin' needs to be deleted. Same structure as
                           add_parameters.

                           e.g. :code:`{'CONTROL': [key1, key2, key3], 'SYSTEM': [key1, key2, key3]}`
    :type del_parameters: python dictionary object

    :param kpoints: (mandatory) The k-points that you want to use. If the list contains a single list, it is the
                    k-point mesh; if two lists are given, the first is the k-point mesh and the second is its
                    origin. e.g. [[3, 3, 1]] or [[3, 3, 1], [0.5, 0.5, 0.5]]
    :type kpoints: python list object

    :param pseudo_family: (mandatory) The pseudopotential family that you want to use. Make sure that you already have
                          that configured, otherwise an error will occur.
    :type pseudo_family: python string object.

    :param pseudo_dict: (optional, default = {}) Contains the pseudopotential files that we want to use in the
                        simulation. Note that the path of each pseudopotential file must be absolute.

                        e.g.

                        .. code-block:: python

                            pseudo_dict = {
                                'Fe': UpfData(absolute_path),
                                'Fe3': UpfData(absolute_path)
                            }
    :type pseudo_dict: python dictionary object.

    :param cluster_options: (optional, default = {}) The detailed options for the cluster. Different clusters may
                            have different settings. Only the following 3 keys have an effect: (1) resources
                            (2) account (3) queue_name
    :type cluster_options: python dictionary object

    :param metadata: (mandatory) The dictionary that contains the metadata, for example the label and
                     description, both of which are mandatory.

                     e.g. :code:`{'label':{}, 'description':{}}`
    :type metadata: python dictionary object

    :param settings_dict: (optional, default = {}) Contains additional information for the pw.x calculation,
                          e.g. fixed atoms, retrieving more files, parser options, and command-line options.
    :type settings_dict: python dictionary object

    :returns: uuid of the new CalcJobNode

    """

    code = Code.get_from_string(codename)
    computer = codename.split('@')[1]  # get the name of the cluster
    pw_builder = code.get_builder()

    # pseudopotential
    # check whether pseudo_family and pseudo_dict are set at the same time, if true, then break
    if len(pseudo_family) > 0 and len(pseudo_dict) > 0:
        raise ValueError(
            "You cannot set pseudo_family and pseudo_dict at the same time")
    if len(pseudo_family) == 0 and len(pseudo_dict) == 0:
        raise ValueError(
            "You need to specify at least one in pseudo_family or pseudo_dict."
        )

    if len(pseudo_family) != 0:
        pw_builder.pseudos = get_pseudos_from_structure(
            structure, family_name=pseudo_family)
    if len(pseudo_dict) != 0:
        pw_builder.pseudos = pseudo_dict

    # set kpoints
    kpts = KpointsData()
    if len(kpoints) == 1:
        kpts.set_kpoints_mesh(mesh=kpoints[0])
    else:
        kpts.set_kpoints_mesh(mesh=kpoints[0], offset=kpoints[1])

    # parameters
    parameters_default = Dict(dict=pwParameter)

    # add parameters in add_parameters
    parameters_tmp = deepcopy(parameters_default)

    for key, value in add_parameters.items():
        for key2, value2 in value.items():
            parameters_tmp[key][key2] = value2

    # delete parameters in del_parameters
    for key, value in del_parameters.items():
        tmp = parameters_tmp[key]
        for key2 in value:
            if key2 in tmp.keys():
                tmp.pop(key2)

    parameters_default = parameters_tmp

    # set labels and description
    pw_builder.metadata.label = metadata['label']
    pw_builder.metadata.description = metadata['description']

    # set default options for slurm
    pw_builder.metadata.options['resources'] = slurm_options[computer]['qe'][
        'resources']  # in here machine = node
    pw_builder.metadata.options['max_wallclock_seconds'] = slurm_options[
        computer]['qe']['max_wallclock_seconds']  #in here machine = node
    pw_builder.metadata.options['account'] = slurm_options[computer]['qe'][
        'account']  # in here machine = node
    pw_builder.metadata.options['scheduler_stderr'] = slurm_options[computer][
        'qe']['scheduler_stderr']
    pw_builder.metadata.options['queue_name'] = slurm_options[computer]['qe'][
        'queue_name']

    # revised by cluster_options
    if len(cluster_options) > 0:
        if 'resources' in cluster_options.keys():
            pw_builder.metadata.options['resources'] = cluster_options[
                'resources']
        if 'account' in cluster_options.keys():
            pw_builder.metadata.options['account'] = cluster_options['account']
        if 'queue_name' in cluster_options.keys():
            pw_builder.metadata.options['queue_name'] = cluster_options[
                'queue_name']

    # initialize the settings_dict
    if len(settings_dict) == 0:
        settings_dict['cmdline'] = ['-nk', '4']

    # get atomic occupations
    if 'lda_plus_u' in parameters_default['SYSTEM']:
        if parameters_default['SYSTEM']['lda_plus_u']:
            settings_dict['parser_options'] = {
                'parse_atomic_occupations': True
            }

    # launch the simulation
    pw_builder.structure = structure
    pw_builder.kpoints = kpts
    pw_builder.parameters = parameters_default
    pw_builder.settings = Dict(dict=settings_dict)
    calc = submit(pw_builder)

    return calc.uuid
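A hedged usage sketch for qePwOriginalSubmit; the code label, structure uuid, pseudopotential family and metadata below are placeholders for your own setup, not defaults of the function:

from aiida.orm import load_node

structure = load_node('<uuid-of-structure>')  # an existing StructureData node (placeholder uuid)

uuid = qePwOriginalSubmit(
    codename='pw@my_cluster',           # assumed 'code@computer' label
    structure=structure,
    kpoints=[[3, 3, 1]],                # mesh only; add a second list for an offset
    pseudo_family='SSSP_efficiency',    # assumed family, must be configured beforehand
    metadata={'label': 'scf test', 'description': 'first scf run'})
print('Submitted CalcJob with uuid', uuid)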
Code example #17
File: submit.py  Project: hezhengda/hzdplugins
def qePwContinueSubmit(uuid,
                       pseudo_family,
                       pseudo_dict={},
                       codename='',
                       parent_folder=True,
                       add_parameters={},
                       del_parameters={},
                       kpoints=[],
                       cluster_options={},
                       metadata={},
                       settings_dict={}):
    """

    `qePwContinueSubmit` will continue a simulation with similar or modified input parameters. All the parameters are
    listed in the kwargs.

    :param uuid: (mandatory) The uuid of previous calculation. We will start our calculation from there. Because uuid
                 is the unique identification number for each CalcJobNode

                    **Notice**: The uuid must be in the results dictionary; if not, the program will raise a
                    KeyError. If you are testing, you could use assignValue to quickly create a dictionary that
                    contains the uuid that you want to continue.
    :type uuid: python string object

    :param pseudo_family: (mandatory) The pseudopotential family that you want to use. Make sure that you already have
                          that configured, otherwise an error will occur. This is mandatory.
    :type pseudo_family: python string object

    :param pseudo_dict: (optional, default = {}) Which contains the pseudopotential files that we want to use in the
                        simulation.
    :type pseudo_dict: python dictionary object

    :param codename: (optional, default = '') The code for pw.x that you want to use. To reuse the code from the
                     previous calculation, pass an empty string.
    :type codename: python string object

    :param parent_folder: (optional, default = True) If parent_folder is True, then the calculation will start with the
                          output files from previous calculations.
    :type parent_folder: python boolean object

    :param add_parameters: (optional, default = {}) The parameters that you want to set. The dictionary can be
                           incomplete, because the function provides a default set of parameters that covers most
                           cases; anything you pass here overrides those defaults. The format mirrors the pw.x
                           input file.

                           If you want to assign DFT+U and spin-polarization, you need to specify it on your own.

                           e.g. :code:`{'CONTROL':{}, 'SYSTEM':{}}`

                           **Notice**: more options are described in qePwOriginalSubmit. In qePwContinueSubmit,
                           we assume that the user wants to restart from previously converged wave functions and
                           charge density, so we set ['CONTROL']['restart_mode'] = 'restart',
                           ['ELECTRONS']['startingwfc'] = 'file' and ['ELECTRONS']['startingpot'] = 'file'.
    :type add_parameters: python dictionary object

    :param del_parameters: (optional, default = {}) The tags to delete; for example, if you do not want a
                           spin-polarized simulation, then 'nspin' needs to be deleted. Same structure as
                           add_parameters.

                           e.g. :code:`{'CONTROL': [key1, key2, key3], 'SYSTEM': [key1, key2, key3]}`
    :type del_parameters: python dictionary object

    :param kpoints: (optional, default = []) The k-points that you want to use; to keep the k-points from the
                    previous calculation, pass an empty list :code:`[]`. If the list contains a single list, it
                    is the k-point mesh; if two lists are given, the first is the k-point mesh and the second is
                    its origin. e.g. [[3, 3, 1]] or [[3, 3, 1], [0.5, 0.5, 0.5]]
    :type kpoints: python list object

    :param cluster_options: (optional, default = {}) The detailed options for the cluster. Different clusters may
                            have different settings. Only the following 3 keys have an effect: (1) resources
                            (2) account (3) queue_name. If the value is :code:`{}`, previous settings are reused.
    :type cluster_options: python dictionary object

    :param metadata: (optional, default = {}) The dictionary that contains the metadata, for example the label
                     and description, both of which are mandatory. If the value is :code:`{}`, previous settings
                     are reused.
    :type metadata: python dictionary object

    :param settings_dict: (optional, default = {}) Contains additional information for the pw.x calculation,
                          e.g. fixed atoms, retrieving more files, parser options, and command-line options.
                          If the value is :code:`{}`, previous settings are reused.
    :type settings_dict: python dictionary object

    :returns: uuid of the CalcJobNode of the newest calculation.

    """

    node = load_node(uuid=uuid)

    if len(codename) == 0:  # not going to change cluster
        computer = node.computer.label
        restart_builder = node.get_builder_restart()  # get the restart_builder
    else:
        computer = codename.split('@')[1]
        code = Code.get_from_string(codename)
        restart_builder = code.get_builder()

    parameters_tmp = deepcopy(node.inputs.parameters)

    parameters_dict = parameters_tmp.get_dict()
    calc_type = parameters_dict['CONTROL']['calculation']

    # change the parameters (since this is the continuation of the previous calculation)
    parameters_tmp['CONTROL']['restart_mode'] = 'restart'
    parameters_tmp['ELECTRONS'][
        'startingwfc'] = 'file'  # from wave function in aiida.save
    parameters_tmp['ELECTRONS'][
        'startingpot'] = 'file'  # from charge density in aiida.save

    if calc_type == 'relax' or calc_type == 'vc-relax':
        structure = node.outputs.output_structure
    elif calc_type == 'scf' or calc_type == 'nscf':
        structure = node.inputs.structure
    else:
        raise ValueError("Unsupported calculation type: {}".format(calc_type))

    # assign parameters in add_parameters
    for key, value in add_parameters.items():
        for key2, value2 in value.items():
            parameters_tmp[key][key2] = value2

    # delete parameters in del_parameters
    for key, value in del_parameters.items():
        tmp = parameters_tmp[key]
        for key2 in value:
            if key2 in tmp.keys():
                tmp.pop(key2)

    parameters_default = parameters_tmp

    # reset the kpoints
    if len(kpoints) > 0:
        kpts = KpointsData()
        if len(kpoints) == 1:
            kpts.set_kpoints_mesh(mesh=kpoints[0])
        else:
            kpts.set_kpoints_mesh(mesh=kpoints[0], offset=kpoints[1])
    else:
        kpts = node.inputs.kpoints

    # pseudopotential
    # check whether pseudo_family and pseudo_dict are set at the same time, if true, then break
    if len(pseudo_family) > 0 and len(pseudo_dict) > 0:
        raise ValueError(
            "You cannot set pseudo_family and pseudo_dict at the same time")
    if len(pseudo_family) == 0 and len(pseudo_dict) == 0:
        raise ValueError(
            "You need to specify at least one in pseudo_family or pseudo_dict."
        )

    if len(pseudo_family) != 0:
        restart_builder.pseudos = get_pseudos_from_structure(
            structure, family_name=pseudo_family)
    if len(pseudo_dict) != 0:
        restart_builder.pseudos = pseudo_dict

    # set default options for slurm
    restart_builder.metadata.options['resources'] = slurm_options[computer][
        'qe']['resources']  # in here machine = node
    restart_builder.metadata.options['max_wallclock_seconds'] = slurm_options[
        computer]['qe']['max_wallclock_seconds']  # in here machine = node
    restart_builder.metadata.options['account'] = slurm_options[computer][
        'qe']['account']  # in here machine = node
    restart_builder.metadata.options['scheduler_stderr'] = slurm_options[
        computer]['qe']['scheduler_stderr']
    restart_builder.metadata.options['queue_name'] = slurm_options[computer][
        'qe']['queue_name']

    # reset cluster_options:
    if len(cluster_options) > 0:
        if 'resources' in cluster_options.keys():
            restart_builder.metadata.options['resources'] = cluster_options[
                'resources']
        if 'account' in cluster_options.keys():
            restart_builder.metadata.options['account'] = cluster_options[
                'account']
        if 'queue_name' in cluster_options.keys():
            restart_builder.metadata.options['queue_name'] = cluster_options[
                'queue_name']

    # reset metadata
    if len(metadata) > 0:
        if 'label' in metadata.keys():
            restart_builder.metadata.label = metadata['label']
        else:
            restart_builder.metadata.label = node.label

        if 'description' in metadata.keys():
            restart_builder.metadata.description = metadata['description']
        else:
            restart_builder.metadata.description = node.description
    else:
        restart_builder.metadata.label = node.label
        restart_builder.metadata.description = node.description

    # assign the parent_folder
    if parent_folder:
        restart_builder.parent_folder = node.outputs.remote_folder

    # set settings_dict
    if len(settings_dict) == 0:
        settings_dict = node.inputs.settings.get_dict()

    # submit the calculation
    restart_builder.structure = structure
    restart_builder.kpoints = kpts
    restart_builder.parameters = parameters_default
    restart_builder.settings = Dict(dict=settings_dict)
    calc = submit(restart_builder)

    return calc.uuid
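A hedged sketch of a restart with qePwContinueSubmit; the uuid is a placeholder and the parameter override is illustrative:

new_uuid = qePwContinueSubmit(
    uuid='<uuid-of-previous-calcjob>',             # placeholder
    pseudo_family='SSSP_efficiency',               # assumed family name
    add_parameters={'SYSTEM': {'degauss': 0.02}})  # illustrative override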
Code example #18
            num_wann=14,
            num_bands=36,
            dis_num_iter=100,
            num_iter=0,
            spinors=True,
            # exclude_bands=range(1, )
        )
    )
    # Choose the Wannier90 trial orbitals
    builder.wannier_projections = orm.List(
        list=['In : s; pz; px; py', 'Sb : pz; px; py']
    )
    # Set the resource requirements for the Wannier90 run
    builder.wannier.metadata = METADATA_WANNIER

    # Set the symmetry file
    builder.symmetries = orm.SinglefileData(
        file=os.path.abspath('inputs/symmetries.hdf5')
    )

    # Pick the relevant bands from the reference calculation
    builder.slice_reference_bands = orm.List(list=list(range(12, 26)))

    return builder


if __name__ == '__main__':
    builder = create_builder()
    node = submit(builder)
    print('Submitted workflow with pk={}'.format(node.pk))
Code example #19
File: submit.py  Project: hezhengda/hzdplugins
def phOriginalSubmit(uuid,
                     codename,
                     natlist,
                     qpoints=[[0.0, 0.0, 0.0]],
                     add_parameters={},
                     del_parameters={},
                     metadata={},
                     cluster_options={}):
    """

    :code:`phOriginalSubmit` submits a ph.x simulation for vibrational frequency analysis. It must follow an nscf simulation.

    :param uuid: (mandatory) The uuid of previous calculation. We will start our calculation from there. Because uuid
                 is the unique identification number for each CalcJobNode
    :type uuid: python string object

    :param codename: (mandatory) The code for ph.x that you want to use.
    :type codename: python string object

    :param natlist: (mandatory) The atoms on which to perform the vibrational frequency analysis.
    :type natlist: python list object

    :param qpoints: (optional, default = [[0.0, 0.0, 0.0]]) Like k-points, but used when calculating
                    vibrational frequencies.
    :type qpoints: python list object

    :param add_parameters: (optional, default = {}) The parameters that you want to set. The dictionary can be
                           incomplete, because the function provides a default set of parameters that covers most
                           cases; anything you pass here overrides those defaults. The format mirrors the ph.x
                           input file.

                           e.g. :code:`{'INPUTPH': {}}`
    :type add_parameters: python dictionary object

    :param del_parameters: (optional, default = {}) The tags to delete; for example, if you do not want a
                           spin-polarized simulation, then 'nspin' needs to be deleted. Same structure as
                           add_parameters.

                           e.g. :code:`{'INPUTPH': [key1, key2, key3]}`
    :type del_parameters: python dictionary object

    :param metadata: (optional, default = {}) The dictionary that contains the metadata, for example the label
                     and description, both of which are mandatory.
    :type metadata: python dictionary object

    :param cluster_options: (optional, default = {}) The detailed options for the cluster. Different clusters may
                            have different settings. Only the following 3 keys have an effect: (1) resources
                            (2) account (3) queue_name
    :type cluster_options: python dictionary object

    :returns: uuid of the CalcJobNode object of the newest calculation.


    """

    node = load_node(uuid=uuid)

    # check whether it is an nscf simulation
    if node.inputs.parameters.get_dict()['CONTROL']['calculation'] != 'nscf':
        raise ValueError(
            "You need to provide an nscf simulation with higher k-points.")

    computer = codename.split('@')[1]
    code = Code.get_from_string(codename)
    ph_builder = code.get_builder()

    # parameters
    ph_parameter = Dict(dict=phParameter)

    # add parameters in add_parameters
    for key, value in add_parameters.items():
        for key2, value2 in value.items():
            ph_parameter[key][key2] = value2

    # delete parameters in del_parameters
    for key, value in del_parameters.items():
        tmp = ph_parameter[key]
        for key2 in value:
            if key2 in tmp.keys():
                tmp.pop(key2)

    # set qpoints
    qpts = KpointsData()
    if len(qpoints) == 1:
        qpts.set_kpoints_mesh(mesh=qpoints[0])
    else:
        qpts.set_kpoints_mesh(mesh=qpoints[0], offset=qpoints[1])

    # set default options for slurm
    # set first, then modify
    ph_builder.metadata.options['resources'] = slurm_options[computer]['ph'][
        'resources']  # in here machine = node
    ph_builder.metadata.options['max_wallclock_seconds'] = slurm_options[
        computer]['ph']['max_wallclock_seconds']  # in here machine = node
    ph_builder.metadata.options['account'] = slurm_options[computer]['ph'][
        'account']  # in here machine = node
    ph_builder.metadata.options['scheduler_stderr'] = slurm_options[computer][
        'ph']['scheduler_stderr']
    ph_builder.metadata.options['queue_name'] = slurm_options[computer]['ph'][
        'queue_name']

    # reset cluster_options:
    if len(cluster_options) > 0:
        if 'resources' in cluster_options.keys():
            ph_builder.metadata.options['resources'] = cluster_options[
                'resources']
        if 'account' in cluster_options.keys():
            ph_builder.metadata.options['account'] = cluster_options['account']
        if 'queue_name' in cluster_options.keys():
            ph_builder.metadata.options['queue_name'] = cluster_options[
                'queue_name']

    ph_builder.parameters = ph_parameter  # already a Dict node; do not wrap it again
    ph_builder.parent_folder = node.outputs.remote_folder
    ph_builder.metadata.label = metadata['label']
    ph_builder.metadata.description = metadata['description']
    ph_builder.qpoints = qpts

    calc = submit(ph_builder)

    return calc.uuid
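A hedged sketch for phOriginalSubmit; it assumes the uuid points to a finished nscf calculation (which the function checks) and uses placeholder labels:

ph_uuid = phOriginalSubmit(
    uuid='<uuid-of-nscf-calcjob>',      # placeholder
    codename='ph@my_cluster',           # assumed ph.x code label
    natlist=[1, 2],                     # atoms selected for the analysis
    metadata={'label': 'ph run', 'description': 'vibrational frequencies'})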
Code example #20
# Load the codes; they have to be set up in your database.
fleur_label = 'fleur@localhost'
fleur_code = Code.get_from_string(fleur_label)

### Create wf_parameters (optional) and options
wf_para = Dict(dict={'fleur_runmax': 4, 'density_criterion': 0.000001, 'serial': False})

options = Dict(dict={'resources': {'num_machines': 1}, 'queue_name': '', 'max_wallclock_seconds': 60 * 60})

# load a fleurinp data object from a previous scf_wc
################################
# 3. submit the workchain with its inputs.

inputs = {}
inputs['wf_parameters'] = wf_para
inputs['fleurinp'] = fleurinp
inputs['fleur'] = fleur_code
inputs['description'] = 'test fleur_dos_wc run on W'
inputs['label'] = 'dos test '
inputs['options'] = options

# submit workchain to the daemon
# Notice that the nodes we created before are not yet stored in the database,
# but AiiDA will store them automatically when we launch the workchain.
# To reuse nodes it might be a good idea to store them by hand first and then load them.
res = submit(fleur_dos_wc, **inputs)

# You can also run the workflow in the python interpreter as blocking
#res = run(fleur_dos_wc, **inputs)
Code example #21
        },
        'comp': {
            'kmax': 3.0,
        },
        'kpt': {
            'nkpt': 100,
        }
    })

################################
# 3. submit the workchain with its inputs.

inputs = {}
inputs['wf_parameters'] = wf_para
inputs['structure'] = structure
inputs['calc_parameters'] = parameters
inputs['fleur'] = fleur_code
inputs['inpgen'] = inpgen_code
inputs['description'] = 'test fleur_scf_wc run on W'
inputs['label'] = 'test on W'
inputs['options'] = options

# submit workchain to the daemon
# Notice that the nodes we created before are not yet stored in the database,
# but AiiDA will store them automatically when we launch the workchain.
# To reuse nodes it might be a good idea to store them by hand first and then load them.
res = submit(FleurScfWorkChain, **inputs)

# You can also run the workflow in the python interpreter as blocking
#res = run(fleur_scf_wc, **inputs)