# Example 1
def launch(code, calculation, kpoints, max_num_machines, max_wallclock_seconds,
           daemon, clean_workdir):
    """
    Run the PhBaseWorkChain for a previously completed PwCalculation

    The parent calculation's remote folder is used as the ``parent_folder``
    input of the workchain. With ``daemon`` the workchain is submitted to the
    daemon, otherwise it runs in the current interpreter.
    """
    from aiida.orm.data.base import Bool
    from aiida.orm.data.parameter import ParameterData
    from aiida.orm.utils import WorkflowFactory
    from aiida.work.launch import run, submit
    from aiida_quantumespresso.utils.resources import get_default_options

    workchain_class = WorkflowFactory('quantumespresso.ph.base')

    inputs = {
        'code': code,
        'qpoints': kpoints,
        'parent_folder': calculation.out.remote_folder,
        'parameters': ParameterData(dict={'INPUTPH': {}}),
        'options': ParameterData(
            dict=get_default_options(max_num_machines, max_wallclock_seconds)),
    }

    if clean_workdir:
        inputs['clean_workdir'] = Bool(True)

    if not daemon:
        run(workchain_class, **inputs)
        return

    workchain = submit(workchain_class, **inputs)
    click.echo('Submitted {}<{}> to the daemon'.format(
        workchain_class.__name__, workchain.pk))
# Example 2
def launch(code, structure, pseudo_family, daemon, protocol):
    """
    Run the PwBandStructureWorkChain for a given input structure to compute
    the band structure for the relaxed structure
    """
    from aiida.orm.data.base import Str
    from aiida.orm.utils import WorkflowFactory
    from aiida.work.launch import run, submit

    workchain_cls = WorkflowFactory('quantumespresso.pw.band_structure')

    inputs = dict(
        code=code,
        structure=structure,
        pseudo_family=Str(pseudo_family),
        protocol=Str(protocol),
    )

    if not daemon:
        run(workchain_cls, **inputs)
        return

    node = submit(workchain_cls, **inputs)
    click.echo('Submitted {}<{}> to the daemon'.format(
        workchain_cls.__name__, node.pk))
# Example 3
def main():
    """Build a 'wait.wait' calculation via its builder, submit it, and print the pk."""
    builder = CalculationFactory('wait.wait').get_builder()
    builder.code = Code.get_from_string('wait')
    # Single machine, no MPI.
    builder.options = {'resources': {'num_machines': 1}, 'withmpi': False}

    print(submit(builder).pk)
# Example 4
 def launch_calculations(self):
     """Submit one SCF process per scale factor and insert a barrier for each."""
     # Map scale factor (stringified) -> pid of the submitted process.
     self.ctx.launched = {}
     for s in (0.96, 0.98, 1., 1.02, 1.04):
         # rescale / generate_scf_input_params are defined elsewhere in the
         # project; presumably they build SCF inputs for the scaled cell —
         # TODO confirm.
         scaled = rescale(self.inputs.structure, Float(s))
         inputs = generate_scf_input_params(scaled, self.inputs.code,
                                            self.inputs.pseudo_family)
         pid = submit(Proc, **inputs)
         self.ctx.launched[str(s)] = pid
         self.insert_barrier(Calc(pid))
# Example 5
def launch_calculation(code, counter, inputval):
    """
    Launch calculations to the daemon through the Process layer

    :param code: the code, forwarded to create_calculation_process
    :param counter: integer label used only in the progress message
    :param inputval: input value forwarded to create_calculation_process
    :return: tuple of the submitted calculation and its expected result
    """
    process, inputs, expected_result = create_calculation_process(
        code=code, inputval=inputval)
    calc = submit(process, **inputs)
    # Use the print() call form (valid on both Python 2 and 3) instead of the
    # Python 2-only print statement, for consistency with the py3-style
    # snippets elsewhere in this file.
    print("[{}] launched calculation {}, pk={}".format(counter, calc.uuid,
                                                       calc.dbnode.pk))
    return calc, expected_result
def launch(code, calculation, kpoints, max_num_machines, max_wallclock_seconds,
           daemon):
    """
    Run a HpCalculation for a previously completed PwCalculation

    The parent calculation is validated first and its remote folder is passed
    as the ``parent_folder`` input. With ``daemon`` the process is submitted
    to the daemon; otherwise it is run in the current interpreter. In both
    cases the node is reloaded by pk and its state and outputs are printed.
    """
    from aiida.orm import load_node
    from aiida.orm.data.parameter import ParameterData
    from aiida.orm.data.upf import get_pseudos_from_structure
    from aiida.orm.utils import CalculationFactory
    from aiida.work.launch import run_get_pid, submit
    from aiida_quantumespresso.utils.resources import get_default_options
    from aiida_quantumespresso_hp.utils.validation import validate_parent_calculation

    HpCalculation = CalculationFactory('quantumespresso.hp')

    # Surface validation problems as CLI parameter errors.
    try:
        validate_parent_calculation(calculation)
    except ValueError as exception:
        raise click.BadParameter(
            'invalid parent calculation: {}'.format(exception))

    parameters = {'INPUTHP': {}}

    inputs = {
        'code': code,
        'qpoints': kpoints,
        'parameters': ParameterData(dict=parameters),
        'parent_folder': calculation.out.remote_folder,
        'options': get_default_options(max_num_machines,
                                       max_wallclock_seconds),
    }

    click.echo('Running a hp.x calculation ... ')

    process = HpCalculation.process()

    if daemon:
        calculation = submit(process, **inputs)
        pk = calculation.pk
        click.echo('Submitted {}<{}> to the daemon'.format(
            HpCalculation.__name__, calculation.pk))
    else:
        results, pk = run_get_pid(process, **inputs)

    # NOTE(review): in the daemon branch the node is reloaded immediately
    # after submission, so the state printed below is likely not the final
    # one — confirm whether that is intended.
    calculation = load_node(pk)

    click.echo('HpCalculation<{}> terminated with state: {}'.format(
        pk, calculation.get_state()))
    click.echo('\n{link:25s} {node}'.format(link='Output link',
                                            node='Node pk and type'))
    click.echo('{s}'.format(s='-' * 60))
    for link, node in sorted(calculation.get_outputs(also_labels=True)):
        click.echo('{:25s} <{}> {}'.format(link, node.pk,
                                           node.__class__.__name__))
# Example 7
    def launch_calculations(self):
        """Submit one SCF process per scale factor.

        NOTE(review): the first ``return`` below exits with hard-coded pk
        1234, which makes the final ``return ToContext(**launched)``
        unreachable and leaves the ``launched`` dict built but unused — this
        looks like debugging leftover; confirm which return is intended.
        """
        launched = {}
        for s in (0.96, 0.98, 1., 1.02, 1.04):
            scaled = rescale(self.inputs.structure, Float(s))
            inputs = generate_scf_input_params(scaled, self.inputs.code,
                                               self.inputs.pseudo_family)
            pid = submit(Proc, **inputs)
            launched["s_" + str(s)] = pid

        return ToContext(r=Calc(1234), s=Outputs(1234))

        # Unreachable (see NOTE above).
        return ToContext(**launched)
# Example 8
 def run_sub1(self):
     """
     Submitting this subwork chain is still buggy somehow...

     Submits ``sub_dummy_wc`` three times with long delays between
     submissions and gathers the results into the context.
     """
     print('run_sub1')
     n = 3
     allres = {}
     # NOTE(review): the sleep calls below look like workarounds for a
     # daemon timing issue rather than required logic — confirm.
     time.sleep(35)
     for i in range(0,n):
         inputs_sub = Str('This is wonderful 1.{}'.format(i))
         res = submit(sub_dummy_wc, str_display=inputs_sub)
         label = 'sub1_run{}'.format(i)
         allres[label] = res
         time.sleep(65)
         self.get_input()
     return ToContext(**allres)
# Example 9
def launch(code, calculation, kpoints, max_num_machines, max_wallclock_seconds,
           daemon, parallelize_atoms, clean_workdir=False):
    """
    Run the HpWorkChain for a completed Hubbard PwCalculation

    :param clean_workdir: whether the workchain should clean the work
        directories; forwarded as the ``clean_workdir`` input. Fix: the
        original body referenced ``clean_workdir`` without it being defined
        anywhere in scope, which raised a NameError at runtime; it is now an
        explicit keyword argument defaulting to False (backward compatible).
    """
    from aiida.orm.data.base import Bool
    from aiida.orm.data.parameter import ParameterData
    from aiida.orm.utils import WorkflowFactory
    from aiida.work.launch import run, submit
    from aiida_quantumespresso.utils.resources import get_default_options

    HpWorkChain = WorkflowFactory('quantumespresso.hp.main')

    parameters = {'INPUTHP': {}}

    inputs = {
        'code': code,
        'parent_calculation': calculation,
        'qpoints': kpoints,
        'parameters': ParameterData(dict=parameters),
        'options': ParameterData(
            dict=get_default_options(max_num_machines, max_wallclock_seconds)),
        'clean_workdir': Bool(clean_workdir),
        'parallelize_atoms': Bool(parallelize_atoms),
    }

    if daemon:
        workchain = submit(HpWorkChain, **inputs)
        click.echo('Submitted {}<{}> to the daemon'.format(
            HpWorkChain.__name__, workchain.pk))
    else:
        run(HpWorkChain, **inputs)
from aiida.orm.data.int import Int
from aiida.work.launch import submit
from aiida.work.workchain import WorkChain

# Example: submitting a workchain class directly with keyword inputs.
class AddAndMultiplyWorkChain(WorkChain):
    # Body elided in this example; a real workchain defines its spec/outline.
    ...

# The return value of submit() is bound to `node`.
node = submit(AddAndMultiplyWorkChain, a=Int(1), b=Int(2), c=Int(3))
def launch(code_pw, code_hp, structure, pseudo_family, kpoints, qpoints,
           ecutwfc, ecutrho, hubbard_u, starting_magnetization,
           automatic_parallelization, clean_workdir, max_num_machines,
           max_wallclock_seconds, daemon, meta_convergence, is_insulator,
           parallelize_atoms):
    """
    Run the SelfConsistentHubbardWorkChain for a given input structure

    NOTE(review): the ``automatic_parallelization`` and ``clean_workdir``
    arguments are accepted but never referenced in this function body —
    confirm whether they should be forwarded to the workchain inputs.
    """
    from aiida.orm.data.base import Bool, Str
    from aiida.orm.data.parameter import ParameterData
    from aiida.orm.utils import WorkflowFactory
    from aiida.work.launch import run, submit
    from aiida_quantumespresso.utils.resources import get_default_options

    SelfConsistentHubbardWorkChain = WorkflowFactory(
        'quantumespresso.hp.hubbard')

    parameters = {
        'SYSTEM': {
            'ecutwfc': ecutwfc,
            'ecutrho': ecutrho,
            'lda_plus_u': True,
        },
    }

    parameters_hp = {'INPUTHP': {}}

    options = get_default_options(max_num_machines, max_wallclock_seconds)

    # Split the (kind, value) pairs into the kind list and a kind->value map.
    structure_kinds = structure.get_kind_names()
    hubbard_u_kinds = [kind for kind, value in hubbard_u]
    hubbard_u = {kind: value for kind, value in hubbard_u}

    if not set(hubbard_u_kinds).issubset(structure_kinds):
        raise click.BadParameter(
            'the kinds in the specified starting Hubbard U values {} is not a strict subset of the kinds in the structure {}'
            .format(hubbard_u_kinds, structure_kinds),
            param_hint='hubbard_u')

    if starting_magnetization:

        # Magnetization requires a spin-polarized calculation.
        parameters['SYSTEM']['nspin'] = 2

        for kind, magnetization in starting_magnetization:

            if kind not in structure_kinds:
                raise click.BadParameter(
                    'the provided structure does not contain the kind {}'.
                    format(kind),
                    param_hint='starting_magnetization')

            parameters['SYSTEM'].setdefault('starting_magnetization',
                                            {})[kind] = magnetization

    # Nested input namespaces: 'scf' for pw.x, 'hp' for hp.x.
    inputs = {
        'structure': structure,
        'hubbard_u': ParameterData(dict=hubbard_u),
        'meta_convergence': Bool(meta_convergence),
        'is_insulator': Bool(is_insulator),
        'scf': {
            'code': code_pw,
            'pseudo_family': Str(pseudo_family),
            'kpoints': kpoints,
            'parameters': ParameterData(dict=parameters),
            'options': ParameterData(dict=options)
        },
        'hp': {
            'code': code_hp,
            'qpoints': qpoints,
            'parameters': ParameterData(dict=parameters_hp),
            'options': ParameterData(dict=options),
            'parallelize_atoms': Bool(parallelize_atoms),
        }
    }

    if daemon:
        workchain = submit(SelfConsistentHubbardWorkChain, **inputs)
        click.echo('Submitted {}<{}> to the daemon'.format(
            SelfConsistentHubbardWorkChain.__name__, workchain.pk))
    else:
        run(SelfConsistentHubbardWorkChain, **inputs)
# Example 12
def launch(code, structure, pseudo_family, kpoints, max_num_machines,
           max_wallclock_seconds, daemon, ecutwfc, ecutrho, hubbard_u,
           hubbard_v, hubbard_file_pk, starting_magnetization, smearing,
           automatic_parallelization, clean_workdir):
    """
    Run the PwBaseWorkChain for a given input structure

    The validation helpers mutate ``parameters`` in place; any validation
    failure is surfaced as a CLI parameter error. With ``daemon`` the
    workchain is submitted to the daemon, otherwise it runs in the current
    interpreter.
    """
    from aiida.orm.data.base import Bool, Str
    from aiida.orm.data.parameter import ParameterData
    from aiida.orm.utils import WorkflowFactory
    from aiida.work.launch import run, submit
    from aiida_quantumespresso.utils.resources import get_default_options, get_automatic_parallelization_options

    PwBaseWorkChain = WorkflowFactory('quantumespresso.pw.base')

    parameters = {
        'SYSTEM': {
            'ecutwfc': ecutwfc,
            'ecutrho': ecutrho,
        },
    }

    # Fix: use str(exception) instead of the Python 2-only
    # `exception.message` attribute, which does not exist on Python 3.
    # The return value (a hubbard file node) is not used by this workchain,
    # so it is deliberately not bound.
    try:
        validate.validate_hubbard_parameters(
            structure, parameters, hubbard_u, hubbard_v, hubbard_file_pk)
    except ValueError as exception:
        raise click.BadParameter(str(exception))

    try:
        validate.validate_starting_magnetization(structure, parameters,
                                                 starting_magnetization)
    except ValueError as exception:
        raise click.BadParameter(str(exception))

    try:
        validate.validate_smearing(parameters, smearing)
    except ValueError as exception:
        raise click.BadParameter(str(exception))

    inputs = {
        'code': code,
        'structure': structure,
        'pseudo_family': Str(pseudo_family),
        'kpoints': kpoints,
        'parameters': ParameterData(dict=parameters),
    }

    # Either let the workchain determine the parallelization automatically or
    # pass explicit default scheduler options.
    if automatic_parallelization:
        automatic_parallelization = get_automatic_parallelization_options(
            max_num_machines, max_wallclock_seconds)
        inputs['automatic_parallelization'] = ParameterData(
            dict=automatic_parallelization)
    else:
        options = get_default_options(max_num_machines, max_wallclock_seconds)
        inputs['options'] = ParameterData(dict=options)

    if clean_workdir:
        inputs['clean_workdir'] = Bool(True)

    if daemon:
        workchain = submit(PwBaseWorkChain, **inputs)
        click.echo('Submitted {}<{}> to the daemon'.format(
            PwBaseWorkChain.__name__, workchain.pk))
    else:
        run(PwBaseWorkChain, **inputs)
# Example 13
def launch(expression, code, use_calculations, use_workfunctions, sleep,
           timeout, modulo, dry_run, daemon):
    """
    Evaluate the expression in Reverse Polish Notation in both a normal way and by procedurally generating
    a workchain that encodes the sequence of operators and gets the stack of operands as an input. Multiplications
    are modelled by a 'while_' construct and addition will be done performed by an addition or a subtraction,
    depending on the sign, branched by the 'if_' construct. Powers will be simulated by nested workchains.

    The script will generate a file containing the workchains with the procedurally generated outlines. Unless the
    dry run option is specified, the workchain will also be run and its output compared with the normal evaluation
    of the expression. If the two values match the script will be exited with a zero exit status, otherwise a non-zero
    exit status will be returned, indicating failure.

    In addition to normal rules of Reverse Polish Notation, the following restrictions to the expression apply:

     \b
     * Only integers are supported
     * Only the addition, multiplication and power operators (+, * and ^, respectively) are supported
     * Every intermediate result should be an integer, so no raising a number to a negative power
     * Operands for power operator are limited to the range [1, 3]
     * Expression has only a single sequence of numbers followed by single continuous sequence of operators

    If no expression is specified, a random one will be generated that adheres to these rules
    """
    import importlib
    import sys
    import time
    import uuid
    from aiida.orm import Code
    from aiida.orm.data.int import Int
    from aiida.orm.data.str import Str
    from aiida.work.launch import run_get_node, submit
    from expression import generate, validate, evaluate
    from workchain import generate_outlines, format_outlines, write_workchain

    if use_calculations and not isinstance(code, Code):
        raise click.BadParameter(
            'if you specify the -C flag, you have to specify a code as well')

    # Generate a random expression when none was given.
    if expression is None:
        expression = generate()

    valid, error = validate(expression)

    if not valid:
        click.echo("the expression '{}' is invalid: {}".format(
            expression, error))
        sys.exit(1)

    # Write a uniquely named module containing the procedurally generated
    # workchain so it can be imported below.
    filename = 'polish_{}.py'.format(str(uuid.uuid4().hex))
    evaluated = evaluate(expression, modulo)
    outlines, stack = generate_outlines(expression)
    outlines_string = format_outlines(outlines, use_calculations,
                                      use_workfunctions)
    workchain_filename = write_workchain(outlines_string, filename=filename)

    click.echo('Expression: {}'.format(expression))

    if not dry_run:
        try:
            workchain_module = 'polish_workchains.{}'.format(
                filename.replace('.py', ''))
            workchains = importlib.import_module(workchain_module)
        except ImportError:
            click.echo(
                'could not import the {} module'.format(workchain_module))
            sys.exit(1)

        inputs = {'modulo': Int(modulo), 'operands': Str(' '.join(stack))}

        if code:
            inputs['code'] = code

        if daemon:
            # Submit to the daemon and poll until the workchain terminates or
            # the timeout elapses.
            workchain = submit(workchains.Polish00WorkChain, **inputs)
            start_time = time.time()
            timed_out = True

            while time.time() - start_time < timeout:
                time.sleep(sleep)

                if workchain.is_terminated:
                    timed_out = False
                    break

            if timed_out:
                click.secho('Failed: ', fg='red', bold=True, nl=False)
                click.secho(
                    'the workchain<{}> did not finish in time and the operation timed out'
                    .format(workchain.pk),
                    bold=True)
                sys.exit(1)

            try:
                result = workchain.out.result
            except AttributeError:
                click.secho('Failed: ', fg='red', bold=True, nl=False)
                click.secho(
                    'the workchain<{}> did not return a result output node'.
                    format(workchain.pk),
                    bold=True)
                sys.exit(1)

        else:
            results, workchain = run_get_node(workchains.Polish00WorkChain,
                                              **inputs)
            result = results['result']

    click.echo('Evaluated : {}'.format(evaluated))

    if not dry_run:
        click.echo('Workchain : {} <{}>'.format(result, workchain.pk))

        # The exit status encodes whether the workchain reproduced the
        # directly evaluated value.
        if result != evaluated:
            click.secho('Failed: ', fg='red', bold=True, nl=False)
            click.secho(
                'the workchain result did not match the evaluated value',
                bold=True)
            sys.exit(1)
        else:
            click.secho('Success: ', fg='green', bold=True, nl=False)
            click.secho(
                'the workchain accurately reproduced the evaluated value',
                bold=True)
            sys.exit(0)
# Example 14
def main():
    """
    Integration driver (Python 2): submit calculations and workchains in
    several ways, wait for the daemon to finish them, then validate results
    and exit with a status encoding success (0), timeout (2) or mismatch (3).
    """
    expected_results_calculations = {}
    expected_results_workchains = {}
    code = Code.get_from_string(codename)

    # Submitting the Calculations the old way, creating and storing a JobCalc first and submitting it
    print "Submitting {} old style calculations to the daemon".format(
        number_calculations)
    for counter in range(1, number_calculations + 1):
        inputval = counter
        calc, expected_result = submit_calculation(code=code,
                                                   counter=counter,
                                                   inputval=inputval)
        expected_results_calculations[calc.pk] = expected_result

    # Submitting the Calculations the new way directly through the launchers
    print "Submitting {} new style calculations to the daemon".format(
        number_calculations)
    for counter in range(1, number_calculations + 1):
        inputval = counter
        calc, expected_result = launch_calculation(code=code,
                                                   counter=counter,
                                                   inputval=inputval)
        expected_results_calculations[calc.pk] = expected_result

    # Submitting the Workchains
    print "Submitting {} workchains to the daemon".format(number_workchains)
    for index in range(number_workchains):
        inp = Int(index)
        result, node = run_get_node(NestedWorkChain, inp=inp)
        expected_results_workchains[node.pk] = index

    print "Submitting a workchain with 'submit'."
    builder = NestedWorkChain.get_builder()
    input_val = 4
    builder.inp = Int(input_val)
    proc = submit(builder)
    expected_results_workchains[proc.pk] = input_val

    print "Submitting a workchain with a nested input namespace."
    value = Int(-12)
    # NOTE(review): pk is recorded here but no corresponding entry is added
    # to expected_results_workchains for this submission — confirm intended.
    pk = submit(NestedInputNamespace, foo={'bar': {'baz': value}}).pk

    print "Submitting a workchain with a dynamic non-db input."
    value = [4, 2, 3]
    pk = submit(DynamicNonDbInput, namespace={'input': value}).pk
    expected_results_workchains[pk] = value

    print "Submitting a workchain with a dynamic db input."
    value = 9
    pk = submit(DynamicDbInput, namespace={'input': Int(value)}).pk
    expected_results_workchains[pk] = value

    print "Submitting a workchain with a mixed (db / non-db) dynamic input."
    value_non_db = 3
    value_db = Int(2)
    pk = submit(DynamicMixedInput,
                namespace={
                    'inputs': {
                        'input_non_db': value_non_db,
                        'input_db': value_db
                    }
                }).pk
    expected_results_workchains[pk] = value_non_db + value_db

    print("Submitting the serializing workchain")
    pk = submit(SerializeWorkChain, test=Int).pk
    expected_results_workchains[pk] = ObjectLoader().identify_object(Int)

    print "Submitting the ListEcho workchain."
    list_value = List()
    list_value.extend([1, 2, 3])
    pk = submit(ListEcho, list=list_value).pk
    expected_results_workchains[pk] = list_value

    print "Submitting a WorkChain which contains a workfunction."
    value = Str('workfunction test string')
    pk = submit(WorkFunctionRunnerWorkChain, input=value).pk
    expected_results_workchains[pk] = value

    print "Submitting a WorkChain which contains an InlineCalculation."
    value = Str('test_string')
    pk = submit(InlineCalcRunnerWorkChain, input=value).pk
    expected_results_workchains[pk] = value

    calculation_pks = sorted(expected_results_calculations.keys())
    workchains_pks = sorted(expected_results_workchains.keys())
    pks = calculation_pks + workchains_pks

    print "Wating for end of execution..."
    start_time = time.time()
    exited_with_timeout = True
    while time.time() - start_time < timeout_secs:
        time.sleep(15)  # Wait a few seconds

        # Print some debug info, both for debugging reasons and to avoid
        # that the test machine is shut down because there is no output

        print "#" * 78
        print "####### TIME ELAPSED: {} s".format(time.time() - start_time)
        print "#" * 78
        print "Output of 'verdi calculation list -a':"
        try:
            print subprocess.check_output(
                ["verdi", "calculation", "list", "-a"],
                stderr=subprocess.STDOUT,
            )
        except subprocess.CalledProcessError as e:
            print "Note: the command failed, message: {}".format(e.message)

        print "Output of 'verdi work list':"
        try:
            print subprocess.check_output(
                ['verdi', 'work', 'list', '-a', '-p1'],
                stderr=subprocess.STDOUT,
            )
        except subprocess.CalledProcessError as e:
            print "Note: the command failed, message: {}".format(e.message)

        print "Output of 'verdi daemon status':"
        try:
            print subprocess.check_output(
                ["verdi", "daemon", "status"],
                stderr=subprocess.STDOUT,
            )
        except subprocess.CalledProcessError as e:
            print "Note: the command failed, message: {}".format(e.message)

        if jobs_have_finished(pks):
            print "Calculation terminated its execution"
            exited_with_timeout = False
            break

    if exited_with_timeout:
        print_daemon_log()
        print ""
        print "Timeout!! Calculation did not complete after {} seconds".format(
            timeout_secs)
        sys.exit(2)
    else:
        # create cached calculations -- these should be FINISHED immediately
        cached_calcs = []
        for counter in range(1, number_calculations + 1):
            calc, expected_result = create_cache_calc(code=code,
                                                      counter=counter,
                                                      inputval=counter)
            cached_calcs.append(calc)
            expected_results_calculations[calc.pk] = expected_result
        # new style cached calculations, with 'run'
        with enable_caching():
            for counter in range(1, number_calculations + 1):
                calc, expected_result = run_calculation(code=code,
                                                        counter=counter,
                                                        inputval=counter)
                cached_calcs.append(calc)
                expected_results_calculations[calc.pk] = expected_result

        if (validate_calculations(expected_results_calculations)
                and validate_workchains(expected_results_workchains)
                and validate_cached(cached_calcs)):
            print_daemon_log()
            print ""
            print "OK, all calculations have the expected parsed result"
            sys.exit(0)
        else:
            print_daemon_log()
            print ""
            print "ERROR! Some return values are different from the expected value"
            sys.exit(3)
# Example 15
def launch(code, structure, pseudo_family, kpoints, max_num_machines,
           max_wallclock_seconds, daemon, ecutwfc, ecutrho, hubbard_u,
           hubbard_v, hubbard_file_pk, starting_magnetization, smearing, mode):
    """
    Run a PwCalculation for a given input structure

    :param mode: value used for the CONTROL.calculation input of pw.x
    """
    from aiida.orm.data.parameter import ParameterData
    from aiida.orm.data.upf import get_pseudos_from_structure
    from aiida.orm.utils import CalculationFactory
    from aiida.work.launch import run_get_node, submit
    from aiida_quantumespresso.utils.resources import get_default_options

    PwCalculation = CalculationFactory('quantumespresso.pw')

    parameters = {
        'CONTROL': {
            'calculation': mode,
        },
        'SYSTEM': {
            'ecutwfc': ecutwfc,
            'ecutrho': ecutrho,
        },
    }

    # The validators mutate `parameters` in place; report failures as CLI
    # errors. Fix: use str(exception) instead of the Python 2-only
    # `exception.message` attribute, which does not exist on Python 3.
    try:
        hubbard_file = validate.validate_hubbard_parameters(
            structure, parameters, hubbard_u, hubbard_v, hubbard_file_pk)
    except ValueError as exception:
        raise click.BadParameter(str(exception))

    try:
        validate.validate_starting_magnetization(structure, parameters,
                                                 starting_magnetization)
    except ValueError as exception:
        raise click.BadParameter(str(exception))

    try:
        validate.validate_smearing(parameters, smearing)
    except ValueError as exception:
        raise click.BadParameter(str(exception))

    inputs = {
        'code': code,
        'structure': structure,
        'pseudo': get_pseudos_from_structure(structure, pseudo_family),
        'kpoints': kpoints,
        'parameters': ParameterData(dict=parameters),
        'settings': ParameterData(dict={}),
        'options': get_default_options(max_num_machines,
                                       max_wallclock_seconds),
    }

    # Only pass the hubbard file input when validation produced one.
    if hubbard_file:
        inputs['hubbard_file'] = hubbard_file

    if daemon:
        calculation = submit(PwCalculation, **inputs)
        click.echo('Submitted {}<{}> to the daemon'.format(
            PwCalculation.__name__, calculation.pk))
    else:
        click.echo(
            'Running a pw.x calculation in the {} mode... '.format(mode))
        results, calculation = run_get_node(PwCalculation, **inputs)
        click.echo('PwCalculation<{}> terminated with state: {}'.format(
            calculation.pk, calculation.get_state()))
        click.echo('\n{link:25s} {node}'.format(link='Output link',
                                                node='Node pk and type'))
        click.echo('{s}'.format(s='-' * 60))
        for link, node in sorted(calculation.get_outputs(also_labels=True)):
            click.echo('{:25s} <{}> {}'.format(link, node.pk,
                                               node.__class__.__name__))
from aiida.orm.data.int import Int
from aiida.work.launch import submit
from aiida.work.workchain import WorkChain


# Example: the same submission as above, but via the process builder.
class AddAndMultiplyWorkChain(WorkChain):
    # Body elided in this example; a real workchain defines its spec/outline.
    ...


# Assign each input on the builder, then submit the builder itself.
builder = AddAndMultiplyWorkChain.get_builder()
builder.a = Int(1)
builder.b = Int(2)
builder.c = Int(3)

node = submit(builder)