Example #1
 def inner(pid, timeout=1):
     import time
     from aiida.orm import load_node
     calc = load_node(pid)
     while not calc.is_terminated:
         time.sleep(timeout)
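A minimal usage sketch for the helper above (assuming it is available in the current scope; the pk and poll interval are illustrative, not part of the original example):

from aiida.orm import load_node

SOME_PK = 1234  # hypothetical pk of an already submitted process
inner(SOME_PK, timeout=2)                 # poll every 2 seconds until the process terminates
print(load_node(SOME_PK).process_state)   # terminal state, e.g. finished, excepted or killed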
Example #2
def runner(computer_name, test_set, group_name, potcar_family, dry_run,
           experiment):
    from aiida.orm import Code, Group, load_node
    from aiida.work import submit

    config = {}
    run_info_json = py_path.local('./run_info.json')
    cutoff = 'default'
    if experiment:
        config = read_experiment_yaml(experiment)
        if not computer_name:
            computer_name = config['computer']
        if not group_name:
            group_name = config['group_name']
        if not potcar_family:
            potcar_family = config['potcar_family']
        if 'outfile' in config:
            run_info_json = py_path.local(experiment).dirpath().join(
                config['outfile'])
        test_set = test_set or config.get('test_set', 'perturbed')
        cutoff = config.get('cutoff', 'default')

    cutoff_factor = 1
    if cutoff != 'default':
        cutoff_factor = int(cutoff)

    if not dry_run:
        run_info_json.ensure()
        run_info = json.loads(run_info_json.read()
                              or '{{ "{}": {{ }} }}'.format(computer_name))
    else:
        click.echo('run_info file would be created at {}'.format(
            run_info_json.strpath))

    vasp_proc = calc_cls('vasp.vasp').process()
    inputs = vasp_proc.get_inputs_template()

    computer.set_options(computer=computer_name,
                         options_template=inputs._options)
    inputs.code = Code.get_from_string('vasp@{}'.format(computer_name))
    inputs.settings = data_cls('parameter')(dict=TEST_SETTINGS)

    structures_group_name = PERTURBED_SET_GROUPNAME
    if test_set == 'non_perturbed':
        structures_group_name = UNPERTURBED_SET_GROUPNAME
    structures_group = Group.get(name=structures_group_name)

    if not dry_run:
        calc_group, created = Group.get_or_create(name=group_name)
    else:
        created = not bool(Group.query(name=group_name))
    calc_group_msg = 'Appending to {new_or_not} group {name}.'
    new_or_not = 'new' if created else 'existing'
    click.echo(calc_group_msg.format(new_or_not=new_or_not, name=group_name))

    ## limit structures if given in experiment yaml
    structures = list(structures_group.nodes)
    only_formulae = config.get('only_formulae', None)
    if only_formulae:
        structures = [
            structure for structure in structures
            if structure.get_formula() in only_formulae
        ]

    potcar_map = scf_potcar.POTCAR_MAP

    for structure in structures:

        inputs.structure = structure
        kpoints = data_cls('array.kpoints')()
        kpoints.set_cell_from_structure(structure)
        kpoints.set_kpoints_mesh_from_density(0.15, [0] * 3)
        inputs.kpoints = kpoints

        inputs.potential = data_cls('vasp.potcar').get_potcars_from_structure(
            structure=structure, family_name=potcar_family, mapping=potcar_map)

        ispin, magmom = magnetic_info(structure, potcar_family, potcar_map)
        incar_overrides = {}
        if ispin == 1:
            magnetism_string = "non-spin-polarized"
        elif ispin == 2:
            magnetism_string = "collinear-spin"
            incar_overrides['ispin'] = ispin
        else:
            raise ValueError(
                'unexpected ispin value {}; for non-collinear calculations '
                'set noncolin = True instead'.format(ispin))
        if magmom:
            incar_overrides['magmom'] = magmom

        if cutoff_factor != 1:
            default_enmax = cutoff_from_structure(structure=structure,
                                                  potcar_family=potcar_family,
                                                  mapping=potcar_map)
            incar_overrides['enmax'] = cutoff_factor * default_enmax

        inputs.parameters = scf_incar.get_scf_incar(inputs=inputs,
                                                    overrides=incar_overrides)

        cutoff_msg = 'default'
        if cutoff_factor != 1:
            cutoff_msg = 'cutoff factor: {}'.format(cutoff_factor)

        if not dry_run:
            running_info = submit(vasp_proc, **inputs)
            running_calc = load_node(running_info.pid)
            running_calc.set_extra('magnetism', magnetism_string)
            running_calc.set_extra('cutoff', cutoff_msg)
            calc_group.add_nodes(running_calc)
            run_info[computer_name][inputs.structure.pk] = running_calc.pk
        else:
            click.echo('not submitting {}'.format(structure.get_formula()))
            from pprint import pformat
            click.echo(pformat({k: v for k, v in inputs.items()}))

    if not dry_run:
        with run_info_json.open('w') as run_info_fo:
            json.dump(run_info, run_info_fo)
Example #3
 def test_data_loaded(self):
     """
     Check that the data node is indeed in the DB when calling load_node
     """
     from aiida import orm
     self.assertEqual(orm.load_node(self.data_pk).uuid, self.data.uuid)
Example #4
    def test_pause_play_kill(self):
        """
        Test the pause/play/kill commands
        """
        # pylint: disable=no-member
        from aiida.orm import load_node

        calc = self.runner.submit(test_processes.WaitProcess)
        start_time = time.time()
        while calc.process_state is not plumpy.ProcessState.WAITING:
            if time.time() - start_time >= self.TEST_TIMEOUT:
                self.fail(
                    'Timed out waiting for process to enter waiting state')

        # Make sure that calling any command on a non-existing process id will not except but print an error
        # To simulate a process without a corresponding task, we simply create a node and store it. This node will not
        # have an associated task at RabbitMQ, but it will be a valid `ProcessNode` so it will pass the initial
        # filtering of the `verdi process` commands
        orphaned_node = WorkFunctionNode().store()
        non_existing_process_id = str(orphaned_node.pk)
        for command in [
                cmd_process.process_pause, cmd_process.process_play,
                cmd_process.process_kill
        ]:
            result = self.cli_runner.invoke(command, [non_existing_process_id])
            self.assertClickResultNoException(result)
            self.assertIn('Error:', result.output)

        self.assertFalse(calc.paused)
        result = self.cli_runner.invoke(cmd_process.process_pause,
                                        [str(calc.pk)])
        self.assertIsNone(result.exception, result.output)

        # We need to make sure that the process is picked up by the daemon and put in the Waiting state before we start
        # running the CLI commands, so we add a broadcast subscriber for the state change, which when hit will set the
        # future to True. This will be our signal that we can start testing
        waiting_future = Future()
        filters = kiwipy.BroadcastFilter(
            lambda *args, **kwargs: waiting_future.set_result(True),
            sender=calc.pk,
            subject='state_changed.*.waiting')
        self.runner.communicator.add_broadcast_subscriber(filters)

        # The process may already have been picked up by the daemon and put in the waiting state before the subscriber
        # got the chance to attach itself, in which case it would have missed the broadcast. That's why we check if the
        # state is already waiting, and if not, we run the loop of the runner to start waiting for the broadcast message.
        # To make sure that we have the latest state of the node as it is in the database, we force refresh it by reloading it.
        calc = load_node(calc.pk)
        if calc.process_state != plumpy.ProcessState.WAITING:
            self.runner.loop.run_sync(lambda: with_timeout(waiting_future))

        # Here we know that the process is with the daemon runner and in the waiting state, so we can start running
        # the `verdi process` commands that we want to test
        result = self.cli_runner.invoke(cmd_process.process_pause,
                                        ['--wait', str(calc.pk)])
        self.assertIsNone(result.exception, result.output)
        self.assertTrue(calc.paused)

        result = self.cli_runner.invoke(cmd_process.process_play,
                                        ['--wait', str(calc.pk)])
        self.assertIsNone(result.exception, result.output)
        self.assertFalse(calc.paused)

        result = self.cli_runner.invoke(cmd_process.process_kill,
                                        ['--wait', str(calc.pk)])
        self.assertIsNone(result.exception, result.output)
        self.assertTrue(calc.is_terminated)
        self.assertTrue(calc.is_killed)
Example #5
def launch(lattice_size, matrix_element, lattice_and_surface,
           customstructure_node,
           periodic_xrepeats, periodic_yrepeats, periodic_zrepeats,
           displacement_x, displacement_y, special_pointsonly,
           primitive, solute_elements, maxsolute_layer, testsolute_layer,
           refsolute, structure_group_label, structure_group_description,
           dryrun):
    """
    Script for creating stacking fault structures for a given size and matrix element. Generates
    a set of distorted structures using the 'tilted cell method', i.e. by adding fractional
    increments of the 'x' and 'y' cell vectors to the 'z' vector.
    """
    STABLE_STACKING_NAME = 'stable_stacking'
    if not dryrun:
        structure_group = Group.objects.get_or_create(
                             label=structure_group_label, description=structure_group_description)[0]
    else:
        structure_group = None



    extras = {}
    if lattice_and_surface:
        if lattice_size is None:
            raise Exception("Must specify a lattice_size if using lattice_and_surface")
        if matrix_element is None:
            raise Exception("Must specify a matrix_element if using lattice_and_surface")
        lattice_size = float(lattice_size)
        lattice_type, surface_plane = lattice_and_surface.split('_')
        surface_plane = "{"+str(surface_plane)+"}"
        orthogonal = not primitive
        extras = {
            'lattice_size': lattice_size,
            'lattice_type': lattice_type,
            'surface_plane': surface_plane,
            'matrix_element': matrix_element,
        }
    special_points = {'undistorted': [0, 0]}
    if lattice_and_surface == "FCC_111":
        xrepeats = periodic_xrepeats
        zrepeats = 3*periodic_zrepeats
        extras['z_direction'] = '<111>'
        if orthogonal:
            yrepeats = 2*periodic_yrepeats
            extras['x_direction'] = '<112>'
            extras['y_direction'] = '<110>'
            extras['orthogonal'] = 'True'
            special_points[STABLE_STACKING_NAME] = [0, 2./3.]
        else:
            yrepeats = periodic_yrepeats
            extras['x_direction'] = '<110>'
            extras['y_direction'] = '<110>'
            extras['orthogonal'] = 'False'
            special_points[STABLE_STACKING_NAME] = [1./3., 1./3.]
        undistorted_structure = ase.build.fcc111(
            matrix_element,
            [xrepeats, yrepeats, zrepeats],
            orthogonal=orthogonal,
            a=lattice_size,
            periodic=True,
        )
    elif customstructure_node:
        custom_structure = load_node(customstructure_node)
        undistorted_structure = custom_structure.get_ase()
        extras = custom_structure.extras
        if '_aiida_hash' in extras:
            del extras['_aiida_hash']
        extras['inputstructure_uuid'] = custom_structure.uuid
        #Ensuring that the structure has all the required labels
        if 'label' not in extras:
            print(("WARNING: label not found in {} extras".format(custom_structure)))
        if 'x_direction' not in extras:
            print(("WARNING: x_direction not found in {} extras".format(custom_structure)))
        if 'y_direction' not in extras:
            print(("WARNING: y_direction not found in {} extras".format(custom_structure)))
        if 'surface_plane' not in extras:
            print(("WARNING: surface_plane not found in {} extras".format(custom_structure)))
        undistorted_structure = undistorted_structure.repeat(
            [periodic_xrepeats, periodic_yrepeats, periodic_zrepeats])
    else:
        raise Exception("Could not process lattice_and_surface: {}".format(lattice_and_surface))


    #Add extras common to specified & custom lattice
    extras['periodic_xrepeats'] = periodic_xrepeats
    extras['periodic_yrepeats'] = periodic_yrepeats
    extras['periodic_zrepeats'] = periodic_zrepeats


    undistorted_structure.pbc = [True, True, True] # DFT structures always periodic
    a1 = undistorted_structure.get_cell()[0]/float(periodic_xrepeats)
    a2 = undistorted_structure.get_cell()[1]/float(periodic_yrepeats)
    a3 = undistorted_structure.get_cell()[2]/float(periodic_zrepeats)

    dispx_array = get_displacements_array(displacement_x)
    dispy_array = get_displacements_array(displacement_y)
    displacements = [[d_x, d_y] for d_x in dispx_array for d_y in dispy_array]
    special_pointnames = []

    if special_pointsonly:
        displacements = []  # override any user displacements
        for sp_name in special_points:
            d_x, d_y = special_points[sp_name]
            displacements.append([d_x, d_y])
            special_pointnames.append(sp_name)

    if solute_elements:
        displacements = []  # override any user displacements
        if STABLE_STACKING_NAME not in special_points:
            raise Exception("{} has no stable_stacking structure defined "
                            "".format(lattice_and_surface))
        if refsolute:
            d_x, d_y = special_points['undistorted']
            special_pointnames.append('undistorted')
        else:
            d_x, d_y = special_points[STABLE_STACKING_NAME]
            special_pointnames.append(STABLE_STACKING_NAME)
        displacements.append([d_x, d_y])

    for displacement in displacements:
        d_x, d_y = displacement
        extras['displacement_x'] = d_x
        extras['displacement_y'] = d_y
        if special_pointsonly:
            extras['special_point'] = special_pointnames.pop(0)
        distorted_structure = undistorted_structure.copy()
        distorted_structure.cell[2] += a1*d_x
        distorted_structure.cell[2] += a2*d_y
        store_asestructure(distorted_structure, extras, structure_group, dryrun)

    solute_elements = prep_elementlist(solute_elements)
    for solute_element in solute_elements:
        extras['sol1_element'] = solute_element
        layer_frame = get_layer_frame(distorted_structure, (0,0,1))
        layer_frame = layer_frame.drop_duplicates("layer_index").reset_index()
        solute_layers = list(range(int(len(layer_frame)/2)))
        if refsolute:
            solute_layers = [0]
        if testsolute_layer:
            solute_layers = [int(len(layer_frame)/2)-1]
        for i in solute_layers:
            solute_structure = distorted_structure.copy()
            solute_index = int(layer_frame.loc[i]['structure_index'])
            solute_structure[solute_index].symbol = solute_element
            extras['sol1_index'] = solute_index
            extras['sol1sf_distance'] = layer_frame.loc[i]['layer_distance']
            extras['sol1layer_index'] = int(layer_frame.loc[i]['layer_index'])
            store_asestructure(solute_structure, extras, structure_group, dryrun)
            if maxsolute_layer and i >= int(maxsolute_layer):
                break
Example #7
calcs_pks = []
#calcs_pks = [1464, 1462, 1399, 1403]#, 1059]#, 1414
####
'''
if not calcs_pks:
    try:
        for arg in sys.argv[1:]:
            calc_t = arg
            calcs_pks.append(int(calc_t))
    except:
        pass
#####
'''

# check if calculation pks belong to successful fleur calculations
for pk in calcs_pks:
    calc = load_node(pk)
    if (not isinstance(calc, FleurCalc)):
        raise ValueError(
            "Calculation with pk {} must be a FleurCalculation".format(pk))
    if calc.get_state() != 'FINISHED':
        raise ValueError(
            "Calculation with pk {} must be in state FINISHED".format(pk))

parser_info = {'parser_warnings': [], 'unparsed': []}

### call
test_outxmlfiles = [
    './test_outxml/outBeCr.xml', './test_outxml/out.xml',
    './test_outxml/outCuF.xml', './test_outxml/outFe.xml',
    './test_outxml/outHg.xml', './test_outxml/outO.xml'
]
Example #8
    def test_autogroup_filter_class(self):  # pylint: disable=too-many-locals
        """Check if the autogroup is properly generated but filtered classes are skipped."""
        from aiida.orm import Code, QueryBuilder, Node, AutoGroup, load_node

        script_content = textwrap.dedent("""\
            import sys
            from aiida.orm import Computer, Int, ArrayData, KpointsData, CalculationNode, WorkflowNode
            from aiida.plugins import CalculationFactory
            from aiida.engine import run_get_node
            ArithmeticAdd = CalculationFactory('arithmetic.add')

            computer = Computer(
                label='localhost-example-{}'.format(sys.argv[1]),
                hostname='localhost',
                description='my computer',
                transport_type='local',
                scheduler_type='direct',
                workdir='/tmp'
            ).store()
            computer.configure()

            code = Code(
                input_plugin_name='arithmetic.add',
                remote_computer_exec=[computer, '/bin/true']).store()
            inputs = {
                'x': Int(1),
                'y': Int(2),
                'code': code,
                'metadata': {
                    'options': {
                        'resources': {
                            'num_machines': 1,
                            'num_mpiprocs_per_machine': 1
                        }
                    }
                }
            }

            node1 = KpointsData().store()
            node2 = ArrayData().store()
            node3 = Int(3).store()
            node4 = CalculationNode().store()
            node5 = WorkflowNode().store()
            _, node6 = run_get_node(ArithmeticAdd, **inputs)
            print(node1.pk)
            print(node2.pk)
            print(node3.pk)
            print(node4.pk)
            print(node5.pk)
            print(node6.pk)
            """)

        Code()
        for idx, (
                flags,
                kptdata_in_autogroup,
                arraydata_in_autogroup,
                int_in_autogroup,
                calc_in_autogroup,
                wf_in_autogroup,
                calcarithmetic_in_autogroup,
        ) in enumerate([
            [['--exclude', 'aiida.data:array.kpoints'], False, True, True,
             True, True, True],
                # Check if % works anywhere - both 'int' and 'array.kpoints' contain an 'i'
            [['--exclude', 'aiida.data:%i%'], False, True, False, True, True,
             True],
            [['--exclude', 'aiida.data:int'], True, True, False, True, True,
             True],
            [['--exclude', 'aiida.data:%'], False, False, False, True, True,
             True],
            [['--exclude', 'aiida.data:array', 'aiida.data:array.%'], False,
             False, True, True, True, True],
            [[
                '--exclude', 'aiida.data:array', 'aiida.data:array.%',
                'aiida.data:int'
            ], False, False, False, True, True, True],
            [['--exclude', 'aiida.calculations:arithmetic.add'], True, True,
             True, True, True, False],
            [
                ['--include', 'aiida.node:process.calculation'
                 ],  # Base type, no specific plugin
                False,
                False,
                False,
                True,
                False,
                False
            ],
            [
                ['--include', 'aiida.node:process.workflow'
                 ],  # Base type, no specific plugin
                False,
                False,
                False,
                False,
                True,
                False
            ],
            [[], True, True, True, True, True, True],
        ]):
            with tempfile.NamedTemporaryFile(mode='w+') as fhandle:
                fhandle.write(script_content)
                fhandle.flush()

                options = ['--auto-group'] + flags + [
                    '--', fhandle.name, str(idx)
                ]
                result = self.cli_runner.invoke(cmd_run.run, options)
                self.assertClickResultNoException(result)

                pk1_str, pk2_str, pk3_str, pk4_str, pk5_str, pk6_str = result.output.split()
                pk1 = int(pk1_str)
                pk2 = int(pk2_str)
                pk3 = int(pk3_str)
                pk4 = int(pk4_str)
                pk5 = int(pk5_str)
                pk6 = int(pk6_str)
                _ = load_node(pk1)  # Check if the node can be loaded
                _ = load_node(pk2)  # Check if the node can be loaded
                _ = load_node(pk3)  # Check if the node can be loaded
                _ = load_node(pk4)  # Check if the node can be loaded
                _ = load_node(pk5)  # Check if the node can be loaded
                _ = load_node(pk6)  # Check if the node can be loaded

                queryb = QueryBuilder().append(Node,
                                               filters={'id': pk1},
                                               tag='node')
                queryb.append(AutoGroup, with_node='node', project='*')
                all_auto_groups_kptdata = queryb.all()

                queryb = QueryBuilder().append(Node,
                                               filters={'id': pk2},
                                               tag='node')
                queryb.append(AutoGroup, with_node='node', project='*')
                all_auto_groups_arraydata = queryb.all()

                queryb = QueryBuilder().append(Node,
                                               filters={'id': pk3},
                                               tag='node')
                queryb.append(AutoGroup, with_node='node', project='*')
                all_auto_groups_int = queryb.all()

                queryb = QueryBuilder().append(Node,
                                               filters={'id': pk4},
                                               tag='node')
                queryb.append(AutoGroup, with_node='node', project='*')
                all_auto_groups_calc = queryb.all()

                queryb = QueryBuilder().append(Node,
                                               filters={'id': pk5},
                                               tag='node')
                queryb.append(AutoGroup, with_node='node', project='*')
                all_auto_groups_wf = queryb.all()

                queryb = QueryBuilder().append(Node,
                                               filters={'id': pk6},
                                               tag='node')
                queryb.append(AutoGroup, with_node='node', project='*')
                all_auto_groups_calcarithmetic = queryb.all()

                self.assertEqual(
                    len(all_auto_groups_kptdata),
                    1 if kptdata_in_autogroup else 0,
                    'Wrong number of nodes in autogroup associated with the KpointsData node '
                    "just created with flags '{}'".format(' '.join(flags)))
                self.assertEqual(
                    len(all_auto_groups_arraydata),
                    1 if arraydata_in_autogroup else 0,
                    'Wrong number of nodes in autogroup associated with the ArrayData node '
                    "just created with flags '{}'".format(' '.join(flags)))
                self.assertEqual(
                    len(all_auto_groups_int), 1 if int_in_autogroup else 0,
                    'Wrong number of nodes in autogroup associated with the Int node '
                    "just created with flags '{}'".format(' '.join(flags)))
                self.assertEqual(
                    len(all_auto_groups_calc), 1 if calc_in_autogroup else 0,
                    'Wrong number of nodes in autogroup associated with the CalculationNode '
                    "just created with flags '{}'".format(' '.join(flags)))
                self.assertEqual(
                    len(all_auto_groups_wf), 1 if wf_in_autogroup else 0,
                    'Wrong number of nodes in autogroup associated with the WorkflowNode '
                    "just created with flags '{}'".format(' '.join(flags)))
                self.assertEqual(
                    len(all_auto_groups_calcarithmetic),
                    1 if calcarithmetic_in_autogroup else 0,
                    'Wrong number of nodes in autogroup associated with the ArithmeticAdd CalcJobNode '
                    "just created with flags '{}'".format(' '.join(flags)))
Example #9
                                               [0.0, 1.0, 0.0],
                                               [0.0, 0.0, 1.0]],
                                 'distance': 0.01,
                                 'mesh': [40, 40, 40],
                                 'symmetry_precision': 1e-5}
                          )

calc = code.new_calc(max_wallclock_seconds=3600,
                     resources={"num_machines": 1,
                                "parallel_env":"localmpi",
                                "tot_num_mpiprocs": 6})


calc.label = "test phonopy calculation"
calc.description = "A much longer description"

calc.use_structure(s)
calc.use_code(code)
calc.use_parameters(parameters)
calc.use_data_sets(load_node(23913))

if False:
    subfolder, script_filename = calc.submit_test()
    print "Test_submit for calculation (uuid='{}')".format(calc.uuid)
    print "Submit file in {}".format(os.path.join(
        os.path.relpath(subfolder.abspath),
        script_filename))
else:
    calc.store_all()
    print "created calculation with PK={}".format(calc.pk)
    calc.submit()
Example #10
    def test_load_nodes(self):
        """Test for load_node() function."""
        from aiida.orm import Data, load_node
        from aiida.backends.sqlalchemy import get_scoped_session

        a_obj = Data()
        a_obj.store()

        self.assertEqual(a_obj.pk, load_node(identifier=a_obj.pk).pk)
        self.assertEqual(a_obj.pk, load_node(identifier=a_obj.uuid).pk)
        self.assertEqual(a_obj.pk, load_node(pk=a_obj.pk).pk)
        self.assertEqual(a_obj.pk, load_node(uuid=a_obj.uuid).pk)

        session = get_scoped_session()

        try:
            session.begin_nested()
            with self.assertRaises(ValueError):
                load_node(identifier=a_obj.pk, pk=a_obj.pk)
        finally:
            session.rollback()

        try:
            session.begin_nested()
            with self.assertRaises(ValueError):
                load_node(pk=a_obj.pk, uuid=a_obj.uuid)
        finally:
            session.rollback()

        try:
            session.begin_nested()
            with self.assertRaises(TypeError):
                load_node(pk=a_obj.uuid)
        finally:
            session.rollback()

        try:
            session.begin_nested()
            with self.assertRaises(TypeError):
                load_node(uuid=a_obj.pk)
        finally:
            session.rollback()

        try:
            session.begin_nested()
            with self.assertRaises(ValueError):
                load_node()
        finally:
            session.rollback()
Example #11
    def force_after_scf(self):
        '''
        This routine uses the force theorem to calculate the energy dispersion of
        spin spirals. The force theorem calculation is implemented in the FLEUR
        code, hence a single-iteration FLEUR input file containing a <forceTheorem>
        tag has to be created and submitted.
        '''
        calc = self.ctx.reference

        if not calc.is_finished_ok:
            message = ('The reference SCF calculation was not successful.')
            self.control_end_wc(message)
            return self.exit_codes.ERROR_REFERENCE_CALCULATION_FAILED

        try:
            outpara_node = calc.outputs.output_scf_wc_para
        except NotExistent:
            message = (
                'The reference SCF calculation failed, no scf output node.')
            self.control_end_wc(message)
            return self.exit_codes.ERROR_REFERENCE_CALCULATION_FAILED

        outpara = outpara_node.get_dict()

        if 'total_energy' not in outpara:
            message = (
                'Did not manage to extract float total energy from the reference SCF calculation.'
            )
            self.control_end_wc(message)
            return self.exit_codes.ERROR_REFERENCE_CALCULATION_FAILED

        self.report('INFO: run Force theorem calculations')

        status = self.change_fleurinp()
        if status:
            return status

        fleurin = self.ctx.fleurinp

        # Do not copy mixing_history* files from the parent
        settings = {'remove_from_remotecopy_list': ['mixing_history*']}

        # Retrieve remote folder of the reference calculation
        pk_last = 0
        scf_ref_node = load_node(calc.pk)
        for i in scf_ref_node.called:
            if i.node_type == 'process.workflow.workchain.WorkChainNode.':
                if i.process_class is FleurBaseWorkChain:
                    if pk_last < i.pk:
                        pk_last = i.pk
        try:
            remote = load_node(pk_last).outputs.remote_folder
        except AttributeError:
            message = (
                'Found no remote folder of the reference scf calculation.')
            self.control_end_wc(message)
            return self.exit_codes.ERROR_REFERENCE_CALCULATION_NOREMOTE

        label = 'Force_theorem_calculation'
        description = 'This is a force theorem calculation for all SQA'

        code = self.inputs.fleur
        options = self.ctx.options.copy()

        inputs_builder = get_inputs_fleur(
            code,
            remote,
            fleurin,
            options,
            label,
            description,
            settings,
            add_comp_para=self.ctx.wf_dict['add_comp_para'])
        future = self.submit(FleurBaseWorkChain, **inputs_builder)
        return ToContext(f_t=future)

Example #12
dynaphopy_parameters ={'supercell': [[2, 0, 0],
                                     [0, 2, 0],
                                     [0, 0, 2]],
                       'primitive':  [[1.0, 0.0, 0.0],
                                      [0.0, 1.0, 0.0],
                                      [0.0, 0.0, 1.0]],
                       'mesh': [40, 40, 40],
                       'md_commensurate': True,
                       'temperature': 300}  # Temperature can be omitted (if omitted, it is calculated from the Maxwell-Boltzmann distribution)



from aiida.orm import load_node
force_constants = load_node(20569)  # Loads node that contains the harmonic force constants (Array data)

machine = {
    'num_machines': 1,
    'parallel_env': 'mpi*',
    'tot_num_mpiprocs': 16}


parameters_md = {'timestep': 0.001,
                 'temperature': 300,
                 'thermostat_variable': 0.5,
                 'equilibrium_steps': 100,
                 'total_steps': 2000,
                 'dump_rate': 1}

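The plain dictionaries above would typically be wrapped in storable AiiDA nodes before being attached to a calculation; a minimal sketch, mirroring the Dict(dict=...) usage shown in the FleurScfWorkChain example (variable names are illustrative):

from aiida.orm import Dict

dynaphopy_params_node = Dict(dict=dynaphopy_parameters)  # storable copy of the dynaphopy settings
md_params_node = Dict(dict=parameters_md)                # storable copy of the MD parameters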
Example #15
 def test_load(self):
     from aiida.orm import load_node
     for t in self.all_types:
         node = t()
         node.store()
         loaded = load_node(node.pk)
         self.assertEqual(node, loaded)
Example #16
# -*- coding: utf-8 -*-
from aiida_fleur.workflows.scf import FleurScfWorkChain
from aiida.orm import Dict, load_node
from aiida.engine import submit

fleur_code = load_node(FLEUR_PK)
inpgen_code = load_node(INPGEN_PK)
structure = load_node(STRUCTURE_PK)

wf_para = Dict(
    dict={
        'fleur_runmax': 3,
        'density_converged': 0.001,
        'mode': 'density',
        'itmax_per_run': 30,
        'serial': False,
        'only_even_MPI': False
    })

options = Dict(
    dict={
        'resources': {
            'num_machines': 1,
            'num_mpiprocs_per_machine': 2
        },
        'withmpi': True,
        'max_wallclock_seconds': 600
    })

calc_parameters = Dict(dict={'kpt': {'div1': 2, 'div2': 2, 'div3': 2}})

SCF_workchain = submit(FleurScfWorkChain,
Example #17
from aiida import orm, plugins, engine

builder = plugins.CalculationFactory("quantumespresso.pw").get_builder()

builder.code = orm.Code.get(label="qe-direct")
builder.structure = orm.load_node("5eb94d2d-2f58-4769-9f74-80c223791077")
builder.kpoints = orm.load_node("a63f51e4-4a86-4271-bb30-ad69c1e1a7e2")
builder.parameters = orm.load_node("ea01fb5e-9098-481c-b46e-57cfa60a77cc")
upf_family = orm.Group.get(label="SSSP/1.1/PBE/efficiency",
                           type_string="sssp.family")
builder.pseudos = upf_family.get_pseudos(builder.structure)
builder.metadata.options.withmpi = True
builder.metadata.options.resources = {"num_machines": 1, "tot_num_mpiprocs": 2}
builder.metadata.options.max_wallclock_seconds = 1800

calc = engine.submit(builder)
print("pk=", calc.pk)
Example #18
from aiida.orm import load_node, load_workflow
from aiida.orm import Code, DataFactory

from matplotlib import pyplot as plt
import numpy as np
from scipy import stats

import sys

if len(sys.argv) < 2:
    print('use: python plot_phonon.py {pk_number}')
    exit()

# Set WorkChain PhononPhonopy PK number
################################
wc = load_node(int(sys.argv[1]))
################################


def kdeplot(fig,
            array_data,
            xmax=None,
            ymax=None,
            ymin=0,
            zmax=None,
            ylabel=True,
            title=None,
            cmap='rainbow',
            cutoff=None,
            density_ratio=0.1,
            fmax=None,
Example #19
def upload_calculation(node, transport, calc_info, script_filename, inputs=None, dry_run=False):
    """Upload a `CalcJob` instance

    :param node: the `CalcJobNode`.
    :param transport: an already opened transport to use to submit the calculation.
    :param calc_info: the calculation info datastructure returned by `CalcJobNode.presubmit`
    :param script_filename: the job launch script returned by `CalcJobNode.presubmit`
    :return: tuple of ``calc_info`` and ``script_filename``
    """
    from logging import LoggerAdapter
    from tempfile import NamedTemporaryFile
    from aiida.orm import load_node, Code, RemoteData

    # If the calculation already has a `remote_folder`, simply return. The upload was apparently already completed
    # before, which can happen if the daemon is restarted and it shuts down after uploading but before getting the
    # chance to perform the state transition. Upon reloading this calculation, it will re-attempt the upload.
    link_label = 'remote_folder'
    if node.get_outgoing(RemoteData, link_label_filter=link_label).first():
        execlogger.warning('CalcJobNode<{}> already has a `{}` output: skipping upload'.format(node.pk, link_label))
        return calc_info, script_filename

    computer = node.computer

    codes_info = calc_info.codes_info
    input_codes = [load_node(_.code_uuid, sub_classes=(Code,)) for _ in codes_info]

    logger_extra = get_dblogger_extra(node)
    transport.set_logger_extra(logger_extra)
    logger = LoggerAdapter(logger=execlogger, extra=logger_extra)

    if not dry_run and node.has_cached_links():
        raise ValueError('Cannot submit calculation {} because it has cached input links! If you just want to test the '
                         'submission, set `metadata.dry_run` to True in the inputs.'.format(node.pk))

    folder = node._raw_input_folder

    # If we are performing a dry-run, the working directory should actually be a local folder that should already exist
    if dry_run:
        workdir = transport.getcwd()
    else:
        remote_user = transport.whoami()
        # TODO Doc: {username} field
        # TODO: if something is changed here, fix also 'verdi computer test'
        remote_working_directory = computer.get_workdir().format(username=remote_user)
        if not remote_working_directory.strip():
            raise exceptions.ConfigurationError(
                "[submission of calculation {}] No remote_working_directory configured for computer '{}'".format(
                    node.pk, computer.name))

        # If it already exists, no exception is raised
        try:
            transport.chdir(remote_working_directory)
        except IOError:
            logger.debug(
                '[submission of calculation {}] Unable to chdir in {}, trying to create it'.format(
                    node.pk, remote_working_directory))
            try:
                transport.makedirs(remote_working_directory)
                transport.chdir(remote_working_directory)
            except EnvironmentError as exc:
                raise exceptions.ConfigurationError(
                    '[submission of calculation {}] '
                    'Unable to create the remote directory {} on '
                    "computer '{}': {}".format(
                        node.pk, remote_working_directory, computer.name, exc))
        # Store remotely with sharding (this is where the folder structure of
        # remote jobs is chosen); the resulting workdir is stored on the node via
        # `set_remote_workdir` below, so later steps only need to read the
        # absolute path from the calculation instead of re-deriving this logic.
        transport.mkdir(calc_info.uuid[:2], ignore_existing=True)
        transport.chdir(calc_info.uuid[:2])
        transport.mkdir(calc_info.uuid[2:4], ignore_existing=True)
        transport.chdir(calc_info.uuid[2:4])

        try:
            # The final directory may already exist, most likely because this function was already executed once, but
            # failed and as a result was rescheduled by the engine. In this case it would be fine to delete the folder
            # and create it from scratch, except that we cannot be sure that this is the actual case. Therefore, to err
            # on the safe side, we move the folder to the lost+found directory before recreating the folder from scratch
            transport.mkdir(calc_info.uuid[4:])
        except OSError:
            # Move the existing directory to lost+found, log a warning and create a clean directory anyway
            path_existing = os.path.join(transport.getcwd(), calc_info.uuid[4:])
            path_lost_found = os.path.join(remote_working_directory, REMOTE_WORK_DIRECTORY_LOST_FOUND)
            path_target = os.path.join(path_lost_found, calc_info.uuid)
            logger.warning('tried to create path {} but it already exists, moving the entire folder to {}'.format(
                path_existing, path_target))

            # Make sure the lost+found directory exists, then copy the existing folder there and delete the original
            transport.mkdir(path_lost_found, ignore_existing=True)
            transport.copytree(path_existing, path_target)
            transport.rmtree(path_existing)

            # Now we can create a clean folder for this calculation
            transport.mkdir(calc_info.uuid[4:])
        finally:
            transport.chdir(calc_info.uuid[4:])

        # I store the workdir of the calculation for later file retrieval
        workdir = transport.getcwd()
        node.set_remote_workdir(workdir)

    # I first create the code files, so that the code can put
    # default files to be overwritten by the plugin itself.
    # Still, beware! The code file itself could be overwritten...
    # But I checked for this earlier.
    for code in input_codes:
        if code.is_local():
            # Note: this will possibly overwrite files
            for f in code.get_folder_list():
                transport.put(code.get_abs_path(f), f)
            transport.chmod(code.get_local_executable(), 0o755)  # rwxr-xr-x

    # In a dry_run, the working directory is the raw input folder, which will already contain these resources
    if not dry_run:
        for filename in folder.get_content_list():
            logger.debug('[submission of calculation {}] copying file/folder {}...'.format(node.pk, filename))
            transport.put(folder.get_abs_path(filename), filename)

    # local_copy_list is a list of tuples, each with (uuid, filename, target relative path)
    # NOTE: validation of these lists is done inside calculation.presubmit()
    local_copy_list = calc_info.local_copy_list or []
    remote_copy_list = calc_info.remote_copy_list or []
    remote_symlink_list = calc_info.remote_symlink_list or []

    for uuid, filename, target in local_copy_list:
        logger.debug('[submission of calculation {}] copying local file/folder to {}'.format(node.uuid, target))

        def find_data_node(inputs, uuid):
            """Find and return the node with the given UUID from a nested mapping of input nodes.

            :param inputs: (nested) mapping of nodes
            :param uuid: UUID of the node to find
            :return: instance of `Node` or `None` if not found
            """
            from collections.abc import Mapping
            data_node = None

            for link_label, input_node in inputs.items():
                if isinstance(input_node, Mapping):
                    data_node = find_data_node(input_node, uuid)
                elif isinstance(input_node, Node) and input_node.uuid == uuid:
                    data_node = input_node
                if data_node is not None:
                    break

            return data_node

        try:
            data_node = load_node(uuid=uuid)
        except exceptions.NotExistent:
            data_node = find_data_node(inputs, uuid)

        if data_node is None:
            logger.warning('failed to load Node<{}> specified in the `local_copy_list`'.format(uuid))
        else:
            # Note, once #2579 is implemented, use the `node.open` method instead of the named temporary file in
            # combination with the new `Transport.put_object_from_filelike`
            # Since the content of the node could potentially be binary, we read the raw bytes and pass them on
            with NamedTemporaryFile(mode='wb+') as handle:
                handle.write(data_node.get_object_content(filename, mode='rb'))
                handle.flush()
                handle.seek(0)
                transport.put(handle.name, target)

    if dry_run:
        if remote_copy_list:
            with open(os.path.join(workdir, '_aiida_remote_copy_list.txt'), 'w') as handle:
                for remote_computer_uuid, remote_abs_path, dest_rel_path in remote_copy_list:
                    handle.write('would have copied {} to {} in working directory on remote {}'.format(
                        remote_abs_path, dest_rel_path, computer.name))

        if remote_symlink_list:
            with open(os.path.join(workdir, '_aiida_remote_symlink_list.txt'), 'w') as handle:
                for remote_computer_uuid, remote_abs_path, dest_rel_path in remote_symlink_list:
                    handle.write('would have created symlinks from {} to {} in working directory on remote {}'.format(
                        remote_abs_path, dest_rel_path, computer.name))

    else:

        for (remote_computer_uuid, remote_abs_path, dest_rel_path) in remote_copy_list:
            if remote_computer_uuid == computer.uuid:
                logger.debug('[submission of calculation {}] copying {} remotely, directly on the machine {}'.format(
                    node.pk, dest_rel_path, computer.name))
                try:
                    transport.copy(remote_abs_path, dest_rel_path)
                except (IOError, OSError):
                    logger.warning('[submission of calculation {}] Unable to copy remote resource from {} to {}! '
                                   'Stopping.'.format(node.pk, remote_abs_path, dest_rel_path))
                    raise
            else:
                raise NotImplementedError(
                    '[submission of calculation {}] Remote copy between two different machines is '
                    'not implemented yet'.format(node.pk))

        for (remote_computer_uuid, remote_abs_path, dest_rel_path) in remote_symlink_list:
            if remote_computer_uuid == computer.uuid:
                logger.debug('[submission of calculation {}] copying {} remotely, directly on the machine {}'.format(
                    node.pk, dest_rel_path, computer.name))
                try:
                    transport.symlink(remote_abs_path, dest_rel_path)
                except (IOError, OSError):
                    logger.warning('[submission of calculation {}] Unable to create remote symlink from {} to {}! '
                                   'Stopping.'.format(node.pk, remote_abs_path, dest_rel_path))
                    raise
            else:
                raise IOError('It is not possible to create a symlink between two different machines for '
                              'calculation {}'.format(node.pk))

    if not dry_run:
        # Make sure that attaching the `remote_folder` with a link is the last thing we do. This gives the biggest
        # chance of making this method idempotent. That is to say, if a runner gets interrupted during this action, it
        # will simply retry the upload, unless we got here and managed to link it up, in which case we move to the next
        # task. Because in that case, the check for the existence of this link at the top of this function will exit
        # early from this command.
        remotedata = RemoteData(computer=computer, remote_path=workdir)
        remotedata.add_incoming(node, link_type=LinkType.CREATE, link_label='remote_folder')
        remotedata.store()

    return calc_info, script_filename