예제 #1
0
    def get_io_tree(self, uuid_pattern):
        """
        Build a vis.js-style graph (``nodes`` + ``edges``) for the node
        matching *uuid_pattern* together with its direct inputs and outputs.

        :param uuid_pattern: pattern passed to ``self._check_id_validity``;
            the resolved filter is then read from ``self._id_filter``.
        :return: dict with keys ``nodes`` (node descriptors; the main node
            always has graph id 0) and ``edges`` (edges between graph ids).
        """
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.node import Node

        def get_node_shape(ntype):
            """Map an AiiDA type string to a vis.js shape name."""
            # The first segment of the dotted type string decides the shape;
            # anything that is not a calculation or a code is drawn as the
            # default/data shape "dot".
            category = ntype.split(".")[0]

            if category == "calculation":
                return "square"
            if category == "code":
                return "triangle"
            return "dot"

        def make_node_entry(node, graph_id, group, linktype=None):
            """Build one graph-node descriptor (shared by the main node and
            its inputs/outputs; only the latter carry a ``linktype`` key)."""
            nodetype = node.type
            display_type = nodetype.split('.')[-2]
            description = node.get_desc()
            if description == '':
                # Fall back to the human-readable type segment.
                description = display_type

            entry = {
                "id": graph_id,
                "nodeid": node.pk,
                "nodeuuid": node.uuid,
                "nodetype": nodetype,
                "displaytype": display_type,
                "group": group,
                "description": description,
                "shape": get_node_shape(nodetype)
            }
            if linktype is not None:
                entry["linktype"] = linktype
            return entry

        # Check whether uuid_pattern identifies a unique node
        self._check_id_validity(uuid_pattern)

        qb = QueryBuilder()
        qb.append(Node, tag="main", project=["*"],
                  filters=self._id_filter)

        nodes = []
        edges = []
        node_count = 0

        if qb.count() > 0:
            main_node = qb.first()[0]
            nodes.append(make_node_entry(main_node, node_count, "mainNode"))
        # Graph id 0 is reserved for the main node; inputs/outputs start at 1.
        node_count += 1

        # get all inputs (iterdict() over an empty result simply yields
        # nothing, so no count() guard is needed)
        qb = QueryBuilder()
        qb.append(Node, tag="main", project=['*'],
                  filters=self._id_filter)
        qb.append(Node, tag="in", project=['*'], edge_project=['label'],
                  input_of='main')

        for row in qb.iterdict():
            node = row['in']['*']
            linktype = row['main--in']['label']
            nodes.append(make_node_entry(node, node_count, "inputs", linktype))
            edges.append({
                "from": node_count,
                "to": 0,
                "arrows": "to",
                "color": {"inherit": 'from'},
                "linktype": linktype,
            })
            node_count += 1

        # get all outputs
        qb = QueryBuilder()
        qb.append(Node, tag="main", project=['*'],
                  filters=self._id_filter)
        qb.append(Node, tag="out", project=['*'], edge_project=['label'],
                  output_of='main')

        for row in qb.iterdict():
            node = row['out']['*']
            linktype = row['main--out']['label']
            nodes.append(make_node_entry(node, node_count, "outputs", linktype))
            edges.append({
                "from": 0,
                "to": node_count,
                "arrows": "to",
                "color": {"inherit": 'to'},
                "linktype": linktype
            })
            node_count += 1

        return {"nodes": nodes, "edges": edges}
예제 #2
0
    def get_io_tree(self, uuid_pattern, tree_in_limit, tree_out_limit):
        """
        Build a vis.js-style graph (``nodes`` + ``edges``) for the node
        matching *uuid_pattern* together with its direct inputs and outputs,
        with optional limits on how many links are returned in each direction.

        :param uuid_pattern: pattern passed to ``self._check_id_validity``;
            the resolved filter is then read from ``self._id_filter``.
        :param tree_in_limit: maximum number of incoming links to return,
            or None for no limit.
        :param tree_out_limit: maximum number of outgoing links to return,
            or None for no limit.
        :return: dict with ``nodes`` and ``edges`` (the graph; the main node
            always has graph id 0), plus ``total_no_of_incomings`` /
            ``total_no_of_outgoings`` (unlimited link counts) and
            ``sent_no_of_incomings`` / ``sent_no_of_outgoings`` (the link
            counts actually returned after applying the limits).
        """
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.node import Node

        def get_node_shape(ntype):
            # The first segment of the dotted type string decides the shape.
            type = ntype.split(".")[0]

            # default and data node shape
            shape = "dot"

            if type == "calculation":
                shape = "square"
            elif type == "code":
                shape = "triangle"

            return shape

        # Check whether uuid_pattern identifies a unique node
        self._check_id_validity(uuid_pattern)

        qb = QueryBuilder()
        qb.append(Node, tag="main", project=["*"], filters=self._id_filter)

        nodes = []
        edges = []
        nodeCount = 0

        if qb.count() > 0:
            mainNode = qb.first()[0]
            pk = mainNode.pk
            uuid = mainNode.uuid
            nodetype = mainNode.type
            nodelabel = mainNode.label
            display_type = nodetype.split('.')[-2]
            description = mainNode.get_desc()
            if description == '':
                # Fall back to the human-readable type segment.
                description = mainNode.type.split('.')[-2]

            nodes.append({
                "id": nodeCount,
                "nodeid": pk,
                "nodeuuid": uuid,
                "nodetype": nodetype,
                "nodelabel": nodelabel,
                "displaytype": display_type,
                "group": "mainNode",
                "description": description,
                "shape": get_node_shape(nodetype)
            })
        # Graph id 0 is reserved for the main node; inputs/outputs start at 1.
        nodeCount += 1

        # get all inputs
        qb = QueryBuilder()
        qb.append(Node, tag="main", project=['*'], filters=self._id_filter)
        qb.append(Node,
                  tag="in",
                  project=['*'],
                  edge_project=['label', 'type'],
                  input_of='main')
        if tree_in_limit is not None:
            qb.limit(tree_in_limit)

        # Maps node pk -> graph id, so a node connected through several
        # links is only added once.
        input_node_pks = {}
        # Number of returned links (after the limit), not of unique nodes.
        sent_no_of_incomings = qb.count()

        if sent_no_of_incomings > 0:
            for input in qb.iterdict():
                node = input['in']['*']
                pk = node.pk
                linklabel = input['main--in']['label']
                linktype = input['main--in']['type']

                # add node if it is not present
                if pk not in input_node_pks.keys():
                    input_node_pks[pk] = nodeCount
                    uuid = node.uuid
                    nodetype = node.type
                    nodelabel = node.label
                    display_type = nodetype.split('.')[-2]
                    description = node.get_desc()
                    if description == '':
                        description = node.type.split('.')[-2]

                    nodes.append({
                        "id": nodeCount,
                        "nodeid": pk,
                        "nodeuuid": uuid,
                        "nodetype": nodetype,
                        "nodelabel": nodelabel,
                        "displaytype": display_type,
                        "group": "inputs",
                        "description": description,
                        "linklabel": linklabel,
                        "linktype": linktype,
                        "shape": get_node_shape(nodetype)
                    })
                    nodeCount += 1

                # One edge per link, even when the source node was deduplicated.
                from_edge = input_node_pks[pk]
                edges.append({
                    "from": from_edge,
                    "to": 0,
                    "arrows": "to",
                    "color": {
                        "inherit": 'from'
                    },
                    "label": linktype,
                })

        # get all outputs
        qb = QueryBuilder()
        qb.append(Node, tag="main", project=['*'], filters=self._id_filter)
        qb.append(Node,
                  tag="out",
                  project=['*'],
                  edge_project=['label', 'type'],
                  output_of='main')
        if tree_out_limit is not None:
            qb.limit(tree_out_limit)

        # Same deduplication scheme as for the inputs.
        output_node_pks = {}
        sent_no_of_outgoings = qb.count()

        if sent_no_of_outgoings > 0:
            for output in qb.iterdict():
                node = output['out']['*']
                pk = node.pk
                linklabel = output['main--out']['label']
                linktype = output['main--out']['type']

                # add node if it is not present
                if pk not in output_node_pks.keys():
                    output_node_pks[pk] = nodeCount
                    uuid = node.uuid
                    nodetype = node.type
                    nodelabel = node.label
                    display_type = nodetype.split('.')[-2]
                    description = node.get_desc()
                    if description == '':
                        description = node.type.split('.')[-2]

                    nodes.append({
                        "id": nodeCount,
                        "nodeid": pk,
                        "nodeuuid": uuid,
                        "nodetype": nodetype,
                        "nodelabel": nodelabel,
                        "displaytype": display_type,
                        "group": "outputs",
                        "description": description,
                        "linklabel": linklabel,
                        "linktype": linktype,
                        "shape": get_node_shape(nodetype)
                    })
                    nodeCount += 1

                to_edge = output_node_pks[pk]
                edges.append({
                    "from": 0,
                    "to": to_edge,
                    "arrows": "to",
                    "color": {
                        "inherit": 'to'
                    },
                    "label": linktype
                })

        # count total no of nodes (without the limits applied above)
        qb = QueryBuilder()
        qb.append(Node, tag="main", project=['id'], filters=self._id_filter)
        qb.append(Node, tag="in", project=['id'], input_of='main')
        total_no_of_incomings = qb.count()

        qb = QueryBuilder()
        qb.append(Node, tag="main", project=['id'], filters=self._id_filter)
        qb.append(Node, tag="out", project=['id'], output_of='main')
        total_no_of_outgoings = qb.count()

        return {
            "nodes": nodes,
            "edges": edges,
            "total_no_of_incomings": total_no_of_incomings,
            "total_no_of_outgoings": total_no_of_outgoings,
            "sent_no_of_incomings": sent_no_of_incomings,
            "sent_no_of_outgoings": sent_no_of_outgoings
        }
예제 #3
0
    def test_same_computer_different_name_import(self):
        """
        This test checks that if the computer is re-imported with a different
        name to the same database, then the original computer will not be
        renamed. It also checks that the names were correctly imported (without
        any change since there is no computer name collision)
        """
        import os
        import shutil
        import tempfile

        from aiida.orm.importexport import export
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.computer import Computer
        from aiida.orm.calculation.job import JobCalculation

        # NOTE(review): import_data (used below) is assumed to be imported at
        # module level — confirm against the full file.

        # Creating a folder for the import/export files
        export_file_tmp_folder = tempfile.mkdtemp()
        unpack_tmp_folder = tempfile.mkdtemp()

        try:
            # Store a calculation
            calc1_label = "calc1"
            calc1 = JobCalculation()
            calc1.set_computer(self.computer)
            calc1.set_resources({"num_machines": 1,
                                 "num_mpiprocs_per_machine": 1})
            calc1.label = calc1_label
            calc1.store()
            calc1._set_state(u'RETRIEVING')

            # Store locally the computer name
            comp1_name = unicode(self.computer.name)

            # Export the first job calculation
            filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
            export([calc1.dbnode], outfile=filename1, silent=True)

            # Rename the computer, so the second export carries a different
            # computer name for the same computer uuid
            self.computer.set_name(comp1_name + "_updated")

            # Store a second calculation
            calc2_label = "calc2"
            calc2 = JobCalculation()
            calc2.set_computer(self.computer)
            calc2.set_resources({"num_machines": 2,
                                 "num_mpiprocs_per_machine": 2})
            calc2.label = calc2_label
            calc2.store()
            calc2._set_state(u'RETRIEVING')

            # Export the second job calculation
            filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz")
            export([calc2.dbnode], outfile=filename2, silent=True)

            # Clean the local database
            self.clean_db()

            # Check that there are no computers
            qb = QueryBuilder()
            qb.append(Computer, project=['*'])
            self.assertEqual(qb.count(), 0, "There should not be any computers"
                                            "in the database at this point.")

            # Check that there are no calculations
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['*'])
            self.assertEqual(qb.count(), 0, "There should not be any "
                                            "calculations in the database at "
                                            "this point.")

            # Import the first calculation
            import_data(filename1, silent=True)

            # Check that the calculation computer is imported correctly.
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['label'])
            self.assertEqual(qb.count(), 1, "Only one calculation should be "
                                            "found.")
            self.assertEqual(unicode(qb.first()[0]), calc1_label,
                             "The calculation label is not correct.")

            # Check that the referenced computer is imported correctly.
            qb = QueryBuilder()
            qb.append(Computer, project=['name', 'uuid', 'id'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                                            "found.")
            self.assertEqual(unicode(qb.first()[0]), comp1_name,
                             "The computer name is not correct.")

            # Import the second calculation; its archive refers to the same
            # computer under the "_updated" name
            import_data(filename2, silent=True)

            # Check that the number of computers remains the same and its data
            # did not change.
            qb = QueryBuilder()
            qb.append(Computer, project=['name'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                                            "found.")
            self.assertEqual(unicode(qb.first()[0]), comp1_name,
                             "The computer name is not correct.")

        finally:
            # Deleting the created temporary folders
            shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
            shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
예제 #4
0
    def get_potcar_groups(cls, filter_elements=None, filter_symbols=None):
        """
        List all names of groups of type PotcarFamily, possibly with some filters.

        :param filter_elements: list of strings.
               If present, returns only the groups that contains one POTCAR for
               every element present in the list. Default=None, meaning that
               all families are returned. A single element can be passed as a string.
        :param filter_symbols: list of strings with symbols to filter for.
               A single symbol can be passed as a string.
        :return: list of matching family groups.
        """

        def family_has_match(group, attr_filters):
            """Return True if *group* contains at least one node of ``cls``
            matching *attr_filters*."""
            query = QueryBuilder()
            query.append(
                Group,
                tag='family',
                filters={
                    'name': {'==': group.name},
                    'type': {'==': cls.potcar_family_type_string}
                })
            query.append(cls,
                         tag='potcar',
                         member_of='family',
                         filters=attr_filters)
            return query.count() > 0

        # Accept a bare string for convenience: iterating it directly would
        # (incorrectly) filter per character.
        if isinstance(filter_elements, str):
            filter_elements = [filter_elements]
        if isinstance(filter_symbols, str):
            filter_symbols = [filter_symbols]

        group_query = QueryBuilder()
        group_query.append(cls, tag='potcar_data')
        group_query.append(
            Group,
            group_of='potcar_data',
            tag='potcar_family',
            filters={'type': {
                '==': cls.potcar_family_type_string
            }},
            project='*')

        groups = [group_list[0] for group_list in group_query.all()]

        # Keep only the families that contain a POTCAR for every requested
        # element, then for every requested symbol.
        if filter_elements:
            for element in filter_elements:
                groups = [
                    group for group in groups
                    if family_has_match(group,
                                        {'attributes.element': {'==': element}})
                ]

        if filter_symbols:
            for symbol in filter_symbols:
                groups = [
                    group for group in groups
                    if family_has_match(group,
                                        {'attributes.symbol': {'==': symbol}})
                ]

        return groups
예제 #5
0
파일: query.py 프로젝트: asle85/aiida-core
    def test_simple_query_1(self):
        """
        Testing a simple query
        """
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm import Node, Data, Calculation
        from datetime import datetime
        from aiida.common.links import LinkType

        # Build a small chain of alternating Data / Calculation nodes with a
        # 'foo' attribute of varying type on each.
        n1 = Data()
        n1.label = 'node1'
        n1._set_attr('foo', ['hello', 'goodbye'])
        n1.store()

        n2 = Calculation()
        n2.label = 'node2'
        n2._set_attr('foo', 1)
        n2.store()

        n3 = Data()
        n3.label = 'node3'
        n3._set_attr('foo', 1.0000)  # Stored as fval
        n3.store()

        n4 = Calculation()
        n4.label = 'node4'
        n4._set_attr('foo', 'bar')
        n4.store()

        n5 = Data()
        n5.label = 'node5'
        n5._set_attr('foo', None)
        n5.store()

        # Link them into a chain: n1 -> n2 -> n3 -> n4 -> n5.
        n2.add_link_from(n1, link_type=LinkType.INPUT)
        n3.add_link_from(n2, link_type=LinkType.CREATE)

        n4.add_link_from(n3, link_type=LinkType.INPUT)
        n5.add_link_from(n4, link_type=LinkType.CREATE)

        # Numeric filter matches both the int (n2) and the float (n3) value.
        qb1 = QueryBuilder()
        qb1.append(Node, filters={'attributes.foo': 1.000})

        self.assertEqual(len(qb1.all()), 2)

        # Three Data nodes were stored (n1, n3, n5).
        qb2 = QueryBuilder()
        qb2.append(Data)
        self.assertEqual(qb2.count(), 3)

        # Same query expressed via the type string instead of the class.
        qb2 = QueryBuilder()
        qb2.append(type='data.Data.')
        self.assertEqual(qb2.count(), 3)

        # Two chained appends count linked node pairs; the chain above has
        # four links — NOTE(review): relies on the second append defaulting
        # to a link with the previous tag, confirm against QueryBuilder docs.
        qb3 = QueryBuilder()
        qb3.append(Node, project='label', tag='node1')
        qb3.append(Node, project='label', tag='node2')
        self.assertEqual(qb3.count(), 4)

        # Calculation -> Data links: n2->n3 and n4->n5.
        qb4 = QueryBuilder()
        qb4.append(Calculation, tag='node1')
        qb4.append(Data, tag='node2')
        self.assertEqual(qb4.count(), 2)

        # Data -> Calculation links: n1->n2 and n3->n4.
        qb5 = QueryBuilder()
        qb5.append(Data, tag='node1')
        qb5.append(Calculation, tag='node2')
        self.assertEqual(qb5.count(), 2)

        # No Data node links directly to another Data node.
        qb6 = QueryBuilder()
        qb6.append(Data, tag='node1')
        qb6.append(Data, tag='node2')
        self.assertEqual(qb6.count(), 0)
예제 #6
0
 def preprocess(self):
     """Store the chemical formula as the 'formula' extra on every
     StructureData node that does not have one yet."""
     query = QueryBuilder()
     query.append(StructureData, filters={'extras': {'!has_key': 'formula'}})
     # all() instead of iterall(): calling set_extra() while iterating an
     # open query would interfere with it.
     for (structure,) in query.all():
         structure.set_extra("formula", structure.get_formula())
예제 #7
0
                        structure += k + str(v)
                file_vtune = []

                for i in file_content[2:12]:
                    tmp = []
                    tmp.append(re.split("  +", i))
                    tmp[0][1] = float(tmp[0][1][:-1])
                    file_vtune.append(tmp[0][0:2])

                times.update({structure: file_vtune})
                #print "Vtune file is found at %s " % vtune_file
            else:
                print "Vtune profile file for calc %s is not available" % calc

# Fetch all JobCalculations belonging to the group named on the command line.
# NOTE(review): QueryBuilder, JobCalculation, Group, reduce, args,
# system_type and extra_types are assumed to be defined earlier in the file.
qb = QueryBuilder()
qb.append(JobCalculation, tag="mycalculation", project=["*"])
qb.append(Group, filters={"name": args.group}, group_of="mycalculation")
calcs_list = qb.all()

# Overall multiplier: product of the per-dimension scale factors.
scale_factor = reduce((lambda x, y: x * y), args.scale)
for calc in calcs_list:
    composition = calc[0].get_inputs_dict()['structure'].get_composition()
    structure = ''
    modified_structure = ''  # unused here; presumably filled in later
    # Build a formula-like label from scaled element counts, e.g. "Si8O16".
    for k, v in composition.iteritems():
        structure += k + str(v * scale_factor)
    # Bucket the label by the calculation's 'type' extra.
    extra_type = calc[0].get_extra('type')
    system_type[extra_type].append(structure)
    if extra_type not in extra_types: extra_types.append(extra_type)

for i, j in enumerate(extra_types):
예제 #8
0
# Query examples from the tutorial.

from aiida.orm.querybuilder import QueryBuilder

# An empty QueryBuilder returns nothing until an entity is appended.
qb=QueryBuilder()
qb.all()
qb.append(Node)
qb.all()
qb.count()

# Print each node returned by the query (one per row).
for node, in qb.iterall():
	print node

# May need this line if StructureData is not already defined.
StructureData = DataFactory("structure")
qb=QueryBuilder()
qb.append(StructureData)	# qb.all() lists the pk of each structure
qb.all()

예제 #9
0
    def run(self, *args):
        """
        Command-line entry point: export selected nodes, computers and/or
        groups from the database to an AiiDA export archive.

        :param args: raw command-line arguments, parsed with argparse below;
            they select what to export (-n/-g/-G/-c), whether to include
            parents and calculation outputs, the archive format (-z/-Z) and
            the output file name.
        """
        load_dbenv()

        import argparse

        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm import Group, Node, Computer
        from aiida.orm.importexport import export, export_zip

        parser = argparse.ArgumentParser(
            prog=self.get_full_command_name(),
            description='Export data from the DB.')
        parser.add_argument('-c',
                            '--computers',
                            nargs='+',
                            type=int,
                            metavar="PK",
                            help="Export the given computers")
        parser.add_argument('-n',
                            '--nodes',
                            nargs='+',
                            type=int,
                            metavar="PK",
                            help="Export the given nodes")
        parser.add_argument('-g',
                            '--groups',
                            nargs='+',
                            metavar="GROUPNAME",
                            help="Export all nodes in the given group(s), "
                            "identified by name.",
                            type=str)
        parser.add_argument('-G',
                            '--group_pks',
                            nargs='+',
                            metavar="PK",
                            help="Export all nodes in the given group(s), "
                            "identified by pk.",
                            type=str)
        parser.add_argument('-P',
                            '--no-parents',
                            dest='no_parents',
                            action='store_true',
                            help="Store only the nodes that are explicitly "
                            "given, without exporting the parents")
        parser.set_defaults(no_parents=False)
        parser.add_argument('-O',
                            '--no-calc-outputs',
                            dest='no_calc_outputs',
                            action='store_true',
                            help="If a calculation is included in the list of "
                            "nodes to export, do not export its outputs")
        parser.set_defaults(no_calc_outputs=False)
        parser.add_argument('-y',
                            '--overwrite',
                            dest='overwrite',
                            action='store_true',
                            help="Overwrite the output file, if it exists")
        parser.set_defaults(overwrite=False)

        # -z and -Z are mutually exclusive archive-format flags.
        zipsubgroup = parser.add_mutually_exclusive_group()
        zipsubgroup.add_argument(
            '-z',
            '--zipfile-compressed',
            dest='zipfilec',
            action='store_true',
            help="Store as zip file (experimental, should be "
            "faster")
        zipsubgroup.add_argument('-Z',
                                 '--zipfile-uncompressed',
                                 dest='zipfileu',
                                 action='store_true',
                                 help="Store as uncompressed zip file "
                                 "(experimental, should be faster")
        parser.set_defaults(zipfilec=False)
        parser.set_defaults(zipfileu=False)

        parser.add_argument('output_file',
                            type=str,
                            help='The output file name for the export file')

        parsed_args = parser.parse_args(args)

        # Collect the node pks given explicitly on the command line.
        if parsed_args.nodes is None:
            node_id_set = set()
        else:
            node_id_set = set(parsed_args.nodes)

        # Maps group name -> dbgroup for all groups selected by -g / -G.
        group_dict = dict()

        if parsed_args.groups is not None:
            # Resolve groups by name and add their member nodes' ids.
            qb = QueryBuilder()
            qb.append(Group,
                      tag='group',
                      project=['*'],
                      filters={'name': {
                          'in': parsed_args.groups
                      }})
            qb.append(Node, tag='node', member_of='group', project=['id'])
            res = qb.dict()

            group_dict.update(
                {_['group']['*'].name: _['group']['*'].dbgroup
                 for _ in res})
            node_id_set.update([_['node']['id'] for _ in res])

        if parsed_args.group_pks is not None:
            # Resolve groups by pk and add their member nodes' ids.
            qb = QueryBuilder()
            qb.append(Group,
                      tag='group',
                      project=['*'],
                      filters={'id': {
                          'in': parsed_args.group_pks
                      }})
            qb.append(Node, tag='node', member_of='group', project=['id'])
            res = qb.dict()

            group_dict.update(
                {_['group']['*'].name: _['group']['*'].dbgroup
                 for _ in res})
            node_id_set.update([_['node']['id'] for _ in res])

        # The db_groups that correspond to what was searched above
        dbgroups_list = group_dict.values()

        # Getting the nodes that correspond to the ids that were found above
        if len(node_id_set) > 0:
            qb = QueryBuilder()
            qb.append(Node,
                      tag='node',
                      project=['*'],
                      filters={'id': {
                          'in': node_id_set
                      }})
            node_list = [_[0] for _ in qb.all()]
        else:
            node_list = list()

        # Check if any of the nodes wasn't found in the database.
        missing_nodes = node_id_set.difference(_.id for _ in node_list)
        for id in missing_nodes:
            print >> sys.stderr, ("WARNING! Node with pk= {} "
                                  "not found, skipping.".format(id))

        # The dbnodes of the above node list
        dbnode_list = [_.dbnode for _ in node_list]

        if parsed_args.computers is not None:
            # Resolve the requested computers and warn about missing pks.
            qb = QueryBuilder()
            qb.append(Computer,
                      tag='comp',
                      project=['*'],
                      filters={'id': {
                          'in': set(parsed_args.computers)
                      }})
            computer_list = [_[0] for _ in qb.all()]
            missing_computers = set(parsed_args.computers).difference(
                _.id for _ in computer_list)
            for id in missing_computers:
                print >> sys.stderr, ("WARNING! Computer with pk= {} "
                                      "not found, skipping.".format(id))
        else:
            computer_list = []

        # The dbcomputers of the above computer list
        dbcomputer_list = [_.dbcomputer for _ in computer_list]

        # Everything to export, as backend (db) objects.
        what_list = dbnode_list + dbcomputer_list + dbgroups_list

        # Pick the export backend according to the -z / -Z flags.
        export_function = export
        additional_kwargs = {}
        if parsed_args.zipfileu:
            export_function = export_zip
            additional_kwargs.update({"use_compression": False})
        elif parsed_args.zipfilec:
            export_function = export_zip
            additional_kwargs.update({"use_compression": True})
        try:
            export_function(what=what_list,
                            also_parents=not parsed_args.no_parents,
                            also_calc_outputs=not parsed_args.no_calc_outputs,
                            outfile=parsed_args.output_file,
                            overwrite=parsed_args.overwrite,
                            **additional_kwargs)
        except IOError as e:
            print >> sys.stderr, "IOError: {}".format(e.message)
            sys.exit(1)
# Export optimized CIF structures for the pyrene-MOFs pipeline.
# NOTE(review): os, pd, QueryBuilder, CifData and Group are assumed to be
# imported earlier in this file.
from aiida.tools.importexport.dbexport import export  # Updated to AiiDA v1.3.0
from pipeline_pyrenemofs import TAG_KEY, GROUP_DIR

from aiida import load_profile
load_profile()

# Destination folder for the exported CIF files (fails if it already exists).
CIFS_DIR = "./cifs_cellopt/"
os.mkdir(CIFS_DIR)

# Reference codes of the materials of interest.
mat_df = pd.read_csv('../pipeline_pyrenemofs/static/pynene-mofs-info.csv')
mat_list = list(mat_df['refcode'].values)

# For each CifData whose label is in mat_list, follow its group (label
# starting with GROUP_DIR) and project the group's CifData tagged as the
# DDEC-optimized structure.
qb = QueryBuilder()
qb.append(CifData,
          filters={'label': {
              'in': mat_list
          }},
          tag='n',
          project='label')
qb.append(Group,
          with_node='n',
          filters={'label': {
              'like': GROUP_DIR + "%"
          }},
          tag='g')
qb.append(CifData,
          filters={'extras.{}'.format(TAG_KEY): 'opt_cif_ddec'},
          with_group='g',
          project='*')
# Deterministic ordering by material label.
qb.order_by({CifData: {'label': 'asc'}})
for q in qb.all():
예제 #11
0
    def computer_configure(self, *args):
        """
        Configure the authentication information for a given computer.

        Interactively prompts (via readline) for every transport-specific
        authentication parameter of the computer's transport class and
        stores the result in a DbAuthInfo entry for the selected
        user/computer pair.  Legacy Python 2 command-line entry point.

        :param args: command-line arguments, parsed as
            ``[-u/--user EMAIL] computer``
        """
        if not is_dbenv_loaded():
            load_dbenv()

        import readline
        import inspect

        from django.core.exceptions import ObjectDoesNotExist

        from aiida.common.exceptions import (
            NotExistent, ValidationError)
        from aiida.backends.utils import get_automatic_user
        from aiida.common.utils import get_configured_user_email
        from aiida.backends.settings import BACKEND
        from aiida.backends.profile import BACKEND_SQLA, BACKEND_DJANGO

        import argparse

        parser = argparse.ArgumentParser(
            prog=self.get_full_command_name(),
            description='Configure a computer for a given AiiDA user.')
        # The default states are those that are shown if no option is given
        parser.add_argument('-u', '--user', type=str, metavar='EMAIL',
                            help="Configure the computer for the given AiiDA user (otherwise, configure the current default user)",
                            )
        parser.add_argument('computer', type=str,
                            help="The name of the computer that you want to configure")

        parsed_args = parser.parse_args(args)

        user_email = parsed_args.user
        computername = parsed_args.computer

        try:
            computer = self.get_computer(name=computername)
        except NotExistent:
            print >> sys.stderr, "No computer exists with name '{}'".format(
                computername)
            sys.exit(1)
        if user_email is None:
            user = get_automatic_user()
        else:
            from aiida.orm.querybuilder import QueryBuilder
            qb = QueryBuilder()
            qb.append(type="user", filters={'email': user_email})
            user = qb.first()
            # NOTE(review): QueryBuilder.first() typically returns a
            # single-element list (or None), so `user.email` further down
            # may fail on this code path -- verify against the AiiDA
            # version in use.
            if user is None:
                print >> sys.stderr, ("No user with email '{}' in the "
                                      "database.".format(user_email))
                sys.exit(1)

        # Fetch (or lazily create) the DbAuthInfo entry for this
        # user/computer pair; the concrete ORM model depends on the backend.
        if BACKEND == BACKEND_DJANGO:
            from aiida.backends.djsite.db.models import DbAuthInfo

            try:
                authinfo = DbAuthInfo.objects.get(
                    dbcomputer=computer.dbcomputer,
                    aiidauser=user)

                old_authparams = authinfo.get_auth_params()
            except ObjectDoesNotExist:
                authinfo = DbAuthInfo(dbcomputer=computer.dbcomputer, aiidauser=user)
                old_authparams = {}

        elif BACKEND == BACKEND_SQLA:
            from aiida.backends.sqlalchemy.models.authinfo import DbAuthInfo
            from aiida.backends.sqlalchemy import session

            authinfo = session.query(DbAuthInfo).filter(
                DbAuthInfo.dbcomputer == computer.dbcomputer
            ).filter(
                DbAuthInfo.aiidauser == user
            ).first()
            if authinfo is None:
                authinfo = DbAuthInfo(
                    dbcomputer=computer.dbcomputer,
                    aiidauser=user
                )
                old_authparams = {}
            else:
                old_authparams = authinfo.get_auth_params()
        else:
            raise Exception(
                "Unknown backend {}".format(BACKEND)
            )
        Transport = computer.get_transport_class()

        print ("Configuring computer '{}' for the AiiDA user '{}'".format(
            computername, user.email))

        print "Computer {} has transport of type {}".format(computername,
                                                            computer.get_transport_type())

        # Warn when configuring a user other than the local default: the
        # readline suggestions below are derived from the *local*
        # configuration files and may not apply to that user.
        if user.email != get_configured_user_email():
            print "*" * 72
            print "** {:66s} **".format("WARNING!")
            print "** {:66s} **".format(
                "  You are configuring a different user.")
            print "** {:66s} **".format(
                "  Note that the default suggestions are taken from your")
            print "** {:66s} **".format(
                "  local configuration files, so they may be incorrect.")
            print "*" * 72

        valid_keys = Transport.get_valid_auth_params()

        # Keep previously stored values for still-valid keys as interactive
        # defaults; anything left in old_authparams afterwards is obsolete
        # and gets dropped (with a warning).
        default_authparams = {}
        for k in valid_keys:
            if k in old_authparams:
                default_authparams[k] = old_authparams.pop(k)
        if old_authparams:
            print ("WARNING: the following keys were previously in the "
                   "authorization parameters,")
            print "but have not been recognized and have been deleted:"
            print ", ".join(old_authparams.keys())

        if not valid_keys:
            print "There are no special keys to be configured. Configuration completed."
            authinfo.set_auth_params({})
            authinfo.save()
            return

        print ""
        print "Note: to leave a field unconfigured, leave it empty and press [Enter]"

        # I strip out the old auth_params that are not among the valid keys

        new_authparams = {}

        # Prompt for each key, pre-filling the readline buffer with either
        # the previously stored value or a transport-provided suggestion;
        # re-prompt the same key until it converts without ValidationError.
        for k in valid_keys:
            key_set = False
            while not key_set:
                try:
                    converter_name = '_convert_{}_fromstring'.format(k)
                    try:
                        converter = dict(inspect.getmembers(
                            Transport))[converter_name]
                    except KeyError:
                        print >> sys.stderr, ("Internal error! "
                                              "No {} defined in Transport {}".format(
                            converter_name, computer.get_transport_type()))
                        sys.exit(1)

                    if k in default_authparams:
                        readline.set_startup_hook(lambda:
                                                  readline.insert_text(str(default_authparams[k])))
                    else:
                        # Use suggestion only if parameters were not already set
                        suggester_name = '_get_{}_suggestion_string'.format(k)
                        try:
                            suggester = dict(inspect.getmembers(
                                Transport))[suggester_name]
                            suggestion = suggester(computer)
                            readline.set_startup_hook(lambda:
                                                      readline.insert_text(suggestion))
                        except KeyError:
                            readline.set_startup_hook()

                    txtval = raw_input("=> {} = ".format(k))
                    if txtval:
                        new_authparams[k] = converter(txtval)
                    key_set = True
                except ValidationError as e:
                    print "Error in the inserted value: {}".format(e.message)

        authinfo.set_auth_params(new_authparams)
        authinfo.save()
        print "Configuration stored for your user on computer '{}'.".format(
            computername)
from sys import argv
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.data.remote import RemoteData
from aiida.orm.calculation import *

# Legacy Python 2 analysis script: dumps the 'step0'/'steps' attributes of
# one specific ParameterData node to a text file.
# NOTE(review): DataFactory is not imported explicitly -- presumably it comes
# in via the star-import above; confirm in the full script.
path="/home/aiida/Documents/seb352-travail/essais-tuto/res/"
StructureData = DataFactory("structure")
ParameterData = DataFactory("parameter")
#PwCalculation= DataFactory("calculation")

qb=QueryBuilder()



# Project two attributes of the ParameterData node with hard-coded pk 5615.
qb.append(ParameterData,
	project=["attributes.step0", "attributes.steps"],
	filters={"id":{"==":5615}}
	)

ok=0
a=qb.count()
# NOTE(review): `file` shadows the Python 2 builtin of the same name, and the
# handle is not managed with a context manager (closed manually below).
file=open(path+"results-parabola-dict", 'w')



# Each row i is [step0, steps]; the writes below assume steps has at least
# three entries and step0 has a 'dE' key -- TODO confirm for this node.
for i in qb.iterall():
	file.write("{}\n\n{}\n\n{}\n\n{}".format(i[0],i[1][0],i[1][1],i[1][2]))
	ok+=1
	print i[0]['dE']
	print len(i[0])

file.close()
예제 #13
0
    def get(cls,
            element,
            name=None,
            version="latest",
            match_aliases=True,
            group_label=None,
            n_el=None):
        """
        Return the first matching Pseudopotential for the given criteria.

        :param element: The atomic symbol
        :param name: The name of the pseudo
        :param version: A specific version (if more than one in the database and not the highest/latest)
        :param match_aliases: Whether to look in the list of of aliases for a matching name
        :param group_label: if given, restrict the search to pseudos in that group
        :param n_el: if given, keep only pseudos whose valence-electron counts sum to this value
        """
        from aiida.orm.querybuilder import QueryBuilder

        qb = QueryBuilder()

        append_kwargs = {}
        if group_label:
            # Restrict the search to members of the requested group.
            qb.append(Group, filters={"label": group_label}, tag="group")
            append_kwargs["with_group"] = "group"
        qb.append(Pseudopotential, **append_kwargs)

        criteria = {"attributes.element": {"==": element}}
        if version != "latest":
            criteria["attributes.version"] = {"==": version}
        if name:
            if match_aliases:
                criteria["attributes.aliases"] = {"contains": [name]}
            else:
                criteria["attributes.name"] = {"==": name}
        qb.add_filter(Pseudopotential, criteria)

        # SQLA ORM only solution:
        # qb.order_by({Pseudopotential: [{"attributes.version": {"cast": "i", "order": "desc"}}]})
        # items = qb.first()

        matches = qb.iterall()
        if n_el:
            # Keep only pseudos whose summed valence-electron count matches.
            matches = (row for row in matches if sum(row[0].n_el) == n_el)

        # Newest version first; this ordering is only meaningful per name.
        candidates = sorted(matches, key=lambda row: row[0].version, reverse=True)

        if not candidates:
            raise NotExistent(
                f"No Gaussian Pseudopotential found for element={element}, name={name}, version={version}"
            )

        # if we get different names there is no well ordering, sorting by version only works if they have the same name
        if len({row[0].name for row in candidates}) > 1:
            raise MultipleObjectsError(
                f"Multiple Gaussian Pseudopotentials found for element={element}, name={name}, version={version}"
            )

        return candidates[0][0]
예제 #14
0
# Command-line interface of the submission script.
# NOTE(review): `parser`, `json`, `QueryBuilder`, `JobCalculation`, `Group`
# and `DataFactory` must be defined/imported earlier in the full script --
# the visible fragment starts mid-way.
parser.add_argument("code",
                    help="code and machine where you would like to run")
parser.add_argument("json_hpc", help="json file with HPC parameters")
parser.add_argument("json_pw", help="json file with PW parameters")
args = parser.parse_args()

# Resolve the AiiDA data classes used below.
StructureData = DataFactory('structure')
UpfData = DataFactory('upf')
ParameterData = DataFactory('parameter')
KpointsData = DataFactory('array.kpoints')

# HPC/submission settings come from the user-supplied JSON file.
with open(args.json_hpc) as data_file:
    json_hpc = json.load(data_file)

# All JobCalculations that belong to the group named in the JSON config.
qb = QueryBuilder()
qb.append(JobCalculation, tag="mycalculation", project=["*"])
qb.append(Group,
          filters={"name": json_hpc["query_group"]},
          group_of="mycalculation")
calcs_list = qb.all()

# Accumulators for the workflow inputs assembled from the query results.
pseudo_family = json_hpc['pseudo']
structures_wf = []
kpoints_wf = []
pw_parameters_wf = []
hpc_workflow_params = {}
keys = []
count = 0

#for i in calcs_list[0:1]:
for i in calcs_list:
예제 #15
0
def upload_psf_family(folder, group_label, group_description, stop_if_existing=True):
    """
    Upload a set of PSF files in a given group.

    :param folder: a path containing all PSF files to be added.
        Only files ending in .PSF (case-insensitive) are considered.
    :param group_label: the name of the group to create. If it exists and is
        non-empty, a UniquenessError is raised.
    :param group_description: a string to be set as the group description.
        Overwrites previous descriptions, if the group was existing.
    :param stop_if_existing: if True, check for the md5 of the files and,
        if the file already exists in the DB, raises a MultipleObjectsError.
        If False, simply adds the existing PsfData node to the group.
    :return: tuple ``(nfiles, nuploaded)``: the number of PSF files found in
        the folder and the number of new PsfData nodes actually stored.
    :raises ValueError: if ``folder`` is not a directory, or a duplicate md5
        is found while ``stop_if_existing`` is True.
    :raises UniquenessError: if the group belongs to another user, or more
        than one pseudo would end up in the group for the same element.
    """
    import os
    from aiida import orm
    from aiida.common import AIIDA_LOGGER as aiidalogger
    from aiida.common.exceptions import UniquenessError
    from aiida.orm.querybuilder import QueryBuilder
    from aiida_siesta.groups.pseudos import PsfFamily

    message = (  #pylint: disable=invalid-name
        'This function has been deprecated and will be removed in `v2.0.0`. ' +
        '`upload_psf_family` is substitued by `fam.create_from_folder` ' +
        'where `fam` is an instance of the families classes in `aiida_pseudo.groups.family`.'
    )

    warnings.warn(message, AiidaSiestaDeprecationWarning)

    if not os.path.isdir(folder):
        raise ValueError("folder must be a directory")

    # only files, and only those ending with .psf or .PSF;
    # go to the real file if it is a symlink
    files = [
        os.path.realpath(os.path.join(folder, i))
        for i in os.listdir(folder)
        if os.path.isfile(os.path.join(folder, i)) and i.lower().endswith('.psf')
    ]

    nfiles = len(files)

    automatic_user = orm.User.objects.get_default()
    group, group_created = PsfFamily.objects.get_or_create(label=group_label, user=automatic_user)

    if group.user.email != automatic_user.email:
        raise UniquenessError(
            "There is already a PsfFamily group with name {}"
            ", but it belongs to user {}, therefore you "
            "cannot modify it".format(group_label, group.user.email)
        )

    # Always update description, even if the group already existed
    group.description = group_description

    # NOTE: GROUP SAVED ONLY AFTER CHECKS OF UNICITY

    # Pairs (PsfData, to_be_created): reuse an existing node with the same
    # md5 when allowed, otherwise prepare a new (unstored) one.
    pseudo_and_created = []

    for afile in files:
        md5sum = md5_file(afile)
        qb = QueryBuilder()
        qb.append(PsfData, filters={'attributes.md5': {'==': md5sum}})
        existing_psf = qb.first()

        #existing_psf = PsfData.query(dbattributes__key="md5",
        #                            dbattributes__tval = md5sum)

        if existing_psf is None:
            # return the psfdata instances, not stored
            pseudo, created = PsfData.get_or_create(afile, use_first=True, store_psf=False)
            # to check whether only one psf per element exists
            # NOTE: actually, created has the meaning of "to_be_created"
            pseudo_and_created.append((pseudo, created))
        else:
            if stop_if_existing:
                raise ValueError(
                    "A PSF with identical MD5 to "
                    " {} cannot be added with stop_if_existing"
                    "".format(afile)
                )
            # qb.first() returned a one-element row: unwrap the node itself
            existing_psf = existing_psf[0]
            pseudo_and_created.append((existing_psf, False))

    # check whether pseudo are unique per element
    elements = [(i[0].element, i[0].md5sum) for i in pseudo_and_created]
    # If group already exists, check also that I am not inserting more than
    # once the same element
    if not group_created:
        for aiida_n in group.nodes:
            # Skip non-pseudos
            if not isinstance(aiida_n, PsfData):
                continue
            elements.append((aiida_n.element, aiida_n.md5sum))

    elements = set(elements)  # Discard elements with the same MD5, that would
    # not be stored twice
    elements_names = [e[0] for e in elements]

    if not len(elements_names) == len(set(elements_names)):
        duplicates = {x for x in elements_names if elements_names.count(x) > 1}
        duplicates_string = ", ".join(i for i in duplicates)
        raise UniquenessError("More than one PSF found for the elements: " + duplicates_string + ".")

    # At this point, save the group, if still unstored
    if group_created:
        group.store()

    # save the psf in the database, and add them to group
    for pseudo, created in pseudo_and_created:
        if created:
            pseudo.store()

            aiidalogger.debug("New node {} created for file {}".format(pseudo.uuid, pseudo.filename))
        else:
            aiidalogger.debug("Reusing node {} for file {}".format(pseudo.uuid, pseudo.filename))

    # Add elements to the group all together
    group.add_nodes([pseudo for pseudo, created in pseudo_and_created])

    nuploaded = len([_ for _, created in pseudo_and_created if created])

    return nfiles, nuploaded
예제 #16
0
    def test_import(self):
        """
        Import a known export archive into a cleaned database and verify
        the imported contents: node/calculation/structure counts, attribute
        dictionaries, labels, cells, kinds, and link topology.

        NOTE(review): assertEquals/assertNotEquals are deprecated aliases
        of assertEqual/assertNotEqual (kept here as this is era-specific
        test code).
        """
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.node import Node
        from aiida.orm.calculation import Calculation
        from aiida.orm.data.structure import StructureData
        import inspect
        import os

        # Locate the test archive relative to this test module's file.
        curr_path = inspect.getfile(inspect.currentframe())
        folder_path = os.path.dirname(curr_path)
        relative_folder_path = ("export_import_test_files/"
                                "parents_of_6537645.aiida")
        test_file_path = os.path.join(folder_path, relative_folder_path)

        # Clean the database
        self.clean_db()

        # Insert the default data to the database
        self.insert_data()

        # Import the needed data
        import_data(test_file_path, silent=True)

        # Check that the number of nodes if correct
        qb = QueryBuilder()
        qb.append(Node, project=["id"])
        self.assertEquals(qb.count(), 83, "The number of Nodes is not the "
                          "expected one.")

        # Check the number of calculations and that the attributes were
        # imported correctly
        qb = QueryBuilder()
        qb.append(Calculation, project=["*"])
        self.assertEquals(
            qb.count(), 19, "The number of Calculations is not "
            "the expected one.")
        for [calc] in qb.all():
            attr = calc.get_attrs()
            self.assertIsInstance(attr, dict, "A dictionary should be "
                                  "returned")
            self.assertNotEquals(len(attr), 0, "The attributes should not be "
                                 "empty.")

        # Check the number of the structure data and that the label is the
        # expected one
        qb = QueryBuilder()
        qb.append(StructureData, project=["*"])
        self.assertEquals(
            qb.count(), 7, "The number of StructureData is not "
            "the expected one.")
        for [struct] in qb.all():
            self.assertEquals(struct.label, "3D_with_2D_substructure",
                              "A label is not correct")

        # TO BE SEEN WITH MOUNET
        # print "<================= ParameterData attributes.energy ====================>"
        #
        # from aiida.orm.data.parameter import ParameterData
        # qb = QueryBuilder()
        # # qb.append(Calculation, filters={
        # #     'id': {"==": 6525492}}, project=["id"], tag="res")
        # qb.append(ParameterData, project=["attributes"], tag="res")
        # print qb.all()
        # for [struct] in qb.all():
        #     print struct
        #     # print struct.get_attrs()
        #     # print struct.uuid
        #     # print struct.label
        #     print "=============="
        # TO BE SEEN WITH MOUNET

        # Check that the cell attributes of the structure data is not empty.
        qb = QueryBuilder()
        qb.append(StructureData, project=["attributes.cell"])
        for [cell] in qb.all():
            self.assertNotEquals(len(cell), 0, "There should be cells.")

        # Check that the cell of specific structure data is the expected one
        qb = QueryBuilder()
        qb.append(
            StructureData,
            project=["attributes.cell"],
            filters={'uuid': {
                "==": "45670237-dc1e-4300-8e0b-4d3639dc77cf"
            }})
        for [cell] in qb.all():
            #print cell
            self.assertEquals(
                cell,
                [[8.34, 0.0, 0.0], [0.298041701839357, 8.53479766274308, 0.0],
                 [0.842650688117053, 0.47118495164127, 10.6965192730702]],
                "The cell is not the expected one.")

        # Check that the kind attributes are the correct ones.
        qb = QueryBuilder()
        qb.append(StructureData, project=["attributes.kinds"], tag="res")
        for [kinds] in qb.all():
            self.assertEqual(len(kinds), 2, "Attributes kinds should be of "
                             "length 2")
            self.assertIn(
                {
                    u'symbols': [u'Fe'],
                    u'weights': [1.0],
                    u'mass': 55.847,
                    u'name': u'Fe'
                }, kinds)
            self.assertIn(
                {
                    u'symbols': [u'S'],
                    u'weights': [1.0],
                    u'mass': 32.066,
                    u'name': u'S'
                }, kinds)

        # Check that there are StructureData that are outputs of Calculations
        qb = QueryBuilder()
        qb.append(Calculation, project=["uuid"], tag="res")
        qb.append(StructureData, output_of="res")
        self.assertGreater(len(qb.all()), 0, "There should be results for the"
                           "query.")

        # Check that there are RemoteData that are children and
        # parents of Calculations
        from aiida.orm.data.remote import RemoteData
        qb = QueryBuilder()
        qb.append(Calculation, tag="c1")
        qb.append(RemoteData, project=["uuid"], output_of="c1", tag='r1')
        qb.append(Calculation, output_of="r1", tag="c2")

        self.assertGreater(len(qb.all()), 0, "There should be results for the"
                           "query.")

        # TO BE SEEN WITH MOUNET
        # from aiida.orm.data.array.trajectory import TrajectoryData
        # qb = QueryBuilder()
        # qb.append(TrajectoryData, project=["*"], tag="res")
        # print qb.all()
        # for [struct] in qb.all():
        #     print struct
        #     print struct.get_attrs()
        #     # print struct.uuid
        #     # print struct.label
        #     print "=============="
        # TO BE SEEN WITH MOUNET

        # Check that a specific UUID exists
        qb = QueryBuilder()
        qb.append(
            Node,
            filters={'uuid': {
                "==": "45670237-dc1e-4300-8e0b-4d3639dc77cf"
            }},
            project=["*"],
            tag="res")
        self.assertGreater(len(qb.all()), 0, "There should be results for the"
                           "query.")
예제 #17
0
def extract_structure_info(keys, structures=None):
    """
    Collect a bunch of information (specified in ``keys``) about structures
    in the database and return it as a list of dicts, which can be used for
    further evaluation.

    :param keys: iterable of property names to extract per structure; known
        keys are 'uuid', 'formula', 'pk', 'symmetry', 'pbc', 'volume',
        'child_nodes', 'natoms', 'cell', 'group', 'extras', 'label',
        'description', 'cif_file', 'scf', 'band', 'dos', 'eos', 'init_cls',
        'corehole', 'calcfunctions', 'primitive'
    :param structures: optional list of structure nodes (or ``[node]`` rows
        as returned by ``QueryBuilder.all()``); if None or empty, ALL
        StructureData nodes in the database are queried
    :return: list of dicts, one per structure, with one entry per requested key
    """
    # Fix: the previous version also resolved DataFactory('structure') here,
    # unconditionally -- a dead store duplicating the lookup inside the
    # `if not structures` branch below.
    structure_list = []

    from aiida_fleur.tools.StructureData_util import get_spacegroup, is_structure
    from aiida_fleur.tools.StructureData_util import is_primitive

    if not structures:
        # No explicit list given: fall back to every StructureData in the DB.
        StructureData = DataFactory('structure')
        #t = time.time()
        qb = QB()
        qb.append(StructureData)
        structures = qb.all()
        #elapsed = time.time() - t
        # print "Total number of structures: {} (retrieved in {} s.)".format(len(structures), elapsed)
        #t = time.time()

    # for structure in structures:
    #    structure_dict = {}
    #    struc = structure[0]
    #    for key in keys:
    #        structure_dict[key] = get_methods(key)(struc)

    # get information
    for structure in structures:
        structure_dict = {}

        # QueryBuilder.all() yields one-element rows; unwrap those, otherwise
        # coerce/validate the entry into a structure node.
        if isinstance(structure, list):
            struc = structure[0]
        else:
            struc = is_structure(structure)

        if 'formula' in keys:
            structure_dict['formula'] = struc.get_formula()
        if 'pk' in keys:
            structure_dict['pk'] = struc.pk
        if 'uuid' in keys:
            structure_dict['uuid'] = str(struc.uuid)
        if 'natoms' in keys:
            structure_dict['natoms'] = len(struc.sites)
        if 'cell' in keys:
            structure_dict['cell'] = str(struc.cell)
        if 'pbc' in keys:
            structure_dict['pbc'] = str(struc.pbc)
        if 'label' in keys:
            structure_dict['label'] = struc.label
        if 'description' in keys:
            structure_dict['description'] = struc.description
        if 'extras' in keys:
            extras = struc.extras
            structure_dict['extras'] = str(extras)
        if 'symmetry' in keys:
            symmetry = get_spacegroup(struc)
            structure_dict['symmetry'] = str(symmetry)
        if 'volume' in keys:
            volume = struc.get_cell_volume()
            structure_dict['volume'] = volume
        if 'child_nodes' in keys:
            child_nodes = len(struc.get_outgoing().all())
            structure_dict['child_nodes'] = child_nodes
        if 'primitive' in keys:
            prim = is_primitive(struc)
            structure_dict['primitive'] = prim

        if 'cif_file' in keys:
            cif_file = get_cif_file(struc)
            structure_dict['cif_file'] = cif_file
        '''
        if 'cif_number' in keys:
            cif_number = get_cif_number(struc)
            structure_dict['cif_number'] = cif_number
        if 'cif_uuid' in keys:
            cif_uuid = get_cif_uuid(struc)
            structure_dict['cif_uuid'] = cif_uuid
        if 'cif_ref' in keys:
            cif_ref = get_cif_ref(struc)
            structure_dict['cif_ref'] = cif_ref
        if 'total_energy' in keys:
            total_energy = get_total_energy(struc)
            structure_dict['total_energy'] = total_energy
        '''
        if 'group' in keys:
            group = group_member(struc)
            structure_dict['group'] = group
        # Workchain-related keys: find the given workchain type this
        # structure was an input of.
        if 'scf' in keys:
            scf = input_of_workcal('fleur_scf_wc', struc)
            structure_dict['scf'] = scf
        if 'band' in keys:
            band = input_of_workcal('fleur_band_wc', struc)
            structure_dict['band'] = band
        if 'dos' in keys:
            dos = input_of_workcal('fleur_dos_wc', struc)
            structure_dict['dos'] = dos
        if 'eos' in keys:
            eos = input_of_workcal('fleur_eos_wc', struc)
            structure_dict['eos'] = eos
        if 'init_cls' in keys:
            init_cls = input_of_workcal('fleur_initial_cls_wc', struc)
            structure_dict['init_cls'] = init_cls
        if 'corehole' in keys:
            corehole = input_of_workcal('fleur_corehole_wc', struc)
            structure_dict['corehole'] = corehole
        if 'calcfunctions' in keys:
            calcfunctions_uuid, calcfunctions_name = input_of_calcfunctions(
                struc)
            structure_dict['calcfunctions'] = [
                calcfunctions_uuid, calcfunctions_name
            ]

        structure_list.append(structure_dict)

    #elapsed = time.time() - t
    # print "(needed {} s.!!!)".format(elapsed)

    return structure_list
예제 #18
0
파일: query.py 프로젝트: asle85/aiida-core
    def test_simple_query_2(self):
        """
        Build a three-node chain (n0 -> n1 -> n2), then exercise
        QueryBuilder: label filters, path queries with node and edge
        filters, the dict()/one() result accessors, and the internal
        query-object caching behaviour.

        NOTE(review): ``_set_attr`` and ``add_link_from`` are pre-1.0
        internal AiiDA APIs; this test targets that era of the code base.
        """
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm import Node
        from datetime import datetime
        from aiida.common.exceptions import MultipleObjectsError, NotExistent
        n0 = Node()
        n0.label = 'hello'
        n0.description=''
        n0._set_attr('foo', 'bar')

        n1 = Node()
        n1.label='foo'
        n1.description='I am FoO'

        n2 = Node()
        n2.label='bar'
        n2.description='I am BaR'

        n2.add_link_from(n1, label='random_2')
        n1.add_link_from(n0, label='random_1')

        for n in (n0, n1, n2):
            n.store()



        qb1 = QueryBuilder()
        qb1.append(Node, filters={'label': 'hello'})
        self.assertEqual(len(list(qb1.all())), 1)


        # Two-node path query: n2 must be an output of n1, with
        # case-insensitive label patterns on both nodes.
        qh = {
            'path': [
                {
                    'cls': Node,
                    'tag': 'n1'
                },
                {
                    'cls': Node,
                    'tag': 'n2',
                    'output_of': 'n1'
                }
            ],
            'filters': {
                'n1': {
                    'label': {'ilike': '%foO%'},
                },
                'n2': {
                    'label': {'ilike': 'bar%'},
                }
            },
            'project': {
                'n1': ['id', 'uuid', 'ctime', 'label'],
                'n2': ['id', 'description', 'label'],
            }
        }

        qb2 = QueryBuilder(**qh)


        resdict = qb2.dict()
        self.assertEqual(len(resdict), 1)
        self.assertTrue(isinstance(resdict[0]['n1']['ctime'], datetime))


        res_one = qb2.one()
        self.assertTrue('bar' in res_one)




        # Same path, but filtering on the *edge* between n1 and n2
        # (addressed via the combined 'n1--n2' tag).
        qh = {
            'path': [
                {
                    'cls': Node,
                    'tag': 'n1'
                },
                {
                    'cls': Node,
                    'tag': 'n2',
                    'output_of': 'n1'
                }
            ],
            'filters': {
                'n1--n2': {'label': {'like': '%_2'}}
            }
        }
        qb = QueryBuilder(**qh)
        self.assertEqual(qb.count(), 1)


        # Test the hashing:
        query1 = qb.get_query()
        qb.add_filter('n2', {'label': 'nonexistentlabel'})
        self.assertEqual(qb.count(), 0)

        with self.assertRaises(NotExistent):
            qb.one()
        with self.assertRaises(MultipleObjectsError):
            QueryBuilder().append(Node).one()

        # Adding a filter must invalidate the cached query object, while
        # repeated calls without changes must return the same cached object.
        query2 = qb.get_query()
        query3 = qb.get_query()

        self.assertTrue(id(query1) != id(query2))
        self.assertTrue(id(query2) == id(query3))
예제 #19
0
    def __init__(self):
        """Build the search widget: date range, mode and process-label filters."""
        # Collect every distinct process label (most recent first), skipping
        # workfunctions that carry embedded source code.
        label_query = QueryBuilder()
        label_query.append(WorkCalculation,
                           project="attributes._process_label",
                           filters={'attributes': {
                               '!has_key': 'source_code'
                           }})
        label_query.order_by({WorkCalculation: {'ctime': 'desc'}})
        process_labels = []
        seen_labels = set()
        for row in label_query.iterall():
            if row[0] not in seen_labels:
                seen_labels.add(row[0])
                process_labels.append(row[0])

        wide_layout = ipw.Layout(width="900px")

        self.mode = ipw.RadioButtons(
            options=['all', 'uploaded', 'edited', 'calculated'],
            layout=ipw.Layout(width="25%"))

        # Default date window: now back to seven days ago.
        self.dt_now = datetime.datetime.now()
        self.dt_end = self.dt_now - datetime.timedelta(days=7)
        self.date_start = ipw.Text(value='',
                                   description='From: ',
                                   style={'description_width': '120px'})
        self.date_end = ipw.Text(value='', description='To: ')
        self.date_text = ipw.HTML(value='<p>Select the date range:</p>')
        self.btn_date = ipw.Button(description='Search',
                                   layout={'margin': '1em 0 0 0'})

        self.age_selection = ipw.VBox(
            [
                self.date_text,
                ipw.HBox([self.date_start, self.date_end]),
                self.btn_date,
            ],
            layout={
                'border': '1px solid #fafafa',
                'padding': '1em'
            })

        # Process-label dropdown, with 'All' prepended as the default choice.
        self.drop_label = ipw.Dropdown(options=(['All'] + process_labels),
                                       description='Process Label',
                                       style={'description_width': '120px'},
                                       layout={'width': '50%'})

        # Re-run the search whenever any control changes.
        self.btn_date.on_click(self.search)
        self.mode.observe(self.search, names='value')
        self.drop_label.observe(self.search, names='value')

        divider = ipw.HTML('<hr>')
        controls = ipw.VBox(
            [self.age_selection, divider,
             ipw.HBox([self.mode, self.drop_label])])

        self.results = ipw.Dropdown(layout=wide_layout)
        self.search()
        super(StructureBrowser, self).__init__([controls, divider, self.results])
예제 #20
0
def code_list(computer, input_plugin, all_entries, all_users, show_owner):
    """List the codes in the database.

    :param computer: if not None, only list codes configured on this computer
        (these are the remote codes)
    :param input_plugin: if not None, only list codes whose
        ``attributes.input_plugin`` matches this plugin's name
    :param all_entries: if False, hide codes whose ``HIDDEN_KEY`` extra is set
        to True
    :param all_users: if False, restrict to codes owned by the current
        (automatic) user
    :param show_owner: forwarded to ``print_list_res`` to control whether the
        owner email is printed
    """
    from aiida.orm.backend import construct_backend
    backend = construct_backend()

    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm.code import Code  # pylint: disable=redefined-outer-name
    from aiida.orm.computer import Computer
    from aiida.orm.user import User

    # Restrict to the current user's codes unless all_users was requested.
    qb_user_filters = dict()
    if not all_users:
        user = backend.users.get_automatic_user()
        qb_user_filters['email'] = user.email

    qb_computer_filters = dict()
    if computer is not None:
        qb_computer_filters['name'] = computer.name

    qb_code_filters = dict()
    if input_plugin is not None:
        qb_code_filters['attributes.input_plugin'] = input_plugin.name

    # If not all_entries, hide codes with HIDDEN_KEY extra set to True.
    # A code is visible when the extra is absent OR explicitly False.
    if not all_entries:
        qb_code_filters['or'] = [{
            'extras': {
                '!has_key': Code.HIDDEN_KEY
            }
        }, {
            'extras.{}'.format(Code.HIDDEN_KEY): {
                '==': False
            }
        }]

    echo.echo("# List of configured codes:")
    echo.echo("# (use 'verdi code show CODEID' to see the details)")

    # pylint: disable=invalid-name
    if computer is not None:
        # Computer filter given: a single query suffices, because appending
        # Computer below implicitly drops codes without an attached computer.
        qb = QueryBuilder()
        qb.append(Code,
                  tag="code",
                  filters=qb_code_filters,
                  project=["id", "label"])
        # We have a user assigned to the code so we can ask for the
        # presence of a user even if there is no user filter
        qb.append(User,
                  creator_of="code",
                  project=["email"],
                  filters=qb_user_filters)
        # We also add the filter on computer. This will automatically
        # return codes that have a computer (and of course satisfy the
        # other filters). The codes that have a computer attached are the
        # remote codes.
        qb.append(Computer,
                  computer_of="code",
                  project=["name"],
                  filters=qb_computer_filters)
        qb.order_by({Code: {'id': 'asc'}})
        print_list_res(qb, show_owner)

    # If there is no filter on computers
    else:
        # Print all codes that have a computer assigned to them
        # (these are the remote codes)
        qb = QueryBuilder()
        qb.append(Code,
                  tag="code",
                  filters=qb_code_filters,
                  project=["id", "label"])
        # We have a user assigned to the code so we can ask for the
        # presence of a user even if there is no user filter
        qb.append(User,
                  creator_of="code",
                  project=["email"],
                  filters=qb_user_filters)
        qb.append(Computer, computer_of="code", project=["name"])
        qb.order_by({Code: {'id': 'asc'}})
        print_list_res(qb, show_owner)

        # Now print all the local codes. To get the local codes we ask
        # the dbcomputer_id variable to be None.
        qb = QueryBuilder()
        comp_non_existence = {"dbcomputer_id": {"==": None}}
        # AND the "no computer" condition with any filters built above.
        if not qb_code_filters:
            qb_code_filters = comp_non_existence
        else:
            new_qb_code_filters = {
                "and": [qb_code_filters, comp_non_existence]
            }
            qb_code_filters = new_qb_code_filters
        qb.append(Code,
                  tag="code",
                  filters=qb_code_filters,
                  project=["id", "label"])
        # We have a user assigned to the code so we can ask for the
        # presence of a user even if there is no user filter
        qb.append(User,
                  creator_of="code",
                  project=["email"],
                  filters=qb_user_filters)
        qb.order_by({Code: {'id': 'asc'}})
        print_list_res(qb, show_owner)
예제 #21
0
    def search(self, c=None):
        """Query the database for structures matching the current widget state
        and refresh the ``self.results`` dropdown with the matches.

        :param c: ignored; receives the change dict / button instance when
            invoked as an ipywidgets ``observe`` / ``on_click`` callback.
        """
        self.preprocess()

        qb = QueryBuilder()

        # If the date range in the text widgets is valid, use it; otherwise
        # revert to the default window (last 7 days) and write the defaults
        # back into the widgets so the user sees what was searched.
        try:
            self.start_date = datetime.datetime.strptime(
                self.date_start.value, '%Y-%m-%d')
            self.end_date = datetime.datetime.strptime(
                self.date_end.value, '%Y-%m-%d') + datetime.timedelta(hours=24)
        except ValueError:
            self.start_date = self.dt_end
            self.end_date = self.dt_now + datetime.timedelta(hours=24)

            self.date_start.value = self.start_date.strftime('%Y-%m-%d')
            self.date_end.value = self.end_date.strftime('%Y-%m-%d')

        # Restrict the structure creation time to the selected window.
        filters = {}
        filters['ctime'] = {
            'and': [{
                '<=': self.end_date
            }, {
                '>': self.start_date
            }]
        }

        if self.drop_label.value != 'All':
            # Structures produced by a job calculation inside a workflow
            # carrying the selected process label.
            qb.append(
                WorkCalculation,
                filters={'attributes._process_label': self.drop_label.value})
            qb.append(JobCalculation, output_of=WorkCalculation)
            qb.append(StructureData, output_of=JobCalculation, filters=filters)
        else:
            if self.mode.value == "uploaded":
                # Structures that have no incoming node, i.e. were not
                # produced by any calculation.
                qb2 = QueryBuilder()
                qb2.append(StructureData, project=["id"])
                qb2.append(Node, input_of=StructureData)
                processed_nodes = [n[0] for n in qb2.all()]
                if processed_nodes:
                    filters['id'] = {"!in": processed_nodes}
                qb.append(StructureData, filters=filters)

            elif self.mode.value == "calculated":
                # Structures output by a job calculation.
                qb.append(JobCalculation)
                qb.append(StructureData,
                          output_of=JobCalculation,
                          filters=filters)

            elif self.mode.value == "edited":
                # Structures output by a work calculation.
                qb.append(WorkCalculation)
                qb.append(StructureData,
                          output_of=WorkCalculation,
                          filters=filters)

            else:
                # mode == "all": no provenance restriction.  (The original
                # code had a no-op bare comparison `self.mode.value == "all"`
                # here, which evaluated and discarded its result; removed.)
                qb.append(StructureData, filters=filters)

        qb.order_by({StructureData: {'ctime': 'desc'}})
        matches = set([n[0] for n in qb.iterall()])
        matches = sorted(matches, reverse=True, key=lambda n: n.ctime)

        # Renamed from `c` to avoid shadowing the callback parameter.
        n_found = len(matches)
        options = OrderedDict()
        options["Select a Structure (%d found)" % n_found] = False

        for node in matches:
            label = "PK: %d" % node.pk
            label += " | " + node.ctime.strftime("%Y-%m-%d %H:%M")
            label += " | " + node.get_extra("formula")
            label += " | " + node.description
            options[label] = node

        self.results.options = options
예제 #22
0
    def test_same_computer_import(self):
        """
        Test that you can import nodes in steps without any problems. In this
        test we will import a first calculation and then a second one. The
        import should work as expected and have in the end two job
        calculations.

        Each calculation is related to the same computer. In the end we should
        have only one computer
        """
        import os
        import shutil
        import tempfile

        from aiida.orm.importexport import export
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.computer import Computer
        from aiida.orm.calculation.job import JobCalculation

        # Creating a folder for the import/export files
        export_file_tmp_folder = tempfile.mkdtemp()
        unpack_tmp_folder = tempfile.mkdtemp()

        try:
            # Store two job calculation related to the same computer
            calc1_label = "calc1"
            calc1 = JobCalculation()
            calc1.set_computer(self.computer)
            calc1.set_resources({"num_machines": 1,
                                 "num_mpiprocs_per_machine": 1})
            calc1.label = calc1_label
            calc1.store()
            # Mark the calculation as past the 'new' state so it is exportable.
            calc1._set_state(u'RETRIEVING')

            calc2_label = "calc2"
            calc2 = JobCalculation()
            calc2.set_computer(self.computer)
            calc2.set_resources({"num_machines": 2,
                                 "num_mpiprocs_per_machine": 2})
            calc2.label = calc2_label
            calc2.store()
            calc2._set_state(u'RETRIEVING')

            # Store locally the computer name
            # (Python 2 `unicode` builtin; snapshot taken before clean_db()
            # wipes the computer from the database.)
            comp_name = unicode(self.computer.name)
            comp_uuid = unicode(self.computer.uuid)

            # Export the first job calculation
            filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
            export([calc1.dbnode], outfile=filename1, silent=True)

            # Export the second job calculation
            filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz")
            export([calc2.dbnode], outfile=filename2, silent=True)

            # Clean the local database
            self.clean_db()

            # Check that there are no computers
            qb = QueryBuilder()
            qb.append(Computer, project=['*'])
            self.assertEqual(qb.count(), 0, "There should not be any computers"
                                            "in the database at this point.")

            # Check that there are no calculations
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['*'])
            self.assertEqual(qb.count(), 0, "There should not be any "
                                            "calculations in the database at "
                                            "this point.")

            # Import the first calculation
            import_data(filename1, silent=True)

            # Check that the calculation computer is imported correctly.
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['label'])
            self.assertEqual(qb.count(), 1, "Only one calculation should be "
                                            "found.")
            self.assertEqual(unicode(qb.first()[0]), calc1_label,
                             "The calculation label is not correct.")

            # Check that the referenced computer is imported correctly.
            qb = QueryBuilder()
            qb.append(Computer, project=['name', 'uuid', 'id'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                                            "found.")
            self.assertEqual(unicode(qb.first()[0]), comp_name,
                             "The computer name is not correct.")
            self.assertEqual(unicode(qb.first()[1]), comp_uuid,
                             "The computer uuid is not correct.")

            # Store the id of the computer
            comp_id = qb.first()[2]

            # Import the second calculation
            import_data(filename2, silent=True)

            # Check that the number of computers remains the same and its data
            # did not change (the second import must reuse the existing
            # computer, matched by uuid, instead of creating a duplicate).
            qb = QueryBuilder()
            qb.append(Computer, project=['name', 'uuid', 'id'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                                            "found.")
            self.assertEqual(unicode(qb.first()[0]), comp_name,
                             "The computer name is not correct.")
            self.assertEqual(unicode(qb.first()[1]), comp_uuid,
                             "The computer uuid is not correct.")
            self.assertEqual(qb.first()[2], comp_id,
                             "The computer id is not correct.")

            # Check that now you have two calculations attached to the same
            # computer.
            qb = QueryBuilder()
            qb.append(Computer, tag='comp')
            qb.append(JobCalculation, has_computer='comp', project=['label'])
            self.assertEqual(qb.count(), 2, "Two calculations should be "
                                            "found.")
            ret_labels = set(_ for [_] in qb.all())
            self.assertEqual(ret_labels, set([calc1_label, calc2_label]),
                             "The labels of the calculations are not correct.")

        finally:
            # Deleting the created temporary folders
            shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
            shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
예제 #23
0
def test_and_get_codenode(codenode, expected_code_type, use_exceptions=False):
    """
    Pass a code node and an expected code (plugin) type. Check that the
    code exists, is unique, and return the Code object.

    :param codenode: the name of the code to load (in the form label@machine)
    :param expected_code_type: a string with the plugin that is expected to
      be loaded. In case no plugins exist with the given name, show all existing
      plugins of that type
    :param use_exceptions: if True, raise a ValueError exception instead of
      calling sys.exit(1)
    :return: a Code object

    :example usage: from kkr_scf workflow::

        if 'voronoi' in inputs:
            try:
                test_and_get_codenode(inputs.voronoi, 'kkr.voro', use_exceptions=True)
            except ValueError:
                error = ("The code you provided for voronoi  does not "
                         "use the plugin kkr.voro")
                self.control_end_wc(error)
    """
    import sys
    from aiida.common.exceptions import NotExistent
    from aiida.orm import Code

    try:
        if codenode is None:
            raise ValueError
        code = codenode
        if code.get_input_plugin_name() != expected_code_type:
            raise ValueError
    except (NotExistent, ValueError):
        # The given code is missing or has the wrong plugin: build a helpful
        # error message listing all codes that DO have the expected plugin.
        from aiida.orm.querybuilder import QueryBuilder
        qb = QueryBuilder()
        qb.append(
            Code,
            filters={'attributes.input_plugin': {
                '==': expected_code_type
            }},
            project='*')

        valid_code_labels = [
            "{}@{}".format(c.label,
                           c.get_computer().name) for [c] in qb.all()
        ]

        if valid_code_labels:
            msg = ("Pass as further parameter a valid code label.\n"
                   "Valid labels with a {} executable are:\n".format(
                       expected_code_type))
            msg += "\n".join("* {}".format(label)
                             for label in valid_code_labels)
        else:
            msg = ("Code not valid, and no valid codes for {}.\n"
                   "Configure at least one first using\n"
                   "    verdi code setup".format(expected_code_type))

        # Single exit path (the original duplicated this in both branches).
        # sys.stderr.write is used instead of the Python-2-only
        # `print >> sys.stderr, msg`; behavior (message + newline) is the same.
        if use_exceptions:
            raise ValueError(msg)
        sys.stderr.write(msg + "\n")
        sys.exit(1)

    return code
예제 #24
0
    def test_different_computer_same_name_import(self):
        """
        This test checks that if there is a name collision, the imported
        computers are renamed accordingly.

        Three calculations are exported, each against a *different* computer
        instance (the database is reset between exports) that carries the
        *same* name. On import, the second and third computers must be
        renamed with ``COMP_DUPL_SUFFIX``.
        """
        import os
        import shutil
        import tempfile

        from aiida.orm.importexport import export
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.computer import Computer
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.importexport import COMP_DUPL_SUFFIX

        # Creating a folder for the import/export files
        export_file_tmp_folder = tempfile.mkdtemp()
        unpack_tmp_folder = tempfile.mkdtemp()

        try:
            # Set the computer name
            comp1_name = "localhost_1"
            self.computer.set_name(comp1_name)

            # Store a calculation
            calc1_label = "calc1"
            calc1 = JobCalculation()
            calc1.set_computer(self.computer)
            calc1.set_resources({"num_machines": 1,
                                 "num_mpiprocs_per_machine": 1})
            calc1.label = calc1_label
            calc1.store()
            # Mark the calculation as past the 'new' state so it is exportable.
            calc1._set_state(u'RETRIEVING')

            # Export the first job calculation
            filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
            export([calc1.dbnode], outfile=filename1, silent=True)

            # Reset the database: insert_data() recreates self.computer as a
            # NEW computer (different uuid), to which we give the same name.
            self.clean_db()
            self.insert_data()

            # Set the computer name to the same name as before
            self.computer.set_name(comp1_name)

            # Store a second calculation
            calc2_label = "calc2"
            calc2 = JobCalculation()
            calc2.set_computer(self.computer)
            calc2.set_resources({"num_machines": 2,
                                 "num_mpiprocs_per_machine": 2})
            calc2.label = calc2_label
            calc2.store()
            calc2._set_state(u'RETRIEVING')

            # Export the second job calculation
            filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz")
            export([calc2.dbnode], outfile=filename2, silent=True)

            # Reset the database
            self.clean_db()
            self.insert_data()

            # Set the computer name to the same name as before
            self.computer.set_name(comp1_name)

            # Store a third calculation
            calc3_label = "calc3"
            calc3 = JobCalculation()
            calc3.set_computer(self.computer)
            calc3.set_resources({"num_machines": 2,
                                 "num_mpiprocs_per_machine": 2})
            calc3.label = calc3_label
            calc3.store()
            calc3._set_state(u'RETRIEVING')

            # Export the third job calculation
            filename3 = os.path.join(export_file_tmp_folder, "export3.tar.gz")
            export([calc3.dbnode], outfile=filename3, silent=True)

            # Clean the local database
            self.clean_db()

            # Check that there are no computers
            qb = QueryBuilder()
            qb.append(Computer, project=['*'])
            self.assertEqual(qb.count(), 0, "There should not be any computers"
                                            "in the database at this point.")

            # Check that there are no calculations
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['*'])
            self.assertEqual(qb.count(), 0, "There should not be any "
                                            "calculations in the database at "
                                            "this point.")

            # Import all the calculations
            import_data(filename1, silent=True)
            import_data(filename2, silent=True)
            import_data(filename3, silent=True)

            # Retrieve the calculation-computer pairs
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['label'], tag='jcalc')
            qb.append(Computer, project=['name'],
                      computer_of='jcalc')
            self.assertEqual(qb.count(), 3, "Three combinations expected.")
            res = qb.all()
            # First computer keeps its name; the two colliding ones must have
            # been renamed with the duplicate suffix (indices 0 and 1).
            self.assertIn([calc1_label, comp1_name], res,
                          "Calc-Computer combination not found.")
            self.assertIn([calc2_label,
                           comp1_name + COMP_DUPL_SUFFIX.format(0)], res,
                          "Calc-Computer combination not found.")
            self.assertIn([calc3_label,
                           comp1_name + COMP_DUPL_SUFFIX.format(1)], res,
                          "Calc-Computer combination not found.")
        finally:
            # Deleting the created temporary folders
            shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
            shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
예제 #25
0
    def get_from_string(cls, code_string):
        """
        Get a Computer object with given identifier string, that can either be
        the numeric ID (pk), or the label (if unique); the label can either
        be simply the label, or in the format label@machinename. See the note
        below for details on the string detection algorithm.

        .. note:: If a string that can be converted to an integer is given,
          the numeric ID is verified first (therefore, is a code A with a
          label equal to the ID of another code B is present, code A cannot
          be referenced by label). Similarly, the (leftmost) '@' symbol is
          always used to split code and computername. Therefore do not use
          '@' in the code name if you want to use this function
          ('@' in the computer name are instead valid).

        :param code_string: the code string identifying the code to load

        :raise NotExistent: if no code identified by the given string is found
        :raise MultipleObjectsError: if the string cannot identify uniquely
            a code
        """
        from aiida.common.exceptions import NotExistent, MultipleObjectsError
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.computer import Computer
        from aiida.orm.code import Code

        try:
            # First interpretation: the string is a numeric pk. If it is not
            # even a valid integer, int() raises ValueError and we fall
            # straight through to the label-based lookup below.
            code_int = int(code_string)
            try:
                return cls.get_subclass_from_pk(code_int)
            except NotExistent:
                # Deliberate exception-as-goto: re-raise as ValueError so the
                # outer handler retries the lookup treating the string as a
                # label instead of a pk.
                raise ValueError()  # Jump to the following section
                # to check if a code with the given
                # label exists.
            except MultipleObjectsError:
                raise MultipleObjectsError("More than one code in the DB "
                                           "with pk='{}'!".format(code_string))
        except ValueError:
            # Before dying, try to see if the user passed a (unique) label.
            # split with the leftmost '@' symbol (i.e. code names cannot
            # contain '@' symbols, computer names can)
            qb = QueryBuilder()
            # partition() returns ('', '', '') -style triple; `sep` is empty
            # when there was no '@', in which case no computer filter is added.
            codename, sep, computername = code_string.partition('@')
            qb.append(cls,
                      filters={'label': {
                          '==': codename
                      }},
                      project=['*'],
                      tag='code')
            if sep:
                qb.append(Computer,
                          filters={'name': {
                              '==': computername
                          }},
                          computer_of='code')

            if qb.count() == 0:
                raise NotExistent("'{}' is not a valid code "
                                  "ID or label.".format(code_string))
            elif qb.count() > 1:
                codes = [_ for [_] in qb.all()]
                retstr = ("There are multiple codes with label '{}', "
                          "having IDs: ".format(code_string))
                retstr += ", ".join(sorted([str(c.pk) for c in codes])) + ".\n"
                retstr += ("Relabel them (using their ID), or refer to them "
                           "with their ID.")
                raise MultipleObjectsError(retstr)
            else:
                # Exactly one match: return the Code object itself.
                return qb.first()[0]
예제 #26
0
    def get_potcars_dict(cls, elements, family_name, mapping=None):
        """
        Get a dictionary {element: ``PotcarData.full_name``} for all given symbols.

        :param elements: The list of symbols to find POTCARs for
        :param family_name: The POTCAR family to be used
        :param mapping: A mapping[element] -> ``full_name``, for example: mapping={'In': 'In', 'As': 'As_d'}

        Exceptions:

         *If the mapping does not contain an item for a given element name, raise a ``ValueError``.
         *If no POTCAR is found for a given element, a ``NotExistent`` error is raised.

        If there are multiple POTCAR with the same ``full_name``, the first one
        returned by ``PotcarData.find()`` will be used.
        """
        if not mapping:
            mapping = {element: element for element in elements}

        # Validate the mapping up front. The original code performed this
        # check inside the result loop, but building ``element_filters``
        # below already indexes ``mapping[element]`` and would raise a bare
        # KeyError first, making the documented ValueError unreachable.
        for element in elements:
            if element not in mapping:
                raise ValueError(
                    'Potcar mapping must contain an item for each element in the structure, '
                    'with the full name of the POTCAR file (i.e. "In_d", "As_h").'
                )

        group_filters = {
            'name': {
                '==': family_name
            },
            'type': {
                '==': cls.potcar_family_type_string
            }
        }
        element_filters = {
            'attributes.full_name': {
                'in': [mapping[element] for element in elements]
            }
        }
        query = QueryBuilder()
        query.append(Group, tag='family', filters=group_filters)
        query.append(cls,
                     tag='potcar',
                     member_of='family',
                     filters=element_filters)

        # Run the query once and keep the nodes; the original re-executed
        # ``query.all()`` on every loop iteration, multiplying DB round trips.
        candidates = [row[0] for row in query.all()]

        result_potcars = {}
        for element in elements:
            full_name = mapping[element]
            potcars_of_kind = [
                potcar for potcar in candidates
                if potcar.full_name == full_name
            ]
            if not potcars_of_kind:
                raise NotExistent(
                    'No POTCAR found for full name {} in family {}'.format(
                        full_name, family_name))
            elif len(potcars_of_kind) > 1:
                # Ambiguous: defer to find(), which defines the canonical
                # ordering, and take its first hit.
                result_potcars[element] = cls.find(family=family_name,
                                                   full_name=full_name)[0]
            else:
                result_potcars[element] = potcars_of_kind[0]

        return result_potcars
예제 #27
0
def configure_computer_v012(computer, user_email=None, authparams=None):
    """Configure the authentication information for a given computer

    adapted from aiida-core v0.12.2:
    aiida_core.aiida.cmdline.commands.computer.Computer.computer_configure

    :param computer: the computer to authenticate against
    :param user_email: the user email (otherwise use default)
    :param authparams: a dictionary of additional authorisation parameters to use (in string format)
    :return:
    """
    from aiida.common.exceptions import ValidationError
    from aiida.backends.utils import get_automatic_user
    # aiida-core v1
    # from aiida.orm.backend import construct_backend
    # backend = construct_backend()
    # get_automatic_user = backend.users.get_automatic_user

    authparams = {} if authparams is None else authparams
    transport = computer.get_transport_class()
    # The transport class declares which auth keys it accepts.
    valid_keys = transport.get_valid_auth_params()

    if user_email is None:
        user = get_automatic_user()
    else:
        # Look up the user by email via the QueryBuilder.
        from aiida.orm.querybuilder import QueryBuilder
        qb = QueryBuilder()
        qb.append(type="user", filters={'email': user_email})
        user = qb.first()
        if not user:
            raise ValueError("user email not found: {}".format(user_email))
        user = user[0]._dbuser  # for Django, the wrong user class is returned

    authinfo, old_authparams = _get_auth_info(computer, user)

    # print ("Configuring computer '{}' for the AiiDA user '{}'".format(
    #     computername, user.email))
    #
    # print "Computer {} has transport of type {}".format(computername,
    #                                                     computer.get_transport_type())

    # from aiida.common.utils import get_configured_user_email
    # if user.email != get_configured_user_email():
    # print "*" * 72
    # print "** {:66s} **".format("WARNING!")
    # print "** {:66s} **".format(
    #     "  You are configuring a different user.")
    # print "** {:66s} **".format(
    #     "  Note that the default suggestions are taken from your")
    # print "** {:66s} **".format(
    #     "  local configuration files, so they may be incorrect.")
    # print "*" * 72

    # Carry over previously-stored values for still-valid keys; anything the
    # caller did not override defaults to the stored value.
    default_authparams = {}
    for k in valid_keys:
        if k in old_authparams:
            default_authparams[k] = old_authparams.pop(k)
            if k not in authparams:
                authparams[k] = default_authparams[k]

    # Whatever remains in old_authparams is no longer a valid key for this
    # transport; warn that it is being dropped.
    if old_authparams:
        print("WARNING: the following keys were previously in the "
              "authorization parameters, but have not been recognized "
              "and have been deleted: {}".format(", ".join(
                  old_authparams.keys())))

    if set(authparams.keys()) != set(valid_keys):
        raise ValueError(
            "new_authparams should contain only the keys: {}".format(
                valid_keys))

    # convert keys from strings: each transport exposes per-key
    # `_convert_<key>_fromstring` converters (and optional
    # `_get_<key>_suggestion_string` helpers used only for error messages).
    transport_members = dict(inspect.getmembers(transport))
    for k, txtval in authparams.items():

        converter_name = '_convert_{}_fromstring'.format(k)
        suggester_name = '_get_{}_suggestion_string'.format(k)
        if converter_name not in transport_members:
            raise ValueError("No {} defined in Transport {}".format(
                converter_name, computer.get_transport_type()))
        converter = transport_members[converter_name]

        suggestion = ""
        if k in default_authparams:
            suggestion = default_authparams[k]
        elif suggester_name in transport_members:
            suggestion = transport_members[suggester_name](computer)

        try:
            authparams[k] = converter(txtval)
        # NOTE: Python 2-only except syntax, consistent with this file.
        except ValidationError, err:
            raise ValueError("error in the authparam "
                             "{0}: {1}, suggested value: {2}".format(
                                 k, err, suggestion))
예제 #28
0
# Queries example from the AiiDA tutorial: for every RemoteData node that
# belongs to one of the tutorial groups, project the smearing energy of a
# linked ParameterData node.

from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.data.remote import RemoteData

StructureData = DataFactory("structure")
ParameterData = DataFactory("parameter")

qb = QueryBuilder()
qb.append(RemoteData, tag="remote", project=["*"])
qb.append(Group, group_of="remote",
          filters={"name": {"in": ["tutorial_pbesol", "tutorial_lda", "tutorial_pbe"]}})

# NOTE(review): the original line was missing its closing parenthesis, which
# made the entire snippet a SyntaxError; fixed here.
qb.append(ParameterData, project=["attributes.energy_smearing"])

# Variant with an additional filter, kept for reference:
# qb.append(ParameterData, project=["attributes.energy_smearing"],
#           filters={"id": {"==": 1}})

qb.all()

예제 #29
0
    def get_creation_statistics(
            self,
            user_email=None
    ):
        """
        Return a dictionary with the statistics of node creation, summarized by day.

        :note: Days when no nodes were created are not present in the returned `ctime_by_day` dictionary.

        :param user_email: If None (default), return statistics for all users.
            If an email is specified, return only the statistics for the given user.

        :return: a dictionary as
            follows::

                {
                   "total": TOTAL_NUM_OF_NODES,
                   "types": {TYPESTRING1: count, TYPESTRING2: count, ...},
                   "ctime_by_day": {'YYYY-MMM-DD': count, ...}

            where in `ctime_by_day` the key is a string in the format 'YYYY-MM-DD' and the value is
            an integer with the number of nodes created that day.
        """
        from aiida.orm.querybuilder import QueryBuilder as QB
        from aiida.orm import User, Node
        from collections import Counter
        import datetime

        def _summarize(rows):
            # Build the "types" and "ctime_by_day" sub-dictionaries from the
            # (id, ctime, type) rows returned by the query.

            def _counter_to_dict(counter):
                # Convert a Counter to a plain dict, inserting entries in
                # descending order of count.
                converted = {}
                for num, name in sorted(
                        (v, k) for k, v in counter.iteritems())[::-1]:
                    converted[name] = num
                return converted

            summary = {}
            summary["types"] = _counter_to_dict(
                Counter(row[2] for row in rows))

            day_strings = [row[1].strftime("%Y-%m-%d") for row in rows]
            per_day = Counter(day_strings)

            by_day = {}
            if day_strings:
                # The 'YYYY-MM-DD' format sorts chronologically when sorted
                # alphabetically, so min/max come from a plain sort.
                ordered = sorted(day_strings)
                first_day = datetime.datetime.strptime(ordered[0], '%Y-%m-%d')
                last_day = datetime.datetime.strptime(ordered[-1], '%Y-%m-%d')

                # Walk day by day so that zero-count days inside the range
                # still get an explicit 0 entry.
                current = first_day
                while current <= last_day:
                    key = current.strftime('%Y-%m-%d')
                    by_day[key] = per_day.get(key, 0)
                    current += datetime.timedelta(days=1)
            summary["ctime_by_day"] = by_day

            return summary

        query = QB()
        query.append(Node, project=['id', 'ctime', 'type'], tag='node')
        if user_email is not None:
            # Restrict to nodes created by the given user.
            query.append(User, creator_of='node', project='email',
                         filters={'email': user_email})
        rows = query.all()

        statistics = {"total": len(rows)}
        statistics.update(_summarize(rows))

        return statistics
예제 #30
0
def upload_upf_family(folder,
                      group_name,
                      group_description,
                      stop_if_existing=True):
    """
    Upload a set of UPF files in a given group.

    :param folder: a path containing all UPF files to be added.
        Only files ending in .UPF (case-insensitive) are considered.
    :param group_name: the name of the group to create. If it exists and is
        non-empty, a UniquenessError is raised.
    :param group_description: a string to be set as the group description.
        Overwrites previous descriptions, if the group was existing.
    :param stop_if_existing: if True, check for the md5 of the files and,
        if the file already exists in the DB, raises a MultipleObjectsError.
        If False, simply adds the existing UPFData node to the group.
        NOTE(review): the code below actually raises a ValueError in this
        case, not a MultipleObjectsError -- confirm which is intended.
    :return: a tuple ``(nfiles, nuploaded)``: the number of UPF files found
        in the folder and the number of new UpfData nodes actually stored.
    """
    import os

    import aiida.common
    from aiida.common import aiidalogger
    from aiida.orm import Group
    from aiida.common.exceptions import UniquenessError, NotExistent
    from aiida.orm.backend import construct_backend
    from aiida.orm.querybuilder import QueryBuilder
    if not os.path.isdir(folder):
        raise ValueError("folder must be a directory")

    backend = construct_backend()

    # only files, and only those ending with .upf or .UPF;
    # go to the real file if it is a symlink
    files = [
        os.path.realpath(os.path.join(folder, i)) for i in os.listdir(folder)
        if os.path.isfile(os.path.join(folder, i))
        and i.lower().endswith('.upf')
    ]

    nfiles = len(files)

    # Fetch the existing group, or build (but do not yet store) a new one,
    # owned by the automatic (default) user
    automatic_user = backend.users.get_automatic_user()
    try:
        group = Group.get(name=group_name, type_string=UPFGROUP_TYPE)
        group_created = False
    except NotExistent:
        group = Group(name=group_name,
                      type_string=UPFGROUP_TYPE,
                      user=automatic_user)
        group_created = True

    # Refuse to modify a family that belongs to a different user
    if group.user.email != automatic_user.email:
        raise UniquenessError("There is already a UpfFamily group with name {}"
                              ", but it belongs to user {}, therefore you "
                              "cannot modify it".format(
                                  group_name, group.user.email))

    # Always update description, even if the group already existed
    group.description = group_description

    # NOTE: GROUP SAVED ONLY AFTER CHECKS OF UNICITY

    # List of (UpfData, to_be_created) pairs, one per file in the folder
    pseudo_and_created = []

    for f in files:
        # Deduplicate by md5: reuse an existing UpfData node when one with
        # the same checksum is already in the database
        md5sum = aiida.common.utils.md5_file(f)
        qb = QueryBuilder()
        qb.append(UpfData, filters={'attributes.md5': {'==': md5sum}})
        existing_upf = qb.first()

        #~ existing_upf = UpfData.query(dbattributes__key="md5",
        #~ dbattributes__tval=md5sum)

        if existing_upf is None:
            # return the upfdata instances, not stored
            pseudo, created = UpfData.get_or_create(f,
                                                    use_first=True,
                                                    store_upf=False)
            # to check whether only one upf per element exists
            # NOTE: actually, created has the meaning of "to_be_created"
            pseudo_and_created.append((pseudo, created))
        else:
            if stop_if_existing:
                raise ValueError("A UPF with identical MD5 to "
                                 " {} cannot be added with stop_if_existing"
                                 "".format(f))
            # qb.first() returns a one-element row; unwrap the node itself
            existing_upf = existing_upf[0]
            pseudo_and_created.append((existing_upf, False))

    # check whether pseudo are unique per element
    elements = [(i[0].element, i[0].md5sum) for i in pseudo_and_created]
    # If group already exists, check also that I am not inserting more than
    # once the same element
    if not group_created:
        for aiida_n in group.nodes:
            # Skip non-pseudos
            if not isinstance(aiida_n, UpfData):
                continue
            elements.append((aiida_n.element, aiida_n.md5sum))

    elements = set(elements)  # Discard elements with the same MD5, that would
    # not be stored twice
    elements_names = [e[0] for e in elements]

    # The same element appearing with two different MD5s is ambiguous:
    # a family must provide exactly one pseudo per element
    if not len(elements_names) == len(set(elements_names)):
        duplicates = set(
            [x for x in elements_names if elements_names.count(x) > 1])
        duplicates_string = ", ".join(i for i in duplicates)
        raise UniquenessError("More than one UPF found for the elements: " +
                              duplicates_string + ".")

    # At this point, save the group, if still unstored
    if group_created:
        group.store()

    # save the upf in the database, and add them to group
    for pseudo, created in pseudo_and_created:
        if created:
            pseudo.store()

            aiidalogger.debug("New node {} created for file {}".format(
                pseudo.uuid, pseudo.filename))
        else:
            aiidalogger.debug("Reusing node {} for file {}".format(
                pseudo.uuid, pseudo.filename))

    # Add elements to the group all together
    group.add_nodes(pseudo for pseudo, created in pseudo_and_created)

    nuploaded = len([_ for _, created in pseudo_and_created if created])

    return nfiles, nuploaded
예제 #31
0
    def get_bands_and_parents_structure(self, args):
        """
        Search for bands and return bands and the closest structure that is a parent of the instance.
        This is the backend independent way, can be overriden for performance reason

        :param args: namespace with the query options (``all_users``,
            ``past_days``, ``group_name``, ``group_pk``, ``element``,
            ``element_only``, ``formulamode``)
        :returns:
            A list of sublists, each latter containing (in order):
                pk as string, formula as string, creation date, bandsdata-label
        """
        import datetime
        from aiida.utils import timezone
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.backends.utils import get_automatic_user
        from aiida.orm.implementation import User
        from aiida.orm.implementation import Group
        from aiida.orm.data.structure import (get_formula, get_symbols_string)
        from aiida.orm.data.array.bands import BandsData
        from aiida.orm.data.structure import StructureData

        qb = QueryBuilder()
        # Restrict to the current user unless all_users was requested
        if args.all_users is False:
            au = get_automatic_user()
            user = User(dbuser=au)
            qb.append(User, tag="creator", filters={"email": user.email})
        else:
            qb.append(User, tag="creator")

        # Optional time window on the BandsData creation time
        bdata_filters = {}
        if args.past_days is not None:
            now = timezone.now()
            n_days_ago = now - datetime.timedelta(days=args.past_days)
            bdata_filters.update({"ctime": {'>=': n_days_ago}})

        qb.append(BandsData, tag="bdata", created_by="creator",
                  filters=bdata_filters,
                  project=["id", "label", "ctime"]
                  )

        # Optional restriction to groups, by name and/or pk
        group_filters = {}

        if args.group_name is not None:
            group_filters.update({"name": {"in": args.group_name}})
        if args.group_pk is not None:
            group_filters.update({"id": {"in": args.group_pk}})
        if group_filters:
            qb.append(Group, tag="group", filters=group_filters,
                      group_of="bdata")

        qb.append(StructureData, tag="sdata", ancestor_of="bdata",
                  # We don't care about the creator of StructureData
                  project=["id", "attributes.kinds", "attributes.sites"])

        qb.order_by({StructureData: {'ctime': 'desc'}})

        list_data = qb.distinct()

        entry_list = []
        already_visited_bdata = set()

        for [bid, blabel, bdate, sid, akinds, asites] in list_data.all():

            # We process only one StructureData per BandsData.
            # We want to process the closest StructureData to
            # every BandsData.
            # We hope that the StructureData with the latest
            # creation time is the closest one.
            # This will be updated when the QueryBuilder supports
            # order_by by the distance of two nodes.
            # (idiomatic 'in' instead of calling __contains__ directly)
            if bid in already_visited_bdata:
                continue
            already_visited_bdata.add(bid)

            # Keep only structures containing at least one requested element
            if args.element is not None:
                all_symbols = [_["symbols"][0] for _ in akinds]
                if not any(s in args.element for s in all_symbols):
                    continue

            # Keep only structures containing all of the requested elements
            if args.element_only is not None:
                all_symbols = [_["symbols"][0] for _ in akinds]
                if not all(s in all_symbols for s in args.element_only):
                    continue

            # We want only the StructureData that have attributes
            if akinds is None or asites is None:
                continue

            # Map each kind name to a printable symbol string (with weights)
            symbol_dict = {}
            for k in akinds:
                symbols = k['symbols']
                weights = k['weights']
                symbol_dict[k['name']] = get_symbols_string(symbols,
                                                            weights)

            try:
                symbol_list = []
                for s in asites:
                    symbol_list.append(symbol_dict[s['kind_name']])
                formula = get_formula(symbol_list,
                                      mode=args.formulamode)
            # If for some reason there is no kind with the name
            # referenced by the site
            except KeyError:
                formula = "<<UNKNOWN>>"
            entry_list.append([str(bid), str(formula),
                               bdate.strftime('%d %b %Y'), blabel])

        return entry_list
예제 #32
0
    def get_io_tree(self, uuid_pattern, tree_in_limit, tree_out_limit):
        # pylint: disable=too-many-statements,too-many-locals
        """
        json data to display nodes in tree format

        :param uuid_pattern: main node uuid
        :param tree_in_limit: maximum number of incoming links to return
            (None means no limit)
        :param tree_out_limit: maximum number of outgoing links to return
            (None means no limit)
        :return: json data to display node tree
        """
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm import Node

        def get_node_description(node):
            """
            Get the description of the node.
            CalcJobNodes migrated from AiiDA < 1.0.0 do not have a valid CalcJobState,
            in this case the function returns as description the type of the node (CalcJobNode)
            :param node: node object
            :return: description of the node
            """
            try:
                description = node.get_description()
            except ValueError:
                # fall back to the class part of the type string, e.g.
                # 'CalcJobNode' from 'process.calculation.calcjob.CalcJobNode.'
                description = node.node_type.split('.')[-2]
            return description

        # Check whether uuid_pattern identifies a unique node
        self._check_id_validity(uuid_pattern)

        # Fetch the main node matching the uuid pattern
        qb_obj = QueryBuilder()
        qb_obj.append(Node, tag='main', project=['*'], filters=self._id_filter)

        # single-element list; its 'incoming'/'outgoing' lists are filled below
        nodes = []

        if qb_obj.count() > 0:
            main_node = qb_obj.first()[0]
            pk = main_node.pk
            uuid = main_node.uuid
            nodetype = main_node.node_type
            nodelabel = main_node.label
            description = get_node_description(main_node)
            ctime = main_node.ctime
            mtime = main_node.mtime

            nodes.append({
                'ctime': ctime,
                'mtime': mtime,
                'id': pk,
                'uuid': uuid,
                'node_type': nodetype,
                'node_label': nodelabel,
                'description': description,
                'incoming': [],
                'outgoing': []
            })

        # get all incoming
        qb_obj = QueryBuilder()
        qb_obj.append(Node, tag='main', project=['*'], filters=self._id_filter)
        qb_obj.append(Node,
                      tag='in',
                      project=['*'],
                      edge_project=['label', 'type'],
                      with_outgoing='main').order_by(
                          {'in': [{
                              'id': {
                                  'order': 'asc'
                              }
                          }]})
        if tree_in_limit is not None:
            qb_obj.limit(tree_in_limit)

        # number of incoming links actually sent back (after the limit)
        sent_no_of_incomings = qb_obj.count()

        if sent_no_of_incomings > 0:
            # NOTE(review): this assumes the main-node query above matched,
            # so nodes[0] exists; _check_id_validity presumably guarantees
            # this -- confirm
            for node_input in qb_obj.iterdict():
                node = node_input['in']['*']
                pk = node.pk
                # 'main--in' holds the projected properties of the link itself
                linklabel = node_input['main--in']['label']
                linktype = node_input['main--in']['type']
                uuid = node.uuid
                nodetype = node.node_type
                nodelabel = node.label
                description = get_node_description(node)
                node_ctime = node.ctime
                node_mtime = node.mtime

                nodes[0]['incoming'].append({
                    'ctime': node_ctime,
                    'mtime': node_mtime,
                    'id': pk,
                    'uuid': uuid,
                    'node_type': nodetype,
                    'node_label': nodelabel,
                    'description': description,
                    'link_label': linklabel,
                    'link_type': linktype
                })

        # get all outgoing
        qb_obj = QueryBuilder()
        qb_obj.append(Node, tag='main', project=['*'], filters=self._id_filter)
        qb_obj.append(Node,
                      tag='out',
                      project=['*'],
                      edge_project=['label', 'type'],
                      with_incoming='main').order_by(
                          {'out': [{
                              'id': {
                                  'order': 'asc'
                              }
                          }]})
        if tree_out_limit is not None:
            qb_obj.limit(tree_out_limit)

        # number of outgoing links actually sent back (after the limit)
        sent_no_of_outgoings = qb_obj.count()

        if sent_no_of_outgoings > 0:
            for output in qb_obj.iterdict():
                node = output['out']['*']
                pk = node.pk
                # 'main--out' holds the projected properties of the link itself
                linklabel = output['main--out']['label']
                linktype = output['main--out']['type']
                uuid = node.uuid
                nodetype = node.node_type
                nodelabel = node.label
                description = get_node_description(node)
                node_ctime = node.ctime
                node_mtime = node.mtime

                nodes[0]['outgoing'].append({
                    'ctime': node_ctime,
                    'mtime': node_mtime,
                    'id': pk,
                    'uuid': uuid,
                    'node_type': nodetype,
                    'node_label': nodelabel,
                    'description': description,
                    'link_label': linklabel,
                    'link_type': linktype
                })

        # count total no of nodes (without the limits applied above, so the
        # caller can tell how many links were truncated)
        builder = QueryBuilder()
        builder.append(Node,
                       tag='main',
                       project=['id'],
                       filters=self._id_filter)
        builder.append(Node, tag='in', project=['id'], with_outgoing='main')
        total_no_of_incomings = builder.count()

        builder = QueryBuilder()
        builder.append(Node,
                       tag='main',
                       project=['id'],
                       filters=self._id_filter)
        builder.append(Node, tag='out', project=['id'], with_incoming='main')
        total_no_of_outgoings = builder.count()

        metadata = [{
            'total_no_of_incomings': total_no_of_incomings,
            'total_no_of_outgoings': total_no_of_outgoings,
            'sent_no_of_incomings': sent_no_of_incomings,
            'sent_no_of_outgoings': sent_no_of_outgoings
        }]

        return {'nodes': nodes, 'metadata': metadata}
예제 #33
0
    def query_jobcalculations_by_computer_user_state(
            self, state, computer=None, user=None,
            only_computer_user_pairs=False,
            only_enabled=True, limit=None
    ):
        """
        Filter all calculations with a given state.

        Issue a warning if the state is not in the list of valid states.

        :param state: The state to be used to filter (should be a string among
                those defined in aiida.common.datastructures.calc_states)
        :type state: str
        :param computer: a Django DbComputer entry, or a Computer object, of a
                computer in the DbComputer table.
                A string for the hostname is also valid.
        :param user: a Django entry (or its pk) of a user in the DbUser table;
                if present, the results are restricted to calculations of that
                specific user
        :param only_computer_user_pairs: if False (default) return a queryset
                where each element is a suitable instance of Node (it should
                be an instance of Calculation, if everything goes right!)
                If True, return only a list of tuples, where each tuple is
                in the format
                ('dbcomputer__id', 'user__id')
                [where the IDs are the IDs of the respective tables]
        :type only_computer_user_pairs: bool
        :param only_enabled: if True (default), restrict the query to enabled
                computers only
        :type only_enabled: bool
        :param limit: Limit the number of rows returned
        :type limit: int

        :return: a list of calculation objects matching the filters.
        """
        # I assume that calc_states are strings. If this changes in the future,
        # update the filter below from dbattributes__tval to the correct field.
        from aiida.orm.computer import Computer
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.common.exceptions import InputValidationError
        from aiida.common.datastructures import calc_states

        if state not in calc_states:
            raise InputValidationError("querying for calculation state='{}', but it "
                                "is not a valid calculation state".format(state))

        calcfilter = {'state': {'==': state}}
        # Honour the only_enabled flag: previously the 'enabled' restriction
        # was applied unconditionally, silently ignoring the parameter.
        computerfilter = {"enabled": {'==': True}} if only_enabled else {}
        userfilter = {}

        # Accept a pk, a Computer instance, or any object exposing .id
        if computer is None:
            pass
        elif isinstance(computer, int):
            computerfilter.update({'id': {'==': computer}})
        elif isinstance(computer, Computer):
            computerfilter.update({'id': {'==': computer.pk}})
        else:
            try:
                computerfilter.update({'id': {'==': computer.id}})
            except AttributeError as e:
                raise Exception(
                    "{} is not a valid computer\n{}".format(computer, e)
                )

        # Accept a pk, or any object exposing an integer-convertible .id
        if user is None:
            pass
        elif isinstance(user, int):
            userfilter.update({'id': {'==': user}})
        else:
            try:
                userfilter.update({'id': {'==': int(user.id)}})
            # narrow except (instead of the previous bare one): only the
            # failure modes of the attribute access / int() conversion
            except (AttributeError, TypeError, ValueError):
                raise Exception("{} is not a valid user".format(user))

        qb = QueryBuilder()
        qb.append(type="computer", tag='computer', filters=computerfilter)
        qb.append(JobCalculation, filters=calcfilter, tag='calc', has_computer='computer')
        qb.append(type="user", tag='user', filters=userfilter,
                  creator_of="calc")

        if only_computer_user_pairs:
            qb.add_projection("computer", "*")
            qb.add_projection("user", "*")
            returnresult = qb.distinct().all()
        else:
            qb.add_projection("calc", "*")
            if limit is not None:
                qb.limit(limit)
            returnresult = qb.all()
            # Guard against an empty result set: zip(*[]) has no element [0]
            # and the unguarded version raised IndexError when nothing
            # matched. list(...) also keeps this working on Python 2 and 3.
            if returnresult:
                returnresult = list(zip(*returnresult))[0]
        return returnresult
예제 #34
0
# Query RemoteData nodes that are members of some Group and print them.
# NOTE(review): this snippet assumes an interactive AiiDA shell where
# QueryBuilder, Group and DataFactory are already in the namespace.
from aiida.orm.data.remote import RemoteData

qb=QueryBuilder()
#qb.append(Node, project=["id"])
StructureData = DataFactory("structure")
ParameterData = DataFactory("parameter")

#enumerate the <pk> for each query key
#for node, in qb.iterall():
#	print node
#print
#print("Number of species "+str( qb.count()))

#qb.append(StructureData, project=["id", "uuid"], 
#	filters={"or":[
#	{"id":{"==":285}}, {"id":{"==":3512}} ] })



# To establish links between states: select RemoteData nodes belonging
# to at least one group
qb.append(RemoteData, tag="remote", project=["*"])
qb.append(Group, group_of="remote")

#qb.append(ParameterData, project=["attributes.energy_smearing"]) #, filters=)
#qb.append(ParameterData, project=["attributes.element"])

#for i in qb.iterall():
#	print i
print qb.all()

예제 #35
0
# Query Calculation nodes and the tutorial groups they belong to.
# NOTE(review): this snippet assumes an interactive AiiDA shell where
# QueryBuilder, Group and DataFactory are already in the namespace.
from aiida.orm.calculation import *

path = "/home/aiida/Documents/seb352-travail/essais-tuto/res/"
StructureData = DataFactory("structure")
ParameterData = DataFactory("parameter")
# PwCalculation= DataFactory("calculation")

qb = QueryBuilder()


# NOTE(review): bare attribute access, not a call -- this line does nothing
# and is probably a leftover; verify intent
qb.append

# Try a query on calculations that are outputs of the 'structure' tag.
# NOTE(review): no vertex tagged "structure" is appended before this call,
# so this query likely fails at runtime -- confirm
qb.append(
    Calculation,
    # filters={"id":{"==":4285}},
    tag="calculation",
    output_of="structure",
)


# Project only the names of the (tutorial) groups the calculations belong to
qb.append(
    Group,
    group_of="calculation",
    project=["name"],
    filters={"name": {"in": ["tutorial_pbesol", "tutorial_lda", "tutorial_pbe"]}},
)


a = qb.count()
for i in qb.iterall():