def mock_vasp(fresh_aiida_env, localhost):
    """Points to a mock-up of a VASP executable.

    Returns an AiiDA ``Code`` node labelled ``mock-vasp``; an existing node is
    reused if one is already in the database, otherwise a new one is created
    and configured on the ``localhost`` computer fixture.
    """
    from aiida.orm import Code
    from aiida.orm.querybuilder import QueryBuilder
    # Look for a previously-created 'mock-vasp' code to avoid duplicates.
    query_builder = QueryBuilder()
    query_builder.append(Code, tag='code')
    query_builder.add_filter('code', {'label': {'==': 'mock-vasp'}})
    query_results = query_builder.all()
    if query_results:
        code = query_results[0][0]
    else:
        # NOTE(review): relies on module-level `os`, `sp` (subprocess) and
        # `py_path` imports not visible in this chunk — confirm they exist.
        os_env = os.environ.copy()
        # The computer must be stored before a code can point at it.
        if not localhost.pk:
            localhost.store()
        # returns unicode
        mock_vasp_path = sp.check_output(['which', 'mock-vasp'], env=os_env, universal_newlines=True).strip()
        code = Code()
        code.label = 'mock-vasp'
        code.description = 'Mock VASP for tests'
        code.set_remote_computer_exec((localhost, mock_vasp_path))
        code.set_input_plugin_name('vasp.vasp')
        # Ensure the mock executable runs against the test profile, not the
        # user's real AiiDA configuration.
        aiidapath = py_path.local(
            fresh_aiida_env._manager.root_dir).join('.aiida')
        code.set_prepend_text('export AIIDA_PATH={}'.format(aiidapath))
    return code
def _get_query_builder_uuid_identifier(cls, identifier, classes, query_with_dashes):
    """
    Build a query that maps a UUID-like identifier onto one of the given orm classes.

    The identifier may be a full UUID or just its leading characters; a full
    UUID is matched exactly, a partial one via a ``like`` prefix filter.

    :param identifier: the UUID identifier
    :param classes: a tuple of orm classes to which the identifier should be mapped
    :param query_with_dashes: whether to re-insert dashes at the canonical positions
    :returns: the query builder instance
    """
    from uuid import UUID

    normalized = identifier.replace('-', '')
    if query_with_dashes:
        # Insert dashes from the largest offset downwards so that earlier
        # insertions do not shift the remaining positions.
        for pos in (20, 16, 12, 8):
            if len(normalized) > pos:
                normalized = normalized[:pos] + '-' + normalized[pos:]

    builder = QueryBuilder()
    builder.append(cls=classes, tag='entity', project=['*'])

    try:
        UUID(normalized)
    except ValueError:
        # Not a complete UUID: treat it as a prefix pattern.
        builder.add_filter('entity', {'uuid': {'like': f'{normalized}%'}})
    else:
        # Complete UUID: an exact equality match suffices.
        builder.add_filter('entity', {'uuid': normalized})

    return builder
def _rehash_cmd(all, class_name, pks):
    """Re-hash nodes of class ``class_name``, either an explicit list of PKs or all of them.

    Note: the parameter name ``all`` shadows the builtin; it mirrors the CLI
    ``--all`` flag and is part of the command interface, so it is kept.
    """
    try_load_dbenv()
    from aiida.orm.querybuilder import QueryBuilder

    # Get the Node class to match
    try:
        node_class = load_class(class_name)
    except ClassNotFoundException:
        click.echo("Could not load class '{}'.\nAborted!".format(class_name))
        sys.exit(1)

    # Add the filters for the class and PKs.
    qb = QueryBuilder()
    qb.append(node_class, tag='node')
    if pks:
        qb.add_filter('node', {'id': {'in': pks}})
    else:
        # Without explicit PKs the user must opt in to re-hashing everything.
        if not all:
            click.echo(
                "Nothing specified, nothing re-hashed.\nExplicitly specify the PK of the nodes, or use '--all'."
            )
            return

    if not qb.count():
        click.echo('No matching nodes found.')
        return
    for i, (node, ) in enumerate(qb.iterall()):
        # Emit a progress dot every 100 nodes.
        if i % 100 == 0:
            click.echo('.', nl=False)
        node.rehash()
    click.echo('\nAll done! {} node(s) re-hashed.'.format(i + 1))
def listfamilies(element, with_description):
    """
    Print on screen the list of installed PSF-pseudo families.

    :param element: if truthy, only families containing pseudos for these
        elements are listed (passed as an ``in`` filter)
    :param with_description: if truthy, append each group's description
    """
    from aiida import is_dbenv_loaded, load_dbenv
    if not is_dbenv_loaded():
        load_dbenv()
    from aiida.orm import DataFactory
    from aiida_siesta.data.psf import PSFGROUP_TYPE
    PsfData = DataFactory('siesta.psf')
    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm.group import Group
    # First query: all PSF family groups (optionally restricted by element).
    qb = QueryBuilder()
    qb.append(PsfData, tag='psfdata')
    if element:
        qb.add_filter(PsfData, {'attributes.element': {'in': element}})
    qb.append(
        Group,
        group_of='psfdata',
        tag='group',
        project=["name", "description"],
        filters={"type": {'==': PSFGROUP_TYPE}})
    qb.distinct()
    if qb.count() > 0:
        for res in qb.dict():
            group_name = res.get("group").get("name")
            group_desc = res.get("group").get("description")
            # Second query (rebinding qb): count the pseudos in this family.
            qb = QueryBuilder()
            qb.append(
                Group,
                tag='thisgroup',
                filters={"name": {'like': group_name}})
            qb.append(PsfData, project=["id"], member_of='thisgroup')
            if with_description:
                description_string = ": {}".format(group_desc)
            else:
                description_string = ""
            click.echo("* {} [{} pseudos]{}".format(group_name, qb.count(), description_string))
    else:
        click.echo("No valid PSF pseudopotential family found.", err=True)
def get(cls, element, name=None, version="latest", match_aliases=True, group_label=None, n_el=None):
    """
    Get the first matching BasisSet for the given parameters.

    :param element: The atomic symbol
    :param name: The name of the basis set
    :param version: A specific version (if more than one in the database and not the highest/latest)
    :param match_aliases: Whether to look in the list of aliases for a matching name
    :param group_label: if given, restrict the search to basis sets in that group
    :param n_el: if given, filter on the stored number of electrons
    :raises NotExistent: if no matching basis set is found
    :raises MultipleObjectsError: if the matches have more than one distinct name
    """
    from aiida.orm.querybuilder import QueryBuilder

    query = QueryBuilder()

    params = {}
    if group_label:
        query.append(Group, filters={"label": group_label}, tag="group")
        params["with_group"] = "group"

    query.append(BasisSet, **params)

    filters = {"attributes.element": {"==": element}}

    if version != "latest":
        filters["attributes.version"] = {"==": version}

    if name:
        if match_aliases:
            filters["attributes.aliases"] = {"contains": [name]}
        else:
            filters["attributes.name"] = {"==": name}

    if n_el:
        filters["attributes.n_el"] = {"==": n_el}

    query.add_filter(BasisSet, filters)

    # SQLA ORM only solution:
    # query.order_by({BasisSet: [{"attributes.version": {"cast": "i", "order": "desc"}}]})
    # items = query.first()

    # Sort in Python instead so the query stays backend-agnostic;
    # highest version first.
    items = sorted(query.iterall(), key=lambda b: b[0].version, reverse=True)

    if not items:
        raise NotExistent(
            f"No Gaussian Basis Set found for element={element}, name={name}, version={version}"
        )

    # if we get different names there is no well ordering, sorting by version only works if they have the same name
    if len(set(b[0].name for b in items)) > 1:
        raise MultipleObjectsError(
            f"Multiple Gaussian Basis Set found for element={element}, name={name}, version={version}"
        )

    return items[0][0]
def _get_query_builder_id_identifier(cls, identifier, classes):
    """
    Build a query that maps a numeric ID onto one of the given orm classes.

    :param identifier: the ID identifier
    :param classes: a tuple of orm classes to which the identifier should be mapped
    :returns: the query builder instance
    """
    qb = QueryBuilder()
    qb.append(cls=classes, tag='entity', project=['*'])
    # An ID is matched exactly; no pattern matching as for UUIDs.
    qb.add_filter('entity', {'id': identifier})
    return qb
def listfamilies(elements, with_description):
    """
    Print on screen the list of upf families installed.

    :param elements: if not None, only families containing pseudos for these
        elements are listed (passed as an ``in`` filter)
    :param with_description: if truthy, append each group's description
    """
    from aiida.orm import DataFactory
    from aiida.orm.data.upf import UPFGROUP_TYPE
    # pylint: disable=invalid-name
    UpfData = DataFactory('upf')
    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm.group import Group
    # First query: all UPF family groups (optionally restricted by element).
    qb = QueryBuilder()
    qb.append(UpfData, tag='upfdata')
    if elements is not None:
        qb.add_filter(UpfData, {'attributes.element': {'in': elements}})
    qb.append(Group,
              group_of='upfdata',
              tag='group',
              project=["name", "description"],
              filters={"type": {'==': UPFGROUP_TYPE}})
    qb.distinct()
    if qb.count() > 0:
        for res in qb.dict():
            group_name = res.get("group").get("name")
            group_desc = res.get("group").get("description")
            # Second query (rebinding qb): count the pseudos in this family.
            qb = QueryBuilder()
            qb.append(Group,
                      tag='thisgroup',
                      filters={"name": {'like': group_name}})
            qb.append(UpfData, project=["id"], member_of='thisgroup')
            if with_description:
                description_string = ": {}".format(group_desc)
            else:
                description_string = ""
            echo.echo_success("* {} [{} pseudos]{}".format(
                group_name, qb.count(), description_string))
    else:
        echo.echo_warning("No valid UPF pseudopotential family found.")
def load_paw(cls, **kwargs):
    """
    py:method:: load_paw([family=None][, element=None][, symbol=None])

    Load PawData nodes from the databank. Use kwargs to filter.

    :return: a list of PawData instances
    :rtype: list
    :key str family: Filter by family
    :key str element: Filter by chemical symbol
    :key str symbol: Filter by PAW symbol (example: As vs. As_d)
    :raises ValueError: if no PAWs are found
    """
    usage_msg = 'use import_family or from_folder to import PAWs'
    error_msg = 'no PAWs found for the given kwargs!\n' + usage_msg
    group = kwargs.pop('group', None)
    family = kwargs.pop('family', None)
    silent = kwargs.pop('silent', None)
    if not (group or family):
        # No group/family given: query directly on node attributes.
        # NOTE(review): `iteritems` is Python-2-only — consistent with the
        # rest of this legacy file, but will break on Python 3.
        query_builder = QueryBuilder()
        query_builder.append(cls, tag='paw')
        filters = {}
        for key, value in kwargs.iteritems():
            filters['attributes.{}'.format(key)] = {'==': value}
        query_builder.add_filter('paw', filters)
        res = [i[0] for i in query_builder.all()]
    else:
        # Resolve the family name to a group; `created` tells us whether the
        # group existed before (used to give a better error below).
        if family:
            group, created = cls.get_or_create_famgroup(family)
        elif group:
            created = not group.is_stored
        # pylint: disable=protected-access
        try:
            # Filter the group's nodes in Python via the class's node filter.
            paw_filter = cls._node_filter(**kwargs)
            res = filter(paw_filter, group.nodes)
        except ValueError as err:
            if silent:
                res = []
            elif created:
                raise NotExistent('No family with that name exists')
            else:
                raise err
    if not res and not silent:
        raise ValueError(error_msg)
    return res
def localhost(aiida_env, localhost_dir):
    """Fixture for a local computer called localhost"""
    from aiida.orm import Computer
    from aiida.orm.querybuilder import QueryBuilder

    # Reuse an already-configured 'localhost' computer when present.
    qb = QueryBuilder()
    qb.append(Computer, tag='comp')
    qb.add_filter('comp', {'name': {'==': 'localhost'}})
    existing = qb.all()
    if existing:
        return existing[0][0]

    # Otherwise build a fresh (unstored) local computer for the tests.
    return Computer(name='localhost',
                    description='description',
                    hostname='localhost',
                    workdir=localhost_dir.strpath,
                    transport_type='local',
                    scheduler_type='direct',
                    enabled_state=True)
def list_pseudo(sym, name, tags):
    """
    List Gaussian Pseudopotentials
    """
    from aiida.orm.querybuilder import QueryBuilder
    from aiida_gaussian_datatypes.pseudopotential.data import Pseudopotential

    query = QueryBuilder()
    query.append(Pseudopotential)

    # Each CLI option maps onto one attribute filter; apply only those given.
    if sym:
        query.add_filter(Pseudopotential, {'attributes.element': {'==': sym}})
    if name:
        query.add_filter(Pseudopotential, {'attributes.aliases': {'contains': [name]}})
    if tags:
        query.add_filter(Pseudopotential, {'attributes.tags': {'contains': tags}})

    n_found = query.count()
    if not n_found:
        echo.echo("No Gaussian Pseudopotentials found.")
        return

    echo.echo_report("{} Gaussian Pseudopotentials found:\n".format(n_found))
    # Rows come back as one-element tuples; unpack while formatting.
    echo.echo(_formatted_table_list(pseudo for [pseudo] in query.iterall()))
    echo.echo("")
def list_basisset(sym, name, tags):
    """
    List Gaussian Basis Sets
    """
    from aiida.orm.querybuilder import QueryBuilder
    from aiida_gaussian_datatypes.basisset.data import BasisSet

    query = QueryBuilder()
    query.append(BasisSet)

    # Each CLI option maps onto one attribute filter; apply only those given.
    if sym:
        query.add_filter(BasisSet, {'attributes.element': {'==': sym}})
    if name:
        query.add_filter(BasisSet, {'attributes.aliases': {'contains': [name]}})
    if tags:
        query.add_filter(BasisSet, {'attributes.tags': {'contains': tags}})

    n_found = query.count()
    if not n_found:
        echo.echo("No Gaussian Basis Sets found.")
        return

    echo.echo_report("{} Gaussian Basis Sets found:\n".format(n_found))
    # Rows come back as one-element tuples; unpack while formatting.
    echo.echo(_formatted_table_list(bs for [bs] in query.iterall()))
    echo.echo("")
def dump_pseudo(sym, name, tags, output_format, data):
    """
    Print specified Pseudopotentials

    Either dumps the explicitly given ``data`` nodes, or queries for
    pseudopotentials matching the ``sym``/``name``/``tags`` filters.
    The two modes are mutually exclusive.
    """
    from aiida.orm.querybuilder import QueryBuilder
    from aiida_gaussian_datatypes.pseudopotential.data import Pseudopotential

    # Supported output formats and their writer callables.
    writers = {
        "cp2k": Pseudopotential.to_cp2k,
    }

    if data:
        # if explicit nodes where given the only thing left is to make sure no filters are present
        if sym or name or tags:
            raise click.UsageError(
                "can not specify node IDs and filters at the same time")
    else:
        query = QueryBuilder()
        query.append(Pseudopotential, project=["*"])

        if sym:
            query.add_filter(Pseudopotential, {"attributes.element": {"==": sym}})
        if name:
            query.add_filter(Pseudopotential, {"attributes.aliases": {"contains": [name]}})
        if tags:
            query.add_filter(Pseudopotential, {"attributes.tags": {"contains": tags}})

        if not query.count():
            echo.echo_warning("No Gaussian Pseudopotential found.",
                              err=echo.is_stdout_redirected())
            return

        data = [pseudo for pseudo, in query.iterall()
                ]  # query always returns a tuple, unpack it here

    for pseudo in data:
        if echo.is_stdout_redirected():
            # Progress goes to stderr so stdout stays clean for the dump.
            echo.echo_report("Dumping {}/{} ({})...".format(
                pseudo.name, pseudo.element, pseudo.uuid),
                             err=True)
        writers[output_format](pseudo, sys.stdout)
def dump_basisset(sym, name, tags, output_format, data):
    """
    Print specified Basis Sets

    Either dumps the explicitly given ``data`` nodes, or queries for
    basis sets matching the ``sym``/``name``/``tags`` filters.
    The two modes are mutually exclusive.
    """
    from aiida.orm.querybuilder import QueryBuilder
    from aiida_gaussian_datatypes.basisset.data import BasisSet

    # Supported output formats and their writer callables.
    writers = {
        "cp2k": BasisSet.to_cp2k,
    }

    if data:
        # if explicit nodes where given the only thing left is to make sure no filters are present
        if sym or name or tags:
            raise click.UsageError(
                "can not specify node IDs and filters at the same time")
    else:
        query = QueryBuilder()
        query.append(BasisSet, project=['*'])

        if sym:
            query.add_filter(BasisSet, {'attributes.element': {'==': sym}})
        if name:
            query.add_filter(BasisSet, {'attributes.aliases': {'contains': [name]}})
        if tags:
            query.add_filter(BasisSet, {'attributes.tags': {'contains': tags}})

        if not query.count():
            echo.echo_warning("No Gaussian Basis Sets found.",
                              err=echo.is_stdout_redirected())
            return

        data = [bset for bset, in query.iterall()
                ]  # query always returns a tuple, unpack it here

    for bset in data:
        if echo.is_stdout_redirected():
            # Progress goes to stderr so stdout stays clean for the dump.
            echo.echo_report("Dumping {}/{} ({})...".format(
                bset.name, bset.element, bset.uuid),
                             err=True)
        writers[output_format](bset, sys.stdout)
def test_simple_query_django_2(self):
    """Test querying through the Django dummy model: filters, projections,
    edge filters, and that ``get_query`` caches until a filter changes."""
    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm import Node
    from datetime import datetime
    from aiida.backends.querybuild.dummy_model import (
        DbNode, DbLink, DbAttribute, session
    )
    # Build a tiny graph n0 -> n1 -> n2 directly on the dummy model.
    n0 = DbNode(
        label='hello',
        type='',
        description='',
        user_id=1,
    )
    n1 = DbNode(
        label='foo',
        type='',
        description='I am FoO',
        user_id=2,
    )
    n2 = DbNode(
        label='bar',
        type='',
        description='I am BaR',
        user_id=3,
    )
    DbAttribute(
        key='foo',
        datatype='txt',
        tval='bar',
        dbnode=n0
    )
    l1 = DbLink(input=n0, output=n1, label='random_1', type='')
    l2 = DbLink(input=n1, output=n2, label='random_2', type='')
    session.add_all([n0, n1, n2, l1, l2])
    qb1 = QueryBuilder()
    qb1.append(
        DbNode,
        filters={
            'label': 'hello',
        }
    )
    self.assertEqual(len(list(qb1.all())), 1)
    # Query with case-insensitive filters and per-tag projections.
    qh = {
        'path': [
            {
                'cls': Node,
                'tag': 'n1'
            },
            {
                'cls': Node,
                'tag': 'n2',
                'output_of': 'n1'
            }
        ],
        'filters': {
            'n1': {
                'label': {'ilike': '%foO%'},
            },
            'n2': {
                'label': {'ilike': 'bar%'},
            }
        },
        'project': {
            'n1': ['id', 'uuid', 'ctime', 'label'],
            'n2': ['id', 'description', 'label'],
        }
    }
    qb2 = QueryBuilder(**qh)
    resdict = qb2.dict()
    self.assertEqual(len(resdict), 1)
    resdict = resdict[0]
    self.assertTrue(isinstance(resdict['n1']['ctime'], datetime))
    self.assertEqual(resdict['n2']['label'], 'bar')
    # Filter on the edge between the two vertices ('n1--n2').
    qh = {
        'path': [
            {
                'cls': Node,
                'label': 'n1'
            },
            {
                'cls': Node,
                'label': 'n2',
                'output_of': 'n1'
            }
        ],
        'filters': {
            'n1--n2': {'label': {'like': '%_2'}}
        }
    }
    qb = QueryBuilder(**qh)
    self.assertEqual(qb.count(), 1)
    # Test the hashing: adding a filter must invalidate the cached query
    # object; repeated calls without changes must return the same object.
    query1 = qb.get_query()
    qb.add_filter('n2', {'label': 'nonexistentlabel'})
    self.assertEqual(qb.count(), 0)
    query2 = qb.get_query()
    query3 = qb.get_query()
    self.assertTrue(id(query1) != id(query2))
    self.assertTrue(id(query2) == id(query3))
def load_node(node_id=None, pk=None, uuid=None, parent_class=None, query_with_dashes=True):
    """
    Return an AiiDA node given PK or UUID.

    :param node_id: PK (integer) or UUID (string) or a node
    :param pk: PK of a node
    :param uuid: UUID of a node, or the beginning of the uuid
    :param parent_class: if specified, checks whether the node loaded is a
        subclass of parent_class
    :param bool query_with_dashes: Specific if uuid is passed, allows to
        put the uuid in the correct form. Default=True
    :return: an AiiDA node
    :raise InputValidationError: if none or more than one of parameters is supplied
    :raise TypeError: if the wrong types are provided
    :raise NotExistent: if no matching Node is found.
    :raise MultipleObjectsError: If more than one Node was found
    """
    # Fix: NotExistent was imported twice in the original import line.
    from aiida.common.exceptions import NotExistent, MultipleObjectsError, InputValidationError
    # This must be done inside here, because at import time the profile
    # must have been already loaded. If you put it at the module level,
    # the implementation is frozen to the default one at import time.
    from aiida.orm.implementation import Node
    from aiida.orm.querybuilder import QueryBuilder

    # First checking if the inputs are valid: exactly one identifier allowed.
    inputs_provided = [val is not None for val in (node_id, pk, uuid)].count(True)
    if inputs_provided == 0:
        raise InputValidationError(
            "one of the parameters 'node_id', 'pk' and 'uuid' "
            "has to be supplied")
    elif inputs_provided > 1:
        raise InputValidationError(
            "only one of parameters 'node_id', 'pk' and 'uuid' "
            "has to be supplied")

    class_ = parent_class or Node
    if not issubclass(class_, Node):
        raise TypeError("{} is not a subclass of {}".format(class_, Node))

    # The logic is as follows: If pk is specified I will look for the pk
    # if UUID is specified for the uuid.
    # node_id can either be string -> uuid or an integer -> pk
    if node_id is not None:
        if isinstance(node_id, (str, unicode)):
            uuid = node_id
        elif isinstance(node_id, int):
            pk = node_id
        else:
            raise TypeError("'node_id' has to be either string, unicode or "
                            "integer, {} given".format(type(node_id)))

    # I am checking whether uuid, if supplied, is a string
    if uuid is not None:
        if not isinstance(uuid, (str, unicode)):
            raise TypeError("'uuid' has to be string or unicode")
    # or whether the pk, if provided, is an integer
    elif pk is not None:
        if not isinstance(pk, int):
            raise TypeError("'pk' has to be an integer")
    else:
        # Unreachable given the validation above; fixed from `assert True`,
        # which could never fire.
        assert False, "Neither pk nor uuid was provided"

    qb = QueryBuilder()
    qb.append(class_, tag='node', project='*')

    if pk:
        qb.add_filter('node', {'id': pk})
    elif uuid:
        # Put back dashes in the right place
        start_uuid = uuid.replace('-', '')
        if query_with_dashes:
            # Essential that this is ordered from largest to smallest!
            for dash_pos in [20, 16, 12, 8]:
                if len(start_uuid) > dash_pos:
                    start_uuid = "{}-{}".format(start_uuid[:dash_pos], start_uuid[dash_pos:])
        # Prefix match so a partial UUID also resolves.
        qb.add_filter('node', {'uuid': {'like': "{}%".format(start_uuid)}})

    try:
        return qb.one()[0]
    except MultipleObjectsError:
        raise MultipleObjectsError("More than one node found")
    except NotExistent:
        raise NotExistent("No node was found")
class UpdateRule(Operation):
    """Graph-traversal rule that repeatedly runs a stored QueryBuilder query
    to expand an entity set from the first tag of the path to the last."""

    def __init__(self, querybuilder, mode=MODES.APPEND, max_iterations=1, track_edges=False, track_visits=True):
        def get_spec_from_path(queryhelp, idx):
            # Map a path entry's type string onto the entity-set key it
            # belongs to ('nodes' or 'groups').
            if (queryhelp['path'][idx]['type'].startswith('node')
                    or queryhelp['path'][idx]['type'].startswith('data')
                    or queryhelp['path'][idx]['type'] == ''):
                return 'nodes'
            elif queryhelp['path'][idx]['type'] == 'group':
                return 'groups'
            else:
                # NOTE(review): error message always reports path[0], even
                # when idx is -1 — confirm whether that is intended.
                raise Exception("not understood entity from ( {} )".format(
                    queryhelp['path'][0]['type']))

        queryhelp = querybuilder.get_json_compatible_queryhelp()
        # An empty type means "any node"; normalize so get_spec_from_path
        # recognizes it.
        for pathspec in queryhelp['path']:
            if not pathspec['type']:
                pathspec['type'] = 'node.Node.'
        # Rebuild the QueryBuilder from the normalized queryhelp so this
        # rule owns its own instance.
        self._querybuilder = QueryBuilder(**queryhelp)
        queryhelp = self._querybuilder.get_json_compatible_queryhelp()
        self._first_tag = queryhelp['path'][0]['tag']
        self._last_tag = queryhelp['path'][-1]['tag']
        self._entity_from = get_spec_from_path(queryhelp, 0)
        self._entity_to = get_spec_from_path(queryhelp, -1)
        super(UpdateRule, self).__init__(mode,
                                         max_iterations,
                                         track_edges=track_edges,
                                         track_visits=track_visits)

    def _init_run(self, entity_set):
        """Prepare the querybuilder's projections for one traversal run."""
        # Removing all other projections in the QueryBuilder instance:
        for tag in self._querybuilder._projections.keys():
            self._querybuilder._projections[tag] = []
        # priming querybuilder to add projection on the key I need:
        self._querybuilder.add_projection(
            self._last_tag, entity_set[self._entity_to].identifier)
        self._entity_to_identifier = entity_set[self._entity_to].identifier
        if self._track_edges:
            # NOTE(review): this projects the *first* tag with the
            # identifier of `_entity_to`; the symmetric `_entity_from`
            # identifier would seem more natural — confirm against callers.
            self._querybuilder.add_projection(
                self._first_tag, entity_set[self._entity_to].identifier)
            edge_set = entity_set._dict['{}_{}'.format(self._entity_from,
                                                       self._entity_to)]
            self._edge_label = '{}--{}'.format(self._first_tag, self._last_tag)
            # Keys to extract each edge tuple from a query-result row:
            # (from-id, to-id, *extra edge identifiers).
            self._edge_keys = tuple(
                [(self._first_tag, entity_set[self._entity_from].identifier),
                 (self._last_tag, entity_set[self._entity_to].identifier)] +
                [(self._edge_label, identifier) for identifier in edge_set._additional_identifiers])
            try:
                self._querybuilder.add_projection(
                    self._edge_label, edge_set._additional_identifiers)
            except InputValidationError as e:
                raise KeyError(
                    "The key for the edge is invalid.\n"
                    "Are the entities really connected, or have you overwritten the edge-tag?"
                )

    def _load_results(self, target_set, operational_set):
        """
        :param target_set: The set to load the results into
        :param operational_set: Where the results originate from (walkers)
        """
        # I check that I have primary keys
        primkeys = operational_set[self._entity_from].get_keys()
        # Empty the target set, so that only these results are inside
        target_set.empty()
        if primkeys:
            self._querybuilder.add_filter(self._first_tag, {
                operational_set[self._entity_from].identifier: {
                    'in': primkeys
                }
            })
            qres = self._querybuilder.dict()
            # These are the new results returned by the query
            target_set[self._entity_to].add_entities([
                item[self._last_tag][self._entity_to_identifier]
                for item in qres
            ])
            if self._track_edges:
                # NOTE(review): the key uses `_entity_to` twice
                # ('{to}_{to}'), whereas _init_run reads
                # '{from}_{to}' — likely a typo; confirm before changing.
                target_set['{}_{}'.format(
                    self._entity_to, self._entity_to)].add_entities([
                        tuple(item[key1][key2]
                              for (key1, key2) in self._edge_keys)
                        for item in qres
                    ])
def create_node_id_qb(node_id=None, pk=None, uuid=None, parent_class=None, query_with_dashes=True):
    """
    Returns the QueryBuilder instance set to retrieve AiiDA objects given their
    (parent)class and PK (in which case the object should be unique) or UUID
    or UUID starting pattern.

    :param node_id: PK (integer) or UUID (string) or a node
    :param pk: PK of a node
    :param uuid: UUID of a node, or the beginning of the uuid
    :param parent_class: if specified, looks only among objects that are
        instances of a subclass of parent_class, otherwise among nodes
    :param bool query_with_dashes: Specific if uuid is passed, allows to
        put the uuid in the correct form. Default=True
    :return: a QueryBuilder instance
    :raise InputValidationError: if none or more than one of node_id/pk/uuid is supplied
    :raise TypeError: if the wrong types are provided
    """
    from aiida.common.exceptions import InputValidationError
    # This must be done inside here, because at import time the profile
    # must have been already loaded. If you put it at the module level,
    # the implementation is frozen to the default one at import time.
    from aiida.orm.implementation import Node
    from aiida.orm.querybuilder import QueryBuilder

    # First checking if the inputs are valid: exactly one identifier allowed.
    inputs_provided = [val is not None for val in (node_id, pk, uuid)].count(True)
    if inputs_provided == 0:
        raise InputValidationError(
            "one of the parameters 'node_id', 'pk' and 'uuid' "
            "has to be supplied")
    elif inputs_provided > 1:
        raise InputValidationError(
            "only one of parameters 'node_id', 'pk' and 'uuid' "
            "has to be supplied")

    # In principle, I can use this function to fetch any kind of AiiDA object,
    # but if I don't specify anything, I assume that I am looking for nodes
    class_ = parent_class or Node

    # The logic is as follows: If pk is specified I will look for the pk
    # if UUID is specified for the uuid.
    # node_id can either be string -> uuid or an integer -> pk
    if node_id is not None:
        if isinstance(node_id, (str, unicode)):
            uuid = node_id
        elif isinstance(node_id, int):
            pk = node_id
        else:
            raise TypeError("'node_id' has to be either string, unicode or "
                            "integer, {} given".format(type(node_id)))

    # I am checking whether uuid, if supplied, is a string
    if uuid is not None:
        if not isinstance(uuid, (str, unicode)):
            raise TypeError("'uuid' has to be string or unicode")
    # or whether the pk, if provided, is an integer
    elif pk is not None:
        if not isinstance(pk, int):
            raise TypeError("'pk' has to be an integer")
    else:
        # Unreachable given the validation above; fixed from `assert True`,
        # which could never fire.
        assert False, "Neither pk nor uuid was provided"

    qb = QueryBuilder()
    qb.append(class_, tag='node')

    if pk:
        qb.add_filter('node', {'id': pk})
    elif uuid:
        # Put back dashes in the right place
        start_uuid = uuid.replace('-', '')
        # TODO (only if it brings any speed advantage) add a check on the
        # number of characters to recognize if the uuid pattern is complete.
        # If so, the filter operator can be '=='
        if query_with_dashes:
            # Essential that this is ordered from largest to smallest!
            for dash_pos in [20, 16, 12, 8]:
                if len(start_uuid) > dash_pos:
                    start_uuid = "{}-{}".format(start_uuid[:dash_pos], start_uuid[dash_pos:])
        # Prefix match so a partial UUID also resolves.
        qb.add_filter('node', {'uuid': {'like': "{}%".format(start_uuid)}})

    return qb
def test_query_path(self):
    """Test ancestor/descendant path queries, with and without the DbPath
    table, including projection of the traversed path and edge depth."""
    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm import Node
    n1 = Node()
    n1.label='n1'
    n1.store()
    n2 = Node()
    n2.label='n2'
    n2.store()
    n3 = Node()
    n3.label='n3'
    n3.store()
    n4 = Node()
    n4.label='n4'
    n4.store()
    n5 = Node()
    n5.label='n5'
    n5.store()
    n6 = Node()
    n6.label='n6'
    n6.store()
    n7 = Node()
    n7.label='n7'
    n7.store()
    n8 = Node()
    n8.label='n8'
    n8.store()
    n9 = Node()
    n9.label='n9'
    n9.store()

    # I create a strange graph, inserting links in a order
    # such that I often have to create the transitive closure
    # between two graphs
    n3.add_link_from(n2)
    n2.add_link_from(n1)
    n5.add_link_from(n3)
    n5.add_link_from(n4)
    n4.add_link_from(n2)
    n7.add_link_from(n6)
    n8.add_link_from(n7)

    for with_dbpath in (True, False):
        # Yet, no links from 1 to 8
        self.assertEquals(
            QueryBuilder(with_dbpath=with_dbpath).append(
                Node, filters={'id':n1.pk}, tag='anc'
            ).append(Node, descendant_of='anc', filters={'id':n8.pk}
            ).count(), 0)

        self.assertEquals(
            QueryBuilder(with_dbpath=with_dbpath).append(
                Node, filters={'id':n8.pk}, tag='desc'
            ).append(Node, ancestor_of='desc', filters={'id':n1.pk}
            ).count(), 0)

    # Connect the two subgraphs.
    n6.add_link_from(n5)
    # Yet, now 2 links from 1 to 8
    for with_dbpath in (True, False):
        self.assertEquals(
            QueryBuilder(with_dbpath=with_dbpath).append(
                Node, filters={'id':n1.pk}, tag='anc'
            ).append(Node, descendant_of='anc', filters={'id':n8.pk}
            ).count(), 2
        )

        self.assertEquals(
            QueryBuilder(with_dbpath=with_dbpath).append(
                Node, filters={'id':n8.pk}, tag='desc'
            ).append(Node, ancestor_of='desc', filters={'id':n1.pk}
            ).count(), 2)

    # Project the full traversed path via the edge.
    qb = QueryBuilder(with_dbpath=False,expand_path=True).append(
            Node, filters={'id':n8.pk}, tag='desc',
        ).append(Node, ancestor_of='desc', edge_project='path', filters={'id':n1.pk})

    queried_path_set = set([frozenset(p) for p, in qb.all()])

    paths_there_should_be = set([
            frozenset([n1.pk, n2.pk, n3.pk, n5.pk, n6.pk, n7.pk, n8.pk]),
            frozenset([n1.pk, n2.pk, n4.pk, n5.pk, n6.pk, n7.pk, n8.pk])
        ])

    self.assertTrue(queried_path_set == paths_there_should_be)

    qb = QueryBuilder(with_dbpath=False, expand_path=True).append(
            Node, filters={'id':n1.pk}, tag='anc'
        ).append(
            Node, descendant_of='anc', filters={'id':n8.pk}, edge_project='path'
        )

    self.assertTrue(set(
            [frozenset(p) for p, in qb.all()]
        ) == set(
            [frozenset([n1.pk, n2.pk, n3.pk, n5.pk, n6.pk, n7.pk, n8.pk]),
             frozenset([n1.pk, n2.pk, n4.pk, n5.pk, n6.pk, n7.pk, n8.pk])]
        ))

    n7.add_link_from(n9)
    # Still two links...
    for with_dbpath in (True, False):
        self.assertEquals(
            QueryBuilder(with_dbpath=with_dbpath).append(
                Node, filters={'id':n1.pk}, tag='anc'
            ).append(Node, descendant_of='anc', filters={'id':n8.pk}
            ).count(), 2
        )

        self.assertEquals(
            QueryBuilder(with_dbpath=with_dbpath).append(
                Node, filters={'id':n8.pk}, tag='desc'
            ).append(Node, ancestor_of='desc', filters={'id':n1.pk}
            ).count(), 2)

    n9.add_link_from(n6)
    # And now there should be 4 nodes
    for with_dbpath in (True, False):
        self.assertEquals(
            QueryBuilder(with_dbpath=with_dbpath).append(
                Node, filters={'id':n1.pk}, tag='anc'
            ).append(Node, descendant_of='anc', filters={'id':n8.pk}
            ).count(), 4)

        self.assertEquals(
            QueryBuilder(with_dbpath=with_dbpath).append(
                Node, filters={'id':n8.pk}, tag='desc'
            ).append(Node, ancestor_of='desc', filters={'id':n1.pk}
            ).count(), 4)

    for with_dbpath in (True, False):
        # NOTE(review): `with_dbpath=True` here ignores the loop variable —
        # presumably `with_dbpath=with_dbpath` was intended; confirm.
        qb = QueryBuilder(with_dbpath=True).append(
                Node, filters={'id':n1.pk}, tag='anc'
            ).append(
                Node, descendant_of='anc', filters={'id':n8.pk}, edge_tag='edge'
            )
        qb.add_projection('edge', 'depth')
        # NOTE(review): two-argument assertTrue only checks the first
        # argument's truthiness — these were probably meant as assertEqual.
        self.assertTrue(set(zip(*qb.all())[0]), set([5,6]))
        qb.add_filter('edge', {'depth':6})
        self.assertTrue(set(zip(*qb.all())[0]), set([6]))
def test_simple_query_2(self):
    """Test ORM-level querying: label filters, projections, edge filters,
    ``one()`` error cases, and ``get_query`` caching behaviour."""
    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm import Node
    from datetime import datetime
    from aiida.common.exceptions import MultipleObjectsError, NotExistent
    # Build a tiny graph n0 -> n1 -> n2.
    n0 = Node()
    n0.label = 'hello'
    n0.description=''
    n0._set_attr('foo', 'bar')

    n1 = Node()
    n1.label='foo'
    n1.description='I am FoO'

    n2 = Node()
    n2.label='bar'
    n2.description='I am BaR'

    n2.add_link_from(n1, label='random_2')
    n1.add_link_from(n0, label='random_1')

    for n in (n0, n1, n2):
        n.store()

    qb1 = QueryBuilder()
    qb1.append(Node, filters={'label': 'hello'})
    self.assertEqual(len(list(qb1.all())), 1)

    # Query with case-insensitive filters and per-tag projections.
    qh = {
        'path': [
            {
                'cls': Node,
                'tag': 'n1'
            },
            {
                'cls': Node,
                'tag': 'n2',
                'output_of': 'n1'
            }
        ],
        'filters': {
            'n1': {
                'label': {'ilike': '%foO%'},
            },
            'n2': {
                'label': {'ilike': 'bar%'},
            }
        },
        'project': {
            'n1': ['id', 'uuid', 'ctime', 'label'],
            'n2': ['id', 'description', 'label'],
        }
    }

    qb2 = QueryBuilder(**qh)

    resdict = qb2.dict()
    self.assertEqual(len(resdict), 1)
    self.assertTrue(isinstance(resdict[0]['n1']['ctime'], datetime))

    res_one = qb2.one()
    self.assertTrue('bar' in res_one)

    # Filter on the edge between the two vertices ('n1--n2').
    qh = {
        'path': [
            {
                'cls': Node,
                'tag': 'n1'
            },
            {
                'cls': Node,
                'tag': 'n2',
                'output_of': 'n1'
            }
        ],
        'filters': {
            'n1--n2': {'label': {'like': '%_2'}}
        }
    }
    qb = QueryBuilder(**qh)
    self.assertEqual(qb.count(), 1)

    # Test the hashing: adding a filter must invalidate the cached query
    # object; repeated calls without changes must return the same object.
    query1 = qb.get_query()
    qb.add_filter('n2', {'label': 'nonexistentlabel'})
    self.assertEqual(qb.count(), 0)

    # `one()` must raise when there are zero or multiple matches.
    with self.assertRaises(NotExistent):
        qb.one()
    with self.assertRaises(MultipleObjectsError):
        QueryBuilder().append(Node).one()

    query2 = qb.get_query()
    query3 = qb.get_query()

    self.assertTrue(id(query1) != id(query2))
    self.assertTrue(id(query2) == id(query3))
def get(cls, element, name=None, version="latest", match_aliases=True, group_label=None, n_el=None):
    """
    Get the first matching Pseudopotential for the given parameters.

    :param element: The atomic symbol
    :param name: The name of the pseudo
    :param version: A specific version (if more than one in the database and not the highest/latest)
    :param match_aliases: Whether to look in the list of of aliases for a matching name
    :param group_label: if given, restrict the search to pseudos in that group
    :param n_el: if given, keep only pseudos whose summed electron counts equal it
    :raises NotExistent: if no matching pseudopotential is found
    :raises MultipleObjectsError: if the matches have more than one distinct name
    """
    from aiida.orm.querybuilder import QueryBuilder

    query = QueryBuilder()

    params = {}
    if group_label:
        query.append(Group, filters={"label": group_label}, tag="group")
        params["with_group"] = "group"

    query.append(Pseudopotential, **params)

    filters = {"attributes.element": {"==": element}}

    if version != "latest":
        filters["attributes.version"] = {"==": version}

    if name:
        if match_aliases:
            filters["attributes.aliases"] = {"contains": [name]}
        else:
            filters["attributes.name"] = {"==": name}

    query.add_filter(Pseudopotential, filters)

    # SQLA ORM only solution:
    # query.order_by({Pseudopotential: [{"attributes.version": {"cast": "i", "order": "desc"}}]})
    # items = query.first()

    all_iter = query.iterall()

    # n_el is checked in Python (not in the query) because it compares
    # against the *sum* of the stored per-channel electron counts.
    if n_el:
        all_iter = filter(lambda p: sum(p[0].n_el) == n_el, all_iter)

    # Sort in Python so the query stays backend-agnostic; highest version first.
    items = sorted(all_iter, key=lambda p: p[0].version, reverse=True)

    if not items:
        raise NotExistent(
            f"No Gaussian Pseudopotential found for element={element}, name={name}, version={version}"
        )

    # if we get different names there is no well ordering, sorting by version only works if they have the same name
    if len(set(p[0].name for p in items)) > 1:
        raise MultipleObjectsError(
            f"Multiple Gaussian Pseudopotentials found for element={element}, name={name}, version={version}"
        )

    return items[0][0]
def test_query_path(self):
    """Test ancestor/descendant traversal queries over INPUT links,
    including ``get_all_parents``, edge depth filters, and projection of
    the traversed path."""
    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm import Node
    from aiida.common.links import LinkType
    from aiida.backends.utils import QueryFactory

    q = QueryFactory()()

    n1 = Node()
    n1.label = 'n1'
    n1.store()
    n2 = Node()
    n2.label = 'n2'
    n2.store()
    n3 = Node()
    n3.label = 'n3'
    n3.store()
    n4 = Node()
    n4.label = 'n4'
    n4.store()
    n5 = Node()
    n5.label = 'n5'
    n5.store()
    n6 = Node()
    n6.label = 'n6'
    n6.store()
    n7 = Node()
    n7.label = 'n7'
    n7.store()
    n8 = Node()
    n8.label = 'n8'
    n8.store()
    n9 = Node()
    n9.label = 'n9'
    n9.store()

    # I create a strange graph, inserting links in a order
    # such that I often have to create the transitive closure
    # between two graphs
    # I set everything as an INPUT-links now, because the QueryBuilder path query or
    # our custom queries don't follow other links than CREATE or INPUT
    n3.add_link_from(n2, link_type=LinkType.INPUT)
    n2.add_link_from(n1, link_type=LinkType.INPUT)
    n5.add_link_from(n3, link_type=LinkType.INPUT)
    n5.add_link_from(n4, link_type=LinkType.INPUT)
    n4.add_link_from(n2, link_type=LinkType.INPUT)
    n7.add_link_from(n6, link_type=LinkType.INPUT)
    n8.add_link_from(n7, link_type=LinkType.INPUT)

    # There are no parents to n9, checking that
    self.assertEqual(set([]), set(q.get_all_parents([n9.pk])))
    # There is one parent to n6
    self.assertEqual(set([(_, ) for _ in (n6.pk, )]),
                     set([tuple(_) for _ in q.get_all_parents([n7.pk])]))
    # There are several parents to n4
    self.assertEqual(set([(_.pk, ) for _ in (n1, n2)]),
                     set([tuple(_) for _ in q.get_all_parents([n4.pk])]))
    # There are several parents to n5
    self.assertEqual(set([(_.pk, ) for _ in (n1, n2, n3, n4)]),
                     set([tuple(_) for _ in q.get_all_parents([n5.pk])]))

    # Yet, no links from 1 to 8
    self.assertEquals(
        QueryBuilder().append(Node, filters={
            'id': n1.pk
        }, tag='anc').append(Node, descendant_of='anc', filters={
            'id': n8.pk
        }).count(), 0)

    self.assertEquals(
        QueryBuilder().append(Node, filters={
            'id': n8.pk
        }, tag='desc').append(Node, ancestor_of='desc', filters={
            'id': n1.pk
        }).count(), 0)

    # Connect the two subgraphs.
    n6.add_link_from(n5, link_type=LinkType.INPUT)

    # Yet, now 2 links from 1 to 8
    self.assertEquals(
        QueryBuilder().append(Node, filters={
            'id': n1.pk
        }, tag='anc').append(Node, descendant_of='anc', filters={
            'id': n8.pk
        }).count(), 2)

    self.assertEquals(
        QueryBuilder().append(Node, filters={
            'id': n8.pk
        }, tag='desc').append(Node, ancestor_of='desc', filters={
            'id': n1.pk
        }).count(), 2)

    # Both paths have depth 5, so a '< 6' depth filter keeps both ...
    self.assertEquals(
        QueryBuilder().append(Node, filters={
            'id': n8.pk
        }, tag='desc').append(
            Node,
            ancestor_of='desc',
            filters={
                'id': n1.pk
            },
            edge_filters={
                'depth': {
                    '<': 6
                }
            },
        ).count(), 2)
    # ... an exact depth of 5 keeps both ...
    self.assertEquals(
        QueryBuilder().append(Node, filters={
            'id': n8.pk
        }, tag='desc').append(
            Node,
            ancestor_of='desc',
            filters={
                'id': n1.pk
            },
            edge_filters={
                'depth': 5
            },
        ).count(), 2)
    # ... and '< 5' excludes both.
    self.assertEquals(
        QueryBuilder().append(Node, filters={
            'id': n8.pk
        }, tag='desc').append(
            Node,
            ancestor_of='desc',
            filters={
                'id': n1.pk
            },
            edge_filters={
                'depth': {
                    '<': 5
                }
            },
        ).count(), 0)

    # TODO write a query that can filter certain paths by traversed ID
    qb = QueryBuilder().append(
        Node,
        filters={
            'id': n8.pk
        },
        tag='desc',
    ).append(Node, ancestor_of='desc', edge_project='path', filters={'id': n1.pk})

    queried_path_set = set([frozenset(p) for p, in qb.all()])

    paths_there_should_be = set([
        frozenset([n1.pk, n2.pk, n3.pk, n5.pk, n6.pk, n7.pk, n8.pk]),
        frozenset([n1.pk, n2.pk, n4.pk, n5.pk, n6.pk, n7.pk, n8.pk])
    ])

    self.assertTrue(queried_path_set == paths_there_should_be)

    qb = QueryBuilder().append(Node, filters={
        'id': n1.pk
    }, tag='anc').append(Node,
                         descendant_of='anc',
                         filters={'id': n8.pk},
                         edge_project='path')

    self.assertTrue(
        set([frozenset(p) for p, in qb.all()]) == set([
            frozenset([n1.pk, n2.pk, n3.pk, n5.pk, n6.pk, n7.pk, n8.pk]),
            frozenset([n1.pk, n2.pk, n4.pk, n5.pk, n6.pk, n7.pk, n8.pk])
        ]))

    n7.add_link_from(n9, link_type=LinkType.INPUT)
    # Still two links...
    self.assertEquals(
        QueryBuilder().append(Node, filters={
            'id': n1.pk
        }, tag='anc').append(Node, descendant_of='anc', filters={
            'id': n8.pk
        }).count(), 2)

    self.assertEquals(
        QueryBuilder().append(Node, filters={
            'id': n8.pk
        }, tag='desc').append(Node, ancestor_of='desc', filters={
            'id': n1.pk
        }).count(), 2)

    n9.add_link_from(n6, link_type=LinkType.INPUT)
    # And now there should be 4 nodes
    self.assertEquals(
        QueryBuilder().append(Node, filters={
            'id': n1.pk
        }, tag='anc').append(Node, descendant_of='anc', filters={
            'id': n8.pk
        }).count(), 4)

    self.assertEquals(
        QueryBuilder().append(Node, filters={
            'id': n8.pk
        }, tag='desc').append(Node, ancestor_of='desc', filters={
            'id': n1.pk
        }).count(), 4)

    qb = QueryBuilder().append(Node, filters={
        'id': n1.pk
    }, tag='anc').append(Node,
                         descendant_of='anc',
                         filters={'id': n8.pk},
                         edge_tag='edge')
    qb.add_projection('edge', 'depth')
    # NOTE(review): two-argument assertTrue only checks the first argument's
    # truthiness — these were probably meant as assertEqual; also
    # `zip(*...)[0]` is Python-2-only indexing of a zip result.
    self.assertTrue(set(zip(*qb.all())[0]), set([5, 6]))
    qb.add_filter('edge', {'depth': 6})
    self.assertTrue(set(zip(*qb.all())[0]), set([6]))