def _set_shear(self):
    """Set ShearWorkChain data."""
    qb = QueryBuilder()
    qb.append(Node, filters={'id': {'==': self._pk}}, tag='wf')
    qb.append(CalcFunctionNode,
              filters={'label': {'==': 'get_shear_structures'}},
              with_incoming='wf',
              project=['id'])
    create_shears_pk = qb.all()[0][0]

    qb = QueryBuilder()
    qb.append(Node, filters={'id': {'==': create_shears_pk}}, tag='cs')
    qb.append(StructureData, with_incoming='cs', project=['id', 'label'])
    structs = qb.all()
    orig_cell_pks = [struct[0] for struct in structs if 'shear_orig' in struct[1]]
    orig_cell_pks.sort()

    self._create_shears_pk = create_shears_pk
    self._structure_pks = {}
    self._structure_pks['shear_original_pks'] = orig_cell_pks
    self._structure_pks['hexagonal_pk'] = self._node.inputs.structure.pk
    self._cells = {}
    self._cells['hexagonal'] = get_cell_from_aiida(self._node.inputs.structure)
    self._cells['shear_original'] = [get_cell_from_aiida(load_node(pk)) for pk in orig_cell_pks]
def test_log_querybuilder(self):
    """Test querying for logs by joining on nodes in the QueryBuilder."""
    from aiida.orm import QueryBuilder

    # Setup nodes
    log_1, calc = self.create_log()
    log_2 = Log(now(), 'loggername', logging.getLevelName(LOG_LEVEL_REPORT), calc.id, 'log message #2')
    log_3 = Log(now(), 'loggername', logging.getLevelName(LOG_LEVEL_REPORT), calc.id, 'log message #3')

    # Retrieve a node by joining on a specific log ('log_2')
    builder = QueryBuilder()
    builder.append(Log, tag='log', filters={'id': log_2.id})
    builder.append(orm.CalculationNode, with_log='log', project=['uuid'])
    nodes = builder.all()

    self.assertEqual(len(nodes), 1)
    for node in nodes:
        self.assertIn(str(node[0]), [calc.uuid])

    # Retrieve all logs for a specific node by joining on said node
    builder = QueryBuilder()
    builder.append(orm.CalculationNode, tag='calc', filters={'id': calc.id})
    builder.append(Log, with_node='calc', project=['uuid'])
    logs = builder.all()

    self.assertEqual(len(logs), 3)
    for log in logs:
        self.assertIn(str(log[0]), [str(log_1.uuid), str(log_2.uuid), str(log_3.uuid)])
def test_get_or_create_invalid_prefix(self):
    """Test the ``get_or_create_group`` method of ``Autogroup`` when there is already a group
    with the same prefix, but followed by other non-underscore characters."""
    label_prefix = 'new_test_prefix_TestAutogroup'
    # Create a group with the same prefix, but followed by non-underscore
    # characters. These should be ignored in the logic.
    AutoGroup(label=f'{label_prefix}xx').store()

    # Check that there are no groups to begin with
    queryb = QueryBuilder().append(AutoGroup, filters={'label': label_prefix})
    assert not list(queryb.all())
    queryb = QueryBuilder().append(AutoGroup, filters={'label': {'like': r'{}\_%'.format(label_prefix)}})
    assert not list(queryb.all())

    # First group (no existing one)
    autogroup = Autogroup()
    autogroup.set_group_label_prefix(label_prefix)
    group = autogroup.get_or_create_group()
    expected_label = label_prefix
    self.assertEqual(
        group.label, expected_label,
        f"The auto-group should be labelled '{expected_label}', it is instead '{group.label}'"
    )

    # Second group (only the group with no suffix exists so far)
    autogroup = Autogroup()
    autogroup.set_group_label_prefix(label_prefix)
    group = autogroup.get_or_create_group()
    expected_label = f'{label_prefix}_1'
    self.assertEqual(
        group.label, expected_label,
        f"The auto-group should be labelled '{expected_label}', it is instead '{group.label}'"
    )
def test_ordering_limits_offsets_sqla(self):
    """Test ordering, limits and offsets of SQLA query results."""
    # Create 10 nodes with an attribute that can be ordered
    for i in range(10):
        node = Data()
        node.set_attribute('foo', i)
        node.store()

    q_b = QueryBuilder().append(Node, project='attributes.foo').order_by(
        {Node: {'attributes.foo': {'cast': 'i'}}})
    res = next(zip(*q_b.all()))
    self.assertEqual(res, tuple(range(10)))

    # Now apply an offset:
    q_b.offset(5)
    res = next(zip(*q_b.all()))
    self.assertEqual(res, tuple(range(5, 10)))

    # Now also apply a limit:
    q_b.limit(3)
    res = next(zip(*q_b.all()))
    self.assertEqual(res, tuple(range(5, 8)))
def test_autogroup_clashing_label(self):
    """Check if the autogroup label is properly (re)generated when it clashes with an existing group name."""
    from aiida.orm import QueryBuilder, Node, AutoGroup, load_node

    script_content = textwrap.dedent("""\
        from aiida.orm import Data
        node = Data().store()
        print(node.pk)
        """)
    autogroup_label = 'SOME_repeated_group_LABEL'

    with tempfile.NamedTemporaryFile(mode='w+') as fhandle:
        fhandle.write(script_content)
        fhandle.flush()

        # First run
        options = [fhandle.name, '--auto-group', '--auto-group-label-prefix', autogroup_label]
        result = self.cli_runner.invoke(cmd_run.run, options)
        self.assertClickResultNoException(result)

        pk = int(result.output)
        _ = load_node(pk)  # Check if the node can be loaded

        queryb = QueryBuilder().append(Node, filters={'id': pk}, tag='node')
        queryb.append(AutoGroup, with_node='node', project='*')
        all_auto_groups = queryb.all()
        self.assertEqual(
            len(all_auto_groups), 1,
            'There should be only one autogroup associated with the node just created'
        )
        self.assertEqual(all_auto_groups[0][0].label, autogroup_label)

        # A few more runs with the same label - it should not crash but append something to the group name
        for _ in range(10):
            options = [fhandle.name, '--auto-group', '--auto-group-label-prefix', autogroup_label]
            result = self.cli_runner.invoke(cmd_run.run, options)
            self.assertClickResultNoException(result)

            pk = int(result.output)
            _ = load_node(pk)  # Check if the node can be loaded

            queryb = QueryBuilder().append(Node, filters={'id': pk}, tag='node')
            queryb.append(AutoGroup, with_node='node', project='*')
            all_auto_groups = queryb.all()
            self.assertEqual(
                len(all_auto_groups), 1,
                'There should be only one autogroup associated with the node just created'
            )
            self.assertTrue(all_auto_groups[0][0].label.startswith(autogroup_label))
def process_play(processes, all_entries, timeout, wait):
    """Play (unpause) paused processes."""
    from aiida.orm import ProcessNode, QueryBuilder

    controller = get_manager().get_process_controller()

    if processes and all_entries:
        raise click.BadOptionUsage('all', 'cannot specify individual processes and the `--all` flag at the same time.')

    if not processes and all_entries:
        builder = QueryBuilder().append(ProcessNode, filters={'attributes.paused': True})
        processes = [entry[0] for entry in builder.all()]

    futures = {}
    for process in processes:

        if process.is_terminated:
            echo.echo_error('Process<{}> is already terminated'.format(process.pk))
            continue

        try:
            future = controller.play_process(process.pk)
        except communications.UnroutableError:
            echo.echo_error('Process<{}> is unreachable'.format(process.pk))
        else:
            futures[future] = process

    process_actions(futures, 'play', 'playing', 'played', wait, timeout)
def retrieve_alluncalculated_structures(structure_group_label, workchain_group_label=None):
    from aiida.orm import Group
    from aiida.orm import StructureData
    from aiida.orm import WorkChainNode
    from aiida.orm import QueryBuilder

    # First query: ids of structures in the group that already feed a WorkChainNode
    sqb = QueryBuilder()
    sqb.append(Group, filters={'label': structure_group_label}, tag='g')
    sqb.append(StructureData, project='id', tag='s', with_group='g')
    sqb.append(WorkChainNode, tag='job', with_incoming='s')
    filters = {}
    if workchain_group_label:
        filters = {'label': workchain_group_label}
    sqb.append(Group, with_node='job', filters=filters)

    ids_dealt_with = [_ for _, in sqb.distinct().all()] or [-1]  # prevent empty list

    # Now the main query: structures in the group that are not in the list above
    qb = QueryBuilder()
    qb.append(Group, filters={'label': structure_group_label}, tag='g')
    qb.append(StructureData,
              project='*',
              tag='s',
              with_group='g',
              filters={'id': {'!in': ids_dealt_with}})  # '!in' excludes already-calculated structures

    res = [x[0] for x in qb.all()]
    return res
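# Hedged usage sketch for retrieve_alluncalculated_structures() above. It assumes a loaded
# AiiDA profile and that both group labels exist in the database (the labels are hypothetical):
uncalculated_structures = retrieve_alluncalculated_structures(
    'my_structure_group', workchain_group_label='my_workchain_group')
print('{} structures have no WorkChainNode yet'.format(len(uncalculated_structures)))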
def get_vasp_calculation_pks(self) -> tuple:
    """
    Get VaspWorkChain pks.

    Returns:
        tuple: (relax_pks, static_pk).
    """
    qb = QueryBuilder()
    qb.append(Node, filters={'id': {'==': self._pk}})
    qb.append(WorkChainNode)  # extract vasp.verify WorkChainNodes
    qb.append(WorkChainNode, project=['id'])  # extract vasp.vasp WorkChainNodes
    qb.order_by({WorkChainNode: {'id': 'asc'}})
    vasp_pks = qb.all()

    relax_pks = None
    static_pk = None
    if 'nsw' not in load_node(vasp_pks[-1][0]).inputs.parameters.get_dict().keys():
        static_pk = vasp_pks[-1][0]
        relax_pks = [pk[0] for pk in vasp_pks[:-1]]
    else:
        warnings.warn('Could not find final static calculation in {}.'.format(self._pk))
        relax_pks = [pk[0] for pk in vasp_pks]

    return (relax_pks, static_pk)
def check_existence_wf(input_nodes, successful=True):
    """
    This method checks in the database whether a calculation with the given
    input nodes already exists. If yes, it returns the output nodes of that
    calculation.

    :param input_nodes: list of input nodes

    :returns: output nodes of the matching calculation, or None if no match is found
    """
    # TODO: some checks; input nodes could also be parsed in different formats
    inputnodesuuid = [node.uuid for node in input_nodes]

    qb = QueryBuilder()
    qb.append(JobCalculation, tag='calc', project='*', filters={'state': {'==': 'FINISHED'}})

    for idx, uuid in enumerate(inputnodesuuid):
        qb.append(Node, input_of='calc', filters={'uuid': uuid}, tag='input_{}'.format(idx))

    qb.order_by({JobCalculation: 'ctime'})
    res = qb.all()
    if res:
        return res[-1][0].get_outputs()
    else:
        return None
def get_basis_groups(cls, filter_elements=None, user=None):
    """Return all groups of type BasisSetFamily, possibly with some filters.

    :param filter_elements: A string or a list of strings.
        If present, return only the groups that contain one Basis for every
        element present in the list. Default=None, meaning that all families
        are returned.
    :param user: if None (default), return the groups for all users.
        If defined, it should be either a DbUser instance, or a string
        for the username (that is, the user email).
    """
    builder = QueryBuilder()
    builder.append(BasisSetFamily, tag="group", project="*")

    if user:
        builder.append(User, filters={"email": {"==": user}}, with_group="group")

    if isinstance(filter_elements, str):
        filter_elements = [filter_elements]

    if filter_elements is not None:
        builder.append(
            cls,
            filters={"attributes.element": {"in": filter_elements}},
            with_group="group",
        )

    builder.order_by({BasisSetFamily: {"id": "asc"}})

    return builder.all(flat=True)
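# Hedged usage sketch for get_basis_groups() above, assumed to be a classmethod on a basis-set
# data class (named ``BasisSetData`` here purely for illustration). It lists the labels of all
# BasisSetFamily groups that contain a basis for both Si and O:
families = BasisSetData.get_basis_groups(filter_elements=['Si', 'O'])
print([family.label for family in families])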
def get(cls, **kwargs):
    """
    Custom get for group which can be used to get a group with the given attributes

    :param kwargs: the attributes to match the group to

    :return: the group
    :rtype: :class:`aiida.orm.Group`
    """
    from aiida.orm import QueryBuilder

    filters = {}
    if 'type_string' in kwargs:
        if not isinstance(kwargs['type_string'], six.string_types):
            raise exceptions.ValidationError(
                'type_string must be {}, you provided an object of type {}'.format(str, type(kwargs['type_string'])))

    query = QueryBuilder()
    for key, val in kwargs.items():
        filters[key] = val

    query.append(cls, filters=filters)

    results = query.all()
    if len(results) > 1:
        raise exceptions.MultipleObjectsError("Found {} groups matching criteria '{}'".format(len(results), kwargs))
    if not results:
        raise exceptions.NotExistent("No group found matching criteria '{}'".format(kwargs))
    return results[0][0]
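# Hedged usage sketch for the custom ``get`` above: any group column can be passed as a keyword
# filter, and exactly one match is expected (the label used here is hypothetical):
group = Group.get(label='my_group_label')
print(group.pk, group.label)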
def process_play(processes, all_entries, timeout, wait):
    """Play (unpause) paused processes."""
    from aiida.orm import ProcessNode, QueryBuilder

    controller = get_manager().get_process_controller()

    if processes and all_entries:
        raise click.BadOptionUsage('all', 'cannot specify individual processes and the `--all` flag at the same time.')

    if not processes and all_entries:
        filters = CalculationQueryBuilder().get_filters(process_state=('created', 'waiting', 'running'), paused=True)
        builder = QueryBuilder().append(ProcessNode, filters=filters)
        processes = builder.all(flat=True)

    futures = {}
    for process in processes:

        if process.is_terminated:
            echo.echo_error(f'Process<{process.pk}> is already terminated')
            continue

        try:
            future = controller.play_process(process.pk)
        except communications.UnroutableError:
            echo.echo_error(f'Process<{process.pk}> is unreachable')
        else:
            futures[future] = process

    process_actions(futures, 'play', 'playing', 'played', wait, timeout)
def retrieve_alluncalculated_structures(structure_group_name, workchain_group_name=None):
    print("Warning! Warning! Untested!")

    # First query: ids of structures in the group that already feed a WorkChainNode
    sqb = QueryBuilder()
    sqb.append(Group, filters={'name': structure_group_name}, tag='g')
    sqb.append(StructureData, project='id', tag='s', member_of='g')
    sqb.append(WorkChainNode, tag='job', output_of='s')
    filters = {}
    if workchain_group_name:
        filters = {'name': workchain_group_name}
    sqb.append(Group, group_of='job', filters=filters)

    ids_dealt_with = [_ for _, in sqb.distinct().all()] or [-1]  # prevent empty list

    # Now the main query: structures in the group that are not in the list above
    qb = QueryBuilder()
    qb.append(Group, filters={'name': structure_group_name}, tag='g')
    qb.append(StructureData,
              project='*',
              tag='s',
              member_of='g',
              filters={'id': {'!in': ids_dealt_with}})  # '!in' excludes already-calculated structures

    res = [x[0] for x in qb.all()]
    return res
def test_autogroup(self):
    """Check if the autogroup is properly generated."""
    from aiida.orm import QueryBuilder, Node, AutoGroup, load_node

    script_content = textwrap.dedent("""\
        from aiida.orm import Data
        node = Data().store()
        print(node.pk)
        """)

    with tempfile.NamedTemporaryFile(mode='w+') as fhandle:
        fhandle.write(script_content)
        fhandle.flush()

        options = ['--auto-group', fhandle.name]
        result = self.cli_runner.invoke(cmd_run.run, options)
        self.assertClickResultNoException(result)

        pk = int(result.output)
        _ = load_node(pk)  # Check if the node can be loaded

        queryb = QueryBuilder().append(Node, filters={'id': pk}, tag='node')
        queryb.append(AutoGroup, with_node='node', project='*')
        all_auto_groups = queryb.all()
        self.assertEqual(
            len(all_auto_groups), 1,
            'There should be only one autogroup associated with the node just created'
        )
def test_no_autogroup(self):
    """Check that the autogroup is not generated if ``verdi run`` is asked not to."""
    from aiida.orm import QueryBuilder, Node, AutoGroup, load_node

    script_content = textwrap.dedent("""\
        from aiida.orm import Data
        node = Data().store()
        print(node.pk)
        """)

    with tempfile.NamedTemporaryFile(mode='w+') as fhandle:
        fhandle.write(script_content)
        fhandle.flush()

        options = [fhandle.name]  # Not storing an autogroup by default
        result = self.cli_runner.invoke(cmd_run.run, options)
        self.assertClickResultNoException(result)

        pk = int(result.output)
        _ = load_node(pk)  # Check if the node can be loaded

        queryb = QueryBuilder().append(Node, filters={'id': pk}, tag='node')
        queryb.append(AutoGroup, with_node='node', project='*')
        all_auto_groups = queryb.all()
        self.assertEqual(len(all_auto_groups), 0, 'There should be no autogroup generated')
def delete_many(self, filters):
    """
    Delete Logs based on ``filters``

    :param filters: similar to QueryBuilder filter
    :type filters: dict

    :return: (former) PKs of deleted Logs
    :rtype: list

    :raises TypeError: if ``filters`` is not a `dict`
    :raises `~aiida.common.exceptions.ValidationError`: if ``filters`` is empty
    """
    from aiida.orm import Log, QueryBuilder

    # Checks
    if not isinstance(filters, dict):
        raise TypeError('filters must be a dictionary')
    if not filters:
        raise exceptions.ValidationError('filters must not be empty')

    # Apply filter and delete found entities
    builder = QueryBuilder().append(Log, filters=filters, project='id')
    entities_to_delete = builder.all(flat=True)
    for entity in entities_to_delete:
        self.delete(entity)

    # Return list of deleted entities' (former) PKs for checking
    return entities_to_delete
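# Hedged usage sketch for delete_many() above (``log_collection`` stands for an instance of the
# class that defines the method, e.g. a backend Log collection; the filter value is hypothetical):
deleted_pks = log_collection.delete_many(filters={'loggername': 'my_logger'})
print('Deleted {} log entries'.format(len(deleted_pks)))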
def get_code(entry_point, computer):
    """Get local code.

    Sets up code for given entry point on given computer.

    :param entry_point: Entry point of calculation plugin
    :param computer: (local) AiiDA computer
    :return: The code node
    :rtype: :py:class:`aiida.orm.Code`
    """
    from aiida.orm import Code, QueryBuilder, Computer

    try:
        executable = EXECUTABLE[entry_point]
    except KeyError:
        raise KeyError("Entry point '{}' not recognized. Allowed values: {}".format(entry_point, list(EXECUTABLE.keys())))

    # Reuse an existing code on this computer if one with the right label exists
    qbuilder = QueryBuilder()
    qbuilder.append(Computer, filters={'id': computer.pk})
    qbuilder.append(Code, with_computer=Computer, filters={'label': executable})
    codes = [_[0] for _ in qbuilder.all()]
    if codes:
        return codes[0]

    # Otherwise set up a new code pointing at the local executable
    path = get_path_to_executable(executable)
    code = Code(
        input_plugin_name=entry_point,
        remote_computer_exec=[computer, path],
    )
    code.label = executable
    return code.store()
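# Hedged usage sketch for get_code() above. It assumes ``computer`` is an already-configured
# aiida.orm.Computer and that 'myplugin.calculation' is a key of the EXECUTABLE mapping
# (both are hypothetical placeholders):
code = get_code('myplugin.calculation', computer)
print(code.pk, code.label)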
def get_basis_family_names(self):
    """Get the list of all basis set family names to which the basis belongs."""
    query = QueryBuilder()
    query.append(BasisSetFamily, tag="group", project="label")
    query.append(
        self.__class__, filters={"id": {"==": self.id}}, with_group="group"
    )
    return query.all(flat=True)
def merge_comment(incoming_comment, comment_mode):
    """
    Merge comment according to comment_mode

    :return: New UUID if a new Comment should be created, else None.
    """
    # Get incoming Comment's UUID, 'mtime', and 'content'
    incoming_uuid = str(incoming_comment['uuid'])
    incoming_mtime = incoming_comment['mtime']
    incoming_content = incoming_comment['content']

    # Compare modification time 'mtime'
    if comment_mode == 'newest':
        # Get existing Comment's 'mtime' and 'content'
        builder = QueryBuilder().append(Comment, filters={'uuid': incoming_uuid}, project=['mtime', 'content'])
        if builder.count() != 1:
            raise exceptions.ImportValidationError('Multiple Comments with the same UUID: {}'.format(incoming_uuid))
        builder = builder.all()

        existing_mtime = builder[0][0]
        existing_content = builder[0][1]

        # Existing Comment is "newer" than imported Comment: KEEP existing
        if existing_mtime > incoming_mtime:
            return None

        # Existing Comment is "older" than imported Comment: OVERWRITE existing
        if existing_mtime < incoming_mtime:
            cmt = Comment.objects.get(uuid=incoming_uuid)
            cmt.set_content(incoming_content)
            cmt.set_mtime(incoming_mtime)
            return None

        # Existing Comment has the same modification time as the imported Comment
        # Check content. If the same, ignore Comment. If different, add as new Comment.
        if existing_mtime == incoming_mtime:
            if existing_content == incoming_content:
                # Ignore
                return None

            # ELSE: Add it as a new comment
            return get_new_uuid()

    # Overwrite existing Comment
    elif comment_mode == 'overwrite':
        cmt = Comment.objects.get(uuid=incoming_uuid)
        cmt.set_content(incoming_content)
        cmt.set_mtime(incoming_mtime)
        return None

    # Invalid comment_mode
    else:
        raise exceptions.ImportValidationError(
            "Unknown comment_mode value: {}. Should be either 'newest' or 'overwrite'".format(comment_mode))
def retrieve_numactive_calculations():
    """Return the number of CalcJobNodes that have not yet reached a terminal process state."""
    qb = QueryBuilder()
    qb.append(CalcJobNode,
              filters={'attributes.process_state': {'!in': ['finished', 'excepted', 'killed']}})
    return len(qb.all())
def get_data_aiida(projections, sliders_dict, quantities, plot_info):
    """Query the AiiDA database"""
    from figure.aiida import load_profile
    from aiida.orm import QueryBuilder, Dict

    load_profile()

    filters = {}

    def add_range_filter(bounds, label):
        # a bit of cheating until this is resolved
        # https://github.com/aiidateam/aiida_core/issues/1389
        # filters['attributes.' + label] = {'>=': bounds[0]}
        filters['attributes.' + label] = {'and': [{'>=': bounds[0]}, {'<': bounds[1]}]}

    for k, v in sliders_dict.items():
        # Note: filtering is costly, avoid if possible
        if not v.value == quantities[k]['range']:
            add_range_filter(v.value, k)

    qb = QueryBuilder()
    qb.append(
        Dict,
        filters=filters,
        project=['attributes.' + p for p in projections] + ['uuid', 'extras.cif_uuid'],
    )

    nresults = qb.count()
    if nresults == 0:
        plot_info.text = 'No matching COFs found.'
        return data_empty

    plot_info.text = '{} COFs found. Plotting...'.format(nresults)
    # x,y position
    x, y, clrs, uuids, names, cif_uuids = list(zip(*qb.all()))
    plot_info.text = '{} COFs queried'.format(nresults)

    x = list(map(float, x))
    y = list(map(float, y))
    cif_uuids = list(map(str, cif_uuids))
    uuids = list(map(str, uuids))

    if projections[2] == 'bond_type':
        # clrs = map(lambda clr: bondtypes.index(clr), clrs)
        clrs = list(map(str, clrs))
    else:
        clrs = list(map(float, clrs))

    return dict(x=x, y=y, uuid=cif_uuids, color=clrs, name=names)
def from_md5(cls, md5):
    """
    Return a list of all Basis Sets that match a given MD5 hash.

    Note that the hash has to be stored in the ``md5`` attribute,
    otherwise the basis will not be found.
    """
    qb = QueryBuilder()
    qb.append(cls, filters={"attributes.md5": {"==": md5}})
    return qb.all(flat=True)
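# Hedged usage sketch for from_md5() above, assumed to be a classmethod on the basis-set data
# class (``BasisSetData`` is an illustrative name; the hash value is a placeholder):
matches = BasisSetData.from_md5('0123456789abcdef0123456789abcdef')
print('Found {} basis sets with this MD5'.format(len(matches)))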
def retrieve_numactive_elastic():
    from aiida.orm import QueryBuilder
    from aiida.orm import CalcJobNode

    qb = QueryBuilder()
    qb.append(CalcJobNode,
              filters={
                  'attributes.process_state': {'!in': ['finished', 'excepted', 'killed']},
                  'attributes._process_label': 'ElasticWorkChain'
              })
    return len(qb.all())
def get_outputcalcs(node):
    q = QueryBuilder()
    q.append(WorkChainNode, filters={"uuid": node.uuid}, tag="worknode")
    q.append(WorkChainNode,
             tag="worknode2",
             with_incoming="worknode",
             project=["id", "ctime", "*"])
    q.order_by({"worknode2": "ctime"})
    child_nodes = [x[2] for x in q.all()]
    return child_nodes
def get_structure(label):
    from aiida.orm import QueryBuilder

    qb = QueryBuilder()
    qb.append(DataFactory('structure'), filters={'label': {'==': label}}, tag='structure')

    # Pick any structure with this label; here, just the first
    return qb.all()[0][0]
def get_all_node_links():
    """Get all Node links currently in the DB."""
    builder = QueryBuilder()
    builder.append(Node, project='uuid', tag='input')
    builder.append(Node, project='uuid', tag='output', edge_project=['label', 'type'], with_incoming='input')
    return builder.all()
def test_db_flushed(configure):
    from aiida.orm import Str, QueryBuilder

    test_string = 'this string should not be present when the test run starts'
    tag = 'Test string tag'

    qb = QueryBuilder()
    qb.append(Str, filters={'label': {'==': tag}})
    assert not qb.all()

    str_obj = Str(test_string)
    str_obj.label = tag
    str_obj.store()
def get_all_parents(node_pks, return_values=('id',)):
    """Get all the parents of given nodes.

    :param node_pks: one node pk or an iterable of node pks
    :return: a list of aiida objects with all the parents of the nodes
    """
    from aiida.orm import Node, QueryBuilder

    q_build = QueryBuilder()
    q_build.append(Node, tag='low_node', filters={'id': {'in': node_pks}})
    q_build.append(Node, with_descendants='low_node', project=return_values)
    return q_build.all()
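# Hedged usage sketch for get_all_parents() above (the node pks are hypothetical):
parents = get_all_parents([1234, 5678], return_values=('id', 'uuid'))
for parent_id, parent_uuid in parents:
    print(parent_id, parent_uuid)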
def query_aiida_1():
    """Statistics query for AiiDA 1.0 and above."""
    from aiida import load_profile
    from aiida.orm import QueryBuilder, Node

    load_profile()

    qb = QueryBuilder()
    qb.append(Node, project=['node_type', 'process_type'])
    return qb.all()
def from_tags(cls, name=None, element=None, version=None, functional=None, checksum=None):
    """
    Query database for potentials containing a set of given tags.

    To query the database at least one of the available tags has to be given.
    If multiple tags are defined only potentials matching **all** of the
    defined tags will be returned.

    :param name: fully qualified name of the potential (i.e. Li_sv, Li, Ge_sv_GW, P, ...)
    :type name: str
    :param element: name of the element associated with a given potential (i.e. Cl, Li, S, ...)
    :type element: str
    :param version: version (i.e. the creation date) of the potential in numerical 8-digit
        integer YYYYMMDD representation
    :type version: int
    :param functional: functional filter to query only for potentials associated with a
        specific functional. Allowed values are: lda_us, lda, lda_52, lda_54, pbe, pbe_52,
        pbe_54, pw91 and pw91_us
    :type functional: str
    :param checksum: the SHA-256 hash value associated with the contents of a potcar file
    :type checksum: str
    :return: a list of :class:`VaspPotcarFile` nodes in the database matching the given tags
    :rtype: list(:class:`VaspPotcarFile`)
    """
    # build the filters for the database query
    filters = {}
    if name is not None:
        filters.update({'attributes.name': {'==': name}})
    if element is not None:
        filters.update({'attributes.element': {'==': element}})
    if version is not None:
        filters.update({'attributes.version': {'==': version}})
    if checksum is not None:
        filters.update({'attributes.hash': {'==': checksum}})
    if functional is not None:
        filters.update({'attributes.functional': {'==': functional}})

    # set up the query for VaspPotcarFile objects with the generated filters
    if filters:
        database_potential_query = QueryBuilder()
        database_potential_query.append(cls, filters=filters)
    else:
        raise VaspPotcarFileError('Database query for potcar file nodes failed because no tags were given')

    # return results obtained by the query builder
    return [_ for [_] in database_potential_query.all()]
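# Hedged usage sketch for from_tags() above: query for all stored PBE silicon potentials,
# calling the classmethod through the VaspPotcarFile class named in the docstring:
silicon_potentials = VaspPotcarFile.from_tags(element='Si', functional='pbe')
for potential in silicon_potentials:
    print(potential.pk)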