def find_latest_uuid():
    """Return the UUID of the most recently created IterHarmonicApprox workflow."""
    IterHarmonicApprox = WorkflowFactory('phonopy.iter_ha')
    qb = QueryBuilder()
    qb.append(IterHarmonicApprox)
    qb.order_by({IterHarmonicApprox: {'ctime': 'desc'}})
    return qb.first()[0].uuid
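# Usage sketch for find_latest_uuid (an assumption-laden example, not part of
# the original code: it presumes a configured AiiDA profile containing at
# least one stored IterHarmonicApprox workflow from the aiida-phonopy plugin).
def _demo_find_latest_uuid():
    from aiida import load_profile
    load_profile()
    print('Latest IterHarmonicApprox UUID:', find_latest_uuid())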
def check_existence_wf(input_nodes, successful=True):
    """
    Check in the database whether a finished calculation with the given
    input nodes already exists. If so, return the output nodes of the most
    recent match.

    :param input_nodes: list of input nodes
    :returns: the output nodes of the matching calculation, or None
    """
    # TODO: add sanity checks; input nodes could be parsed in different formats
    input_node_uuids = [node.uuid for node in input_nodes]
    qb = QueryBuilder()
    qb.append(JobCalculation,
              tag='calc',
              project='*',
              filters={'state': {'==': 'FINISHED'}})
    for idx, uuid in enumerate(input_node_uuids):
        qb.append(Node,
                  input_of='calc',
                  filters={'uuid': uuid},
                  tag='input_{}'.format(idx))
    qb.order_by({JobCalculation: 'ctime'})
    res = qb.all()
    if res:
        return res[-1][0].get_outputs()
    return None
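# Usage sketch for check_existence_wf (hypothetical; this function targets the
# legacy AiiDA 0.x API, and `structure` and `params` are assumed to be
# previously stored input nodes):
def _demo_check_existence_wf(structure, params):
    outputs = check_existence_wf([structure, params])
    if outputs is None:
        print('No finished calculation with these inputs yet.')
    else:
        print('Reusing outputs:', outputs)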
def get_vasp_calculation_pks(self) -> tuple:
    """
    Get VaspWorkChain pks.

    Returns:
        tuple: (relax_pks, static_pk).
    """
    qb = QueryBuilder()
    qb.append(Node, filters={'id': {'==': self._pk}})
    qb.append(WorkChainNode)  # extract vasp.verify WorkChainNodes
    qb.append(WorkChainNode, project=['id'])  # extract vasp.vasp WorkChainNodes
    qb.order_by({WorkChainNode: {'id': 'asc'}})
    vasp_pks = qb.all()
    relax_pks = None
    static_pk = None
    # The final calculation is static if its parameters do not set 'nsw'.
    if 'nsw' not in \
            load_node(vasp_pks[-1][0]).inputs.parameters.get_dict().keys():
        static_pk = vasp_pks[-1][0]
        relax_pks = [pk[0] for pk in vasp_pks[:-1]]
    else:
        warnings.warn("Could not find a final static calculation in {}.".format(
            self._pk))
        relax_pks = [pk[0] for pk in vasp_pks]
    return (relax_pks, static_pk)
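# Hypothetical usage (assumes `analyzer` is an instance of the class that
# defines get_vasp_calculation_pks, wrapping a parent work chain via self._pk):
def _demo_get_vasp_calculation_pks(analyzer):
    relax_pks, static_pk = analyzer.get_vasp_calculation_pks()
    print('relax steps:', relax_pks, '| final static:', static_pk)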
def get_basis_groups(cls, filter_elements=None, user=None):
    """Return all names of groups of type BasisFamily, possibly with some filters.

    :param filter_elements: A string or a list of strings.
        If present, returns only the groups that contain one Basis for every
        element present in the list. Default=None, meaning that all families
        are returned.
    :param user: if None (default), return the groups for all users.
        If defined, it should be either a DbUser instance or a string for the
        username (that is, the user email).
    """
    builder = QueryBuilder()
    builder.append(BasisSetFamily, tag="group", project="*")

    if user:
        builder.append(User, filters={"email": {"==": user}}, with_group="group")

    if isinstance(filter_elements, str):
        filter_elements = [filter_elements]

    if filter_elements is not None:
        builder.append(
            cls,
            filters={"attributes.element": {"in": filter_elements}},
            with_group="group",
        )

    builder.order_by({BasisSetFamily: {"id": "asc"}})

    return builder.all(flat=True)
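# Hypothetical usage (assumes get_basis_groups is a classmethod on a basis-data
# class, here called BasisData; 'Si' and 'O' are example elements):
#
#     families = BasisData.get_basis_groups(filter_elements=['Si', 'O'])
#     print([family.label for family in families])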
def _find(entity_type: Entity, **kwargs) -> QueryBuilder:
    """Workhorse function to perform an AiiDA QueryBuilder query."""
    for key in kwargs:
        if key not in {"filters", "order_by", "limit", "project", "offset"}:
            raise ValueError(
                f"You supplied key {key}. _find() only takes the keys: "
                '"filters", "order_by", "limit", "project", "offset"')

    filters = kwargs.get("filters", {})
    order_by = kwargs.get("order_by", None)
    # Default to ordering by id, ascending, if no ordering is given.
    order_by = {entity_type: order_by} if order_by else {entity_type: {"id": "asc"}}
    limit = kwargs.get("limit", None)
    offset = kwargs.get("offset", None)
    project = kwargs.get("project", [])

    query = QueryBuilder(limit=limit, offset=offset)
    query.append(entity_type, project=project, filters=filters)
    query.order_by(order_by)

    return query
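# Usage sketch for _find (assumes a loaded AiiDA profile; StructureData is the
# standard AiiDA structure class, and the five most recent structures exist):
def _demo_find():
    from aiida.orm import StructureData
    query = _find(StructureData, order_by={'ctime': 'desc'}, limit=5)
    for (structure,) in query.iterall():
        print(structure.pk, structure.get_formula())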
def reuse_kpoints_grid(grid, lowest_pk=False):
    """
    Retrieve a previously stored ``KpointsData`` node matching the requested
    mesh. By default the node with the highest pk is returned; if no such
    node exists, a new (unstored) one is created.

    :param grid: the mesh to be retrieved, e.g. ``[4, 4, 4]``
    :param bool lowest_pk: if set to True, return the node with the lowest pk
        instead of the highest
    :returns: a KpointsData node representing the requested mesh
    """
    from aiida.orm import QueryBuilder
    from aiida.orm import KpointsData
    qbd = QueryBuilder()
    qbd.append(KpointsData,
               tag="kpoints",
               filters={
                   "attributes.mesh.0": grid[0],
                   "attributes.mesh.1": grid[1],
                   "attributes.mesh.2": grid[2]
               })
    order = "asc" if lowest_pk else "desc"
    qbd.order_by({"kpoints": [{"id": {"order": order}}]})
    if qbd.count() >= 1:
        return qbd.first()[0]
    kpoints = KpointsData()
    kpoints.set_kpoints_mesh(grid)
    return kpoints
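# Usage sketch for reuse_kpoints_grid (assumes a loaded profile; the returned
# node is unstored if no matching mesh was found in the database):
def _demo_reuse_kpoints_grid():
    kpoints = reuse_kpoints_grid([4, 4, 4])
    print('mesh:', kpoints.get_kpoints_mesh()[0], '| stored:', kpoints.is_stored)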
def get_outputcalcs(node):
    """Return the work chains called by ``node``, sorted by creation time."""
    q = QueryBuilder()
    q.append(WorkChainNode, filters={"uuid": node.uuid}, tag="worknode")
    q.append(WorkChainNode,
             tag="worknode2",
             with_incoming="worknode",
             project=["id", "ctime", "*"])
    q.order_by({"worknode2": "ctime"})
    child_nodes = [x[2] for x in q.all()]
    return child_nodes
def get_timesorted_basenodes(relaxworknode):
    """Return the base work chains called by ``relaxworknode``, sorted by ctime."""
    q = QueryBuilder()
    q.append(WorkChainNode,
             filters={"uuid": relaxworknode.uuid},
             tag="relaxworknode")
    q.append(WorkChainNode,
             with_incoming="relaxworknode",
             project=["id", "ctime", "*"],
             tag="calc")
    q.order_by({"calc": "ctime"})
    timesorted_scf = [x[2] for x in q.all()]
    return timesorted_scf
def get(self):
    from aiida.orm import QueryBuilder, Dict
    qb = QueryBuilder()
    qb.append(Dict, project=['id', 'ctime', 'attributes'], tag='pdata')
    qb.order_by({'pdata': {'ctime': 'desc'}})
    result = qb.first()

    # Results are returned as a dictionary; datetime objects are
    # serialized as ISO 8601.
    return dict(id=result[0], ctime=result[1].isoformat(), attributes=result[2])
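# Usage sketch (hypothetical: `get` above reads like a REST-resource method;
# `PropertiesResource` is an assumed name for its containing class):
#
#     latest = PropertiesResource().get()
#     print(latest['id'], latest['ctime'], latest['attributes'])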
def get_timesorted_scfs(worknode, relax_worknode=False):
    """Return the CalcJobNodes below ``worknode``, sorted by creation time.

    If ``relax_worknode`` is True, descend through one additional level of
    called work chains before collecting the calculation jobs.
    """
    q = QueryBuilder()
    q.append(WorkChainNode, filters={"uuid": worknode.uuid}, tag="worknode")
    output_tag = "worknode"
    if relax_worknode:
        # Descend one level: the relax work chain calls base work chains.
        output_tag = "worknode2"
        q.append(WorkChainNode, tag=output_tag, with_incoming="worknode")
    q.append(CalcJobNode,
             with_incoming=output_tag,
             project=["id", "ctime", "*"],
             tag="calc")
    q.order_by({"calc": "ctime"})
    timesorted_scf = [x[2] for x in q.all()]
    return timesorted_scf
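# Usage sketch (hypothetical; assumes `wc` is a stored relax-type work chain,
# so the query descends one extra level of called work chains):
def _demo_get_timesorted_scfs(wc):
    scfs = get_timesorted_scfs(wc, relax_worknode=True)
    print([calc.pk for calc in scfs])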
def get_timesorted_trajectories(relaxworkcalc):
    """Return the TrajectoryData nodes produced under ``relaxworkcalc``, sorted by ctime."""
    q = QueryBuilder()
    q.append(WorkChainNode,
             filters={"uuid": relaxworkcalc.uuid},
             tag="relaxworkcalc")
    q.append(WorkChainNode, tag="baseworkcalc", with_incoming="relaxworkcalc")
    q.append(CalcJob, with_incoming="baseworkcalc", tag="calc")
    q.append(TrajectoryData,
             with_incoming="calc",
             project=["id", "ctime", "*"],
             tag="traj")
    q.order_by({"traj": "ctime"})
    timesorted_trajectories = [x[2] for x in q.all()]
    return timesorted_trajectories
def search_pk(uuid):
    """Return the pks of the finished PhonopyWorkChain children of the
    IterHarmonicApprox workflow with the given uuid."""
    IterHarmonicApprox = WorkflowFactory('phonopy.iter_ha')
    qb = QueryBuilder()
    qb.append(IterHarmonicApprox,
              tag='iter_ha',
              filters={'uuid': {'==': uuid}})
    PhonopyWorkChain = WorkflowFactory('phonopy.phonopy')
    qb.append(PhonopyWorkChain, with_incoming='iter_ha')
    qb.order_by({PhonopyWorkChain: {'ctime': 'asc'}})
    pks = [n[0].pk for n in qb.all() if n[0].is_finished_ok]
    return pks
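# Usage sketch (assumes `find_latest_uuid` defined above and at least one
# finished IterHarmonicApprox run in the database):
def _demo_search_pk():
    pks = search_pk(find_latest_uuid())
    print('Finished PhonopyWorkChain children:', pks)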
def traj_to_atoms(traj, combine_ancesters=False, eng_key="enthalpy"):
    """
    Generate a list of ASE Atoms given an AiiDA TrajectoryData object.

    :param bool combine_ancesters: If True, try to combine the trajectories
        from ancestor calculations as well.
    :returns: A list of Atoms for the trajectory.
    """
    from ase import Atoms
    from ase.calculators.singlepoint import SinglePointCalculator
    from aiida.orm import QueryBuilder, Node, CalcJobNode
    from aiida_castep.common import OUTPUT_LINKNAMES

    # If a CalcJobNode is passed, select its output trajectory
    if isinstance(traj, CalcJobNode):
        traj = traj.outputs.__getattr__(OUTPUT_LINKNAMES['trajectory'])

    # Combine trajectories from ancestor calculations
    if combine_ancesters is True:
        qbd = QueryBuilder()
        qbd.append(Node, filters={"uuid": traj.uuid}, tag="traj")
        qbd.append(CalcJobNode, tag="ans", with_descendants="traj")
        qbd.order_by({"ans": "id"})
        calcs = [_[0] for _ in qbd.iterall()]
        atoms_list = []
        for calc in calcs:
            atoms_list.extend(
                traj_to_atoms(calc.outputs.__getattr__(
                    OUTPUT_LINKNAMES['trajectory']),
                              combine_ancesters=False,
                              eng_key=eng_key))
        return atoms_list

    forces = traj.get_array("forces")
    symbols = traj.get_array("symbols")
    positions = traj.get_array("positions")
    try:
        eng = traj.get_array(eng_key)
    except KeyError:
        # No energies available; still build the Atoms objects
        eng = [None] * len(positions)
    cells = traj.get_array("cells")
    atoms_traj = []
    for cell, pos, eng_, force in zip(cells, positions, eng, forces):
        atoms = Atoms(symbols=symbols, cell=cell, pbc=True, positions=pos)
        calc = SinglePointCalculator(atoms, energy=eng_, forces=force)
        atoms.set_calculator(calc)
        atoms_traj.append(atoms)
    return atoms_traj
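# Usage sketch (hypothetical; assumes `calc` is a finished CASTEP CalcJobNode
# from aiida-castep with a trajectory output, and that ASE is installed):
def _demo_traj_to_atoms(calc):
    images = traj_to_atoms(calc, combine_ancesters=True)
    print('Collected', len(images), 'images from the calculation history')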
def get_otfg_groups(cls, filter_elements=None, user=None):
    """
    Return all names of groups of type otfg, possibly with some filters.

    :param filter_elements: A string or a list of strings.
        If present, returns only the groups that contain one Usp for every
        element present in the list. Default=None, meaning that all families
        are returned.
    :param user: if None (default), return the groups for all users.
        If defined, it should be either a DbUser instance or a string for the
        user name (that is, the user email).
    """
    query = QueryBuilder()
    filters = {'type_string': {'==': cls.otfg_family_type_string}}
    query.append(OTFGGroup, filters=filters, tag='group', project='*')
    if user:
        query.append(User, filters={'email': {'==': user}}, with_group='group')

    if isinstance(filter_elements, str):
        filter_elements = [filter_elements]

    if filter_elements is not None:
        actual_filter_elements = {_.capitalize() for _ in filter_elements}
        # LIBRARY is a wild card
        actual_filter_elements.add("LIBRARY")
        query.append(
            cls,
            filters={'attributes.element': {'in': list(actual_filter_elements)}},
            with_group='group')

    query.order_by({'group': {'id': 'asc'}})
    return [_[0] for _ in query.all()]
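# Hypothetical usage (assumes get_otfg_groups is a classmethod on the OTFG/USP
# data class from aiida-castep, here called OTFGData; elements are examples):
#
#     groups = OTFGData.get_otfg_groups(filter_elements=['Ti', 'O'])
#     print([group.label for group in groups])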
def search(self, _=None):
    """Launch the search of structures in AiiDA database."""
    self.preprocess()

    qbuild = QueryBuilder()

    # If the date range is valid, use it for the search
    try:
        start_date = datetime.datetime.strptime(self.start_date_widget.value,
                                                '%Y-%m-%d')
        end_date = datetime.datetime.strptime(
            self.end_date_widget.value,
            '%Y-%m-%d') + datetime.timedelta(hours=24)
    # Otherwise revert to the standard (i.e. last 7 days)
    except ValueError:
        start_date = datetime.datetime.now() - datetime.timedelta(days=7)
        end_date = datetime.datetime.now() + datetime.timedelta(hours=24)
        self.start_date_widget.value = start_date.strftime('%Y-%m-%d')
        self.end_date_widget.value = end_date.strftime('%Y-%m-%d')

    filters = {}
    filters['ctime'] = {'and': [{'>': start_date}, {'<=': end_date}]}

    if self.mode.value == "uploaded":
        # Select structures that have no incoming process node, i.e. that
        # were uploaded directly rather than produced by a calculation.
        qbuild2 = QueryBuilder().append(
            self.query_structure_type, project=["id"],
            tag='structures').append(Node, with_outgoing='structures')
        processed_nodes = [n[0] for n in qbuild2.all()]
        if processed_nodes:
            filters['id'] = {"!in": processed_nodes}
        qbuild.append(self.query_structure_type, filters=filters)

    elif self.mode.value == "calculated":
        if self.drop_label.value == 'All':
            qbuild.append((CalcJobNode, WorkChainNode),
                          tag='calcjobworkchain')
        else:
            qbuild.append((CalcJobNode, WorkChainNode),
                          filters={'label': self.drop_label.value},
                          tag='calcjobworkchain')
        qbuild.append(self.query_structure_type,
                      with_incoming='calcjobworkchain',
                      filters=filters)

    elif self.mode.value == "edited":
        qbuild.append(CalcFunctionNode)
        qbuild.append(self.query_structure_type,
                      with_incoming=CalcFunctionNode,
                      filters=filters)

    elif self.mode.value == "all":
        qbuild.append(self.query_structure_type, filters=filters)

    qbuild.order_by({self.query_structure_type: {'ctime': 'desc'}})
    matches = {n[0] for n in qbuild.iterall()}
    matches = sorted(matches, reverse=True, key=lambda n: n.ctime)

    options = OrderedDict()
    options["Select a Structure ({} found)".format(len(matches))] = False

    for mch in matches:
        label = "PK: {}".format(mch.id)
        label += " | " + mch.ctime.strftime("%Y-%m-%d %H:%M")
        label += " | " + mch.get_extra("formula")
        label += " | " + mch.node_type.split('.')[-2]
        label += " | " + mch.label
        label += " | " + mch.description
        options[label] = mch

    self.results.options = options
def group_list(all_users, user_email, all_types, group_type, with_description,
               count, past_days, startswith, endswith, contains, node):
    """Show a list of existing groups."""
    # pylint: disable=too-many-branches,too-many-arguments,too-many-locals
    import datetime

    from aiida import orm
    from aiida.common import timezone
    from aiida.common.escaping import escape_for_sql_like
    from aiida.orm import Group, QueryBuilder, User
    from tabulate import tabulate

    query = QueryBuilder()
    filters = {}

    # Specify group types
    if not all_types:
        filters = {'type_string': {'==': group_type}}

    # Creation time
    if past_days:
        filters['time'] = {
            '>': timezone.now() - datetime.timedelta(days=past_days)
        }

    # Query for specific group names
    filters['or'] = []
    if startswith:
        filters['or'].append(
            {'label': {'like': '{}%'.format(escape_for_sql_like(startswith))}})
    if endswith:
        filters['or'].append(
            {'label': {'like': '%{}'.format(escape_for_sql_like(endswith))}})
    if contains:
        filters['or'].append(
            {'label': {'like': '%{}%'.format(escape_for_sql_like(contains))}})

    query.append(Group, filters=filters, tag='group', project='*')

    # Determine which user's groups to show (by default, the current user)
    if user_email:
        user = user_email
    else:
        user = orm.User.objects.get_default().email

    # Unless all users are requested, restrict to groups of that user
    if not all_users:
        query.append(User, filters={'email': {'==': user}}, with_group='group')

    # Query groups that contain a particular node
    if node:
        from aiida.orm import Node
        query.append(Node, filters={'id': {'==': node.id}}, with_group='group')

    query.order_by({Group: {'id': 'asc'}})
    result = query.all()

    projection_lambdas = {
        'pk': lambda group: str(group.pk),
        'label': lambda group: group.label,
        'type_string': lambda group: group.type_string,
        'count': lambda group: group.count(),
        'user': lambda group: group.user.email.strip(),
        'description': lambda group: group.description
    }

    table = []
    projection_header = ['PK', 'Label', 'Type string', 'User']
    projection_fields = ['pk', 'label', 'type_string', 'user']

    if with_description:
        projection_header.append('Description')
        projection_fields.append('description')

    if count:
        projection_header.append('Node count')
        projection_fields.append('count')

    for group in result:
        table.append([
            projection_lambdas[field](group[0])
            for field in projection_fields
        ])

    if not all_types:
        echo.echo_info(
            'If you want to see the groups of all types, please add -a/--all-types option'
        )
    echo.echo(tabulate(table, headers=projection_header))
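# Hypothetical command-line usage (this function backs a click command in the
# style of `verdi group list`; the exact flag names depend on the CLI
# definition that wraps it):
#
#     verdi group list --past-days 7 --contains relax --with-description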